query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence of 30) | negative_scores (sequence of 30) | document_score (string, 4–10 chars) | document_rank (2 classes) |
---|---|---|---|---|---|---|
Extract demo parameters from the model definition. | def get_pars(model_info, use_demo=False):
# Get the default values for the parameters
pars = dict((p.name, p.default) for p in model_info['parameters'])
# Fill in default values for the polydispersity parameters
for p in model_info['parameters']:
if p.type in ('volume', 'orientation'):
pars[p.name+'_pd'] = 0.0
pars[p.name+'_pd_n'] = 0
pars[p.name+'_pd_nsigma'] = 3.0
pars[p.name+'_pd_type'] = "gaussian"
# Plug in values given in demo
if use_demo:
pars.update(model_info['demo'])
return pars | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extractModelParam(self):\n copasi_filename = self.genPathCopasi(\"extractor\")\n self.recentModel = model.loada(self.antString, copasi_filename)\n return self.recentModel.parameters.copy().squeeze().to_dict()",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")",
"def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[1,0,100]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)\", mu)')",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")",
"def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.70,0.70]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"(1.+@0)\",Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(1.-@0)\",Afb)')",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')",
"def trainable_params(model, feature_extract):\n params_to_update = model.parameters()\n print(\"Params to learn:\")\n if feature_extract:\n params_to_update = []\n for name, param in model.named_parameters():\n if param.requires_grad == True:\n params_to_update.append(param)\n print(\"\\t\", name)\n else:\n for name, param in model.named_parameters():\n if param.requires_grad == True:\n print(\"\\t\", name)\n return params_to_update",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Dilu_ratio[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"0.5*(1.+@0*@1)\",Afb, Dilu_ratio)')\n self.modelBuilder.factory_('expr::Rmn(\"0.5*(1.-@0*@1)\",Afb, Dilu_ratio)')",
"def paramDetails(cls):\n return {\n 'dim': (10, 20, 2, 20),\n 'nIter': (1, 10, 2, 5),\n 'lamb': (.1, 1., .1, .05),\n 'alph': (30, 50, 5, 40)\n }",
"def get_embed_params(model) -> List:\r\n return [param for name, param in model.named_parameters() if \"embed\" in name]",
"def model_args(self) -> Optional[Dict]:\n return self.config.get('model_args')",
"def initial_params(self):\r\n prototype_samples = {}\r\n trace = poutine.trace(self.model).get_trace(self.args, self.kwargs)\r\n for name, node in trace.iter_stochastic_nodes():\r\n if (node['type'] == 'sample' and node['is_observed'] == False):\r\n prototype_samples[name] = node[\"value\"].detach()\r\n\r\n return prototype_samples",
"def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)",
"def params():\n return utils.Params('../experiments/base-model/params.json')",
"def default_parameters():\n prm = Parameters('lvad_model')\n\n prm.add('lvad_volume', 66.0)\n\n prm.add('alpha_slope', 0.0091)\n prm.add('alpha_intercept', 1.4)\n\n prm.add('beta_slope', -0.19)\n prm.add('beta_intercept', -1.9)\n\n prm.add('frequency', float())\n\n return prm",
"def parameters(self):",
"def gather_experiment_parameters(self):\n consts = win32com.client.constants.__dicts__[0]\n exp_params = [r for r in consts.keys() if len(r.split(\"EXP_\")) > 1]\n dm_params = [r for r in consts.keys() if len(r.split(\"DM_\")) > 1]\n self.app_param = {} \n self.appdoc_param = {} \n for p in exp_params:\n self.app_param.update({p:self.app.GetParam(consts[p])})\n\n for p in dm_params:\n #self.appdoc_param.update({p:self.app.GetParam(consts[p])}) bug? call appdoc? CP\n\n self.appdoc_param.update({p:self.app.GetParam(consts[p])})",
"def get_params(self):",
"def attributes(self):\n params = self.model.param_array\n return {'parameters': params}",
"def default_parameters():\n prm = Parameters('windkessel_model')\n\n prm.add('total_volume', float())\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm",
"def get_model_info(y_test, model_name, model_parameters, model_preprocessing, sequence_origin, primers_origin,\n taxonomy_level, selected_primer, test_size, logger) -> int:\n\n # Global information on the model\n logger.log(title='Parameter information for {}'.format(model_name))\n # Data Origins\n logger.log(subtitle='Data Origins')\n logger.log(text='Sequence origin: {}'.format(sequence_origin))\n logger.log(text='Primers origin: {}'.format(primers_origin))\n # Chosen levels for classification\n logger.log(subtitle='Chosen HyperVariable Region and Taxonomy Rank')\n logger.log(text='HyperVariable Region: {}'.format(str(selected_primer)))\n logger.log(text='Taxonomy Rank: {}'.format(str(taxonomy_level)))\n # Applied Preprocessing\n logger.log(subtitle='Preprocessing')\n logger.log(text='Preprocessing description: ' + model_preprocessing)\n # Model parameters\n logger.log(subtitle='Model parameters')\n logger.log(text='Parameter dict: {}'.format(str(model_parameters)))\n logger.log(text='Size of test set: {}'.format(len(y_test)))\n logger.log(text='Part of test size compared to total: {}'.format(test_size))\n\n return len(y_test)",
"def parameters(self):\n return self.model.parameters()",
"def densenet_params(model_name):\n params_dict = {\n # Coefficients: growth_rate, num_init_features, res\n 'densenet121': (32, 64, 224),\n 'densenet161': (48, 96, 224),\n 'densenet169': (32, 64, 224),\n 'densenet201': (32, 64, 224),\n }\n return params_dict[model_name]",
"def get_model_params(self):\n w1 = self.w1\n b1 = self.b1\n w2 = self.w2\n b2 = self.b2\n w3 = self.w3\n b3 = self.b3\n w4 = self.w4\n b4 = self.b4\n w5 = self.w5\n b5 = self.b5\n w6 = self.w6\n b6 = self.b6\n\n return w1, b1, w2, b2, w3, b3, w4, b4, w5, b5, w6, b6"
] | [
"0.64837956",
"0.6315743",
"0.6273915",
"0.6248899",
"0.62303966",
"0.6206407",
"0.61934096",
"0.6082434",
"0.6003491",
"0.5971099",
"0.5959919",
"0.5900141",
"0.5893965",
"0.5847058",
"0.5823629",
"0.58202666",
"0.5698285",
"0.56955665",
"0.568675",
"0.56371653",
"0.55673975",
"0.5549717",
"0.5514285",
"0.5450794",
"0.5446351",
"0.5445893",
"0.5425366",
"0.54116726",
"0.5404604",
"0.5403827"
] | 0.6731628 | 0 |
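
The `get_pars` document above walks `model_info['parameters']`, whose entries expose `name`, `default`, and `type`. A minimal, self-contained sketch with a hypothetical `model_info` (an assumption for illustration, not the real sasmodels structure) shows what the function consumes and what it returns:

```python
# Hypothetical stand-in for model_info; the real structure may differ.
from collections import namedtuple

Par = namedtuple("Par", "name default type")

model_info = {
    "parameters": [Par("radius", 50.0, "volume"), Par("sld", 1.0, "")],
    "demo": {"radius": 60.0},
}

def get_pars(model_info, use_demo=False):
    # Default values for every parameter.
    pars = {p.name: p.default for p in model_info["parameters"]}
    # Polydispersity defaults for volume/orientation parameters.
    for p in model_info["parameters"]:
        if p.type in ("volume", "orientation"):
            pars[p.name + "_pd"] = 0.0
            pars[p.name + "_pd_n"] = 0
            pars[p.name + "_pd_nsigma"] = 3.0
            pars[p.name + "_pd_type"] = "gaussian"
    # Overlay the demo values when requested.
    if use_demo:
        pars.update(model_info["demo"])
    return pars

print(get_pars(model_info, use_demo=True))
# {'radius': 60.0, 'sld': 1.0, 'radius_pd': 0.0, 'radius_pd_n': 0,
#  'radius_pd_nsigma': 3.0, 'radius_pd_type': 'gaussian'}
```
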
Explore the model using the Bumps GUI. | def explore(opts):
import wx
from bumps.names import FitProblem
from bumps.gui.app_frame import AppFrame
problem = FitProblem(Explore(opts))
is_mac = "cocoa" in wx.version()
app = wx.App()
frame = AppFrame(parent=None, title="explore")
if not is_mac: frame.Show()
frame.panel.set_model(model=problem)
frame.panel.Layout()
frame.panel.aui.Split(0, wx.TOP)
if is_mac: frame.Show()
app.MainLoop() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def demo(name):\n x, y = demo_data[name]\n\n# _models = [models.Const1D(0), models.Gaussian1D(3, 1, .3), models.Gaussian1D(3, 2, .3)]\n# _models = [models.Const1D(0)]\n _models = None\n\n mv = ModelBrowser(x, y, y * 0, _models)\n\n mv.show()\n\n from glue.qt import get_qapp\n get_qapp().exec_()",
"def explorative_manual(self):\n try:\n webbrowser.open(\"https://openeo.org/documentation/1.0/qgis/#exploring-a-backend\")\n except:\n pass",
"def explore(self):\n\t\tpause = False\n\t\tprint(\"He yo! I am on an exploration mission!\")\n\t\tself.driver.go()\n\n\t\twhile self.OK():\n\t\t\tself.sensors.update()\n\t\t\tself.obstacles.check(self.sensors, self.driver)\n\t\t\tself.location.update()\n\t\t\tself.checkForPOI()\n\t\t\tself.driver.go()",
"def visualize_model(self):\n if self.model is None:\n print(\"%s.visualize: implement me\" % (self.__class__.__name__))",
"def run(self):\r\n # Close any open models\r\n self.cleanUp()\r\n # Dynamically select file to preview\r\n self.selectTrial() \r\n # Add adjusted COM (RRA/CMC) model\r\n self.loadAdjustedModel()\r\n # Hide the markers from view\r\n self.hideModelMarkers()\r\n # Load CMC motion to model\r\n self.loadCMCMotion()",
"def show(self) -> None:",
"def main(self):\r\n\r\n #Train the GEN and DISC\r\n self.modelTrain.main()\r\n self.disp.show()",
"def loadModel(self):\n#FLAG: still waiting on boost image\n\t\tself.form = loader.loadModel(\"models/panda-model\")\n\t\tself.form.setScape(.005)\n\t\tself.form.reparentTo(render)",
"def review_model(model): \n \n diagnose_model(model)\n \n plot_param_coef(model)\n \n plot_p_values(model)\n \n return",
"def view(self):",
"def show(self):",
"def main():\n model = Calculator()",
"def printModel(self):\n print(self.model)",
"def show(self):\n pass",
"def detail(model_id: str = typer.Argument(..., help='Model ID')):\n with requests.get(f'{app_settings.api_v1_prefix}/model/{model_id}') as r:\n data = r.json()\n model_detailed_view(MLModel.parse_obj(data))",
"def browse(notebook):\n nb = select_notebook(notebook)\n click.launch('http://localhost:{0}/{1}/'.format(conf.PORT, nb.path.rel))",
"def explore(self, *args):",
"def show(self):\n\n pass",
"def explore(self, board, args):\n self.tree.explore(board, *args)",
"def run(self):\n model = self.model\n self.summary_cards(model)\n self.hospitalizations_chart(model)\n self.available_beds_chart(model)\n self.write_population_info(model)\n self.write_age_distribution_chart(model)\n self.write_fatalities_chart(model)\n self.write_healthcare_parameters(model)\n self.write_epidemiological_parameters(model)\n self.write_footnotes(model)",
"def tab_model(self, root):\n frame = Frame(root)\n frame.pack(side=TOP)\n label_frame = ttk.LabelFrame(frame)\n label_frame.pack(side=TOP)\n self.model_label = ttk.Label(label_frame, textvariable=self.model_label_text)\n self.model_label.pack(side=TOP)\n label_img = ttk.Label(label_frame, image=self.img_wait)\n label_img.pack()\n\n #loca = LocalModelCommunication(data= self.dataframe)\n\n frame = Frame(root)\n frame.pack(side=BOTTOM)\n Button(frame, text='Next>>', command=self.go_next_step4, width=16).pack(side=RIGHT)\n Button(frame, text='<<Back', command=self.go_back_step2, width=16).pack(side=LEFT)",
"def vp():\n if g.active.is_empty:\n txt = F('advise search') if g.model.is_empty else F('advise add')\n g.message = F('pl empty') + \" \" + txt\n\n else:\n g.browse_mode = \"normal\"\n g.model.songs = g.active.songs\n g.message = F('current pl')\n\n g.content = generate_songlist_display(zeromsg=g.message)",
"def explore(self):\n img_dir = self._config[\"image_dir\"]\n # Generate flight plan\n views = self.generate_explore_views()\n # Arm and takeoff\n self._uav.armDisarm(True)\n self._uav.takeoffAsync().join()\n current_pos = self._uav.simGetGroundTruthKinematics().position\n self._move_to(Vector3r(current_pos.x_val, current_pos.y_val, -60))\n # Fly to each view and take photos\n for idx, view in enumerate(views):\n self._observe_at_view(view, os.path.join(img_dir, \"%s.png\" % idx))",
"def goto_browse_list(self):\n\n self.browse.click()",
"def goto_browse_list(self):\n\n self.browse.click()",
"def main():\n \n cities, coordinates, speedlimits, adjlist = data_for_app()\n \n ui(cities, coordinates, speedlimits, adjlist)",
"def launch(self):",
"def model(self):",
"def model(self):",
"def model(self):"
] | [
"0.5889574",
"0.5794204",
"0.5676047",
"0.5631792",
"0.5585122",
"0.5573371",
"0.55641425",
"0.5525804",
"0.5505557",
"0.5493777",
"0.5486398",
"0.54668134",
"0.54602724",
"0.54134226",
"0.53638995",
"0.53637457",
"0.53484154",
"0.5342063",
"0.5337206",
"0.53012127",
"0.5286184",
"0.5274095",
"0.5271949",
"0.52669346",
"0.52669346",
"0.52545255",
"0.52162516",
"0.5210832",
"0.5210832",
"0.5210832"
] | 0.594703 | 0 |
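
The `explore` document above wraps the options in a Bumps `FitProblem` and displays it in an `AppFrame`, deferring `Show()` until after layout on macOS ("cocoa" builds). A bare wxPython skeleton of the same app/frame/MainLoop pattern (a sketch only; the bumps-specific `AppFrame` and `FitProblem` are not reproduced here) looks like:

```python
# Bare wxPython scaffold; the bumps GUI pieces are omitted.
import wx

app = wx.App()
frame = wx.Frame(parent=None, title="explore")
frame.Show()          # the original code defers this call on macOS/cocoa
app.MainLoop()
```
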
Draw a nice housing to hold the traffic lights | def draw_housing():
green.pensize(3)
green.color("black", "darkgrey")
green.begin_fill()
green.forward(80)
green.left(90)
green.forward(200)
green.circle(40, 180)
green.forward(200)
green.left(90)
green.end_fill() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()",
"def draw_housing():\n tess.pensize(3)\n tess.color(\"black\", \"darkgrey\")\n tess.begin_fill()\n tess.forward(80)\n tess.left(90)\n tess.forward(200)\n tess.circle(40, 180)\n tess.forward(200)\n tess.left(90)\n tess.end_fill()\n tess.hideturtle()",
"def draw_housing_2():\r\n tom.pensize(3)\r\n tom.color(\"black\", \"darkgrey\")\r\n tom.begin_fill()\r\n tom.forward(80)\r\n tom.left(90)\r\n tom.forward(200)\r\n tom.circle(40, 180)\r\n tom.forward(200)\r\n tom.left(90)\r\n tom.end_fill()\r\n tom.hideturtle()",
"def draw_heaters(ax, windtunnel):\n draw_heater(ax, windtunnel.heater_l)\n draw_heater(ax, windtunnel.heater_r)",
"def demo(self):\n self.clear()\n\n white = neo.Color(255, 255, 255)\n black = neo.Color(0, 0, 0)\n red = neo.Color(120, 0, 0)\n green = neo.Color(0, 255, 0)\n blue = neo.Color(0, 0, 255)\n pink = neo.Color(255, 102, 178)\n \n state = [[[0,0,0]] * self.width] * self.height\n stepsize = (1.0/self.n_leds)\n lednr = 0\n for x in range(self.width):\n for y in range(self.height):\n h_start = (0 + lednr * (2*stepsize)) % 1 #* (y*self.width + x)\n lednr = lednr + 1\n s_start = 0\n v_start = 1\n hsv = [h_start,s_start,v_start]\n state[x][y] = hsv\n self.set([x,y], hsv_to_neopixel_color(hsv[0], hsv[1], hsv[2]))\n\n tint = 0\n while(True): \n for x in range(self.width):\n for y in range(self.height):\n hsv = state[x][y]\n\n new_h = (hsv[0] + stepsize/60.0) % 1.0\n new_s = (hsv[1] + stepsize/20.0) % 1.0\n new_v = hsv[2] #+ stepsize/20.0) % 1.0\n\n state[x][y][0] = new_h\n state[x][y][1] = new_h\n state[x][y][2] = new_v\n\n self.set([x,y], hsv_to_neopixel_color(\n (translate(new_h, 0.0, 1.0, 0.0, 0.1) + tint) % 1.0, \n to_sine(new_s), \n new_v))\n \n tint = (tint + stepsize/20.0) % 1\n\n self.draw()\n sleep(1.0/40)",
"def draw_h(self):\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(50)",
"def led_theaterChaseRainbow(strip, wait_ms=25):\n for j in range(256):\n for q in range(3):\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, color_wheel((i+j) % 255))\n strip.show()\n gevent.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, 0)",
"def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up()\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down()",
"def Haut():\r\n X1, Y1, X2, Y2 = canvas.coords(boule)\r\n canvas.coords(boule,X1,Y1-20,X2,Y2-20)",
"def draw_walker(indx):\n chart_1.create_oval(hips[indx]-6, hips[indx+1]-6,hips[indx]+6, hips[indx+1]+6, fill= \"magenta\", width = 1, tag = 'line_1') \n chart_1.create_line(hips[indx], hips[indx+1], knee_a[indx], knee_a[indx+1], fill= \"blue\", width = 2, tag = 'line_1') \n chart_1.create_line(hips[indx], hips[indx+1], knee_b[indx], knee_b[indx+1], fill= \"green\", width = 2, tag = 'line_1') \n chart_1.create_line(knee_a[indx], knee_a[indx+1], heel_a[indx], heel_a[indx+1], fill= \"blue\", width = 2, tag = 'line_1') \n chart_1.create_line(knee_b[indx], knee_b[indx+1], heel_b[indx], heel_b[indx+1], fill= \"green\", width = 2, tag = 'line_1')",
"def place_headlamp_light():\n\n lx = 1.0\n ly = light_height\n lz = 2.0\n #light_position = [lx, ly, lz, 1.0]\n light_position = [0.0, 0.0, 0.0, 1]\n light_ambient = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_diffuse = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_specular = [ 1*brightness, 1*brightness, 1*brightness, 1.0 ]\n light_direction = [1.0, -1.0, 1.0, 0.0] # Light points down\n # glViewport(0, 0, win_width, win_height)\n # glMatrixMode(GL_PROJECTION)\n # glLoadIdentity()\n # gluPerspective(40.0, float(win_width) / float(win_height), 0.01, 100.0)\n #\n # glMatrixMode(GL_MODELVIEW)\n # glLoadIdentity()\n # glPushMatrix()\n glLightfv(GL_LIGHT4, GL_POSITION, light_position)\n\n\n\n #glLightfv(GL_LIGHT4, GL_POSITION, (GLfloat * 4)(0.0, 0.0, 0.0, 1))\n glLightfv(GL_LIGHT4, GL_AMBIENT, light_ambient)\n glLightfv(GL_LIGHT4, GL_DIFFUSE, light_diffuse)\n glLightfv(GL_LIGHT4, GL_SPECULAR, light_specular)\n\n # Constant attenuation (for distance, etc.)\n # Only works for fixed light locations! Otherwise disabled\n # glLightf(GL_LIGHT1, GL_CONSTANT_ATTENUATION, 2.0)\n # glLightf(GL_LIGHT1, GL_LINEAR_ATTENUATION, 0.0)\n # glLightf(GL_LIGHT1, GL_QUADRATIC_ATTENUATION, 0.0)\n\n glLightf(GL_LIGHT4, GL_CONSTANT_ATTENUATION, 3.0)\n glLightf(GL_LIGHT4, GL_LINEAR_ATTENUATION, 0.0)\n glLightf(GL_LIGHT4, GL_QUADRATIC_ATTENUATION, 0.0)\n\n # Create a spotlight effect (none at the moment)\n if headlamp_is_on:\n glLightf(GL_LIGHT4, GL_SPOT_CUTOFF, 30.0)\n glLightf(GL_LIGHT4, GL_SPOT_EXPONENT, 0.0)\n glLightfv(GL_LIGHT4, GL_SPOT_DIRECTION, light_direction)\n else:\n glLightf(GL_LIGHT4, GL_SPOT_CUTOFF, 180.0)\n glLightf(GL_LIGHT4, GL_SPOT_EXPONENT, 0.0)\n\n glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, use_lv)\n glLightModeli(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE)\n # Try GL_TRUE - but then watch what happens when light is low\n\n glEnable(GL_LIGHT4)\n\n # This part draws a SELF-COLORED sphere (in spot where light is!)\n glPushMatrix()\n glTranslatef(lx, ly, lz)\n glDisable(GL_LIGHTING)\n glColor3f(brightness, brightness, brightness)\n glutSolidSphere(0.5, 20, 20)\n glEnable(GL_LIGHTING)\n glPopMatrix()",
"def render_shore_noise(self, points):\n point_list = [(x + 50, -y + 800) for x, y in points] # Up is -ve\n pygame.draw.line(self.surface, CYAN, (50, 800), (410, 800), 1) # x-axis\n pygame.draw.line(self.surface, CYAN, (50, 800), (50, 700), 1) # y-axis\n\n for x, y in point_list: # points\n self.surface.set_at((int(x), int(y)), RED)",
"def visualize(self):\n import matplotlib.pyplot as plt\n import numpy as np\n\n plt.figure()\n sw_ = np.linspace(0.0, 1.0, 50)\n plt.plot(sw_, self.krw(sw_), label=\"Water\")\n plt.plot(sw_, self.kro(sw_), label=\"Oil\")\n plt.xlabel(\"Water saturation\")\n plt.ylabel(\"Relative permeability\")\n plt.legend()",
"def home(xh, yh, h):\n rect(screen, (150, 75, 0), (xh, yh, 150 * h, 100 * h), 0) # house\n polygon(screen, (255, 0, 0), [(xh + 150 * h / 2, yh - 100 * h / 2), (xh, yh), (xh + 150 * h, yh)], 0) # roof\n rect(screen, (0, 191, 255), (xh + 50 * h, yh + 30 * h, 50 * h, 30 * h), 0) # window",
"def draw_wheel():\r\n\touter_radius = 1\r\n\tthickness = .4\r\n\tif wireframe:\r\n\t\tglutWireTorus(thickness,outer_radius - thickness,8,8)\r\n\telse:\r\n\t\tglutSolidTorus(thickness,outer_radius - thickness,8,8)\r\n\t\tglPushAttrib(GL_CURRENT_BIT)\r\n\t\tglPushAttrib(GL_LIGHTING_BIT)\r\n\t\tglDisable(GL_LIGHTING)\r\n\t\tglColor3f(0,0,0)\r\n\t\tglutWireTorus(thickness+.01,outer_radius - thickness + 0.005,8,8)\t\r\n\t\tglPopAttrib()\r\n\t\tglPopAttrib()",
"def shade(l):\n t.color(\"black\",\"gray\")\n t.right(180)\n t.begin_fill()\n for i in range(4):\n t.circle(l / 2, 90)\n t.right(180)\n t.end_fill()",
"def draw_teapot(size):\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glEnable(GL_DEPTH_TEST)\n glClear(GL_DEPTH_BUFFER_BIT)\n \n # draw red teapot\n glMaterialfv(GL_FRONT,GL_AMBIENT,[0,0,0,0])\n glMaterialfv(GL_FRONT,GL_DIFFUSE,[0.5,0.0,0.0,0.0])\n glMaterialfv(GL_FRONT,GL_SPECULAR,[0.7,0.6,0.6,0.0])\n glMaterialf(GL_FRONT,GL_SHININESS,0.25*128.0)\n # Needed to add this because it complained about not calling it before\n # calling glutsolidteapot.\n glutInit()\n glutSolidTeapot(size)",
"def dimmer_switch(turtle, color):\n turtle.fillcolor(color + \"4\")",
"def flicker_lights(self):\n print 'Lights Set'",
"def draw_bg (self):\n self.health = max(0.0, min(1.0, (self.healthsteps + self.mud.value) / self.healthsteps))\n healthycolor = (0x11, 0x22, 0x44)\n pollutedcolor = (0x66, 0x66, 0)\n self.watercolor = [int((a - b) * self.health + b)\n for a,b in zip(healthycolor, pollutedcolor)]\n colorname = \"rgb({},{},{})\".format(*self.watercolor)\n w, h = self.width, self.height\n self.draw.rectangle((0,0,w-1,self.level_px-1), \"#000000\")\n self.draw.rectangle((0,self.level_px,w-1,h-1), colorname)",
"def led_theaterChase(strip, color, wait_ms=50, iterations=5):\n for j in range(iterations):\n for q in range(3):\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, color)\n strip.show()\n gevent.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, 0)",
"def main():\n\n # connect to the hue bridge\n bridge = phue.Bridge()\n bridge.connect() # throw an exception if connection was not established\n\n tracker = beat_tracker.BeatTracker()\n tracker.start()\n try:\n\n # obtain a list of lights to control\n lights = get_lights(bridge)\n\n x = 0\n ids = [l.light_id for l in lights]\n\n while True:\n\n time_between_beats = (60.0 / tracker.tempo)\n\n combos = [\n [1, 0],\n [1, 254],\n [1, 0],\n [500, 254],\n ]\n x = (x + 1) % 4\n\n temp, _brightness = combos[x]\n\n adjust = int(_brightness * (int(tracker.volume / 1500.0) * 2))\n\n if tracker.volume < 1000:\n adjust = 0\n\n brightness = int(min(adjust, 254))\n on = bool(tracker.volume > 800)\n command = {\"ct\": temp, \"bri\": brightness, \"transitiontime\": 1, \"on\": on}\n bridge.set_light(ids, command)\n\n if time_between_beats > 1:\n time.sleep(1)\n else:\n time.sleep(time_between_beats)\n\n finally:\n tracker.stop()",
"def brighter_switch(turtle, color):\n turtle.fillcolor(color + \"1\")",
"def theaterChaseRainbow(strip, wait_ms=30):\n for j in range(256):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, wheel((i+j) % 255))\n strip.show()\n time.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, 0)",
"def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up() #Raise pen for movement\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down() #lower pen for drawing",
"def drawEyes(win, winW, winH):\n# leftEye = Oval(Point(300-120-40, 300-80-20), Point(300-120+40, 300-80+20))\n leftEye = Oval(Point(winW/2-winW/5-winW/15, winH/2-winH/7.5-winH/30),\n Point(winW/2-winW/5+winW/15, winH/2-winH/7.5+winH/30))\n leftEye.setFill(\"white\")\n leftEye.setOutline(\"black\")\n leftEye.draw(win)\n leftIris = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/40)\n leftIris.setOutline(\"black\")\n leftIris.setFill(\"darkcyan\")\n leftIris.draw(win)\n leftPupil = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/120)\n leftPupil.setOutline(\"black\")\n leftPupil.setFill(\"black\")\n leftPupil.draw(win)\n rightEye = leftEye.clone()\n rightEye.move(winW/2-winW/10,0)\n rightEye.draw(win)\n rightIris = leftIris.clone()\n rightIris.move(winW/2-winW/10,0)\n rightIris.draw(win)\n rightPupil = leftPupil.clone()\n rightPupil.move(winW/2-winW/10,0)\n rightPupil.draw(win)",
"def draw_t(self):\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.up()\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)",
"def shade_hills(slope, aspect):\n diffuse = shade_hills_onelight(slope, aspect, 315.0, 30.0)\n specular = shade_hills_onelight(slope, aspect, 315.0, 85.0)\n \n # sharpen specular shading on slopes\n specular = numpy.power(specular, 4)\n\n # 40% diffuse and 60% specular\n shaded = .4 * diffuse + (.6 * specular)\n \n return shaded",
"def draw_long_shape():\n turtle.fillcolor('blue')\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.forward(50)\n draw_block()\n turtle.back(150)",
"def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()"
] | [
"0.757192",
"0.7526881",
"0.7487201",
"0.6984903",
"0.6798373",
"0.66007304",
"0.60361487",
"0.599414",
"0.5903113",
"0.58729994",
"0.5810986",
"0.5800031",
"0.57894075",
"0.578831",
"0.5784329",
"0.5757554",
"0.5753362",
"0.5752967",
"0.5747853",
"0.5739003",
"0.5734231",
"0.5732691",
"0.57227784",
"0.57009876",
"0.5697092",
"0.5695448",
"0.5679806",
"0.5679047",
"0.56529444",
"0.56526697"
] | 0.7746798 | 0 |
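
Both the housing code above and the state machine in the next row reference globals (`wn`, `green`, `orange`, `red`) created elsewhere. A minimal setup sketch using the standard `turtle` module (my assumption about that setup, not text taken from the dataset):

```python
import turtle

wn = turtle.Screen()

def make_lamp(ypos):
    # One circular turtle per lamp, stacked vertically inside the housing.
    lamp = turtle.Turtle()
    lamp.penup()
    lamp.shape("circle")
    lamp.shapesize(3)
    lamp.color("black")
    lamp.goto(0, ypos)
    return lamp

red, orange, green = make_lamp(200), make_lamp(120), make_lamp(40)
# advance_state_machine() from the next row can then recolor these lamps
# and reschedule itself with wn.ontimer(...).
```
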
Modify the previous program so that we can still see the remaining lights as the signal changes: green; green and orange; orange; red. We number these states 0, 1, 2, 3 and advance them with a timer, as the exercise requires. | def advance_state_machine():
global state_num
if state_num == 0:
red.color("black")
orange.color("black")
green.color("green")
state_num = 1
wn.ontimer(advance_state_machine, 3000)
elif state_num == 1:
red.color("black")
orange.color("orange")
green.color("green")
state_num = 2
wn.ontimer(advance_state_machine, 1000)
elif state_num == 2:
red.color("black")
orange.color("orange")
green.color("black")
state_num = 3
wn.ontimer(advance_state_machine, 1000)
else:
red.color("red")
orange.color("black")
green.color("black") # Show up our green
state_num = 0
wn.ontimer(advance_state_machine, 2000) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ControlLights(state):\n for led in (RED,YELLOW,GREEN):\n GPIO.output(LED[led],state[led])\n time.sleep(FLASH_TIME)",
"def light_standby():\n for led in leds:\n led.on()\n\n rgb_driver.pulse(on_color=(scale[\"R\"], scale[\"G\"], scale[\"B\"]), off_color=(0,0,0))",
"def advance_state_machine():\n\n global state_num \n if state_num == 0: #transition from state 0 to state 1\n green_light.hideturtle()\n amber_light.showturtle()\n state_num = 1\n wn.ontimer(advance_state_machine, 1000)\n elif state_num == 1: # transition from state 1 to state 2\n amber_light.hideturtle()\n red_light.showturtle()\n state_num = 2\n wn.ontimer(advance_state_machine, 1000)\n else:\n red_light.hideturtle()\n green_light.showturtle()\n state_num = 0",
"def control_lights(state):\n for led in (RED, AMBER, GREEN):\n GPIO.output(LED[led],state[led])",
"def main():\n # color = rb.Color.BLUE.value\n # move_to_color(color)\n infared_sensor()\n\n # WHITE/RED does not work same with the BLUE/GREEN going down",
"def advance_state_machine():\n global state_num\n\n if state_num == 0:\n brighter_switch(jess, \"orange\")\n dimmer_switch(ness, \"red\")\n dimmer_switch(tess, \"green\")\n state_num = 1\n\n elif state_num == 1:\n brighter_switch(ness, \"red\")\n dimmer_switch(jess, \"orange\")\n dimmer_switch(tess, \"green\")\n\n state_num = 2\n\n else:\n brighter_switch(tess, \"green\")\n dimmer_switch(ness, \"red\")\n dimmer_switch(jess, \"orange\")\n\n state_num = 0",
"def flicker_lights(self):\n print 'Lights Set'",
"def power_up(self):\n t_end = time.time() + 3\n while time.time() < t_end:\n self.light_led(5)\n self.light_led(6)",
"def transition(red, green, blue, new_red, new_green, new_blue):\n while (red != new_red) or (green != new_green) or (blue != new_blue):\n while red != new_red:\n if red > new_red:\n red = red - 1\n break\n else:\n red = red + 1\n break\n while green != new_green:\n if green > new_green:\n green = green - 1\n break\n else:\n green = green + 1\n break\n while blue != new_blue:\n if blue > new_blue:\n blue = blue - 1\n break\n else:\n blue = blue + 1\n break\n logi_led.logi_led_set_lighting(red, green, blue)\n time.sleep(0.01)",
"def run_states(self):\n if (self.state == \"off\"):\n if (self.in_power.value == 1):\n self.off_to_on()\n \n elif self.state == \"on\":\n if (self.in_power.value == 0):\n self.any_to_off()\n elif (self.in_alert.value == 1):\n self.on_to_alert()\n \n elif self.state == \"alert\":\n if (self.in_power.value == 0):\n self.any_to_off()\n elif (self.in_alert.value == 0):\n self.alert_to_was_alert()\n\n elif self.state == \"was_alert\":\n if (self.in_power.value == 0):\n self.any_to_off()",
"def update_leds():\n # zaświeć\n for i in range(current_leds):\n volume_leds[i].configure(background=TURN_ON_COLOR)\n\n # zgaś\n for i in range(current_leds, LEDS):\n volume_leds[i].configure(background=TURN_OFF_COLOR)",
"def nextLight():\n global light\n pin.setAllOutPinsLow()\n light += 1\n light %= len(traffic_lights)\n print traffic_colors[light]\n pin.setOutPinHigh(traffic_lights[light])",
"def run(self):\n global moving_line\n global change_requested\n global thick_1_a, thick_1_b, thick_1_c, thick_2_a, thick_2_b, thick_2_c\n global lap_to_go\n global lights\n line = moving_line\n if line == 1:\n thick_1_a = 1\n lights[2].write(1)\n thick_1_b = -1\n lights[1].write(0)\n time.sleep(self.interval)\n thick_1_b = 1\n lights[1].write(1)\n thick_1_c = -1\n lights[0].write(0)\n thick_2_a = -1\n lights[5].write(0)\n thick_2_b = 1\n lights[4].write(1)\n thick_2_c = 1\n lights[3].write(1)\n line = 2\n else:\n thick_2_a = 1\n lights[5].write(1)\n thick_2_b = -1\n lights[4].write(0)\n time.sleep(self.interval)\n thick_2_b = 1\n lights[4].write(1)\n thick_2_c = -1\n lights[3].write(0)\n thick_1_a = -1\n lights[2].write(0)\n thick_1_b = 1\n lights[1].write(1)\n thick_1_c = 1\n lights[0].write(1)\n line = 1\n\n moving_line = line\n change_requested = 0\n lap_to_go = lap_period_sec",
"def do_light(self,count):\n if (count == ''):\n count=\"1\"\n for i in range(0,int(count)):\n light=RCtime(12)\n print \"*\"*(light/4000)+\": %d\" % light",
"def event_m20_11_5000():\n \"\"\"State 0,2: [Preset] Living Altar_SubState\"\"\"\n assert event_m20_11_x82()\n \"\"\"State 1: Rerun\"\"\"\n RestartMachine()",
"def demo(self):\n self.clear()\n\n white = neo.Color(255, 255, 255)\n black = neo.Color(0, 0, 0)\n red = neo.Color(120, 0, 0)\n green = neo.Color(0, 255, 0)\n blue = neo.Color(0, 0, 255)\n pink = neo.Color(255, 102, 178)\n \n state = [[[0,0,0]] * self.width] * self.height\n stepsize = (1.0/self.n_leds)\n lednr = 0\n for x in range(self.width):\n for y in range(self.height):\n h_start = (0 + lednr * (2*stepsize)) % 1 #* (y*self.width + x)\n lednr = lednr + 1\n s_start = 0\n v_start = 1\n hsv = [h_start,s_start,v_start]\n state[x][y] = hsv\n self.set([x,y], hsv_to_neopixel_color(hsv[0], hsv[1], hsv[2]))\n\n tint = 0\n while(True): \n for x in range(self.width):\n for y in range(self.height):\n hsv = state[x][y]\n\n new_h = (hsv[0] + stepsize/60.0) % 1.0\n new_s = (hsv[1] + stepsize/20.0) % 1.0\n new_v = hsv[2] #+ stepsize/20.0) % 1.0\n\n state[x][y][0] = new_h\n state[x][y][1] = new_h\n state[x][y][2] = new_v\n\n self.set([x,y], hsv_to_neopixel_color(\n (translate(new_h, 0.0, 1.0, 0.0, 0.1) + tint) % 1.0, \n to_sine(new_s), \n new_v))\n \n tint = (tint + stepsize/20.0) % 1\n\n self.draw()\n sleep(1.0/40)",
"def all_off():\n Leds.red_left.brightness = 0\n Leds.red_right.brightness = 0\n Leds.green_left.brightness = 0\n Leds.green_right.brightness = 0\n Leds.blue_left.brightness = 0\n Leds.blue_right.brightness = 0",
"def begin_blue_state(self):\n if not self.state['return']:\n self.state['blue'] = True\n self.image, _ = self.blue_images.get_image()\n self.blue_start = time.get_ticks()\n self.sound_manager.stop()\n self.sound_manager.play_loop('blue')",
"def all_off():\n print(\"Climate is within set parameters; toggling systems off if any are on\")\n GPIO.output(HEATPIN, RELAYOFF)\n GPIO.output(COOLPIN, RELAYOFF)\n GPIO.output(FANPIN, RELAYOFF)\n time.sleep(30)",
"def demo(s_delay=2):\n for i in flags:\n print(i)\n show_leds(flag=i)\n show_display(flag=i)\n utime.sleep_ms(s_delay * 1000)\n leds.clear()\n with display.open() as disp:\n disp.clear().update()\n disp.close()",
"def fireworks():\n\n sleep_speed = 0.025\n\n # Turn on white\n PYGLOW.color(\"white\", 60)\n sleep(sleep_speed)\n # Turn on blue\n PYGLOW.color(\"blue\", 60)\n sleep(sleep_speed)\n # Fade white\n PYGLOW.color(\"white\", 50)\n sleep(sleep_speed)\n # Turn on green\n PYGLOW.color(\"green\", 60)\n sleep(sleep_speed)\n # Fade white and blue\n PYGLOW.color(\"white\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 50)\n sleep(sleep_speed)\n # Turn on yellow\n PYGLOW.color(\"yellow\", 60)\n sleep(sleep_speed)\n # Fade white, blue, and green\n PYGLOW.color(\"white\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 50)\n sleep(sleep_speed)\n # Turn on orange\n PYGLOW.color(\"orange\", 60)\n sleep(sleep_speed)\n # Fade white, blue, green, and yellow\n PYGLOW.color(\"white\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 50)\n sleep(sleep_speed)\n # Turn on red\n PYGLOW.color(\"red\", 60)\n sleep(sleep_speed)\n # Fade white, blue, green, yellow, and orange\n PYGLOW.color(\"white\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 50)\n sleep(sleep_speed)\n # Fade all\n PYGLOW.color(\"white\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"blue\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 40)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 50)\n sleep(sleep_speed)\n # Fade blue, green, yellow, orange, and red\n PYGLOW.color(\"blue\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"green\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 30)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 40)\n sleep(sleep_speed)\n # Fade green, yellow, orange, and red\n PYGLOW.color(\"green\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"yellow\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 20)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 30)\n sleep(sleep_speed)\n # Fade yellow, orange, and red\n PYGLOW.color(\"yellow\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"orange\", 10)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 20)\n sleep(sleep_speed)\n # Fade orange, and red\n PYGLOW.color(\"orange\", 0)\n sleep(sleep_speed)\n PYGLOW.color(\"red\", 10)\n sleep(sleep_speed)\n # Fade red\n PYGLOW.color(\"red\", 0)\n sleep(sleep_speed)\n # Pause 1 second before the next one\n sleep(1)",
"def power_down(self):\n t_end = time.time() + 3\n while time.time() < t_end:\n self.light_led(4)\n self.light_led(6)",
"def led(red: int, green: int, blue: int, /) -> None:",
"def event_m10_29_1010():\r\n \"\"\"State 0,2: [Preset] Switch global flags related to gimmick doors_SubState\"\"\"\r\n assert (event_m10_29_x20(z40=10, z41=20, z42=30, z43=10292020, z44=10292010, z45=10292000, z46=10291000,\r\n z47=10291010))\r\n \"\"\"State 1: Rerun\"\"\"\r\n RestartMachine()\r\n Quit()",
"def program(self,c, loops, states, delays):\r\n # states=\"*w\", delays= \"*v\",\r\n \r\n self.board.reset()\r\n states = [int(x) for x in states]\r\n states = [s & (2**24 - 1) for s in states]\r\n loops = [int(x) for x in loops]\r\n \r\n self.board.program(loops, states, delays)\r\n self.board.start()\r\n \r\n return True",
"def theaterChase(strip, color, state, maxBrightness, wait_ms=50):\n for q in range(3):\n for i in range(0, strip.numPixels() / 2, 3):\n strip.setPixelColor(i + q, color)\n strip.setPixelColor(strip.numPixels() - (i + q), color)\n if (STATE != state):\n break\n brightness = int((LED_BRIGHTNESS * maxBrightness) / 255)\n strip.setBrightness(brightness)\n strip.show()\n time.sleep(wait_ms / 1000.0)\n for i in range(0, strip.numPixels() / 2, 3):\n strip.setPixelColor(i + q, 0)\n strip.setPixelColor(strip.numPixels() - (i + q), 0)\n if (STATE != state):\n break\n if (STATE != state):\n off(strip)\n break",
"def test_light_interface(light_name='head_green_light'):\n l = Lights()\n rospy.loginfo(\"All available lights on this robot:\\n{0}\\n\".format(\n ', '.join(l.list_all_lights())))\n rospy.loginfo(\"Blinking Light: {0}\".format(light_name))\n on_off = lambda x: 'ON' if l.get_light_state(x) else 'OFF'\n rospy.loginfo(\"Initial state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn off light\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # turn on light\n l.set_light_state(light_name, True)\n rospy.sleep(1)\n rospy.loginfo(\"New state: {0}\".format(on_off(light_name)))\n # reset output\n l.set_light_state(light_name, False)\n rospy.sleep(1)\n rospy.loginfo(\"Final state: {0}\".format(on_off(light_name)))",
"def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False",
"def change_timer_color(timez):\r\n\r\n if timez > 90:\r\n stopwatch.configure(bg='green')\r\n if 31 <= timez <= 90:\r\n stopwatch.configure(bg='yellow')\r\n elif 0 <= timez <= 30:\r\n stopwatch.configure(bg='red')",
"def dimmer_switch(turtle, color):\n turtle.fillcolor(color + \"4\")"
] | [
"0.72298914",
"0.6718091",
"0.67174655",
"0.6687307",
"0.65495807",
"0.64910793",
"0.6281461",
"0.62786615",
"0.62331176",
"0.6230395",
"0.62226593",
"0.61985695",
"0.619567",
"0.6166632",
"0.6125692",
"0.6088462",
"0.60882527",
"0.608752",
"0.60666007",
"0.6051194",
"0.6024952",
"0.59979653",
"0.5997446",
"0.59778327",
"0.5945928",
"0.5931832",
"0.59054154",
"0.5891457",
"0.5851372",
"0.583927"
] | 0.7125816 | 1 |
The check to be performed on an AST (a design file). | def check(self, input, ast):
assert False # Must be redefined | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_advance_ast_avaliable():\n assert _test_advanced_ast_presence()",
"def check(self):\n try:\n if self.is_compiled:\n # skip compiled (Cythonized) files because pyanalyze will misinterpret the\n # AST in some cases (for example, if a function was cdefed)\n return []\n if self.module is None:\n # If we could not import the module, other checks frequently fail.\n return self.all_failures\n with qcore.override(self, \"state\", VisitorState.collect_names):\n self.visit(self.tree)\n with qcore.override(self, \"state\", VisitorState.check_names):\n self.visit(self.tree)\n # This doesn't deal correctly with errors from the attribute checker. Therefore,\n # leaving this check disabled by default for now.\n self.show_errors_for_unused_ignores(ErrorCode.unused_ignore)\n self.show_errors_for_bare_ignores(ErrorCode.bare_ignore)\n if self.unused_finder is not None and not self.has_file_level_ignore():\n self.unused_finder.record_module_visited(self.module)\n except node_visitor.VisitorError:\n raise\n except Exception as e:\n self.show_error(\n None,\n \"%s\\nInternal error: %r\" % (traceback.format_exc(), e),\n error_code=ErrorCode.internal_error,\n )\n # Recover memory used for the AST. We keep the visitor object around later in order\n # to show ClassAttributeChecker errors, but those don't need the full AST.\n self.tree = None\n self._lines.__cached_per_instance_cache__.clear()\n self._argspec_to_retval.clear()\n return self.all_failures",
"def check(self, node):\n # do the necessary setup/arguments and call self.visit (node, args)\n self.visit(node, defined=set())",
"def type_check(self):\n self.link_all_refs()\n self.check_ast()",
"def check(self):\n badTransformName = list()\n\n # prog = re.compile(\"^[A-Z]{4}[0-9]{2}_C_[0-9]{3}_GAST$\")\n prog = re.compile(\"^[A-Z]{4}[0-9]{2}_C_[0-9]{3}_G[A-Z]{3}$\")\n progFx = re.compile(\"^[A-Z]{4}[0-9]{2}_C_[0-9]{3}_G[A-Z]{2}$\")\n\n for assetTransform in pm.ls(type=\"gAsset\"):\n nodename = assetTransform.getParent().nodeName(stripNamespace=True)\n if not prog.match(nodename):\n if not progFx.match(nodename):\n badTransformName.append(assetTransform)\n\n if not badTransformName:\n self.status = \"OK\"\n else:\n self.status = self.errorMode\n self.errorNodes = badTransformName\n for mesh in badTransformName:\n self.addError(\n \"%s is not a legal asset node transform name\" % mesh)\n self.errorMessage = \"%s illegal asset node transform name(s)\" % (\n len(badTransformName))",
"def check():",
"def _validate_for_expr(self, astr,debug=False):\n import os\n import ast\n try: tree=ast.parse(astr)\n except SyntaxError: raise ValueError(\n \"Could not parse code expression : \\\"{}\\\" \".format(astr)+\n \" \")\n for node in ast.walk(tree):\n if isinstance(node,(ast.Module,\n ast.Expr,\n ast.Dict,\n ast.Str,\n ast.Attribute,\n ast.Num,\n ast.Name,\n ast.Load,\n ast.BinOp,\n ast.Compare,\n ast.Eq,\n ast.Import,\n ast.alias,\n ast.Call\n )): \n continue\n if (isinstance(node,ast.Call)\n and isinstance(node.func, ast.Attribute)\n and node.func.attr == 'datetime'): \n continue\n if debug:\n attrs=[attr for attr in dir(node) if not attr.startswith('__')]\n print(node)\n for attrname in attrs:\n print(' {k} ==> {v}'.format(k=attrname,v=getattr(node,attrname)))\n raise ValueError(\"Bad node {} in {}. This code is not allowed to execute\".format(node,astr))\n return True",
"def check(self, runtime):",
"def check(self) -> None:",
"def check(self, input, node):\n assert False # Must be redefined",
"def validate(self, node):",
"def check(self, description: Description) -> bool:",
"def check(self):\n pass",
"def check(self):\n pass",
"def check(self):\n pass",
"def check(self):\n pass",
"def check(self, dgraph, **params):\n raise NotImplementedError",
"def check_ast(self):\n map(lambda x: self.check_func(x) if x.kind is PTN.FUN_DEC else None,\n self.tree)",
"def checkAstIntegrity(instruction):\n try:\n for se in instruction.getSymbolicExpressions():\n str(se.getAst())\n\n for x, y in instruction.getLoadAccess():\n str(y)\n\n for x, y in instruction.getStoreAccess():\n str(y)\n\n for x, y in instruction.getReadRegisters():\n str(y)\n\n for x, y in instruction.getWrittenRegisters():\n str(y)\n\n for x, y in instruction.getReadImmediates():\n str(y)\n\n return True\n\n except:\n return False",
"def check_analysis(self, analysis, ecosystem, package, version):\n try:\n assert analysis is not None, \"Analysis not available\"\n assert \"result\" in analysis, \"Can not find the 'result' node.\"\n result = analysis[\"result\"]\n self.check_recommendation_part(result)\n self.check_data_part(result, ecosystem, package, version)\n return \"OK\"\n except Exception as e:\n return \"Failed: \" + str(e)",
"def check(self):\n raise NotImplementedError",
"def test_validate_valid_crisis(self):\r\n assert self.crisis_tree != 0",
"def validate_syntax(self):\n resolves_present = False\n uses_present = False\n if not self.wf.get('workflow', None):\n pu.fail('A workflow block must be present\\n')\n else:\n for _, wf_block in dict(self.wf['workflow']).items():\n if wf_block.get('resolves', None):\n resolves_present = True\n if not resolves_present:\n pu.fail('[resolves] attribute must be present\\n')\n if not self.wf.get('action', None):\n pu.fail('Atleast one action block must be present\\n')\n else:\n for _, a_block in self.wf['action'].items():\n if a_block.get('uses', None):\n uses_present = True\n if not uses_present:\n pu.fail('[uses] attribute must be present\\n')",
"def run_check(self, ctx: RunContext):\n params = ctx.get_params(\"mccabe\")\n options = ctx.options\n if options:\n params.setdefault(\"max-complexity\", options.max_complexity)\n\n McCabeChecker.max_complexity = int(params.get(\"max-complexity\", 10))\n McCabeChecker._error_tmpl = \"%r is too complex (%d)\"\n number = McCabeChecker._code\n for lineno, offset, text, _ in McCabeChecker(ctx.ast, ctx.filename).run():\n ctx.push(\n col=offset + 1,\n lnum=lineno,\n number=number,\n text=text,\n type=\"C\",\n source=\"mccabe\",\n )",
"def test_validate_valid_org(self):\r\n assert self.org_tree != 0",
"def check(self, code, filename, ignore=None):\n\n class FakeLoc:\n lineno = 0\n\n try:\n fname = ''\n if filename is not None:\n fname = filename.encode('utf8') or ''\n code = code.encode('utf8') + b'\\n'\n tree = compile(code, fname, 'exec', _ast.PyCF_ONLY_AST)\n except (SyntaxError, IndentationError):\n return self._handle_syntactic_error(code, filename)\n except ValueError as error:\n return [PyFlakesError(filename, FakeLoc(), 'E', error.args[0]), []]\n else:\n # the file is syntactically valid, check it now\n w = pyflakes.Checker(tree, filename, ignore)\n\n return w.messages",
"def verify(code=None, filename=DEFAULT_STUDENT_FILENAME, report=MAIN_REPORT,\n muted=False):\n if code is None:\n code = report.submission.main_code\n filename = report.submission.main_file\n if report.submission.load_error:\n source_file_not_found(filename, None)\n report[TOOL_NAME]['success'] = False\n return False\n if code.strip() == '':\n blank_source()\n report[TOOL_NAME]['success'] = False\n try:\n parsed = ast.parse(code, filename)\n report[TOOL_NAME]['ast'] = parsed\n except SyntaxError as e:\n syntax_error(e.lineno, e.filename, code, e.offset, e,\n sys.exc_info(), report=report, muted=muted)\n report[TOOL_NAME]['success'] = False\n report[TOOL_NAME]['ast'] = ast.parse(\"\")\n report[TOOL_NAME]['success'] = True\n return report[TOOL_NAME]['success']",
"def do_check(self):\n res = self.entity.do_check(self.context)\n if res:\n return self.RES_OK, 'Node check succeeded.'\n else:\n return self.RES_ERROR, 'Node check failed.'",
"def __can_be_part_of_assert(cfunc, ctree_item):\n # type: (idaapi.cfunc_t, idaapi.ctree_item_t) -> bool\n\n if ctree_item.citype != idaapi.VDI_EXPR:\n return False\n\n expression = ctree_item.it.to_specific_type\n if expression.op != idaapi.cot_obj:\n return False\n\n parent = cfunc.body.find_parent_of(expression).to_specific_type\n if parent.op != idaapi.cot_call or parent.x.op != idaapi.cot_obj:\n return False\n\n obj_ea = expression.obj_ea\n if not helper.is_code_ea(obj_ea) and idc.get_str_type(obj_ea) == idc.STRTYPE_C:\n str_potential_name = idc.get_strlit_contents(obj_ea)\n if type(str_potential_name) is not str:\n # convert bytes to str (python 3)\n str_potential_name = str_potential_name.decode('ascii')\n return idaapi.is_valid_typename(str_potential_name)\n return False",
"def verify_expression(tree, nam):\r\n o, A, B=tree\r\n if o in Internal_Functions and not A and not B:\r\n print(\"\\n\", Err_no_arg) # *** Syntax error: function without arguments *** \r\n print(o)\r\n print(nam)\r\n raise ReferenceError\r\n \r\n if Space in nam: # expressions should not contain spaces\r\n bad=False\r\n inside=False\r\n for c in nam: # check all spaces are inside quotes\r\n if c==Quote: inside=not inside\r\n if c==Space and not inside: bad=True\r\n if bad:\r\n print(\"\\n\", Err_space_in_name) # *** Syntax error: incorrect expression *** \r\n print(nam, Col)\r\n print(tree)\r\n raise ReferenceError"
] | [
"0.648272",
"0.6393194",
"0.62702",
"0.613588",
"0.60619",
"0.60233366",
"0.5977728",
"0.597741",
"0.59475607",
"0.5865792",
"0.58510435",
"0.58335364",
"0.58133894",
"0.58133894",
"0.58133894",
"0.58133894",
"0.57385635",
"0.5718586",
"0.5697994",
"0.56697005",
"0.5647083",
"0.5627275",
"0.5623563",
"0.5585968",
"0.5572305",
"0.5545799",
"0.55307484",
"0.5519392",
"0.5514039",
"0.5487828"
] | 0.6982063 | 0 |
>>> a,b = getnextbyte(binascii.unhexlify('1210233445'), 1) >>> print(hex(a)) 0x23 >>> print(b) 3 >>> a,b = getnextbyte(binascii.unhexlify('1210233445'), 0) >>> print(hex(a)) 0x12 >>> print(b) 1 | def getnextbyte(data, index):
v = data[index]
index += 1
if v == 0x10:
v = data[index]
index += 1
return v,index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def next_byte(self):\r\n return self.next_bytes(1)",
"def _hexbyte(b):\n return _hexchar(b[0]) * 16 + _hexchar(b[1])",
"def _read_next_bytes(\n fid, num_bytes, format_char_sequence, endian_character=\"<\"\n ):\n data = fid.read(num_bytes)\n return struct.unpack(endian_character + format_char_sequence, data)",
"def getInt(string, radix, needHexPrefix):\n return (0)",
"def get_byte(self):\n if self.num == None:\n # since extract_number gives 4 bytes, we return one byte at a time and update a count\n # the count is used to return the appropriate byte \n self.num = self.extract_number()\n self.count = 4\n mask = 1 << (self.count-1)\n byte = self.num & (mask)\n self.count = self.count - 1\n if self.count == 0:\n self.num = None\n return byte",
"def get_bytes(data: str) -> int:\n data = str(data)\n return int(len(sanatize_hex(data)) / 2)",
"def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character=\"<\"):\n data = fid.read(num_bytes)\n return struct.unpack(endian_character + format_char_sequence, data)",
"def next_byte(data_socket):\r\n return data_socket.recv(1)",
"def next_byte(data_socket):\r\n return data_socket.recv(1)",
"def hex2int(r: str) -> int:",
"def getbytes(data, offset):\n if data[offset] == 0xFF and data[offset + 1] == 0xFF:\n return b'', offset + 2 # Blank string\n length, offset = getint(data, offset, 2) # 2-byte length\n value = data[offset:offset + length]\n return value, (offset + length)",
"def _extract_first_bytes(self, func):\n return str(Web3.toHex(Web3.sha3(text=func)[0:4]))[2:]",
"def bytes_increment(b):\n assert isinstance(b, six.binary_type)\n b = bytearray(b) # Used subset of its API is the same on Python 2 and 3.\n for i in range(len(b) - 1, -1, -1):\n if b[i] != 0xff:\n b[i] += 1\n return bytes(b[:i+1])\n return None",
"def BYTE(dump,a):\n return int((dump.ROM[4 * (a // 4)] >> (8 * (a%4))) & 0xFF)",
"def Decodingfunc(Codebyte):\r\n Decodedint=struct.unpack('b',Codebyte)[0]\r\n N=0 #number of repetitions\r\n L=0 # length of single/multiple sequence\r\n if Decodedint >= 0: #single\r\n N = 1\r\n L = Decodedint+1\r\n else: #multiple\r\n L = -Decodedint//16+1\r\n N = -Decodedint-(L-1)*16+1\r\n #print(\"N =\",N,\" L =\",L)\r\n return (N,L)",
"def getNextValue(self):\n\n if self.si >= len(self.str):\n return -1\n\n # First, count the number of zero chunks until we come to a nonzero chunk.\n zeroCount = 0\n b = ord(self.str[self.si])\n if self.zero_expands:\n bmask = (1 << self.n) - 1\n bv = b & (bmask << (self.bi - self.n))\n while bv == 0:\n zeroCount += 1\n self.bi -= self.n\n if self.bi <= 0:\n self.si += 1\n self.bi = 8\n if self.si >= len(self.str):\n return -1\n\n b = ord(self.str[self.si])\n bv = b & (bmask << (self.bi - self.n))\n\n # Infer from that the number of chunks, and hence the number\n # of bits, that make up the value we will extract.\n numChunks = (zeroCount + 1)\n bitCount = numChunks * self.n\n\n # OK, now we need to extract the next bitCount bits into a word.\n result = 0\n while bitCount >= self.bi:\n mask = (1 << self.bi) - 1\n value = (b & mask)\n result = (result << self.bi) | value\n bitCount -= self.bi\n\n self.si += 1\n self.bi = 8\n if self.si >= len(self.str):\n b = 0\n break\n\n b = ord(self.str[self.si])\n\n if bitCount > 0:\n # A partial word in the middle of the byte.\n bottomCount = self.bi - bitCount\n assert bottomCount > 0\n mask = ((1 << bitCount) - 1)\n value = ((b >> bottomCount) & mask)\n result = (result << bitCount) | value\n self.bi -= bitCount\n\n return result",
"def read_byte():\n try:\n result = ord(self._buffer[read_cursor[0]])\n read_cursor[0] += 1\n return result\n except IndexError:\n raise ASN1WantMore('Premature end of input.')",
"def _from_bytes(value, dummy, _int=int, _hexlify=_hexlify):\n return _int(_hexlify(value), 16)",
"def readHexByte(cls, in_):\n return (cls.readHexDigit(in_) << 4) | cls.readHexDigit(in_)",
"def readbyte(f):\n return struct.unpack(\">B\", f.read(1))[0]",
"def bytes_to_int(bs):\n v = 0\n p = 0\n for b in reversed(bs):\n v += b * (2 ** p)\n p += 8\n return v",
"def a2b(a):\n return binascii.unhexlify(a)",
"def _bytes_to_int32(b):\n\n\treturn b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24)",
"def decode_bytes_hex(buf, pos):\n value, pos = decode_bytes(buf, pos)\n return binascii.hexlify(value), pos",
"def pick_byte2(input):\n val = int(input) >> 8\n val = val & 255\n return val",
"def unpack_bytes(n:bytes) -> (int, bytes):\n\tresult = 0\n\tn = n.decode()\n\twhile len(n) > 0:\n\t\tbyte, n = ord(n[0]), n[1:]\n\t\tcont, byte = byte & 1, byte >> 1\n\t\t#continuation bit is lsb, and then get rid of it.\n\t\tresult <<= 7\n\t\tresult += byte\n\t\tif cont:\n\t\t\tbreak\n\telse:\n\t\traise ValueError('Varint passed to unpack did not terminate!')\n\treturn result, n.encode()",
"def byte(b):\n if isinstance(b, basestring):\n if len(b) == 1:\n return ord(b)\n raise ValueError()\n elif isinstance(b, int):\n if 0 <= b < 256:\n return b\n raise ValueError()\n raise TypeError()",
"def next_greater(s):\n assert s\n # Based on the Plyvel `bytes_increment()` function.\n s2 = s.rstrip('\\xff')\n return s2 and (s2[:-1] + chr(ord(s2[-1]) + 1))",
"def unpackb(value):\n return load(io.BytesIO(value))",
"def getByte(self, int: int, int2: int) -> int:\n ..."
] | [
"0.6322323",
"0.611362",
"0.60862136",
"0.6062787",
"0.59771585",
"0.5948756",
"0.5931488",
"0.5912274",
"0.5912274",
"0.58405614",
"0.5801832",
"0.5792458",
"0.57521725",
"0.57397574",
"0.5722311",
"0.569004",
"0.568189",
"0.56619096",
"0.5647636",
"0.56459695",
"0.56325865",
"0.56178635",
"0.55951315",
"0.55750513",
"0.55614454",
"0.5548691",
"0.55450684",
"0.554091",
"0.5515064",
"0.5486513"
] | 0.71101743 | 0 |
Setting should fail due to name conflict. | def test_set_conflict(self):
with RandomKeyTmpFile() as fname:
command_line = self._MENU + [self._KEYNAME, "--keyfile-path", fname]
RUNNER(command_line)
self.check_error(StratisCliNameConflictError, command_line, _ERROR) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_set_bad_name(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4, shape=(4, 4))\n with pytest.raises(TypeError):\n dim.name = 4",
"def test_set_invalid_scope(self):\n setting_name = 'user_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)",
"def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)",
"def test_name_false(self):\r\n self.name = False",
"def test_set_value_invalid(self):\r\n name = 'option1'\r\n option = self.config.options[name]\r\n value = 'invalid'\r\n initial_value = self.config.values[name]\r\n\r\n self.assertRaises(InvalidOptionValueError, self.config.set_value, name, option, value)\r\n self.assertEqual(self.config.values[name], initial_value)",
"def refine_conflict(self):\n self._raise_not_supported()",
"def test_set_value_not_str(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.assertRaises(TypeError, lambda: self.helper.set_value([\"Hello\", \"World!\"]))",
"def test_overwrite_raises_an_error_by_default(self):\n set_default_for_missing_keys('test')\n\n with pytest.raises(ValueError) as e:\n set_default_for_missing_keys(None)\n\n # confirm that error message correctly indicates the fix/resolution\n assert 'pass `overwrite=True`' in str(e.value)",
"def test_set_invalid_scope_project(self):\n setting_name = 'project_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)",
"def test_set_invalid_app_name(self):\n setting_name = 'project_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': 'NON-EXISTING-APP',\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)",
"def test_set_invalid_value(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.agent_name\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def test_wrong_mode(self):\n self.assertRaises(ComponentErrorsEx, self.dp.setRewindingMode, 'FOO')",
"def test_set_agent_incorrect_value(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `not_agent_name` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent.not_agent_name\", \"new_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def testFilenameSetBadType(self):\n def setFilename():\n self.mr.filename = 12345\n\n self.assertRaises(\n TypeError,\n setFilename\n )",
"def testFunctionName(self):\n with self.assertRaises(AttributeError):\n Manager.Properties.Version.Set(get_object(TOP_OBJECT), {})",
"def test_name_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.name = 'bar'\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)",
"def test_bad_property_setting(self):\n s = State(substance=\"water\")\n with pytest.raises(AttributeError):\n # Should be lowercase p\n s.TP = Q_(400.0, \"K\"), Q_(101325.0, \"Pa\")",
"def test_set_invalid_scope_project_user(self):\n setting_name = 'project_user_str_setting'\n url = reverse('projectroles:api_user_setting_set')\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': 'value',\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)",
"def testProtocolSetBadType(self):\n def setProtocol():\n self.mr.protocol = 12345\n\n self.assertRaises(\n TypeError,\n setProtocol\n )",
"def testSetWithBadString(self):\n def setSat():\n self.node.sat = 'banana'\n\n self.assertRaises(\n TypeError,\n setSat\n )",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def test_setting_failure(self):\n with mock.patch.object(ip_lib, 'set_ip_nonlocal_bind', return_value=1):\n ip_lib.set_ip_nonlocal_bind_for_namespace('foo', value=1)",
"def test_Alpha_setter_invalid(self):\r\n self.assertRaises(ValueError, setattr, self.mc, 'Alpha', -5)\r\n self.assertRaises(ValueError, setattr, self.mc, 'Alpha', 2)",
"def test_set_nested_attribute_not_allowed(self):\n path = \"skills.dummy.behaviours.dummy.config.behaviour_arg_1\"\n new_value = \"new_dummy_name\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Attribute `behaviours.dummy.config.behaviour_arg_1` is not allowed to be updated!\"\n )",
"def test_format_name_attribute():\n formatter = TabularOutputFormatter(format_name=\"plain\")\n assert formatter.format_name == \"plain\"\n formatter.format_name = \"simple\"\n assert formatter.format_name == \"simple\"\n\n with pytest.raises(ValueError):\n formatter.format_name = \"foobar\"",
"def check_set_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")",
"def test_set_invalid_project_type(self):\n setting_name = 'project_category_bool_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': True,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(AppSetting.objects.count(), 0)",
"def test_useless_alias():\n with pytest.raises(ValueError, match='duplicate'):\n alias('name', ('name',))",
"def on_cls_setting_myname(value):\n raise NotImplementedError()",
"def on_setting_myname(self, value):\n raise NotImplementedError()"
] | [
"0.64577913",
"0.62993205",
"0.6259956",
"0.6173455",
"0.6092742",
"0.6091933",
"0.6072255",
"0.60711807",
"0.60665685",
"0.603069",
"0.6024282",
"0.6014658",
"0.5978283",
"0.5969654",
"0.59553856",
"0.5953395",
"0.5953062",
"0.59395117",
"0.590152",
"0.5899473",
"0.58943343",
"0.58864605",
"0.58713806",
"0.58436096",
"0.5827674",
"0.58167595",
"0.5803298",
"0.5799774",
"0.57958794",
"0.57781714"
] | 0.7105276 | 0 |
Setting should fail due to the length of the key. | def test_set_key_too_long(self):
with RandomKeyTmpFile(128) as fname:
command_line = self._MENU + [self._KEYNAME, "--keyfile-path", fname]
self.check_error(StratisCliEngineError, command_line, _ERROR) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_invalid_keys(self):\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"this has spaces\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with spaces did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"\\x10control\\x02characters\\x11\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with control characters did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"a\" * (SERVER_MAX_KEY_LENGTH + 1), 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"long key did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(u\"unicode\\u4f1a\", 1)\n\t\texcept TypeError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"unicode key did not raise ValueError\")",
"def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True",
"def testKeyInfoTooLong(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey',\n keyInfo='xxxxx')",
"def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))",
"def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))",
"def corrupt(self, key):\n rand_bytes = random.getrandbits(8)\n byte_str = bytes([rand_bytes])\n self.client[key] = byte_str\n print('Corrupted %s in redis' % key)",
"def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')",
"def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)",
"def _check_key(self, key):\n raise NotImplementedError",
"def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)",
"def source_data_key_length_check(source_data_key, algorithm):\n if len(source_data_key.data_key) != algorithm.kdf_input_len:\n raise InvalidDataKeyError(\n \"Invalid Source Data Key length {actual} for algorithm required: {required}\".format(\n actual=len(source_data_key.data_key), required=algorithm.kdf_input_len\n )\n )",
"def _validate_and_split_key(self, key):\n if self._len_keys == 1:\n return self._validate_and_split_len_one(key)\n else:\n return self._validate_and_split_len(key)",
"def test_neg_list_size_with_nonexistent_key(self):\n charSet = 'abcdefghijklmnopqrstuvwxyz1234567890'\n minLength = 5\n maxLength = 30\n length = random.randint(minLength, maxLength)\n key = ('test', 'demo', ''.join(map(lambda unused:\n random.choice(charSet),\n range(length))) + \".com\")\n try:\n self.as_connection.list_size(key, \"contact_no\")\n except e.RecordNotFound as exception:\n assert exception.code == 2",
"def __validateIndex(self, index, lenght):\r\n if index >= lenght or index < 0:\r\n raise Exception(\"Can't change these letters\")",
"def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)",
"def testOffsetBadLength(self):\n def setOffset():\n self.node.offset = ['banana']\n\n self.assertRaises(\n ValueError,\n setOffset\n )",
"def testOffsetBadLength(self):\n def setOffset():\n self.cc.offset = ['banana']\n\n self.assertRaises(\n ValueError,\n setOffset\n )",
"def key_size(self) -> int:\n pass",
"def key_size(self) -> int:\n pass",
"def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key",
"def test_set_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"] = \"value\"",
"def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')",
"def test_wrong_total_number_of_keys(self):\n xml = self._make_request()\n request = request_from_xml(xml)\n policy = replace(self.policy, num_different_keys_in_all_bundles=2)\n with self.assertRaises(KSR_POLICY_KEYS_Violation) as exc:\n validate_request(request, policy)\n self.assertEqual(\n \"Unacceptable number of key sets in request test, (1 keys instead of 2)\",\n str(exc.exception),\n )",
"def validate(self, key, val):\n return True",
"def validate(self, key, val):\n return True",
"def test_long():\n key = 'A' * 242\n hashed_key = '%s[3705915182]' % ('A' * 229)\n full_key = 'prefix:1:%s' % hashed_key\n assert full_key == make_key(key, 'prefix', 1)",
"def validate_key(self, key: keyType) -> bool:\n if isinstance(key, (dict,bool)):\n raise Exception\n if key is None:\n raise Exception\n # Numerical key object has no len(),\n # so explicitly specify which types are not allowed to use empty value as keys\n if isinstance(key, (str, tuple, set, list)) and (len(key) == 0):\n raise Exception\n return True",
"def test_set_key():\n\n assert symmetric.set_key(\"test\") == \"test\"",
"def test_bit_set_bit_value_size_too_large(self):\n value = bytearray()\n for x in range(0, 5):\n value.append(255)\n ops = [bitwise_operations.bit_set(self.test_bin_zeroes, 0, 48, 6, value, None)]\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)",
"def keychange(self):\n # if response.json()['error']['errors'][0]['reason']=='quotaExceeded':\n self.keyindex += 1\n if self.keyindex == len(self.keylist):\n self.keyindex = 0\n print('Keylist length reached')\n print('Changinf Key..')\n key = self.keylist[self.keyindex]\n print(\"Quota Exceeded\", self.keyindex)\n return key"
] | [
"0.7063274",
"0.69667125",
"0.6398266",
"0.6392544",
"0.6392544",
"0.6260004",
"0.6244157",
"0.62292206",
"0.622276",
"0.6218091",
"0.6210933",
"0.6209377",
"0.61843306",
"0.6152999",
"0.61401016",
"0.6111454",
"0.6110578",
"0.6082006",
"0.6082006",
"0.60524166",
"0.6032132",
"0.6026457",
"0.60223085",
"0.6021257",
"0.6021257",
"0.5995463",
"0.59921443",
"0.5974598",
"0.5969886",
"0.5954922"
] | 0.70860624 | 0 |
Test that specifying a filename that does not exist raises a StratisCliKeyfileNotFoundError. | def test_set_key_filename_missing(self):
command_line = self._MENU + [self._KEYNAME, "--keyfile-path", "/bogus"]
self.check_error(StratisCliKeyfileNotFoundError, command_line, _ERROR) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_config_filename_given_file_doesnt_exist():\n config_file_name = \"i_dont_exist.ini\"\n assert not os.path.exists(config_file_name)\n\n with pytest.raises(Exception):\n Config(filename=config_file_name)",
"def test_path_nonexistent(self):\n self.command.package = self.input_ovf\n self.command.file_path = \"foobar\"\n self.assertRaises(InvalidInputError, self.command.run)",
"def test_missing_file(self):\n with self.assertRaises(subprocess.CalledProcessError) as cm:\n subprocess.check_output(\n [sys.executable, idf_py_path, '--version', '@args_non_existent'],\n env=os.environ,\n stderr=subprocess.STDOUT).decode('utf-8', 'ignore')\n self.assertIn('(expansion of @args_non_existent) could not be opened', cm.exception.output.decode('utf-8', 'ignore'))",
"def test_raise_error_unknown_file():\n\n options = {'input_files': ['Sparta.lol']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match(r'File ([a-zA-Z_\\.\\'].*) not found in file list.')",
"def test_read_no_file():\n filename = 'asdf'\n with pytest.raises(FileNotFoundError):\n read_file(filename)",
"def test_not_present_file(self):\n\t\ttry:\n\t\t\tmain.Main(['input/abc.txt']).run()\n\t\texcept:\n\t\t\tself.assertTrue(True)",
"def test_get_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"]",
"def test_valid_file_raises():\n with pytest.raises(ValueError):\n cli._valid_file(__file__)",
"def verify_non_existing_path(self) -> None:\n path = \"/some/non/existing/path\"\n with self.assertRaises(NotFoundException):\n verify_file_path(path)",
"def test_get_invalid_key(self):\n pairs = {'library': '~/home/documents/dms',\n 'key': 'value',\n }\n exceptionKeys = ['Hello', 'spam']\n try:\n tempconfig = tempfile.NamedTemporaryFile(\n suffix=\".yaml\", delete=False)\n tempconfig.write('ham: eggs'.encode('UTF-8'))\n tempconfig.close()\n config = easydms.config.Config(tempconfig.name)\n\n for key, value in pairs.items():\n self.assertEqual(config.getKey(key, value), value)\n\n for key in exceptionKeys:\n with self.assertRaises(easydms.config.ErrorConfigKeyNotFound):\n config.getRequiredKey(key)\n finally:\n os.remove(tempconfig.name)",
"def test_get_config_does_not_exist():\n with pytest.raises(\n FileNotFoundError, match=\"Oops! I can't seem to find a config.yml\"\n ):\n abcconfig.get_config(\"dirthatdoesntexist\")",
"def test_encrypt_non_existent_file(self):\n encryptor = self.test_init()\n\n with patch('os.path.exists', return_value=False):\n with self.assertRaises(IceItException):\n encryptor.encrypt('blah', 'blah-again')",
"def test_get_authz_file_notfound_raises(self):\n authz_file = os.path.join(self.env.path, 'some-nonexistent-file')\n self.env.config.set('authz_policy', 'authz_file', authz_file)\n self.assertRaises(ConfigurationError, self.check_permission,\n 'WIKI_VIEW', 'änon', None, None)",
"def _is_valid_file(arg: str) -> str:\n if not os.path.exists(arg):\n raise FileNotFoundError(\"%s does not exist\" % arg)\n return arg",
"def test_fail_launch_file(self):\n args = self.args.copy()\n # Pass a string instead of a list\n args[\"traj_file\"] = \"nofile.xtc\"\n with pytest.raises(FileNotFoundError) as err:\n UI.launch(**args)\n assert \"nofile.xtc does not exist.\" in str(err.value)",
"def test_id_nonexistent(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"e-dad\"\n self.assertRaises(InvalidInputError, self.command.run)",
"def get_existing_filename_or_die(self, key) -> str:\n filename = self.get_or_default(key, None)\n if filename is None:\n print(\"Error, '\" + key + \"' is required.\")\n sys.exit(1)\n elif not os.path.isfile(filename):\n print(\"'\" + str(filename) + \"' is not a file.\")\n sys.exit(1)\n else:\n return filename",
"def test_invalid_path() -> None:\n path = rsc / \"does-not-exist.ods\"\n with pytest.raises(FileNotFoundError, match=\"does not exist\"):\n read_ods(path)",
"def test_exceptions_init_nonexistent():\n with pytest.raises(IOError):\n Exceptions(os.path.join(os.path.dirname(__file__),\n 'nonexistent_exceptions.yaml'))",
"def test_load_configuration_raises_an_exception_when_file_does_not_exist():\n with pytest.raises(FileNotFoundError):\n config.load_configuration(invalid_configuration_path)",
"def test_filename_required():\n with pytest.raises(SystemExit):\n cli.parse_args(['-f'])",
"def test_init__no_sdk_key_no_datafile__fails(self, _):\n self.assertRaisesRegex(\n optimizely_exceptions.InvalidInputException,\n enums.Errors.MISSING_SDK_KEY,\n config_manager.PollingConfigManager,\n sdk_key=None,\n datafile=None,\n )",
"def test_verifies_token_file_exists(self):\n\n with self.assertRaises(exceptions.TokenFileNotFoundError):\n badgr = BadgrLite(token_filename='./non_existent_token_file.json')\n badgr.load_token()",
"def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.quick,\n [\"0.0.0.0\"],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def test_readable_error_if_file_not_found(self):\n fake_path = 'this/path/is/not/real'\n self.assertEqual(LoadJsonConfig.read_config_file(LoadJsonConfig(), fake_path), 'File not found at ' + fake_path)",
"def test_bad_config_file_path_or_name():\n cli_result = subprocess.run(\n ['kaiba', 'config.js', 'input.json'],\n capture_output=True,\n )\n assert b'FileNotFoundError' in cli_result.stderr\n assert b'config.js' in cli_result.stderr",
"def test_raise_missing_file(self) -> None:\n with pytest.raises(FileNotFoundError):\n YAMLParser().parse(\"test/missing_file.yaml\")",
"def verify_restricted_path(self) -> None:\n path = \"/usr\"\n with self.assertRaises(NotFoundException):\n verify_file_path(path)",
"def test_init_nonexistent_file(self, mock_creds):\n with mock.patch.object(moves.builtins, 'open', side_effect=IOError()):\n self.assertRaises(errors.Credentials,\n credentials.Credentials, 'key.json')\n self.assertFalse(mock_creds.called)"
] | [
"0.7394184",
"0.69968396",
"0.6942686",
"0.69373065",
"0.69018644",
"0.68976176",
"0.68946975",
"0.6786818",
"0.6780421",
"0.6769902",
"0.6741125",
"0.6615576",
"0.66148007",
"0.6611362",
"0.6587929",
"0.6565255",
"0.65609694",
"0.6548419",
"0.6540407",
"0.65388423",
"0.6524873",
"0.65083605",
"0.65020424",
"0.64982635",
"0.64881086",
"0.6481979",
"0.64667326",
"0.6459877",
"0.6452121",
"0.64478517"
] | 0.8606106 | 0 |
Receives an interval from the frequency of root_pitch (root_pitch['freq']), expressed in mils. A positive value means the interval extends above root_pitch while a negative value means the interval extends below. Returns the frequency of the pitch that interval away from root_pitch. | def mils_to_freq(m):
f = root_pitch['freq']*(2**(float(m)/12000))
return f; | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def freq_to_mils(f):\n if f == 0:\n raise Exception('0 not a valid frequency, what you playing at!?')\n else:\n m = int(round(12000* log(f/root_pitch['freq'],2)))\n return m;",
"def getFundFreq(self, data, sampleRate):\n sp = SignalProc.SignalProc(256, 128)\n sp.data = data\n sp.sampleRate = sampleRate\n # spectrogram is not necessary if we're not returning segments\n segment = Segment.Segmenter(sp, sampleRate)\n pitch, y, minfreq, W = segment.yin(minfreq=100, returnSegs=False)\n # we use NaNs to represent \"no F0 found\"\n if pitch.size == 0:\n return float(\"nan\"), float(\"nan\")\n\n segs = segment.convert01(pitch > minfreq)\n segs = segment.deleteShort(segs, 5)\n if len(segs) == 0:\n return float(\"nan\"), float(\"nan\")\n else:\n pitch = pitch[np.where(pitch>minfreq)]\n return round(np.min(pitch)), round(np.max(pitch))",
"def miditofreq(midinote):\n return 440 * (2 ** ((midinote - 69.0)/12.0))",
"def get_midi_pitch(self) -> (int, float):\n pitch_id = 69 + int(self._cents/100)\n detune = self._cents - 100*(pitch_id - 69)\n while detune >= 50:\n pitch_id += 1\n detune -= 100\n while detune < -50:\n pitch_id -= 1\n detune += 100\n return (pitch_id, detune)",
"def estimate_root_note(fn, start=0, end=None):\n data = detect_pitch(fn, unit=\"midi\")\n if start or end:\n if end is None:\n end = len(data)\n data = data[start:end]\n\n return statistics.harmonic_mean(\n remove_outliers([i[1] for i in data if i[0] != 0.0])\n )",
"def freq_at_octave(freq_at_zero, target_octave):\n target_frequency = 0\n\n if target_octave<0:\n b = (target_octave*-2)/2\n else:\n b = target_octave\n\n\n for a in range(0,b):\n if target_octave>0:\n target_frequency *=2\n else:\n target_frequency /=2\n target_frequency = freq_at_zero\n return target_frequency;",
"def tone_to_freq(tone):\n return math.pow(2, (tone - 69.0) / 12.0) * 440.0",
"def get_tuning_freq(self, freq):\n if freq < self.freq_lo:\n return\n\n while freq < self.NOTES.min() or freq > self.NOTES.max():\n while freq > self.NOTES.max():\n self.NOTES *= 2\n while freq < self.NOTES.min():\n self.NOTES /= 2\n tuning_freq = min(self.NOTES, key=lambda x: abs(x-freq))\n return tuning_freq",
"def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))",
"def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))",
"def note2pitch(note, cents=0):\n freq = 440 * note2ratio(note - 69, cents)\n return freq",
"def getUpperFrequencyBound(self) -> int:\n return self.upper_frequency_bound",
"def mtof(p):\n return 440.0 * 2 ** ((p - 69) / 12.0)",
"def ltp(sp):\n n = len(sp)\n # upper and lower pitch limits (fs ~ 8KHz- 16KHz).\n pmin = 50\n pmax = 200\n sp2 = sp ** 2 # Pre-calculate the square.\n E = np.zeros(pmax+1) # Creates an array to store the values, not using the first elements.\n for M in range(pmin, pmax + 1):\n e_del = sp[0 : n-M]\n e = sp[M : n]\n e2 = sp2[M : n]\n E[M] = np.sum((e_del * e) ** 2) / np.sum(e2)\n\n # Find M, the optimum pitch period.\n M = np.argmax(E) # Not max value mas Max index.\n\n # Find B, the pitch gain factor\n e_del = sp[0 : n-M]\n e = sp[M : n]\n e2 = sp2[M : n]\n B = np.sum(e_del * e) / sum(e2)\n\n return (B, M)",
"def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):\n if os.path.isfile(filename) is False:\n raise Exception('File not found with filename = %s' % filename)\n\n print(\"====> reading pitch from sound file\")\n win_s = 4096 // DOWN_SAMPLE # fft size\n hop_s = 512 // DOWN_SAMPLE # hop size\n\n s = source(filename, samplerate, hop_s)\n samplerate = s.samplerate\n\n tolerance = 0.8\n\n pitch_o = pitch(\"yin\", win_s, hop_s, samplerate)\n pitch_o.set_unit(\"midi\")\n pitch_o.set_tolerance(tolerance)\n\n result = []\n\n # total number of frames read\n total_frames = 0\n while True:\n samples, read = s()\n # the pitch value is not rounded and many zeroes occur\n that_pitch = pitch_o(samples)[0]\n confidence = pitch_o.get_confidence()\n result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence))\n total_frames += read\n if read < hop_s:\n break\n\n group_result_with_log_density = compute_density_from_pitch_result(result)\n density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time'])\n print(\"====> density level list length %s\" % len(density_level_list))\n proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time'])\n print(\"====> emphasis proportion list length = %d\" % len(proportion_list))\n return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list)",
"def midi_pitch_fraction(self) -> int:\n return self.__midi_pitch_fraction",
"def wavelength(self,freq):\n return self.phase_velocity()/freq",
"def get_frequency(self, detune=0) -> float:\n return np.power(2, (self._cents + detune)/1200) * 440",
"def interval(self, pitch):\n return Base40Interval(self, pitch)",
"def hz2mel(freq):\n return 2595. * np.log10(1+freq/700.0)",
"def get_EUL_Pitch(self):\n eul_raw = self.i2c.mem_read(2, self.addr, OUT_EUL_PITCH_LSB)\n eul_pitch = self.sign_val(((eul_raw[1]<<8) + eul_raw[0]))/16.0\n return (eul_pitch)\n #print(eul_pitch)",
"def _frequency_to_wavelength(freq):\n return ifc.SPEED_OF_LIGHT_METRES_PER_SECOND / freq",
"def pitch(self, pitch):\n pass",
"def midi_to_frequency(midi_note: Union[float, int]) -> float:\n half_tone = 2 ** (1 / 12)\n return 440. * half_tone ** (midi_note - 69.)",
"def Hz(self,kx,ky):\n return self.m-2.*self.t2*np.sin(self.phi)*(np.sin(3.*kx/2.+np.sqrt(3.)*ky/2.)+np.sin(-3.*kx/2.+np.sqrt(3.)*ky/2.)+np.sin(-np.sqrt(3.)*ky))",
"def perceptual_amplitude_dbb(frequency: float) -> float:\n # See http://www.sengpielaudio.com/BerechnungDerBewertungsfilter.pdf\n\n num = 12200.0 ** 2. * frequency ** 3\n den = (frequency ** 2. + 20.6) * (frequency ** 2. + 12200. ** 2.) * np.sqrt(frequency ** 2. + 158.5 ** 2.)\n return num / den",
"def from_frequency(frequency:float, detune=0) -> 'Pitch':\n return Pitch(1200*np.log2(frequency/440) + detune)",
"def getInterval(self, note):\n note = note.copy() # Shouldn't need this. I believe it passes a copy\n note.setOctave(self.getOctave())\n selfpitch = self.getMIDIByte()\n notepitch = note.getMIDIByte()\n interval = notepitch - selfpitch\n if abs(interval) < 7:\n return interval\n else:\n if interval < 0:\n # Note is too low, go up an octave\n sign = 1\n else:\n # Note is too high, go down an octave\n sign = -1\n while abs(interval) > 6:\n interval += sign*12\n return interval",
"def probing_frequency(dur: int) -> float:\n freq = min(dur / 8.0, 1.0)\n freq = max(dur / 64.0, freq)\n return max(freq, 0.1)",
"def midi_to_frequency(midi_note):\n return round(440.0 * 2 ** ((midi_note - 69) * (1.0 / 12.0)), 1)"
] | [
"0.6005354",
"0.5816876",
"0.5760161",
"0.56855977",
"0.5584908",
"0.55389327",
"0.5513072",
"0.5502047",
"0.5458081",
"0.5408832",
"0.5342084",
"0.53296864",
"0.5326595",
"0.5306963",
"0.52949834",
"0.5241739",
"0.5227384",
"0.521453",
"0.5211651",
"0.5193988",
"0.51438063",
"0.51416236",
"0.5096149",
"0.50911295",
"0.50786877",
"0.50677353",
"0.50676423",
"0.5053614",
"0.5019287",
"0.5016287"
] | 0.6486499 | 0 |
Receives a frequency, returns the interval in mils between that frequency and the frequency of root_pitch (root_pitch['freq']). Returns a positive value if the given frequency is higher than root_pitch, negative if lower. | def freq_to_mils(f):
if f == 0:
raise Exception('0 not a valid frequency, what you playing at!?')
else:
m = int(round(12000* log(f/root_pitch['freq'],2)))
return m; | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def miditofreq(midinote):\n return 440 * (2 ** ((midinote - 69.0)/12.0))",
"def perceptual_amplitude_dbb(frequency: float) -> float:\n # See http://www.sengpielaudio.com/BerechnungDerBewertungsfilter.pdf\n\n num = 12200.0 ** 2. * frequency ** 3\n den = (frequency ** 2. + 20.6) * (frequency ** 2. + 12200. ** 2.) * np.sqrt(frequency ** 2. + 158.5 ** 2.)\n return num / den",
"def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - self.fr[fc_ix + n - 1:fc_ix - 1:-1]))",
"def get_tuning_freq(self, freq):\n if freq < self.freq_lo:\n return\n\n while freq < self.NOTES.min() or freq > self.NOTES.max():\n while freq > self.NOTES.max():\n self.NOTES *= 2\n while freq < self.NOTES.min():\n self.NOTES /= 2\n tuning_freq = min(self.NOTES, key=lambda x: abs(x-freq))\n return tuning_freq",
"def band_penalty(self):\n fc_ix = np.argmin(np.abs(self.f - self.fc)) # Index to frequency array closes to center frequency\n # Number of indexes on each side of center frequency, not extending outside, only up to 10 kHz\n n = min(fc_ix, self.ix10k - fc_ix)\n if n == 0:\n return 0.0\n return np.mean(np.square(self.fr[fc_ix - n:fc_ix] - (self.gain - self.fr[fc_ix + n - 1:fc_ix - 1:-1])))",
"def freq2erb(freq_hz):\n return 9.265 * np.log(1 + freq_hz / (24.7 * 9.265))",
"def DSS28_beamwidth(freq):\n return 0.54/freq",
"def mtof(p):\n return 440.0 * 2 ** ((p - 69) / 12.0)",
"def _frequency_to_wavelength(freq):\n return ifc.SPEED_OF_LIGHT_METRES_PER_SECOND / freq",
"def ltp(sp):\n n = len(sp)\n # upper and lower pitch limits (fs ~ 8KHz- 16KHz).\n pmin = 50\n pmax = 200\n sp2 = sp ** 2 # Pre-calculate the square.\n E = np.zeros(pmax+1) # Creates an array to store the values, not using the first elements.\n for M in range(pmin, pmax + 1):\n e_del = sp[0 : n-M]\n e = sp[M : n]\n e2 = sp2[M : n]\n E[M] = np.sum((e_del * e) ** 2) / np.sum(e2)\n\n # Find M, the optimum pitch period.\n M = np.argmax(E) # Not max value mas Max index.\n\n # Find B, the pitch gain factor\n e_del = sp[0 : n-M]\n e = sp[M : n]\n e2 = sp2[M : n]\n B = np.sum(e_del * e) / sum(e2)\n\n return (B, M)",
"def mils_to_freq(m):\n f = root_pitch['freq']*(2**(float(m)/12000))\n return f;",
"def sample_interval(self):\n\n if self.sample_rate != 0:\n return 1.0 / self.sample_rate\n return 0.0",
"def getFundFreq(self, data, sampleRate):\n sp = SignalProc.SignalProc(256, 128)\n sp.data = data\n sp.sampleRate = sampleRate\n # spectrogram is not necessary if we're not returning segments\n segment = Segment.Segmenter(sp, sampleRate)\n pitch, y, minfreq, W = segment.yin(minfreq=100, returnSegs=False)\n # we use NaNs to represent \"no F0 found\"\n if pitch.size == 0:\n return float(\"nan\"), float(\"nan\")\n\n segs = segment.convert01(pitch > minfreq)\n segs = segment.deleteShort(segs, 5)\n if len(segs) == 0:\n return float(\"nan\"), float(\"nan\")\n else:\n pitch = pitch[np.where(pitch>minfreq)]\n return round(np.min(pitch)), round(np.max(pitch))",
"def getUpperFrequencyBound(self) -> int:\n return self.upper_frequency_bound",
"def wavelength(self,freq):\n return self.phase_velocity()/freq",
"def get_midi_pitch(self) -> (int, float):\n pitch_id = 69 + int(self._cents/100)\n detune = self._cents - 100*(pitch_id - 69)\n while detune >= 50:\n pitch_id += 1\n detune -= 100\n while detune < -50:\n pitch_id -= 1\n detune += 100\n return (pitch_id, detune)",
"def hz2mel(freq):\n return 2595. * np.log10(1+freq/700.0)",
"def getInterval(self, note):\n note = note.copy() # Shouldn't need this. I believe it passes a copy\n note.setOctave(self.getOctave())\n selfpitch = self.getMIDIByte()\n notepitch = note.getMIDIByte()\n interval = notepitch - selfpitch\n if abs(interval) < 7:\n return interval\n else:\n if interval < 0:\n # Note is too low, go up an octave\n sign = 1\n else:\n # Note is too high, go down an octave\n sign = -1\n while abs(interval) > 6:\n interval += sign*12\n return interval",
"def probing_frequency(dur: int) -> float:\n freq = min(dur / 8.0, 1.0)\n freq = max(dur / 64.0, freq)\n return max(freq, 0.1)",
"def estimate_root_note(fn, start=0, end=None):\n data = detect_pitch(fn, unit=\"midi\")\n if start or end:\n if end is None:\n end = len(data)\n data = data[start:end]\n\n return statistics.harmonic_mean(\n remove_outliers([i[1] for i in data if i[0] != 0.0])\n )",
"def perfectrefl(wavelength):\n return 1.0",
"def rangerate_from_freq(freq,Fc):\n # print(f'freq {freq} Fc {Fc}')\n dopp_at_IF = freq - Fc\n return dopp_at_IF*scipy.constants.speed_of_light/Fc",
"def calc(self, wavelength):\n if wavelength < self.minWavelength or wavelength > self.maxWavelength:\n return 0\n mm=wavelength%self.interval\n s=self._calcd(wavelength-mm)\n if mm==0:\n return s\n m=mm*1.0/self.interval\n e=self._calcd((wavelength-mm)+self.interval)\n return s+(e-s)*m",
"def ftom(f):\n return 69 + 12 * log(f / 440.0, 2)",
"def element_effective_area(freq_hz):\n freqs = np.array([0.05e9, 0.07e9, 0.11e9, 0.17e9, 0.25e9, 0.35e9, 0.45e9,\n 0.55e9, 0.65e9])\n a_eff = np.array([1.8791, 1.8791, 1.8694, 1.3193, 0.6080, 0.2956, 0.2046,\n 0.1384, 0.0792])\n f_cut = 2\n f1 = interp1d(np.log10(freqs[:f_cut+1]), np.log10(a_eff[:f_cut+1]),\n kind='slinear')\n f2 = interp1d(np.log10(freqs[f_cut:]), np.log10(a_eff[f_cut:]),\n kind='cubic')\n if freq_hz <= freqs[f_cut]:\n return 10**f1(np.log10(freq_hz))\n else:\n return 10**f2(np.log10(freq_hz))",
"def tone_to_freq(tone):\n return math.pow(2, (tone - 69.0) / 12.0) * 440.0",
"def getLowerFrequencyBound(self) -> int:\n return self.lower_frequency_bound",
"def get_scale_freq():\n return sf / 2 / (num_freq-1)",
"def freq2den(freq):\n\n return freq * freq * k_2",
"def lp_factor(self):\n num = 1 + np.cos(2 * self.angle) ** 2\n den = np.cos(self.angle) * np.sin(self.angle) ** 2\n return num / den"
] | [
"0.6333899",
"0.61145616",
"0.6066061",
"0.6028613",
"0.60101444",
"0.5847054",
"0.5843562",
"0.5816642",
"0.58075583",
"0.57927597",
"0.5756494",
"0.5755922",
"0.5688351",
"0.56856346",
"0.5646544",
"0.56452155",
"0.5620738",
"0.5614233",
"0.5599333",
"0.5584674",
"0.55586463",
"0.5547951",
"0.5547706",
"0.5530681",
"0.5529097",
"0.55132854",
"0.5485917",
"0.5462633",
"0.54621255",
"0.544559"
] | 0.6282281 | 1 |
Receives a value in mils between 0 and 999. If this is more than 500 it subtracts 1000 | def pos_neg(mils):
if mils > 500:
mils -= 1000
return mils; | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def limitValue(self, value, lowerLimit, upperLimit):\n if value > upperLimit:\n return upperLimit\n elif value < lowerLimit:\n return lowerLimit\n else:\n return value",
"def limit_speed(speed):\n if speed > 1000:\n speed = 1000\n elif speed < -1000:\n speed = -1000\n return speed",
"def clamp(value, mn, mx):\n\n return max(min(value, mx), mn)",
"def limit_speed(speed):\n if speed > 900:\n speed = 900\n elif speed < -900:\n speed = -900\n return -speed",
"def approximate(val):\r\n if val >=2 or val == 3:\r\n return 250\r\n elif val >=1:\r\n return 150\r\n elif val >=0:\r\n return 50",
"def _limit(value, min_value, max_value):\n\n if value < min_value:\n return min_value\n if value > max_value:\n return max_value\n return value",
"def clamp(value, mini, maxi):\n if value < mini:\n return mini\n elif maxi < value:\n return maxi\n else:\n return value",
"def if_value_higher_3000():\n res = requests.get(\"https://www.nasdaq.com/\")\n SnP500_value = extractor.findall(res.text)[0]\n # You can see this result from the log\n print(SnP500_value)\n if float(SnP500_value) > 3000:\n return 'send_email'\n else:\n return 'do_nothing'",
"def pwm_limit(self, value):\n self._write(MX_PWM_LIMIT, value)",
"def subtract(self, value):\n return self.number - value",
"def how_many_100s(self, amount):\n return amount // 100",
"def shorten_number(self, number):\n if number < 1000:\n return number\n elif number >= 1000 and number < 1000000:\n num = self.rounded_number(number, 1000)\n val = \"1M\" if num == \"1000\" else num + \"K\"\n return val\n elif number >= 1000000 and number < 1000000000:\n num = self.rounded_number(number, 1000000)\n val = \"1B\" if num==\"1000\" else num + \"M\"\n return val\n elif number >= 1000000000 and number < 1000000000000:\n num = self.rounded_number(number, 1000000000)\n val = \"1T\" if num==\"1000\" else num + \"B\"\n return val\n else:\n num = self.rounded_number(number, 1000000000000)\n return num + \"T\"",
"def normalize(value):\n while value > 1:\n value = value / 10\n return value",
"def clamp(num, min, max): \n if num < min:\n num = min\n elif num > max:\n num = max\n return num",
"def absolute_value(val):\n if val < 0:\n return val * -1\n else:\n return val",
"def range1000(self):\r\n return self.init(1000)",
"def clamp(lower, value, upper):\n if lower > value:\n return lower\n if upper < value:\n return upper\n return value",
"def constrain(inputVal, lower_limit, upper_limit):\n \n if (inputVal < lower_limit):\n return lower_limit\n elif (inputVal > upper_limit):\n return upper_limit\n else:\n return inputVal",
"def constrain(value):\n size = 2**m\n return (value%size)",
"def electricity_bill(unit):\r\n unit = unit\r\n if unit <= 100:\r\n return unit * 1.5 + 25\r\n elif unit <= 200:\r\n return 100 * 1.5 + (unit - 100) * 2.5 + 50\r\n elif unit <= 300:\r\n return 100 * 1.5 + 100 * 2.5 + (unit - 200) * 4 + 75\r\n elif unit <= 350:\r\n return 100 * 1.5 + 100 * 2.5 + 100 * 4 + (unit - 300) * 5 + 100\r\n else:\r\n return 1500",
"def sign_val(self,value):\n if value >= 0x8000:\n value -= 0x10000\n return value",
"def ml(milliliters):\n return ul(milliliters*1000)",
"def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0",
"def item_um(n):\n if n <= 0.250:\n return 0\n elif n > 0.250 and n <= 0.500:\n return 1\n elif n > 0.500 and n <= 0.750:\n return 2\n elif n > 0.750 and n <= 1.000:\n return 3",
"def range_limit(val, minv, maxv):\n\tif (val < minv):\n\t\tval = minv\n\telif (val > maxv):\n\t\tval = maxv\n\treturn val",
"def constrain(amt,low,high):\n if amt < low:\n return low\n elif amt > high:\n return high\n else:\n return amt",
"def ge(value, limit):\n return value >= limit",
"def pop_round_short(value, usemillion=False):\n if value < 1000:\n return str(int(value))\n suffixdict = {'k': 1000, 'm': 1000000}\n if value >= suffixdict['m'] and usemillion:\n suffix = 'm'\n else:\n suffix = 'k'\n\n roundValue = suffixdict[suffix]\n roundnum = round_to_nearest(value) // roundValue\n if roundnum == 0:\n return str(roundnum)\n else:\n return commify(roundnum) + suffix",
"def check_valid_range(val, max_val):\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val",
"def remaining_cash_without_50s(self, amount):\n return amount % 50"
] | [
"0.6407028",
"0.6335328",
"0.62473875",
"0.6163593",
"0.6136198",
"0.602561",
"0.57908463",
"0.560255",
"0.5591744",
"0.55848867",
"0.55823934",
"0.5577144",
"0.556901",
"0.55591995",
"0.5556475",
"0.55531615",
"0.55418956",
"0.5537635",
"0.55336213",
"0.55145276",
"0.5510138",
"0.54965866",
"0.5494251",
"0.5478421",
"0.5471152",
"0.5463677",
"0.5462753",
"0.5453269",
"0.5453266",
"0.5448712"
] | 0.68922126 | 0 |
Given the frequency at octave 0, returns the frequency at target_octave. | def freq_at_octave(freq_at_zero, target_octave):
    # Each octave up doubles the frequency; each octave down halves it.
    target_frequency = freq_at_zero
    for _ in range(abs(target_octave)):
        if target_octave > 0:
            target_frequency *= 2
        else:
            target_frequency /= 2
    return target_frequency | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_freq(self, octave):\n return notes[f'{self.note}{octave}']",
"def source_freq(self) -> int:",
"def days_in_frequency_target(target: int) -> int:\n return int((target / 12) * 365)",
"def tone_to_freq(tone):\n return math.pow(2, (tone - 69.0) / 12.0) * 440.0",
"def freq(self, freq: Optional[int] = None) -> Optional[int]:\n ...",
"def get_frequency(self, detune=0) -> float:\n return np.power(2, (self._cents + detune)/1200) * 440",
"def get_frequency(frame):\n frame = clip_centre(frame)\n frame = auto_correlate(frame)\n threshold: int = SAMPLE_RATE // 500\n lag = frame[threshold:].argmax()\n frequency = SAMPLE_RATE / lag\n return frequency",
"def mils_to_freq(m):\n f = root_pitch['freq']*(2**(float(m)/12000))\n return f;",
"def freq(self, frequency: Optional[int]):",
"def get_cw_freq(self):\n return self.get_frequency(self.synth)",
"def get_freq(self, surface_option=None, a=[]):\n\n if (surface_option is None) or (len(a) == 0): return self.modes['freq']\n return self.modes['freq'] + self.get_surface_correction(surface_option, a)",
"def get_frequency(time_series):\n if len(time_series.index) == 0:\n return 0\n ft = np.fft.rfft(time_series)\n return np.fft.fftfreq(len(time_series))[np.argmax(abs(ft))]",
"def to_frequency(self, tuning=440.0):\n\n NOTES = 'CcDdEFfGgAaB'\n base = NOTES.find('A')\n\n octave_delta = self.octave - Note.BASE_OCTAVE # 0\n octave_halfsteps = octave_delta * 12 # 0\n offset = NOTES.find(self.name) - base # -1\n halfsteps = octave_halfsteps + offset # -2\n freq = tuning * (1.059463 ** halfsteps)\n\n return freq",
"def freq():",
"def get_tuning_freq(self, freq):\n if freq < self.freq_lo:\n return\n\n while freq < self.NOTES.min() or freq > self.NOTES.max():\n while freq > self.NOTES.max():\n self.NOTES *= 2\n while freq < self.NOTES.min():\n self.NOTES /= 2\n tuning_freq = min(self.NOTES, key=lambda x: abs(x-freq))\n return tuning_freq",
"def note_to_freq(note: str) -> float:\n try:\n octave = int(note[-1])\n except ValueError:\n raise Exception('Octave must be an integer')\n\n if len(note) == 2:\n n = note[0]\n try:\n distance = NOTES_FLAT.index(n)\n except ValueError:\n raise Exception('Invalid note: \"\". Make sure note is '.format(note))\n elif len(note) == 3:\n n = note[:2]\n try:\n distance = [NOTES_FLAT, NOTES_SHARP][int(n[1] == '#')].index(n)\n except:\n raise Exception(\"Invalid note: \" + note)\n else:\n raise Exception(\"Incorrectly formatted note input\")\n\n C_freq = 261.63 * 2**(octave - 4)\n freq = C_freq * 2**(distance / 12)\n\n return freq",
"def freq(self) -> int:",
"def freq(self, value: int, /) -> None:",
"def midi_to_frequency(midi_note: Union[float, int]) -> float:\n half_tone = 2 ** (1 / 12)\n return 440. * half_tone ** (midi_note - 69.)",
"def midi_to_frequency(midi_note):\n return round(440.0 * 2 ** ((midi_note - 69) * (1.0 / 12.0)), 1)",
"def get_frequency(self):\r\n x = self.query('FREQ?')\r\n if x == None: return None\r\n return float(x)",
"def in_freq(self):\n if self._in_freq is None:\n self._in_freq = np.fft.rfft(self._in_time)\n return self._in_freq",
"def match_target_amplitude(audio, target_volume):\r\n if audio.dBFS < target_volume:\r\n required_gain = target_volume - audio.dBFS\r\n return audio.apply_gain(required_gain)\r\n else:\r\n return audio",
"def freq(self, x):\n return self.d.get(x, 0)",
"def note_freq(note: str):\n # general purpose function to convert a note in standard notation\n # to corresponding frequency\n if len(note) < 2 or len(note) > 3 or \\\n note[0] < 'A' or note[0] > 'G':\n return 0\n if len(note) == 3:\n if note[1] == 'b':\n acc = -1\n elif note[1] == '#':\n acc = 1\n else:\n return 0\n octave = int(note[2])\n else:\n acc = 0\n octave = int(note[1])\n SEMITONES = {'A': 0, 'B': 2, 'C': -9, 'D': -7, 'E': -5, 'F': -4, 'G': -2}\n n = 12 * (octave - 4) + SEMITONES[note[0]] + acc\n f = 440 * (2 ** (float(n) / 12.0))\n return f",
"def get_scale_freq():\n return sf / 2 / (num_freq-1)",
"def adjust_octave(midi_hz, measured_hz):\n # find non-silent frames\n singing_region = np.where((measured_hz > 1.0) & (midi_hz > 1.0))[0]\n cent_differences = np.log2((midi_hz + 1e-10) / (measured_hz + 1e-10)) * 1200\n octaves = np.arange(-3, 4) * 1200\n octave_error = octaves[np.argmin(np.abs(octaves - np.median(cent_differences[singing_region])))]\n midi_hz = np.power(2, (cent_differences - octave_error) / 1200) * measured_hz\n return midi_hz",
"def fft_frequency(fft, index):\n\treturn index * AUDIO_RATE / len(fft) / 2 # Same as in fft_index, see above",
"def getFundFreq(self, data, sampleRate):\n sp = SignalProc.SignalProc(256, 128)\n sp.data = data\n sp.sampleRate = sampleRate\n # spectrogram is not necessary if we're not returning segments\n segment = Segment.Segmenter(sp, sampleRate)\n pitch, y, minfreq, W = segment.yin(minfreq=100, returnSegs=False)\n # we use NaNs to represent \"no F0 found\"\n if pitch.size == 0:\n return float(\"nan\"), float(\"nan\")\n\n segs = segment.convert01(pitch > minfreq)\n segs = segment.deleteShort(segs, 5)\n if len(segs) == 0:\n return float(\"nan\"), float(\"nan\")\n else:\n pitch = pitch[np.where(pitch>minfreq)]\n return round(np.min(pitch)), round(np.max(pitch))",
"def wavelength(self,freq):\n return self.phase_velocity()/freq"
] | [
"0.66253114",
"0.6102212",
"0.6060947",
"0.59513015",
"0.5842971",
"0.57598406",
"0.5730503",
"0.5708239",
"0.5680097",
"0.5648331",
"0.56461424",
"0.56106067",
"0.55054444",
"0.5502096",
"0.5499251",
"0.545656",
"0.5456237",
"0.5444813",
"0.53967893",
"0.5381159",
"0.53799397",
"0.53780127",
"0.5368411",
"0.53502464",
"0.5349442",
"0.5332731",
"0.5316685",
"0.53161365",
"0.5312634",
"0.5297628"
] | 0.8766376 | 0 |
Prepare the calculator when a new qgrid is passed. | def _prepare(self, q):
# store it in _lastr which is consulted in BasePDFGenerator.__call__
self._lastr = q
self._calc.qstep = q[1] - q[0]
self._calc.qmin = q[0]
self._calc.qmax = q[-1] + 0.5*self._calc.qstep
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _prepare_grid(self):\n raise NotImplementedError",
"def _build_integration_grid(self):\n pass",
"def _prepare(self, setup):\n # Initialise cell\n self.cell = self.celltype(model=self._model)\n for rec in setup.record_variables:\n self.cell.record(*rec)\n if 'injected_currents' in setup.conditions:\n for loc, current in setup.conditions['injected_currents'].items():\n getattr(self.cell, loc).inject_current(current)\n if 'voltage_clamps' in setup.conditions:\n for loc, voltages in setup.conditions['voltage_clamps'].items():\n getattr(self.cell, loc).voltage_clamp(voltages)\n if 'synaptic_spikes' in setup.conditions:\n for loc, syn, spkes in setup.conditions['synaptic_spikes'].items():\n getattr(self.cell, loc).synaptic_stimulation(spkes, syn)",
"def createWidgets(self):\n num_positions = [ (3,0), (2,0), (2,1), (2,2), (1,0), \\\n (1,1), (1,2), (0,0), (0,1), (0,2) ] \n\n op_info = { '+': (3, 3), '-': (2, 3), '*': (1, 3), '/': (0, 3) }\n self.operators = {}\n\n #Creates the 10 number buttons.\n for i in range(10):\n button = Button(self, text=str(i), height=2, width=5)\n button['command'] = lambda i=i: self.handle_num_general(str(i))\n r, c = num_positions[i][0], num_positions[i][1]\n button.grid(row=r, column=c)\n\n #Creates the 4 operator buttons.\n for op,position in op_info.iteritems():\n button = Button(self, text=str(op), height=2, width=5)\n button['command'] = lambda op=op: self.handle_op_general(op)\n button.grid(row=position[0], column=position[1])\n self.operators[op] = button\n\n self.equals = Button(self, text='=', height=2, width=5)\n self.equals['command'] = self.perform_op\n self.equals.grid(row=3, column=2)\n\n self.clear = Button(self, text='C', height=2, width=5)\n self.clear['command'] = lambda: self.reset('0') \n self.clear.grid(row=3, column=1)\n\n self.change_equals_state(DISABLED)\n self.change_ops_state(DISABLED)",
"def main():\n #------------------------------------- Functions\n def add(text):\n \"\"\"\n This will add to the display, and be the go to function of most buttons.\n We'll want to add in conditions for what buttons go.\n \"\"\"\n orig = dispb[\"text\"]\n new = orig + text\n ops = [\"+\",\"-\",\"*\",\"/\"]\n # conditions\n # length 21\n if len(new) > 21:\n dispb[\"text\"] = orig\n return 0\n \n # one calc at a time\n if len(orig) > 0:\n if (orig[-1] in ops) & (text in ops):\n dispb[\"text\"] = orig\n return 0\n\n dispb[\"text\"] = new\n return 0\n \n def clear():\n dispb[\"text\"] = \"\"\n return 0\n \n def backspace():\n dispb[\"text\"] = dispb[\"text\"][:len(dispb[\"text\"])-1]\n return 0\n \n def equals():\n try:\n dispb[\"text\"] = str(eval(dispb[\"text\"]))\n except:\n dispb[\"text\"]=\"ERROR, clear display\"\n \n #------------------------------------- UI\n \n # title and start\n calc = tk.Tk()\n calc.title(\"Calculator\")\n # size\n calc.geometry(\"255x235\")\n #calc.columnconfigure(range(3), weight=1, minsize=50)\n #calc.rowconfigure(range(1,4), weight=1, minsize=48)\n \n # Icon\n calc.iconbitmap('Icon.ico')#'Icon.ico')\n \n \n calcarea = tk.Frame(master=calc)\n calcarea.pack(padx=5, pady=10)\n \n # display box\n disp = tk.Frame(\n master = calcarea\n )\n disp.grid(row = 0, column = 0, columnspan = 3)\n dispb = tk.Label(\n master = disp,\n text = '',\n fg = 'black',\n bg = 'white',\n borderwidth = 1,\n relief = 'solid',\n height = 2,\n width = 19\n )\n dispb.pack()\n \n # number buttons\n num1 = tk.Frame(\n master=calcarea\n )\n num1.grid(row = 3, column = 0)\n num1b = tk.Button(\n master = num1,\n text = 1,\n width = 5,\n height = 2,\n command = lambda: add(\"1\")\n ).pack()\n # the pack is what adds it to the UI\n # two \n num2 = tk.Frame(\n master=calcarea\n )\n num2.grid(row = 3, column = 1)\n num2b = tk.Button(\n master = num2,\n text = \"2\",\n width = 5,\n height = 2,\n command = lambda: add(\"2\")\n ).pack()\n \n # three \n num3 = tk.Frame(\n master=calcarea\n )\n num3.grid(row = 3, column = 2)\n num3b = tk.Button(\n master = num3,\n text = \"3\",\n width = 5,\n height = 2,\n command = lambda: add(\"3\")\n ).pack()\n \n # four \n num4 = tk.Frame(\n master=calcarea\n )\n num4.grid(row = 2, column = 0)\n num4b = tk.Button(\n master = num4,\n text = \"4\",\n width = 5,\n height = 2,\n command = lambda: add(\"4\")\n ).pack()\n \n # five \n num5 = tk.Frame(\n master=calcarea\n )\n num5.grid(row = 2, column = 1)\n num5b = tk.Button(\n master = num5,\n text = \"5\",\n width = 5,\n height = 2,\n command = lambda: add(\"5\")\n ).pack()\n \n # six \n num6 = tk.Frame(\n master=calcarea\n )\n num6.grid(row = 2, column = 2)\n num6b = tk.Button(\n master = num6,\n text = \"6\",\n width = 5,\n height = 2,\n command = lambda: add(\"6\")\n ).pack()\n \n # seven \n num7 = tk.Frame(\n master=calcarea\n )\n num7.grid(row = 1, column = 0)\n num7b = tk.Button(\n master = num7,\n text = \"7\",\n width = 5,\n height = 2,\n command = lambda: add(\"7\")\n ).pack()\n \n # eight \n num8 = tk.Frame(\n master=calcarea\n )\n num8.grid(row = 1, column = 1)\n num8b = tk.Button(\n master = num8,\n text = \"8\",\n width = 5,\n height = 2,\n command = lambda: add(\"8\")\n ).pack()\n \n # nine \n num9 = tk.Frame(\n master=calcarea\n )\n num9.grid(row = 1, column = 2)\n num9b = tk.Button(\n master = num9,\n text = \"9\",\n width = 5,\n height = 2,\n command = lambda: add(\"9\")\n ).pack()\n \n # zero\n num0 = tk.Frame(\n master = calcarea\n )\n num0.grid(row = 4, column = 0)\n num0b = tk.Button(\n master = 
num0,\n text = 0,\n width = 5,\n height = 2,\n command = lambda: add(\"0\")\n ).pack()\n \n # period\n dot = tk.Frame(\n master = calcarea\n )\n dot.grid(row = 4, column = 1)\n dotb = tk.Button(\n master = dot,\n text = \".\",\n width = 5,\n height = 2,\n command = lambda: add(\".\")\n ).pack()\n \n # equal sign\n eq = tk.Frame(\n master = calcarea\n )\n eq.grid(row = 4, column = 2, columnspan = 2)\n eqb = tk.Button(\n master = eq,\n text = \"=\",\n width = 11,\n height = 2,\n command = equals\n ).pack()\n \n # plus sign\n plus = tk.Frame(\n master = calcarea\n )\n plus.grid(row = 3, column = 4, rowspan = 2)\n plusb = tk.Button(\n master = plus,\n text = \"+\",\n width = 5,\n height = 5,\n command = lambda: add(\"+\")\n ).pack()\n \n # minus sign\n minu = tk.Frame(\n master = calcarea\n )\n minu.grid(row = 3, column = 3)\n minub = tk.Button(\n master = minu,\n text = \"-\",\n width = 5,\n height = 2,\n command = lambda: add(\"-\")\n ).pack()\n \n # multiplication\n mult = tk.Frame(\n master = calcarea\n )\n mult.grid(row = 2, column = 3)\n multb = tk.Button(\n master = mult,\n text = \"*\",\n width = 5,\n height = 2,\n command = lambda: add(\"*\")\n ).pack()\n \n # division\n div = tk.Frame(\n master = calcarea\n )\n div.grid(row = 2, column = 4)\n divb = tk.Button(\n master = div,\n text = \"/\",\n width = 5,\n height = 2,\n command = lambda: add(\"/\")\n ).pack()\n \n # left parentheses\n lefp = tk.Frame(\n master = calcarea\n )\n lefp.grid(row = 1, column = 3)\n lefpb = tk.Button(\n master = lefp,\n text = \"(\",\n width = 5,\n height = 2,\n command = lambda: add(\"(\")\n ).pack()\n \n # right paraentheses\n rigp = tk.Frame(\n master = calcarea\n )\n rigp.grid(row = 1, column = 4)\n rigpb = tk.Button(\n master = rigp,\n text = \")\",\n width = 5,\n height = 2,\n command = lambda: add(\")\")\n ).pack()\n \n # Clear button\n Clr = tk.Frame(\n master = calcarea\n )\n Clr.grid(row = 0, column = 3)\n Clrb = tk.Button(\n master = Clr,\n text = \"C\",\n width = 5,\n height = 2,\n command = clear\n ).pack()\n \n # backspace\n bck = tk.Frame(\n master = calcarea\n )\n bck.grid(row = 0, column = 4)\n bckb = tk.Button(\n master = bck,\n text = \"\\N{RIGHTWARDS BLACK ARROW}\",\n width = 5,\n height = 2,\n command = backspace\n ).pack()\n \n # This is what kicks the whole thing off, lets it wait for commands.\n calc.mainloop()",
"def _init_calculation(self):\n # count number of previous calculations\n self.ctx.running_calc += 1\n\n # set the structure\n self.ctx.inputs.structure = self.inputs.structure\n\n # # deal with oxidation states\n # if self.ctx.running_calc > 1 and self.ctx.try_oxi:\n # self.report('Trying to guess oxidation states')\n # self.ctx.inputs.guess_oxistates = Bool(True)\n # self.ctx.inputs.high_spin_preferred = Bool(self.ctx.high_spin_preferred)\n\n # set metadata\n label = self.inputs.metadata.get('label', DEFAULT_TITLE)\n description = self.inputs.metadata.get('description', '')\n self.ctx.inputs.metadata = AttributeDict({'options': self.ctx.options,\n 'label': '{} [{}]'.format(label, self.ctx.running_calc),\n 'description': description})",
"def __init__(self):\n\n #Creates the window.\n EasyFrame.__init__(self, \"Calculator\", resizable = False)\n\n #Creates the data model Calculator.\n self.calculator = Calculator()\n\n #Keeps track of when the user enters an input.\n self.operatorEntered = False\n\n #Creates the colored panels.\n for column in range(5):\n numberBarPanel = self.addPanel(row = 0, column = column, background = \"black\")\n\n for row in range(1, 6):\n symbolPanel = self.addPanel(row = row, column = 4, background = 'orange')\n\n for column in range (4):\n topPanel = self.addPanel(row = 1, column = column, background = \"snow3\")\n\n for row in range (2, 6):\n for column in range (0, 3):\n numberPanel = self.addPanel(row = row, column = column, background = \"snow2\")\n\n #Creates fonts.\n barFont = Font(family = \"San Francisco\", size = 20)\n \n #Creates the number bar at the top of the calculator.\n self.digits = self.addLabel(\"0\", row= 0, column= 0, columnspan= 5, sticky= \"E\", background = 'black', foreground = 'white', font = barFont)\n\n #Creates button for clear.\n self.clearButton= self.addButton(text= \"AC\", row= 1, column= 0, command= self.clearCommand)\n self.clearButton['background'] = 'snow3'\n \n #Creates button for +/-.\n negativeButton= self.addButton(text= \"+/-\", row= 1, column= 1, command= self.negativeCommand)\n negativeButton['background'] = 'snow3'\n \n #Creates button for %\n percentButton= self.addButton(text= \"%\", row= 1, column= 2, command= self.percentCommand)\n percentButton['background'] = 'snow3'\n\n #Creates side row of operator symbols.\n sideSymbols= [\"/\", \"X\", \"-\", \"+\", \"=\"]\n row= 1\n for symbol in sideSymbols:\n symbolButton = self.addButton(text= symbol, row= row, column= 4)\n symbolButton[\"command\"] = self.operatorCommand(symbol)\n symbolButton['foreground'] = 'white'\n symbolButton['background'] = 'orange'\n row += 1\n \n #Goes through and creates a grid with numbers 1-9.\n digit= 7\n for row in range(2, 5):\n for column in range(3):\n numberButton = self.addButton(str(digit), row, column)\n numberButton[\"command\"] = self.numberCommand(str(digit))\n numberButton['background'] = 'snow2'\n if digit == 9:\n digit = 3\n elif digit == 6:\n digit = 0\n digit += 1\n\n #Creates 0 button.\n zeroButton = self.addButton(text= \"0 \", row= 5, column = 0, columnspan = 2, command = self.numberCommand(\"0\"))\n zeroButton['background'] = 'snow2'\n\n #Creates . button.\n self.decimalButton = self.addButton(text= \".\", row = 5, column = 2, command = self.decimalCommand, state = 'normal')\n self.decimalButton['background'] = 'snow2'",
"def process_grid(self, grid: Grid) -> Grid:",
"def __init__(self, grid_tuples):\n super(ParamGrid, self).__init__()\n self.grid = OrderedDict(grid_tuples)",
"def __init__(self, parent=None):\n super(Dialog, self).__init__(parent)\n self.setupUi(self)\n \n \n number = [self.one, self.two, self.three, \\\n self.four, self.five, self.six, self.seven,self.eight,\\\n self.nine, self.zero]\n\n self.pointButton.clicked.connect(self.pointclicked)\n self.squareRootButton.clicked.connect(self.squareRoot)\n self.plusminusButton.clicked.connect(self.changesign)\n self.clearMemoryButton.clicked.connect(self.clearMemory)\n self.readMemoryButton.clicked.connect(self.readMemory)\n self.setMemoryButton.clicked.connect(self.setMemory)\n self.addToMemoryButton.clicked.connect(self.addToMemory)\n self.lessToMemoryButton.clicked.connect(self.lessToMemory)\n self.clearButton.clicked.connect(self.clear)\n self.clearAllButton.clicked.connect(self.clearAll)\n self.equalButton.clicked.connect(self.equalClicked)\n self.pendingadditiveoperator = ''\n for i in number :\n i.clicked.connect(self.digitclicked)\n self.waiting = True\n self.sum = 0.0\n plus_minus = [self.plusButton, self.minusButton]\n \n for i in plus_minus:\n i.clicked.connect(self.additiveoperator)\n multiply_divide = [self.timesButton, self.divisionButton]\n \n for i in multiply_divide:\n i.clicked.connect(self.multiplicativeoperator)\n self.pendingmultiplicativeoperator = ''\n self.factor = 0.0",
"def init_grid_plot(self):\n\n # Create filters to plot households and schools sequentially\n self.household_view = self.agent_filter(self.source, 'household')\n self.school_view = self.agent_filter(self.source, 'school')\n self.neighbourhood_view = self.agent_filter(self.source,\n 'neighbourhood')\n self.customise_grid()",
"def initialize_grid(self):\n if self.Qh is not None and self.Qk is not None and self.Ql is not None:\n self.h_start, self.h_step, self.h_stop = (\n self.Qh[0], self.Qh[1]-self.Qh[0], self.Qh[-1])\n self.k_start, self.k_step, self.k_stop = (\n self.Qk[0], self.Qk[1]-self.Qk[0], self.Qk[-1])\n self.l_start, self.l_step, self.l_stop = (\n self.Ql[0], self.Ql[1]-self.Ql[0], self.Ql[-1])\n else:\n\n def round(value):\n import math\n return math.ceil(np.round(value) / 2.) * 2\n\n self.h_stop = round(0.8 * self.Qmax / self.astar)\n h_range = np.round(2*self.h_stop)\n self.h_start = -self.h_stop\n self.h_step = self.stepsize(h_range/1000)\n self.k_stop = round(0.8 * self.Qmax / self.bstar)\n k_range = np.round(2*self.k_stop)\n self.k_start = -self.k_stop\n self.k_step = self.stepsize(k_range/1000)\n self.l_stop = round(0.8 * self.Qmax / self.cstar)\n l_range = np.round(2*self.l_stop)\n self.l_start = -self.l_stop\n self.l_step = self.stepsize(l_range/1000)\n self.define_grid()",
"def _reset_integration_grid(self):\n pass",
"def _set_grid_params(self, param_grid, grid_search_args):\n \n # If key word arguments for the grid search are included in the request, get the parameters and values\n if len(grid_search_args) > 0:\n # Transform the string of arguments into a dictionary\n grid_search_args = utils.get_kwargs(grid_search_args)\n \n # Get the metric parameters, converting values to the correct data type\n self.model.grid_search_args = utils.get_kwargs_by_type(grid_search_args)\n\n # The refit parameter must be True, so this is ignored if passed in the arguments\n self.model.grid_search_args[\"refit\"] = True\n else:\n self.model.grid_search_args = {}\n \n # If key word arguments for the grid search are included in the request, get the parameters and values\n if len(param_grid) > 0:\n # Transform the parameter grid dataframe into a list of dictionaries\n self.model.param_grid = list(param_grid.apply(utils.get_kwargs).apply(utils.get_kwargs_by_type))\n else:\n err = \"An empty string is not a valid input for the param_grid argument\"\n raise Exception(err)\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(9)",
"def _reset_integration_grid(self):\n if self.needConvergence:\n self.quadm = 0\n tmpeEspread = self.eEspread\n self.eEspread = 0\n self.convergenceSearchFlag = True\n convRes, stats = self._find_convergence_mixed()\n self.convergenceSearchFlag = False\n self.eEspread = tmpeEspread\n self._build_integration_grid()\n if raycing._VERBOSITY_ > 0:\n print(\"Done with integration optimization, {0} points will be used\"\n \" in {1} interval{2}\".format(\n self.quadm, self.gIntervals,\n 's' if self.gIntervals > 1 else ''))",
"def __init__(self, grid):\n \n # In case this slows down simulations that don't need all results\n # we can change to generate the results on demand.\n self.gen = pd.DataFrame(index=grid.gens.keys(), \n columns=[\"p_set\", \"q_set\", \"n_machines\", \"h\",\n \"rating\", \"in_service\",\n \"u_rel_angle\", \"u_mag\", \"xdss\"])\n self._populate_df(self.gen, grid.gens.values())\n\n self.load = pd.DataFrame(index=grid.loads.keys(), \n columns=[\"p_set\", \"q_set\",\n \"in_service\"])\n self._populate_df(self.load, grid.loads.values())\n\n self.line = pd.DataFrame(index=grid.lines.keys(),\n columns=[\"p\", \"loading\"])\n self._populate_df(self.line, grid.lines.values())\n\n self.area = pd.DataFrame(index=grid.areas.keys(),\n columns=[\"loads\", \"gens\"])\n\n for area in self.area.index:\n for column in self.area.columns:\n if column in [\"loads\", \"gens\"]:\n self.area.loc[area,\n column] = grid.areas[area].get_total_var(\n column)\n\n # The interchange between areas used to be included in the area report\n # However, the power factory function for getting inter area flows,\n # require a power flow to be run between each call. It was therefore\n # dropped.",
"def __init__(self, gridsize = (11,11), startPrey = (0,0), startPredator = (5,5)):\n\t\tself.predator = startPredator;\n\t\tself.prey = startPrey;\n\t\tself.gridsize = gridsize;",
"def initUI(self):\n\n grid = QGridLayout()\n grid.addWidget(self.from_currency_label, 0, 0, Qt.AlignRight)\n grid.addWidget(self.from_currency, 0, 1)\n grid.addWidget(self.to_currency_label, 0, 2, Qt.AlignRight)\n grid.addWidget(self.to_currency, 0, 3)\n grid.addWidget(self.from_amount_label, 1, 0)\n grid.addWidget(self.from_amount, 1, 1)\n grid.addWidget(self.to_amount_label, 1, 2)\n grid.addWidget(self.to_amount, 1, 3)\n\n grid.addWidget(self.from_calendar, 2, 0, 1, 2)\n grid.addWidget(self.to_calendar, 2, 2, 1, 2)\n\n grid.addWidget(self.rates_plot, 3, 0, 1, 4)\n grid.addWidget(self.graph_hint, 4, 0, 1, 4)\n\n self.rates_plot.showGrid(x=True, y=True)\n self.rates_plot.setLabel('left', 'Rate')\n self.rates_plot.setLabel('bottom', 'Days')\n self.legend = self.rates_plot.addLegend()\n\n self.setLayout(grid)\n self.setWindowTitle('Currency Converter - Assignment 1 - Arnaud Bourget - 2981151')\n\n self.from_currency.currentIndexChanged.connect(self.updateUI)\n self.to_currency.currentIndexChanged.connect(self.updateUI)\n self.from_amount.valueChanged.connect(self.fromAmountHandler)\n self.from_calendar.selectionChanged.connect(self.fromCalendarHandler)\n self.to_calendar.selectionChanged.connect(self.toCalendarHandler)\n\n self.show()",
"def change_entries(self, grid_sizer, options, border):\n\n if (self.diag_type is None): return grid_sizer # no data selected, don't change anything\n\n # setup the grid of possible values\n header0 = wx.StaticText(self, -1, \"Add/Remove\")\n header1 = wx.StaticText(self, -1, \"Quantity Code\")\n header2 = wx.StaticText(self, -1, \"Name\")\n header3 = wx.StaticText(self, -1, \"LaTeX Formula\")\n grid_sizer.Add(header0, pos=(0,0), flag=options, border=border)\n grid_sizer.Add(header1, pos=(0,1), flag=options, border=border)\n grid_sizer.Add(header2, pos=(0,2), flag=options, border=border)\n grid_sizer.Add(header3, pos=(0,3), flag=options, border=border)\n grid_sizer.Add(wx.StaticLine(self), pos=(1,0), span=(1,4),\n flag=wx.ALL|wx.EXPAND|wx.GROW, border=border)\n\n self.selected_values = [] # keep track of selected quantities\n\n quantities = self.output_quantities.diagnostic_types[self.diag_type]\n\n # choose a good height/width for formulas\n if (self.diag_type in [\"Linear_Forces\", \"Angular_Momentum\", \"Energy_Flux\",\n \"Induction\", \"Inertial_Forces\", \"Lorentz_Forces\",\n \"Poynting_Flux\", \"TurbKE_Budget\"]):\n width = 100\n elif (self.diag_type == \"Thermal_Equation\"):\n width = 150\n else:\n width = 30\n height = 20\n\n row = 2\n iquant = 0\n for Q in quantities:\n but = wx.ToggleButton(self, Q.code, \"Add\") # build button and place it in second column\n but.Bind(wx.EVT_TOGGLEBUTTON, self.OnToggle)\n grid_sizer.Add(but, pos=(row,0), flag=options, border=border)\n\n q_code = wx.StaticText(self, -1, str(Q.code)) # build other column entries\n q_name = wx.StaticText(self, -1, Q.name) # name\n\n formula = self.RenderTeX(Q, size=(width,height))\n\n # place column entries\n grid_sizer.Add(q_code, pos=(row,1), flag=options, border=border)\n grid_sizer.Add(q_name, pos=(row,2), flag=options, border=border)\n grid_sizer.Add(formula, pos=(row,3), flag=options, border=border)\n\n iquant += 1\n\n # add horizontal line every 5 quantities\n if (iquant % 5 == 0):\n grid_sizer.Add(wx.StaticLine(self), pos=(row+1,0), span=(1,4),\n flag=wx.ALL|wx.EXPAND|wx.GROW, border=border)\n row_inc = 2\n else:\n row_inc = 1\n\n row += row_inc\n\n grid_sizer.AddGrowableCol(2,1) # make the name/formula columns \"1\" growable, i.e., grows as necessary\n grid_sizer.AddGrowableCol(3,1)\n\n return grid_sizer",
"def init_button_calc(self):\r\n btn_calc = tk.Button(self.master, text='calculate', font='courier 10 bold',\r\n fg='purple', command=self.update_scores)\r\n btn_calc.grid(row=20, column=1, columnspan=3, sticky=tk.W+tk.E, pady=5)",
"def calculator():\n print(art.logo)\n # Changed 'int' to 'float' to do calculation for floating numbers as well\n num1 = float(input(\"Enter the first number : \"))\n end_calculation = False\n\n while not end_calculation:\n list_operators()\n operator = input(\"Pick an operation : \")\n num2 = float(input(\"Enter the next number : \"))\n calculation_fun = operations[operator]\n answer = round(calculation_fun(num1, num2), 2)\n print(f\"{num1} {operator} {num2} = {answer}\")\n\n wish_to_continue = input(\"Type 'Y' to Continue or Type 'N' to Exit : \").lower()\n if wish_to_continue == \"y\":\n num1 = answer\n else:\n # clear()\n end_calculation = True\n # recursive function call to restart the calculation freshly when user doesn't want to continue\n calculator()",
"def _prepare_parametrized_queue(cls, initial_query=None, **_params):\n ops = {\n '>': operator.gt,\n '<': operator.lt,\n '>=': operator.ge,\n '<=': operator.le,\n '=': operator.eq\n }\n\n if not initial_query:\n query = cls.q\n else:\n query = initial_query\n\n order_by = None\n if 'order_by' in _params.keys():\n order_by = _params.pop('order_by')\n\n for _field in _params.keys():\n param = _params[_field]\n if type(param) == tuple:\n if len(param):\n first_param = param[0]\n if callable(first_param):\n query = query.filter(first_param(getattr(cls, _field), *param[1:]))\n elif type(first_param) == str and first_param in ops.keys():\n op = ops[first_param]\n query = query.filter(op(getattr(cls, _field), param[1]))\n else:\n query = query.filter(getattr(cls, _field) == _params[_field])\n\n if order_by:\n order_by_params = order_by.split(' ')\n order_function = globals()[order_by_params[1]]\n query = query.order_by(order_function(getattr(cls, order_by_params[0]), ))\n return query",
"def set_param_grid(self):\n\n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'strData', 'strData']\n col_headers = ['model_name', 'estimator_args', 'grid_search_args']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the estimator's hyperparameter grid from the request dataframe\n param_grid = self.request_df.loc[:, 'estimator_args']\n\n # Get the grid search arguments from the request dataframe\n grid_search_args = self.request_df.loc[0, 'grid_search_args']\n\n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)\n\n self._set_grid_params(param_grid, grid_search_args)\n \n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n message = [[self.model.name, 'Hyperparameter grid successfully saved to disk',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp))]]\n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"setup\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response",
"def prepare_UI(self):",
"def __init__(self, grid, estimator, parameter_search, **kwargs):\n self.kwargs = kwargs\n self.grid = grid\n self.estimator = estimator\n self.parameter_search = parameter_search",
"def init(q: qreg) -> control:\n\n return",
"def run_calc(self):\n\n from openquake.calculators import base, getters\n from openquake.baselib import config, performance, zeromq\n if self.vtag >= 11:\n from openquake.baselib import version\n else:\n from openquake.baselib import __version__ as version\n\n with self.calculator._monitor:\n self.calculator._monitor.username = ''\n try:\n # Pre-execute setups\n self.calculator.pre_execute()\n\n #self.calculator.datastore.swmr_on()\n oq = self.calculator.oqparam\n dstore = self.calculator.datastore\n self.calculator.set_param()\n self.calculator.offset = 0\n\n # Source model\n #print('self.__dict__ = ')\n #print(self.calculator.__dict__)\n if oq.hazard_calculation_id: # from ruptures\n dstore.parent = self.calculator.datastore.read(\n oq.hazard_calculation_id)\n elif hasattr(self.calculator, 'csm'): # from sources\n self.calculator_build_events_from_sources()\n #self.calculator.build_events_from_sources()\n if (oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False):\n return {}\n elif 'rupture_model' not in oq.inputs:\n logging.warning(\n 'There is no rupture_model, the calculator will just '\n 'import data without performing any calculation')\n fake = logictree.FullLogicTree.fake()\n dstore['full_lt'] = fake # needed to expose the outputs\n dstore['weights'] = [1.]\n return {}\n else: # scenario\n self.calculator._read_scenario_ruptures()\n if (oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False):\n return {}\n\n # Intensity measure models\n if oq.ground_motion_fields:\n if self.vtag >= 12:\n imts = oq.get_primary_imtls()\n nrups = len(dstore['ruptures'])\n base.create_gmf_data(dstore, imts, oq.get_sec_imts())\n dstore.create_dset('gmf_data/sigma_epsilon',\n getters.sig_eps_dt(oq.imtls))\n dstore.create_dset('gmf_data/time_by_rup',\n getters.time_dt, (nrups,), fillvalue=None)\n elif self.vtag == 11:\n imts = oq.get_primary_imtls()\n nrups = len(dstore['ruptures'])\n base.create_gmf_data(dstore, len(imts), oq.get_sec_imts())\n dstore.create_dset('gmf_data/sigma_epsilon',\n getters.sig_eps_dt(oq.imtls))\n dstore.create_dset('gmf_data/time_by_rup',\n getters.time_dt, (nrups,), fillvalue=None)\n else:\n pass\n\n # Prepare inputs for GmfGetter\n nr = len(dstore['ruptures'])\n logging.info('Reading {:_d} ruptures'.format(nr))\n if self.vtag >= 12:\n rgetters = getters.get_rupture_getters(dstore, oq.concurrent_tasks * 1.25,\n srcfilter=self.calculator.srcfilter)\n elif self.vtag == 11:\n rgetters = getters.gen_rupture_getters(dstore, oq.concurrent_tasks)\n else:\n rgetters = getters.gen_rupture_getters(dstore, self.calculator.srcfilter, oq.concurrent_tasks)\n\n \n args = [(rgetter, self.calculator.param) for rgetter in rgetters]\n mon = performance.Monitor()\n mon.version = version\n mon.config = config\n rcvr = 'tcp://%s:%s' % (config.dbserver.listen,\n config.dbserver.receiver_ports)\n skt = zeromq.Socket(rcvr, zeromq.zmq.PULL, 'bind').__enter__()\n mon.backurl = 'tcp://%s:%s' % (config.dbserver.host, skt.port)\n mon = mon.new(\n operation='total ' + self.calculator.core_task.__func__.__name__, measuremem=True)\n mon.weight = getattr(args[0], 'weight', 1.) # used in task_info\n mon.task_no = 1 # initialize the task number\n args += (mon,)\n\n self.args = args\n self.mon = mon\n self.dstore = dstore\n\n finally:\n print('FetchOpenQuake: OpenQuake Hazard Calculator defined.')\n # parallel.Starmap.shutdown()",
"def _prepare_grids(self):\n if(self.header['element_infos'][0, 2] == 3):\n print('Triangular grid found')\n self.grid_is_rectangular = False\n\n triangles = self.element_data[3]\n triangles = [x.nodes for x in triangles]\n # python starts arrays with 0, but elem.dat with 1\n triangles = np.array(triangles) - 1\n self.elements = triangles\n tri_x = self.nodes['presort'][triangles, 1]\n tri_z = self.nodes['presort'][triangles, 2]\n self.grid = {}\n self.grid['x'] = tri_x\n self.grid['z'] = tri_z\n\n else:\n print('Rectangular grid found')\n self.grid_is_rectangular = True\n quads_raw = [x.nodes for x in self.element_data[8]]\n quads = np.array(quads_raw) - 1\n self.elements = quads\n quads_x = self.nodes['presort'][quads, 1]\n quads_z = self.nodes['presort'][quads, 2]\n self.grid = {}\n self.grid['x'] = quads_x\n self.grid['z'] = quads_z\n\n # calculate the dimensions of the grid\n try:\n self.calculate_dimensions()\n except Exception as e:\n e\n self.nr_nodes_x = None\n self.nr_nodes_z = None\n self.nr_elements_x = None\n self.nr_elements_z = None\n self.nr_of_elements = self.grid['x'].shape[0]",
"def __init__(self, *args, obj=None, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n self.setupUi(self)\n\n # real-frequency grid\n self.realgrid = RealFrequencyGrid(wmax=float(self.max_real_freq.text()),\n nw=int(self.num_real_freq.text()),\n type=str(self.grid_type_combo.currentText()))\n self.connect_realgrid_button()\n self.connect_wmax()\n self.connect_nw()\n self.connect_grid_type()\n\n\n # self.connect_select_button()\n # self.connect_load_button()\n # self.connect_show_button()\n self.connect_load_button_text()\n self.connect_show_button_2()\n self.connect_select_button_2()\n\n # text display field and \"Do it\" button\n self.text_output.setReadOnly(True)\n self.connect_doit_button()\n\n # output data\n self.output_data = OutputData()\n self.connect_select_output_button()\n self.connect_save_button()",
"def __init__(self, initial_grid):\n part_1.Grid.__init__(self, initial_grid)\n self.turn_on_corners()"
] | [
"0.6526059",
"0.5305715",
"0.5272461",
"0.5195514",
"0.51765853",
"0.5174405",
"0.5172547",
"0.51565105",
"0.5122712",
"0.5108893",
"0.51086247",
"0.50760674",
"0.5064199",
"0.503173",
"0.5030332",
"0.50220716",
"0.49932542",
"0.4988096",
"0.49769023",
"0.4948785",
"0.49266365",
"0.49147758",
"0.49089846",
"0.48863465",
"0.4885381",
"0.48773617",
"0.4866832",
"0.48621002",
"0.48574713",
"0.48315772"
] | 0.54423946 | 1 |
FUNCTION THAT ORGANIZES THE END OF THE TURN | def finTour(self):
print("fin du tour")
self.etat = "Fin"
if self.joueurActif.nbRessource + self.joueurActif.getNbRessourceTour() <= self.joueurActif.nbMaxRessource :
self.joueurActif.nbRessource += self.joueurActif.getNbRessourceTour()
else:
self.joueurActif.nbRessource = self.joueurActif.nbMaxRessource
print(self.joueurActif.nbRessource)
if self.joueurActif == self.joueur1:
self.joueurActif = self.joueur2
print("Au joueur 2 de jouer")
else:
self.joueurActif = self.joueur1
print("Au joueur 1 de jouer")
for iEntite in self.joueurActif.entiteResetDeplacement:
iEntite.setMoove(True)
for iEntite in self.joueurActif.entiteResetCombat:
iEntite.setCanAttack(True)
if self.joueur1.nbRessource >= 2000:
print("FIN DE LA PARTIE LE JOUEUR 1 A GAGNER")
if self.joueur2.nbRessource >= 2000:
print("FIN DE LA PARTIE LE JOUEUR 2 A GAGNER")
self.etat = "En jeu" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def es_satisfecho_por(self, candidata):",
"def get_furniture():",
"def fechou(self):\n return self.tot_rodada == self.rodada",
"def vendre(self, symbole, quantite, une_date=date.today()):\n\n if une_date > date.today():\n raise ErreurDate(\"La date est postérieure à la date d'aujourd'hui\")\n\n else:\n if symbole in self.portefeuille:\n quantite_titre = 0.0\n\n for les_jours in self.portefeuille[symbole]:\n if les_jours <= une_date:\n quantite_titre += self.portefeuille[symbole][les_jours]\n\n if quantite_titre < quantite:\n raise ErreurQuantité(\"Quantité insuffisante pour effectuer la vente\")\n\n else:\n if une_date in self.portefeuille[symbole]:\n self.portefeuille[symbole][une_date] -= float(quantite)\n\n elif une_date not in self.portefeuille[symbole]:\n self.portefeuille[symbole][une_date] = - float(quantite)\n\n cout = self.marche.prix(symbole, une_date) * quantite\n if une_date in self.argent:\n self.argent[une_date] += float(cout)\n\n elif une_date not in self.argent:\n self.argent[une_date] = float(cout)\n\n #Ca sert tu a de quoi ca ? Yes le chum\n else:\n raise ErreurQuantité(\"Le titre ne fait pas partie du portefeuille\")",
"def afficher(self, personnage, jeu, partie):\n en_main = jeu.en_main.get(personnage)\n tableau = jeu.tableau\n if en_main:\n msg = \"Dans votre main, vous avez {} et {}.\".format(\n en_main[0].nom_complet_indefini,\n en_main[1].nom_complet_indefini)\n else:\n msg = \"Vous n'avez encore rien dans votre main.\"\n \n if tableau:\n tableau = [piece.nom_complet_indefini for piece in tableau]\n aff_tableau = \", \".join(tableau[:-1]) + \" et \" + tableau[-1]\n msg += \"\\nSur le tableau se trouve {}.\".format(aff_tableau)\n\n if partie.tour is personnage:\n msg += \"\\nC'est votre tour.\"\n \n return msg",
"def calculDeFraisPortuaire():\n TARIFMENSUEL1 = 100\n TARIFMENSUEL2 = 200\n TARIFMENSUEL3 = 400\n TARIFMENSUEL4 = 600\n TAXESPECIALEANNUELLEVOILIERCATEGORIE1 = 100\n TAXESPECIALEANNUELLEVOILIERCATEGORIE2 = 150\n TAXESPECIALEANNUELLEVOILIERCATEGORIE3 = 250\n \n coutMensuel = 0\n coutAnnuel = 0\n taxeSpecialeAnnuelle = 0\n nomDuVoilier = input(\"ENTREZ le nom du voilier: \")\n longueur = float(input(\"Entrez la longueur du voilier: \"))\n categorie = int(input(\"Entrez la categorie du voilier 1 2 ou 3 : \"))\n \n if(longueur<5):\n coutMensuel = TARIFMENSUEL1\n elif(longueur<=10):\n coutMensuel = TARIFMENSUEL2\n elif(longueur<=12):\n coutMensuel = TARIFMENSUEL3\n else:\n coutMensuel = TARIFMENSUEL4\n \n if(categorie==1):\n taxeSpecialeAnnuelle = TAXESPECIALEANNUELLEVOILIERCATEGORIE1\n elif(categorie==2):\n taxeSpecialeAnnuelle = TAXESPECIALEANNUELLEVOILIERCATEGORIE2\n elif(categorie==3):\n taxeSpecialeAnnuelle = TAXESPECIALEANNUELLEVOILIERCATEGORIE3\n \n coutAnnuel = taxeSpecialeAnnuelle+coutMensuel*12\n \n return \"le coût annuel d’une place au port pour le voilier \"+nomDuVoilier+\" est de \"+ str(coutAnnuel)+\" euros\"",
"def fama (self , diccionario):\n\n decoracion_list = []\n for key , value in diccionario.items():\n a=[]\n a.append(key)\n a.append(value)\n decoracion_list.append (a)\n\n paredes_list = decoracion_list [0:3]\n suelo_list = decoracion_list [3:6]\n reforma_list = decoracion_list [6:]\n\n paredes = 1\n suelo = 1\n reforma = 1\n\n for i in range (len(paredes_list)):\n if paredes_list [i][1] == 1 :\n paredes = i+2 \n\n for i in range (len(suelo_list)):\n if suelo_list [i][1] == 1 :\n suelo = i+2\n\n for i in range (len(reforma_list)):\n if reforma_list [i][1] == 1 :\n reforma = i+2\n\n modificador_fama = 0\n\n if paredes >= 4 and suelo >= 4 and reforma >= 4 :\n modificador_fama = 45\n\n elif paredes >= 3 and suelo >= 3 and reforma >= 3 :\n modificador_fama = 33 \n\n elif paredes >= 2 and suelo >= 2 and reforma >= 2 :\n modificador_fama = 12\n\n fama = (10*paredes)+(10*suelo)+(10*reforma) + modificador_fama + kasino.modificador_fama\n\n \"\"\" FORMULA FAMA : Con esta formula se calcula la fama, que dependera de la decoracion e influira en los visitantes \n Se puede usar modificador_fama para calibrar el juego o añadir niveles de dificulad \"\"\"\n \n return fama , paredes , suelo , reforma",
"def couleur_fond(self):\n return self.fond * self.ka",
"def obtenerFin(self):\n return self.fin",
"def obtem_fila(self):\n\n return self.fila",
"def Estado_final(self,profundidad:int) -> bool:\n\n\t\tself.Evaluar(profundidad)\n\t\tif self.completo:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def decide(self) :\n (self.futurX,self.futurY) = self.randomNextPos()\n if self.fishBreedTimeCPT == 0 :\n self.naissance = True\n self.fishBreedTimeCPT = self.fishBreedTime\n else :\n self.fishBreedTimeCPT = self.fishBreedTimeCPT - 1\n\n if self.env.grille[self.futurY][self.futurX] == None :\n self.bougera = True\n else :\n self.bougera = False\n\n self.update()",
"def getFactura(self): \n return self.caja",
"def getFactura(self): \n return self.caja",
"def conclusion_echantillon(self, liste_foetus):\n compteur = 0\n for lignes in range(1, len(liste_foetus)):\n if liste_foetus[lignes].contamination != 0 and liste_foetus[lignes].taux > self.seuil_taux_conta:\n compteur = compteur + 1\n if compteur > self.seuil_nbre_marqueurs:\n self.conclusion = 1\n else:\n self.conclusion = 0",
"def graf_F(self):\n vert_funktion(self, typ='D', titel='$Empirische\\; Verteilungsfunktion$' + '\\n ')",
"def defineEstadosFinaisAFD(self):\n\n for e in self.estadosFinais:\n for e_AFD in self.afd.estados:\n if e in e_AFD and e_AFD not in self.afd.estadosFinais:\n self.afd.estadosFinais.append(e_AFD)",
"def enchere(self):\n\n i = 0\n while i < 5 and self.annonce < 4:\n paroleJ = self.joueurs[i].parler(self.annonce)\n if paroleJ != 0:\n self.annonce = paroleJ\n self.indiceJoueurQuiPrend = i\n i += 1\n\n print(\"joueur qui prend : \" + str(self.indiceJoueurQuiPrend))\n if self.indiceJoueurQuiPrend != -1:\n print(\"annonce : \" + str(self.annonce))\n if self.annonce == 1 or self.annonce == 2:\n self.joueurs[self.indiceJoueurQuiPrend].possedeChien = True\n self.joueurs[self.indiceJoueurQuiPrend].construireChien()\n self.debuterPartie()\n\n else:\n self.finirPartie()",
"def atender(self):\n\n if self.enfila>0:\n \n self.enfila-=1\n self.fila.pop(0)",
"def imprime(nota_fiscal):\n\n print(\"Imprimindo nota fiscal %s\" % nota_fiscal.cnpj)",
"def vspf_fournier(th,n,mu):\n n = np.real(n)\n d = 4*np.sin(th/2)**2 / (3*(n-1)**2)\n d_180 = 4*np.sin(np.pi/2) / (3*(n-1)**2)\n v = (3-mu)/2\n dv = d**v\n d_180v = d_180**v\n d1 = 1-d\n dv1 = 1-dv\n\n a = 1/(4*np.pi*dv*d1**2)\n b = v*d1-dv1+(d*dv1-v*d1)*np.sin(th/2)**(-2)\n c = (1-d_180v)*(3*np.cos(th)**2 - 1)/(16*np.pi*(d_180-1)*d_180v)\n return a*b+c",
"def desp_inicial(x): #Definición del desplazamiento inicial de la cuerda\r\n return np.exp(-1000*(x - longitud/2)**2)",
"def toAFD(self):\n self.afd = AFD()\n\n self.afd.alfabeto=self.alfabeto\n cont = 0\n\n # Coloca na estadosAFD os estados iniciais\n self.afd.estados = [','.join(self.estadosIniciais)] # Passa do formato ['A','B'] -> ['A,B']\n\n # Cria o AFD\n while cont != len(self.afd.estados):\n\n # Inicia o afd\n if cont == 0:\n self.afd.transicoes = [['-' for i in range(len(self.alfabeto))]]\n else:\n self.afd.transicoes.append(['-' for i in range(len(self.alfabeto))])\n\n # Percorre cada estado em estadosAFD e preenche o afd\n # com as respectivas transicoes\n estados = self.afd.estados[cont].split(',')\n\n for e in estados:\n if e != ',':\n indexEstado = self.estados.index(e)\n for a in self.alfabeto:\n indexAlfabeto = self.alfabeto.index(a)\n transicao = self.transicoes[indexEstado][indexAlfabeto]\n if transicao != '-':\n if self.afd.transicoes[-1][indexAlfabeto] != '-':\n self.afd.transicoes[-1][indexAlfabeto] += ',' + transicao\n else:\n self.afd.transicoes[-1][indexAlfabeto] = transicao\n self.afd.estados = self.afd.atualizaEstados(self.afd.transicoes[-1])\n cont += 1\n\n # Determina o estadoInicial\n self.afd.estadosIniciais = [self.afd.estados[0]]\n\n # Determina os estadosFinaisAFD\n self.defineEstadosFinaisAFD()",
"def atender(self):\n\n if self.enfila>0: #Para que atiendan solamente si e que hay alguien en la fila\n\n self.enfila-=1\n self.fila.pop(0) #Saco primer elemento de la fila (Atienden al primer cliente)",
"def falcon():",
"def calcul_next(self):\n if not self.valide: # si c'est plus en fonction pas besoin de calcul complemnetaire\n return None\n initial = self.date\n if self.periodicite == 'u':\n return None\n finale = None\n if self.periodicite == 'j':\n finale = initial + datetime.timedelta(days=self.intervalle)\n if self.periodicite == 's':\n finale = initial + datetime.timedelta(weeks=self.intervalle)\n if self.periodicite == 'm':\n finale = initial + relativedelta(months=self.intervalle)\n if self.periodicite == 'a':\n finale = initial + relativedelta(years=self.intervalle)\n # on verifie que la date limite n'est pas dépasséee\n if self.date_limite is not None and finale > self.date_limite:\n finale = None\n return finale",
"def fusionne(self, new):\n if new == self:\n raise ValueError(\"un exercice ne peut etre fusionné avec lui même\")\n self.alters_data = True\n if not isinstance(new, type(self)):\n raise TypeError(\"pas la même classe d'objet\")\n nb_change = Echeance.objects.filter(exercice=self).update(exercice=new)\n nb_change += Ope.objects.filter(exercice=self).update(exercice=new)\n if self.date_debut != new.date_debut:\n new.date_debut = min(new.date_debut, self.date_debut)\n if self.date_fin != new.date_fin:\n new.date_fin = max(new.date_fin, self.date_fin)\n new.save()\n self.delete()\n return nb_change",
"def resultat(self, concordance_mf, concordance_pf, liste_F, liste_M, liste_P):\n resultat = {\"Marqueur\": [], \"Conclusion\": [], \"Concordance Mere/Foetus\": [], \"Détails M/F\": [],\n \"Concordance Pere/Foetus\": [], \"Détails P/F\": []}\n marqueurs_conta = 0\n marqueurs_non_conta = 0\n somme_conta = 0\n if liste_F[0].allele[1] == 0.0:\n self.set_sexe(\"F\")\n else:\n self.set_sexe(\"M\")\n if concordance_mf != 16 and concordance_pf != 16 and concordance_pf != None:\n self.set_concordance_mere_foet(\"NON\")\n self.set_concordance_pere_foet(\"NON\")\n del resultat[\"Conclusion\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n resultat[\"Concordance Mere/Foetus\"].append(liste_F[nbres].concordance_mere_foetus)\n resultat[\"Concordance Pere/Foetus\"].append(liste_P[nbres].concordance_pere_foetus)\n if liste_F[nbres].concordance_mere_foetus == \"NON\" and liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M : \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F: \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails P/F\"].append(\n \"P : \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F : \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n elif liste_F[nbres].concordance_mere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M: \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F : \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails P/F\"].append(\"\")\n elif liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails P/F\"].append(\n \"P: \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F: \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n resultat[\"Détails M/F\"].append(\"\")\n else:\n resultat[\"Détails M/F\"].append(\"\")\n resultat[\"Détails P/F\"].append(\"\")\n conclusion = pd.DataFrame({\"1\": [\"Non calculé\", \"Non calculé\", \"Non calculé\", self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\",\n \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Concordance Mere/Foetus\", \"Détails M/F\",\n \"Concordance Pere/Foetus\", \"Détails P/F\"])\n return resultats, conclusion\n elif concordance_mf != len(liste_F) and concordance_pf == len(liste_F) or concordance_mf != len(\n liste_F) and concordance_pf == None:\n self.set_concordance_mere_foet(\"NON\")\n self.set_concordance_pere_foet(\"OUI\")\n if concordance_pf == None:\n self.set_concordance_pere_foet(\"ABS\")\n del resultat[\"Conclusion\"]\n del resultat[\"Concordance Pere/Foetus\"]\n del resultat[\"Détails P/F\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n resultat[\"Concordance Mere/Foetus\"].append(liste_F[nbres].concordance_mere_foetus)\n if liste_F[nbres].concordance_mere_foetus == \"NON\":\n resultat[\"Détails M/F\"].append(\n \"M: \" + str(liste_M[nbres].normalisation(liste_M[nbres].allele)) + \" F: \" + str(\n liste_F[nbres].normalisation(liste_F[nbres].allele)))\n else:\n resultat[\"Détails M/F\"].append(\"\")\n conclusion = pd.DataFrame({\"1\": [\"Non calculé\", \"Non calculé\", \"Non calculé\", self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\",\n \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du 
pourcentage de contamination\", \"Date\"])\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Concordance Mere/Foetus\", \"Détails M/F\"])\n return resultats, conclusion\n elif concordance_mf == len(liste_F) and concordance_pf == len(liste_F) or concordance_mf == len(\n liste_F) and concordance_pf == None:\n self.set_concordance_mere_foet(\"OUI\")\n self.set_concordance_pere_foet(\"OUI\")\n if concordance_pf == None:\n self.set_concordance_pere_foet(\"ABS\")\n del resultat[\"Concordance Mere/Foetus\"]\n del resultat[\"Concordance Pere/Foetus\"]\n del resultat[\"Détails P/F\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n if liste_F[nbres].informatif == 0:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Mère homozygote\")\n elif liste_F[nbres].informatif == 1:\n if liste_F[nbres].contamination == 0:\n marqueurs_non_conta += 1\n resultat[\"Conclusion\"].append(\"Non contaminé\")\n resultat[\"Détails M/F\"].append(\"\")\n elif liste_F[nbres].contamination == 1:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n else:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n elif liste_F[nbres].informatif == 2:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Allèles semblables\")\n else:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Echo\")\n resultats = pd.DataFrame(resultat, columns=[\"Marqueur\", \"Conclusion\", \"Détails M/F\"])\n try:\n moyenne_conta = somme_conta / marqueurs_conta\n except ZeroDivisionError:\n moyenne_conta = 0\n conclusion = pd.DataFrame(\n {\"1\": [int(marqueurs_non_conta), int(marqueurs_conta), round(moyenne_conta, 2), self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\", \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n return resultats, conclusion\n elif concordance_mf == len(liste_F) and concordance_pf != len(liste_F):\n self.set_concordance_mere_foet(\"OUI\")\n self.set_concordance_pere_foet(\"NON\")\n del resultat[\"Concordance Mere/Foetus\"]\n for nbres in range(1, len(liste_F)):\n resultat[\"Concordance Pere/Foetus\"].append(liste_P[nbres].concordance_pere_foetus)\n if liste_P[nbres].concordance_pere_foetus == \"NON\":\n resultat[\"Détails P/F\"].append(\n \"P: \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)) + \" F: \" + str(liste_P[nbres].normalisation(liste_P[nbres].allele)))\n else:\n resultat[\"Détails P/F\"].append(\"\")\n for nbres in range(1, len(liste_F)):\n resultat[\"Marqueur\"].append(str(liste_F[nbres].marqueur))\n if liste_F[nbres].informatif == 0:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Mère homozygote\")\n elif liste_F[nbres].informatif == 1:\n if liste_F[nbres].contamination == 0:\n marqueurs_non_conta += 1\n resultat[\"Conclusion\"].append(\"Non contaminé\")\n resultat[\"Détails M/F\"].append(\"\")\n elif liste_F[nbres].contamination == 1:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n 
resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n else:\n marqueurs_conta += 1\n somme_conta = somme_conta + liste_F[nbres].taux\n resultat[\"Conclusion\"].append(\"Contaminé\")\n resultat[\"Détails M/F\"].append(\"Taux contamination : \" + str(liste_F[nbres].taux) + \"%\")\n elif liste_F[nbres].informatif == 2:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Allèles semblables\")\n else:\n resultat[\"Conclusion\"].append(\"Non informatif\")\n resultat[\"Détails M/F\"].append(\"Echo\")\n resultats = pd.DataFrame(resultat,\n columns=[\"Marqueur\", \"Conclusion\", \"Détails M/F\", \"Concordance Pere/Foetus\",\n \"Détails P/F\"])\n try:\n moyenne_conta = somme_conta / marqueurs_conta\n except ZeroDivisionError:\n moyenne_conta = 0\n conclusion = pd.DataFrame(\n {\"1\": [int(marqueurs_non_conta), int(marqueurs_conta), round(moyenne_conta, 2), self.get_date()]},\n index=[\"Nombre de marqueurs informatifs non contaminés\", \"Nombre de marqueurs informatifs contaminés\",\n \"Moyenne du pourcentage de contamination\", \"Date\"])\n return resultats, conclusion",
"def calcular_ocupacion():\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT COUNT(*)\n\t\t\t\tFROM sansanito\n\t\t\t\tWHERE legendary=0\"\"\")\n\tnormales = cur.fetchall()\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT COUNT(*)\n\t\t\t\tFROM sansanito\n\t\t\t\tWHERE legendary=1\"\"\")\n\tlegendarios = cur.fetchall()\n\t# Calcula la ocupacion como cant_normales * 1 + cant_legendarios * 5\n\tocupado = normales[0][0] + 5 * legendarios[0][0]\n\treturn ocupado",
"def cliquer_sur_unité(self):"
] | [
"0.63391507",
"0.62116545",
"0.6208521",
"0.61218905",
"0.6033425",
"0.6032181",
"0.6018563",
"0.5958281",
"0.5849472",
"0.58314395",
"0.5814928",
"0.5783631",
"0.5661068",
"0.5661068",
"0.5644017",
"0.5642015",
"0.5641191",
"0.56019706",
"0.5591685",
"0.5587276",
"0.5584715",
"0.55318815",
"0.55155265",
"0.55138695",
"0.55052257",
"0.55035794",
"0.54877746",
"0.5463502",
"0.5431301",
"0.54255134"
] | 0.63097227 | 1 |
Takes a filesystem path and returns a sorted list of the filenames directly under that path (top level only, not recursive). | def get_filenames(path):
xs = []
for (dirpath, dirnames, filenames) in os.walk(path):
xs.extend(filenames)
break
xs.sort()
return xs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_files(path: str) -> List:\n files = []\n for file in os.listdir(path):\n if os.path.isfile(os.path.join(path, file)):\n files.append(file)\n # Reversed to prevent collision upon renaming\n return sorted(files, reverse=True)",
"def get_dirnames(path):\n storage = DefaultStorage()\n dirnames = storage.listdir(path)[0]\n dirnames.sort()\n return dirnames",
"def get_dirlist(path):\n dirlist = os.listdir(path)\n dirlist.sort()\n return dirlist",
"def read_names(path):\n return SortedSet([os.path.basename(n) for n in glob.glob(path + os.sep + '*')])",
"def _sorted_ls(path):\n def _get_modified_time(f):\n return os.stat(os.path.join(path, f)).st_mtime\n return list(sorted(os.listdir(path), key=_get_modified_time))",
"def get_items_from_dir(path):\n items = os.listdir(path)\n items.sort()\n return items",
"def get_filenames(self, path):\n files_list = list()\n for filename in os.listdir(path):\n files_list.append(os.path.join(path, filename))\n return files_list",
"def list_files(path):\n return list(sorted([f for f in os.listdir(path=path) if\n f.endswith('.json')]))",
"def get_filenames(self, path: str):\n files_list = []\n for filename in os.listdir(path):\n files_list.append(os.path.join(path, filename))\n return files_list",
"def listDir(path):\n real_path = getDirectoryRealPath(path)\n\n return sorted(\n [(os.path.join(path, filename), filename) for filename in os.listdir(real_path)]\n )",
"def list_sorted_filenames(directory):\n with os.scandir(directory) as entries:\n filenames = [entry.name for entry in entries if entry.is_file()]\n filenames.sort()\n return filenames.copy()",
"def find_files(path: str, filename_pattern: str, sort: bool = True) -> list:\n files = list()\n for root, _, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, filename_pattern):\n files.append(os.path.join(root, filename))\n if sort:\n files.sort()\n return files",
"def globn(pathname):\n from glob import glob\n return sortn(glob(pathname))",
"def sortedScanPaths(path_glob):\n return sorted(glob(path_glob), key=lambda x: int(re.match(r'.*r(\\d+)\\..*', x).group(1)))",
"def list_filenames(self):\n l = []\n for path, dirs, files in os.walk(self.archive_path):\n for file in files:\n l.append(os.path.relpath(os.path.join(path,file),self.archive_path))\n l.sort()\n return l",
"def load_filenames_from_path(path: str, extension: str ='.bin') -> List[str]:\n sorted_filenames_list = []\n if(os.path.exists(path)):\n directory_list = load_directory_list_from_path(path)\n \n for directory in directory_list:\n filename_list = [filename for filename in os.listdir(os.path.join(path, directory))\n if (os.path.isfile(\n os.path.join(path, \n os.path.join(directory, filename)\n )) and extension in filename) ]\n \n filename_list = sort_list(filename_list)\n\n sorted_filenames_list += [os.path.join(path, os.path.join(directory, filename)) for filename in filename_list]\n else:\n raise FileNotFoundError\n \n return sorted_filenames_list",
"def fullpathlist(path):\n try:\n return [os.path.join(path, filename) for filename in os.listdir(path)]\n except OSError:\n return []",
"def list_files(top_path):\n\n results = []\n\n for root, dirs, files in os.walk(top_path, topdown=True):\n\n # Exclude dot files like .git\n dirs[:] = [name for name in dirs if not name.startswith('.')]\n files[:] = [name for name in files if not name.startswith('.')]\n\n for file_name in files:\n results.append(os.path.join(root, file_name))\n\n results.sort()\n return results",
"def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]",
"def get_files(path):\r\n\tfiles = []\r\n\tfor dirpath, _, filenames in os.walk(path):\r\n\t\tfor filename in [f for f in filenames]:\r\n\t\t\tfiles.append(os.path.join(dirpath, filename))\r\n\treturn files",
"def getDirContents(self, path):\r\n return sorted([int(file) for file in os.listdir(os.path.dirname(path))])",
"def list_files(path):\n ls_output = os.listdir(path)\n return ls_output",
"def all_pdf_files_in_directory(path):\n return sorted([filename for filename in os.listdir(path) if pdf_file(filename)])",
"def listDir(path):\n filenames = []\n for root, dirs, files in os.walk(path):\n for i in files:\n filenames.append(os.path.join(root, i))\n return filenames",
"def filenames_from_path(path):\n with open(path) as f:\n filenames = f.read().splitlines()\n\n return filenames",
"def listFiles(path):\n outputList = []\n for root, dirs, files in os.walk(path):\n for f in files:\n outputList.append('/'.join([root, f]))\n return outputList",
"def files_in_dir(path):\n return os.listdir(path)",
"def get_folder_filenames(folder_path: str) -> list:\n\n folder_path = 'images/right'\n folder_file_list = os.listdir(folder_path)\n folder_file_list.sort()\n\n return folder_file_list",
"def listflat(path, ext=None):\n if os.path.isdir(path):\n if ext:\n files = glob.glob(os.path.join(path, '*.' + ext))\n else:\n files = [os.path.join(path, fname) for fname in os.listdir(path)]\n else:\n files = glob.glob(path)\n # filter out directories\n files = [fpath for fpath in files if not os.path.isdir(fpath)]\n return sorted(files)",
"def get_filenames():\n filenames = []\n for filename in Path('.').glob('*.pdf'):\n if 'reordered' not in filename.stem:\n filenames.append(filename)\n\n return filenames"
] | [
"0.7952454",
"0.73314255",
"0.7306707",
"0.72185194",
"0.7201891",
"0.71731275",
"0.7164499",
"0.7163715",
"0.7132579",
"0.7100023",
"0.70964265",
"0.7095107",
"0.70786023",
"0.7019149",
"0.69829243",
"0.6979037",
"0.6974068",
"0.69649667",
"0.69457054",
"0.69418824",
"0.6890265",
"0.68673754",
"0.68557036",
"0.6825701",
"0.6810409",
"0.68042356",
"0.6802914",
"0.6789941",
"0.67857516",
"0.67821157"
] | 0.7987954 | 0 |
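As a brief aside, here is a minimal, self-contained usage sketch for the get_filenames record above; the "/tmp" path is purely illustrative and not part of the dataset.

import os

def get_filenames(path):
    xs = []
    for (dirpath, dirnames, filenames) in os.walk(path):
        xs.extend(filenames)
        break
    xs.sort()
    return xs

# Only the top level of the tree is listed, because the loop breaks
# after the first os.walk() entry; the names come back sorted.
print(get_filenames("/tmp"))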
Takes a sequence p representing a polynomial and a number x and returns the value of p at x. This version is Numba-compatible; NumPy's version is not. | def polyval(p, x):
val = 0
ii = len(p) - 1
for i in range(len(p) - 1):
val += p[i] * (x ** ii)
ii -= 1
return val + p[-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def polyval_vec(p, x, prec=None):\n p = np.atleast_2d(p)\n x = np.atleast_1d(x).flatten()\n # for modest to large arrays, faster to find unique values and\n # only evaluate those. Have to cast to float because np.unique\n # can't handle object types like python native int\n unq_x, xidx = np.unique(x, return_inverse=True)\n _, pidx, outidx = np.unique(\n p.astype(float), return_index=True, return_inverse=True, axis=0\n )\n unq_p = p[pidx]\n\n if prec is not None and prec > 18:\n # TODO: possibly multithread this bit\n mpmath.mp.dps = prec\n y = np.array([np.asarray(mpmath.polyval(list(pi), unq_x)) for pi in unq_p])\n else:\n npoly = unq_p.shape[0] # number of polynomials\n order = unq_p.shape[1] # order of polynomials\n nx = len(unq_x) # number of coordinates\n y = np.zeros((npoly, nx))\n\n for k in range(order):\n y = y * unq_x + np.atleast_2d(unq_p[:, k]).T\n\n return y[outidx][:, xidx].astype(float)",
"def evaluate_poly(poly: Sequence[float], x: float) -> float:\n return sum(c * (x**i) for i, c in enumerate(poly))",
"def _evalPoly(self,a,x):\n y = a[0]\n for i in range(1,len(a)):\n y = self.F.Multiply(y, x)\n y = self.F.Add(y, a[i])\n return y",
"def PBpoly(n, x):\n n = int(n)\n return Bpoly(n, x-math.floor(x))",
"def polynomial(a, x):\n\n sum = 0\n\n for i in range(len(a)):\n sum += a[i] * x**i\n return sum",
"def poly_func(x):\n \n # Create the polynomial object\n f = np.poly1d([1, -2, -28, 28, 12, -26, 100])\n\n # Return the value of the polynomial\n return f(x) * 0.05",
"def polyFeat(X, p):\r\n # You need to return the following variables correctly.\r\n X_poly = np.zeros((X.shape[0], p))\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in range(p):\r\n X_poly[:, i] = X[:, 0] ** (i + 1)\r\n\r\n # ============================================================\r\n return X_poly",
"def evaluate_poly(poly, x):\n value_of_poly = 0\n for i in range(0, len(poly)):\n var = x\n power = i\n coeff = poly[i]\n value_of_poly += (coeff * (var**power))\n return value_of_poly",
"def polyeval(self, x):\n return NotImplemented",
"def evaluate_polynomial(f,x):\n degree = len(f)-1\n ans = 0\n for i in f:\n ans += i*x**degree\n degree -= 1\n return(ans)",
"def algorithm_1_2(p, c, x):\n\n q = np.array(c, dtype=np.float64)\n\n for k in range(1, p + 1):\n for j in range(0, p - k + 1):\n q[j] = (1 - x) * q[j] + x * q[j + 1]\n return q[0]",
"def evaluate_poly(poly, x):\n if len(poly) == 1:\n\t\t#base case\n\t\treturn poly[0]\n else:\n #recursive case\n #the first item in the tuple is the coefficient of X**0, so it's the final value\n #the rest of the items in the tuple need multiplied by X and put in new tuple\n #Yes, I'm cheating and casting a list to a tuple. GFY and your immutability.\n return poly[0] + evaluate_poly(tuple([x * coeff for coeff in poly[1:]]), x)",
"def phasepoly14(param, x):\n # 2011-09-26 10:42 IJMC: Created from phaselamb14\n N = len(param) - 14\n cparam = array(param[N::], copy=True)\n cparam[0] = 1. / prod(1. + cparam[1::]) - 1.\n\n if len(x.shape)==1:\n was1d = True\n x = x.reshape(14, len(x)/14.)\n else:\n was1d = False\n\n ret = polyval(param[0:N], x)\n ret *= (1. + cparam.reshape(14,1))\n\n if was1d:\n ret = ret.ravel()\n\n return ret",
"def poly_int(params: PolyParams, x: NDArray, order: int) -> NDArray:\n\n return np.polyval(np.polyint(params, -order), x)",
"def f(t,x,p,q):\n return p[1] + q[0]*x",
"def evalComponent(self, x, p):\n if p > 0 and p <= self.n:\n p = str(p)\n y = self[\"off\"] + self[\"lin\"] * x\n self._v1d.assignValues(\n {\"A\": self[\"A\" + p], \"al\": self[\"al\" + p], \"ad\": self[\"ad\" + p], \"mu\": self[\"mu\" + p]})\n y += self._v1d.evaluate(x)\n return y\n else:\n raise(PE.PyAValError(\"No such component (no. \" + str(p) + \")\", where=\"MultiVoigt1d::evalComponent\",\n solution=\"Use value between 1 and \" + str(self.n)))",
"def _poly_func(x, a, b, c, d, e):\n return a * x ** 6 + b * x ** 5 + c * x ** 4 + d * x ** 3 + e * x ** 2",
"def poly(x, degree=2):\n x = np.array(x)\n X_trans = np.transpose(np.vstack((x**k for k in range(degree + 1))))\n return np.linalg.qr(X_trans)[0][:, 1:]",
"def evalPol(pol, *x):\n\n return np.array(list(map(\n lambda _x: None if _x == None else np.polyval(pol, _x),\n x)))",
"def _get_y(i: int128, j: int128, x: uint256, _xp: uint256[N_COINS]) -> uint256:\n # x in the input is converted to the same price/precision\n\n assert i != j # dev: same coin\n assert j >= 0 # dev: j below zero\n assert j < N_COINS # dev: j above N_COINS\n\n # should be unreachable, but good for safety\n assert i >= 0\n assert i < N_COINS\n\n A: uint256 = self._A()\n D: uint256 = self._get_D(_xp, A)\n Ann: uint256 = A * N_COINS\n c: uint256 = D\n S: uint256 = 0\n _x: uint256 = 0\n\n for _i in range(N_COINS):\n if _i == i:\n _x = x\n elif _i != j:\n _x = _xp[_i]\n else:\n continue\n S += _x\n c = c * D / (_x * N_COINS)\n c = c * D * A_PRECISION / (Ann * N_COINS)\n b: uint256 = S + D * A_PRECISION / Ann # - D\n y: uint256 = D\n for _i in range(255):\n y_prev: uint256 = y\n y = (y*y + c) / (2 * y + b - D)\n # Equality with the precision of 1\n if y > y_prev:\n if y - y_prev <= 1:\n return y\n else:\n if y_prev - y <= 1:\n return y\n raise",
"def poly(x, y, pd) :\n # Maximum polynomial degree allowed is 7.\n maxD = 7\n if pd > maxD :\n exit(\"Please choose a reasonable polynomial degree (0 <= pd <= \" + maxD + \").\")\n \n # Make the polynomial matrix one degree at a time.\n p = np.zeros((len(x), int((pd+1)*(pd+2)/2)), float)\n count = 0\n numP = 0\n for i in range(pd + 1) :\n for j in range(numP + 1) :\n if (j == 0) and (numP == 0) :\n p[:,count] = 1\n elif (j == 0) :\n p[:,count] = x**(numP-j)\n elif (numP-j == 0) :\n p[:,count] = y**j\n else :\n p[:,count] = x**(numP-j) * y**j\n count += 1\n numP += 1\n \n return p",
"def fitfunc_AP(x, *p):\n val = p[0]\n for n in range(0, len(p) - 1, 2):\n ind = n + 1\n mode = (n / 2) + 1\n val = val + p[ind] * np.cos(2 * np.pi * mode * (x - p[ind + 1]) / 360.0)\n return val",
"def beta_gen_posmnt(p):\n return np.array([0.0]*int(0.7*p) + [1.0]*(p-int(0.7*p)))",
"def Bpoly(n, x):\n n = int(n)\n out = 0\n for k in xrange(0, n+1):\n out += comb(n,k)*Bnum(n-k)*x**float(k)\n return out",
"def evaluate_poly(poly, x):\n exp = 0\n total = 0\n for coef in poly:\n total += coef * (x ** exp)\n exp += 1\n\n return total",
"def poly(x, coeffs):\n return np.sum([coeffs[i] * x ** i for i in range(len(coeffs))], axis=0)",
"def f(x):\n return ((x[0] - 1) ** 2) + ((x[1] + 3) ** 2)",
"def __getPolynomial(self) -> 'func':\n return lambda x: sum(self.pts[i]*base(x)\n for i, base in enumerate(self.basis))",
"def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p",
"def eval_poly(self, p):\n A = self\n m, n = A.shape\n\n if m != n:\n raise DMNonSquareMatrixError(\"Matrix must be square\")\n\n if not p:\n return self.zeros(self.shape, self.domain)\n elif len(p) == 1:\n return p[0] * self.eye(self.shape, self.domain)\n\n # Evaluate p(A) using Horner's method:\n # XXX: Use Paterson-Stockmeyer method?\n I = A.eye(A.shape, A.domain)\n p_A = p[0] * I\n for pi in p[1:]:\n p_A = A*p_A + pi*I\n\n return p_A"
] | [
"0.7189769",
"0.6388188",
"0.6349685",
"0.6349405",
"0.6301175",
"0.62954414",
"0.62514293",
"0.6217231",
"0.61732244",
"0.61730355",
"0.61638635",
"0.6040098",
"0.6035656",
"0.59895843",
"0.589283",
"0.58467543",
"0.5791899",
"0.5781007",
"0.5776011",
"0.5772723",
"0.57710177",
"0.57657105",
"0.5763215",
"0.57417476",
"0.57344425",
"0.5730924",
"0.5730301",
"0.57119614",
"0.5680804",
"0.56492496"
] | 0.788395 | 0 |
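A small worked check of the polyval record above (a standalone sketch; the coefficient list [2, 3, 1] is an arbitrary example, not from the dataset):

def polyval(p, x):
    val = 0
    ii = len(p) - 1
    for i in range(len(p) - 1):
        val += p[i] * (x ** ii)
        ii -= 1
    return val + p[-1]

# [2, 3, 1] encodes 2*x**2 + 3*x + 1, so at x = 2 the value is 8 + 6 + 1 = 15.
assert polyval([2, 3, 1], 2) == 15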
Takes two pairs of values, v1, v2 and w1, w2, and returns a boolean indicating whether the inclusive range [v1, v2] overlaps the inclusive range [w1, w2]. | def is_overlap_sorted_values(v1, v2, w1, w2):
if (v2 < w1) or (v1 > w2):
return False
else:
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False",
"def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2",
"def _overlap(x1, w1, x2, w2):\r\n if x1+w1 < x2-w2: return False\r\n if x1-w1 > x2+w2: return False\r\n\r\n return True",
"def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)",
"def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)",
"def overlap(p1: Tuple, p2: Tuple) -> bool:\n if (p2[1] - p1[0]) * (p2[0] - p1[1]) <= 0:\n return True\n else:\n return False",
"def intersect(start1, stop1, start2, stop2):\n\tassert isinstance(start1, int)\n\tassert isinstance(stop2, int)\n\tassert isinstance(start2, int)\n\tassert isinstance(stop2, int)\n\tassert start1 <= stop1\n\tassert start2 <= stop2\n\t\n\t# if interval 1 is completely to the left of interval 2\n\tif stop1 < start2:\n\t\treturn False\n\t\n\t# if interval 1 is completely to the right of interval2\n\tif stop2 < start1:\n\t\treturn False\n\t\t\n\treturn True",
"def overlap(start_idx1, end_idx1, start_idx2, end_idx2):\n head = min(end_idx1, end_idx2)\n tail = max(start_idx1, start_idx2)\n return head >= tail",
"def check_overlap(a, b):\n if a[0] >= b[2] or a[1] >= b[3] or a[2] <= b[0] or a[3] <= b[1]:\n return False\n return True",
"def check_in_range(value, lim_1, lim_2):\n lo_lim = min(lim_1, lim_2)\n hi_lim = max(lim_1, lim_2)\n \n if (abs(value) > abs(lo_lim)) and (abs(value) < abs(hi_lim)):\n return True\n else:\n return False",
"def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])",
"def overlaps(x1, x2, y1, y2):\n\n return x1 <= y2 and y1 <= x2",
"def check_overlap(lorentz_params_1, lorentz_params_2):\n if lorentz_params_1 is None or lorentz_params_2 is None:\n return False\n [low_lorentz, high_lorentz] = sorted(\n [lorentz_params_1, lorentz_params_2], key=lambda l: l[1])\n low_fit_range = find_single_fit_range(low_lorentz)\n high_fit_range = find_single_fit_range(high_lorentz)\n return low_fit_range[2] > high_fit_range[1]",
"def iOverlap (a1, a2, b1, b2):\n if b1<=a1<=b2 or b1<=a2<=b2 or a1<=b1<=a2 or a1<=b2<=a2:\n return True\n elif a1>a2 or b1>b2:\n return False\n else:\n return False",
"def overlap(indices1, indices2):\n assert (len(indices1) == 2 and len(indices2) == 2)\n indices1 = sorted(indices1)\n indices2 = sorted(indices2)\n if (indices2[0] <= indices1[0] <= indices2[1]) or \\\n (indices2[0] <= indices1[1] <= indices2[1]) or \\\n (indices1[0] <= indices2[0] <= indices1[1]) or \\\n (indices1[0] <= indices2[1] <= indices1[1]):\n return True\n else:\n return False",
"def _range_contains(self, a, b):\n\t\treturn b[0] >= a[0] and b[-1] <= a[-1]",
"def isShiftInRange(shiftValue, shiftRanges):\n\n for (minVal, maxVal) in shiftRanges:\n if shiftValue >= minVal:\n if shiftValue <= maxVal:\n return True\n \n return False",
"def dates_intervals_are_overlapped(start_1, end_1, start_2, end_2):\n return end_1 >= start_2 and end_2 >= start_1",
"def _inside_bounds(A, B):\n for axis in 'xyz':\n minA, maxA = axis_bounds(A, axis)\n minB, maxB = axis_bounds(B, axis)\n if (minA <= minB) or (maxA >= maxB):\n return False\n\n return True",
"def isInRange(val, minv, maxv):\n\treturn val >= minv and val <= maxv",
"def isDominated(wvalues1, wvalues2):\n not_equal = False\n for self_wvalue, other_wvalue in zip(wvalues1, wvalues2):\n print(\"self_wvalue: \"+str(self_wvalue))\n print(\"other_wvalue: \"+str(other_wvalue))\n if self_wvalue > other_wvalue:\n return False\n elif self_wvalue < other_wvalue:\n not_equal = True\n return not_equal",
"def check_out_range(value, lim_1, lim_2):\n lo_lim = min(lim_1, lim_2)\n hi_lim = max(lim_1, lim_2)\n \n if (abs(value) > abs(hi_lim)) or (abs(value) < abs(lo_lim)):\n return True\n else:\n return False",
"def between(check:float, boundary_1:float, boundary_2:float)->bool:\n if boundary_1 > boundary_2:\n boundary_1, boundary_2 = boundary_2, boundary_1\n return boundary_1 <= check and check <= boundary_2",
"def range_matches(self, other):\n return (\n self.begin == other.begin and \n self.end == other.end\n )",
"def is_between(value, start, end, including_start=False, including_end=False):\n if not including_start and not including_end: # not include both start and end\n if (start < value < end):\n return True\n elif (start > end) and (start < value <= (2**m - 1) or 0 <= value < end):\n return True\n elif (start == end) and (value != start):\n return True\n return False\n elif not including_start and including_end: # include end but not the start\n if value == end:\n return True\n elif (start < value <= end):\n return True\n elif (start > end) and ((start < value <= (2**m - 1)) or (0 <= value <= end)):\n return True\n elif (start == end) and (value != start):\n return True\n return False\n elif including_start and not including_end: # include start but not the end\n if value == start:\n return True\n elif (start <= value < end):\n return True\n elif (start > end) and (start <= value <= (2**m - 1) or 0 <= value < end):\n return True\n elif (start == end) and (value != end):\n return False\n return False\n else: # include both start and end\n if (start <= value <= end):\n return True\n elif (start > end) and (start <= value <= (2**m - 1) or 0 <= value <= end):\n return True\n elif start == end:\n return True\n return False",
"def range_match(verifield, ranges):\n return verifield[0] >= ranges[0][0] and verifield[0] <= ranges[0][1] and verifield[1] >= ranges[1][0] and verifield[1] <= ranges[1][1]",
"def check_overlap(x1,x2,x3,x4):\r\n if x3<x2 and x4>=x2:\r\n return True\r\n elif x3<=x1 and x4>x1:\r\n return True\r\n elif x3>x1 and x4<x2:\r\n return True\r\n elif x3<=x1 and x4>=x2:\r\n return True\r\n else:\r\n return False",
"def interval_intersect(a, b, c, d):\n if (c <= b) and (a <= d):\n return True\n else:\n return False",
"def in_range(cls, lhs, rhs):\n return rhs[0] <= lhs <= rhs[1]",
"def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])"
] | [
"0.75839585",
"0.7468334",
"0.73032653",
"0.7254913",
"0.7099083",
"0.7005381",
"0.6924589",
"0.6836781",
"0.68154067",
"0.67770654",
"0.6689429",
"0.66749865",
"0.6669765",
"0.66663224",
"0.66637504",
"0.665274",
"0.6625992",
"0.65885615",
"0.6519127",
"0.6514677",
"0.649486",
"0.6494724",
"0.6485713",
"0.6466873",
"0.64493686",
"0.64350426",
"0.64330715",
"0.64260924",
"0.6411381",
"0.6386672"
] | 0.79552335 | 0 |
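For reference, a minimal check of the is_overlap_sorted_values record above (the numeric ranges are made-up examples):

def is_overlap_sorted_values(v1, v2, w1, w2):
    if (v2 < w1) or (v1 > w2):
        return False
    else:
        return True

# [0, 5] overlaps [4, 9] but not [6, 9]; both ranges are assumed sorted.
assert is_overlap_sorted_values(0, 5, 4, 9) is True
assert is_overlap_sorted_values(0, 5, 6, 9) is False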
Takes two nested two-dimensional arrays, a_ary and b_ary, each representing a bounding box in ll, ur format [[lon, lat], [lon, lat]]. Returns a boolean indicating whether the bounding boxes overlap. | def bbox_intersect(a_ary, b_ary):
# Do any of the 4 corners of one bbox lie inside the other bbox?
# bbox format of [ll, ur]
# bbx[0] is lower left
# bbx[1] is upper right
# bbx[0][0] is lower left longitude
# bbx[0][1] is lower left latitude
# bbx[1][0] is upper right longitude
# bbx[1][1] is upper right latitude
# Detect longitude and latitude overlap
if is_overlap_sorted_values(a_ary[0][0], a_ary[1][0], b_ary[0][0], b_ary[1][0]) \
and is_overlap_sorted_values(a_ary[0][1], a_ary[1][1], b_ary[0][1], b_ary[1][1]):
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bbox_overlap(bbox_1: Sequence, bbox_2: Sequence) -> bool:\n if (bbox_1[0] > bbox_2[0]) or (bbox_1[1] > bbox_2[1]):\n return False\n if (bbox_1[2] < bbox_2[2]) or (bbox_1[3] < bbox_2[3]):\n return False\n\n return True",
"def is_overlap(bb1, bb2):\n l1, t1, r1, b1 = bb1['x'], bb1['y'], bb1['x']+bb1['w'], bb1['y']+bb1['h']\n l2, t2, r2, b2 = bb2['x'], bb2['y'], bb2['x']+bb2['w'], bb2['y']+bb2['h']\n\n if r1 > l2 and r2 > l1 and b2 > t1 and b1 > t2:\n return True\n else:\n return False",
"def check_overlap(a, b):\n if a[0] >= b[2] or a[1] >= b[3] or a[2] <= b[0] or a[3] <= b[1]:\n return False\n return True",
"def has_intersection(bboxa, bboxb):\n yamin, xamin, ha, wa = bboxa\n ybmin, xbmin, hb, wb = bboxb\n yamax, xamax = yamin+ha, xamin+wa\n ybmax, xbmax = ybmin+hb, xbmin+wb\n xs, ys = max(xamin, xbmin), max(yamin, ybmin)\n xe, ye = min(xamax, xbmax), min(yamax, ybmax)\n return xe > xs and ye > ys",
"def bbox_collision(bbox1, bbox2):\n\n bbox1 = np.asarray(bbox1)\n bbox2 = np.asarray(bbox2)\n\n max1 = np.max(bbox1, axis=1)\n min1 = np.min(bbox1, axis=1)\n\n max2 = np.max(bbox2, axis=1)\n min2 = np.min(bbox2, axis=1)\n\n out = (min1 <= max2) & (max1 >= min2)\n return np.all(out)",
"def _bbox_overlap(self, other):\n reg0 = self.bbox\n reg1 = other.bbox\n return (reg0[0] <= reg1[2] and reg1[0] <= reg0[2] and\n reg0[1] <= reg1[3] and reg1[1] <= reg0[3])",
"def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])",
"def check_overlap(self, a, b):\n return utils.is_point_in_circle(b.get_pos(), a.get_pos(), a.radius)",
"def doBoundingBoxesIntersect(self, other):\n if(self.upperLeft.x <= other.lowerRight.x and\n self.lowerRight.x >= other.upperLeft.x and\n self.upperLeft.y >= other.lowerRight.y and\n self.lowerRight.y <= other.upperLeft.y):\n return True\n return False",
"def is_overlapping(box1, box2):\n if box1[2] <= box2[0]: # If box1 is to the left of box2\n return False\n elif box1[0] >= box2[2]: # If box1 is to the right of box2\n return False\n elif box1[3] <= box2[1]: # If box1 is below box2\n return False\n elif box1[1] >= box2[3]: # If box1 is above box2\n return False\n else:\n return True",
"def _inside_bounds(A, B):\n for axis in 'xyz':\n minA, maxA = axis_bounds(A, axis)\n minB, maxB = axis_bounds(B, axis)\n if (minA <= minB) or (maxA >= maxB):\n return False\n\n return True",
"def iOverlap (a1, a2, b1, b2):\n if b1<=a1<=b2 or b1<=a2<=b2 or a1<=b1<=a2 or a1<=b2<=a2:\n return True\n elif a1>a2 or b1>b2:\n return False\n else:\n return False",
"def _intersects_1D(A, B):\n return False if (B[1] <= A[0]) or (B[0] >= A[1]) else True",
"def bbox_overlap(bbox_a, bbox_b):\n ymin_a, xmin_a, ymax_a, xmax_a = bbox_a\n ymin_b, xmin_b, ymax_b, xmax_b = bbox_b\n\n x_intersection = min(xmax_a, xmax_b) - max(xmin_a, xmin_b) + 1\n y_intersection = min(ymax_a, ymax_b) - max(ymin_a, ymin_b) + 1\n\n if x_intersection <= 0 or y_intersection <= 0:\n return 0\n else:\n return x_intersection * y_intersection",
"def bbox_overlaps(bboxes1, bboxes2, mode='iou'):\n\n from icv.data.core.bbox import BBox\n assert mode in ['iou', 'iof']\n\n bboxes1 = np.array([np.array(b.bbox) if isinstance(b,BBox) else b for b in bboxes1])\n bboxes2 = np.array([np.array(b.bbox) if isinstance(b,BBox) else b for b in bboxes2])\n\n bboxes1 = bboxes1.astype(np.float32)\n bboxes2 = bboxes2.astype(np.float32)\n rows = bboxes1.shape[0]\n cols = bboxes2.shape[0]\n ious = np.zeros((rows, cols), dtype=np.float32)\n if rows * cols == 0:\n return ious\n exchange = False\n if bboxes1.shape[0] > bboxes2.shape[0]:\n bboxes1, bboxes2 = bboxes2, bboxes1\n ious = np.zeros((cols, rows), dtype=np.float32)\n exchange = True\n area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (\n bboxes1[:, 3] - bboxes1[:, 1] + 1)\n area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (\n bboxes2[:, 3] - bboxes2[:, 1] + 1)\n for i in range(bboxes1.shape[0]):\n x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])\n y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])\n x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])\n y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])\n overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum(\n y_end - y_start + 1, 0)\n if mode == 'iou':\n union = area1[i] + area2 - overlap\n else:\n union = area1[i] if not exchange else area2\n ious[i, :] = overlap / union\n if exchange:\n ious = ious.T\n return ious",
"def overlap(a, b):\n return not(a[2]<=b[0] or a[3]<=b[1] or a[0]>=b[2] or a[1]>=b[3])",
"def do_box_overlap(coord1, coord2):\n return (\n (coord1[0] - 2 < coord2[0] and coord1[1] + 2 > coord2[0]\n or coord2[0] - 2 < coord1[0] and coord2[1] + 2 > coord1[0]) \n and (coord1[2] - 2 < coord2[2] and coord1[3] + 2 > coord2[2]\n or coord2[2] - 2 < coord1[2] and coord2[3] + 2 > coord1[2]))",
"def is_boundary_edge(a, b, bdy_edges):\n for edge in bdy_edges:\n a0, b0 = edge\n if a == a0 and b == b0:\n return True\n return False",
"def is_overlap(box_1, box_2, iou_th):\n return box_1.iou(box_2) > iou_th",
"def overlaps(a, b):\n\n dx = a.x - b.x\n dy = a.y - b.y\n try:\n radius = a.radius + b.radius\n except AttributeError:\n radius = getattr(a, 'radius', 0.5) + getattr(b, 'radius', 0.5)\n\n return dx * dx + dy * dy <= radius * radius",
"def bbox_overlaps(boxes1, boxes2):\n # Compute the areas of `boxes1` and `boxes2`.\n area1 = (boxes1[:, 2] - boxes1[:, 0] + 1) * (boxes1[:, 3] - boxes1[:, 1] + 1) # [num_boxes1]\n area2 = (boxes2[:, 2] - boxes2[:, 0] + 1) * (boxes2[:, 3] - boxes2[:, 1] + 1) # [num_boxes2]\n\n # Compute the areas of the intersections.\n intersection_h = np.maximum(\n (np.minimum(np.expand_dims(boxes1[:, 3], axis=1), boxes2[:, 3]) -\n np.maximum(np.expand_dims(boxes1[:, 1], axis=1), boxes2[:, 1]) + 1),\n 0\n ) # [num_boxes1, num_boxes2]-D\n intersection_w = np.maximum(\n (np.minimum(np.expand_dims(boxes1[:, 2], axis=1), boxes2[:, 2]) -\n np.maximum(np.expand_dims(boxes1[:, 0], axis=1), boxes2[:, 0]) + 1),\n 0\n ) # [num_boxes1, num_boxes2]-D\n intersection = intersection_h * intersection_w # [num_boxes1, num_boxes2]-D\n\n # Compute the areas of the unions.\n union = np.maximum(\n np.expand_dims(area1, 1) + area2 - intersection,\n np.finfo(float).eps\n )\n\n # Compute IOU values.\n iou = intersection / union\n\n return iou",
"def overlaps(box1, box2):\n # Get the coordinates of bounding boxes\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\n\n # get the coordinates of the intersection rectangle\n inter_rect_x1 = max(b1_x1, b2_x1)\n inter_rect_y1 = max(b1_y1, b2_y1)\n inter_rect_x2 = min(b1_x2, b2_x2)\n inter_rect_y2 = min(b1_y2, b2_y2)\n\n overlaps_touches: bool = inter_rect_x1 <= inter_rect_x2 and inter_rect_y1 <= inter_rect_y2\n return overlaps_touches",
"def iou_bbox(bboxes1, bboxes2):\n bboxes1 = np.array(bboxes1, np.float32)\n bboxes2 = np.array(bboxes2, np.float32)\n \n intersection_min_y = np.maximum(bboxes1[:, 0], bboxes2[:, 0])\n intersection_max_y = np.minimum(bboxes1[:, 0] + bboxes1[:, 2] - 1, bboxes2[:, 0] + bboxes2[:, 2] - 1)\n intersection_height = np.maximum(intersection_max_y - intersection_min_y + 1, np.zeros_like(bboxes1[:, 0]))\n\n intersection_min_x = np.maximum(bboxes1[:, 1], bboxes2[:, 1])\n intersection_max_x = np.minimum(bboxes1[:, 1] + bboxes1[:, 3] - 1, bboxes2[:, 1] + bboxes2[:, 3] - 1)\n intersection_width = np.maximum(intersection_max_x - intersection_min_x + 1, np.zeros_like(bboxes1[:, 1]))\n\n area_intersection = intersection_height * intersection_width\n area_first = bboxes1[:, 2] * bboxes1[:, 3]\n area_second = bboxes2[:, 2] * bboxes2[:, 3]\n area_union = area_first + area_second - area_intersection\n \n iou = area_intersection * 1.0 / area_union\n iof = area_intersection * 1.0 / area_first\n ios = area_intersection * 1.0 / area_second\n\n return iou, iof, ios",
"def test_bounding_box_intersection(self):\n\n west_java = [105, -7, 108, -5]\n jakarta = [106.5, -6.5, 107, -6]\n\n # Test commutative law\n assert numpy.allclose(bbox_intersection(west_java, jakarta),\n bbox_intersection(jakarta, west_java))\n\n # Test inclusion\n assert numpy.allclose(bbox_intersection(west_java, jakarta), jakarta)\n\n # Ignore Bounding Boxes that are None\n assert numpy.allclose(bbox_intersection(west_java, jakarta, None),\n jakarta)\n\n # Realistic ones\n bbox1 = [94.972335, -11.009721, 141.014, 6.073612333333]\n bbox2 = [105.3, -8.5, 110.0, -5.5]\n bbox3 = [105.6, -7.8, 110.5, -5.1]\n\n ref1 = [max(bbox1[0], bbox2[0]),\n max(bbox1[1], bbox2[1]),\n min(bbox1[2], bbox2[2]),\n min(bbox1[3], bbox2[3])]\n assert numpy.allclose(bbox_intersection(bbox1, bbox2), ref1)\n assert numpy.allclose(bbox_intersection(bbox1, bbox2), bbox2)\n\n ref2 = [max(bbox3[0], bbox2[0]),\n max(bbox3[1], bbox2[1]),\n min(bbox3[2], bbox2[2]),\n min(bbox3[3], bbox2[3])]\n assert numpy.allclose(bbox_intersection(bbox3, bbox2), ref2)\n assert numpy.allclose(bbox_intersection(bbox2, bbox3), ref2)\n\n # Multiple boxes\n assert numpy.allclose(bbox_intersection(bbox1, bbox2, bbox3),\n bbox_intersection(ref1, ref2))\n\n assert numpy.allclose(bbox_intersection(bbox1, bbox2, bbox3,\n west_java, jakarta),\n jakarta)\n\n # From actual example\n b1 = [94.972335000000001, -11.009721000000001,\n 141.014002, 6.0736119999999998]\n b2 = (95.059660952000002, -10.997409961000001,\n 141.00132578099999, 5.9109226959999983)\n b3 = (94.972335000000001, -11.009721000000001,\n 141.0140016666665, 6.0736123333332639)\n\n res = bbox_intersection(b1, b2, b3)\n assert numpy.allclose(res, [95.059660952, -10.997409961,\n 141.001325781, 5.910922695999998],\n rtol=1.0e-12, atol=1.0e-12)\n\n # Empty intersection should return None\n assert bbox_intersection(bbox2, [50, 2, 53, 4]) is None\n\n # Deal with invalid boxes\n try:\n bbox_intersection(bbox1, [53, 2, 40, 4])\n except BoundingBoxError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection(bbox1, [50, 7, 53, 4])\n except BoundingBoxError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection(bbox1, 'blko ho skrle')\n except BoundingBoxError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection(bbox1)\n except VerificationError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection('')\n except VerificationError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)\n\n try:\n bbox_intersection()\n except VerificationError:\n pass\n else:\n msg = 'Should have raised exception'\n raise Exception(msg)",
"def intersects_segment(\n self, a: Tuple[float, float], b: Tuple[float, float]\n ) -> bool:\n assert len(a) == 2\n assert len(b) == 2\n return bool(lib.cpBBIntersectsSegment(self, a, b))",
"def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):\n\n assert mode in ['iou', 'iof']\n\n rows = bboxes1.size(0)\n cols = bboxes2.size(0)\n if is_aligned:\n assert rows == cols\n\n if rows * cols == 0:\n return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)\n\n if is_aligned:\n lt = torch.max(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2]\n rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2]\n\n wh = (rb - lt + 1).clamp(min=0) # [rows, 2]\n overlap = wh[:, 0] * wh[:, 1]\n area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (\n bboxes1[:, 3] - bboxes1[:, 1] + 1)\n\n if mode == 'iou':\n area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (\n bboxes2[:, 3] - bboxes2[:, 1] + 1)\n ious = overlap / (area1 + area2 - overlap)\n else:\n ious = overlap / area1\n else:\n # not aligned boxes are usually used for assigners,\n # because assigners need to know all the overlaps between every bbox and every gtbox\n \n # none add a new axis ag: [3,4] => [3, 1, 4] \n # torch.max will broadcast: 3 1 4, 5 4 => 3 5 4\n lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2]) # [rows, cols, 2]\n rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:]) # [rows, cols, 2]\n\n wh = (rb - lt + 1).clamp(min=0) # [rows, cols, 2]\n overlap = wh[:, :, 0] * wh[:, :, 1]\n area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (\n bboxes1[:, 3] - bboxes1[:, 1] + 1)\n\n if mode == 'iou':\n area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (\n bboxes2[:, 3] - bboxes2[:, 1] + 1)\n ious = overlap / (area1[:, None] + area2 - overlap)\n else:\n ious = overlap / (area1[:, None])\n\n return ious",
"def bbox_overlaps(boxes, query_boxes):\n n_ = boxes.shape[0]\n k_ = query_boxes.shape[0]\n overlaps = np.zeros((n_, k_), dtype=np.float)\n for k in range(k_):\n query_box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1)\n for n in range(n_):\n iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1\n if iw > 0:\n ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1\n if ih > 0:\n box_area = (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)\n all_area = float(box_area + query_box_area - iw * ih)\n overlaps[n, k] = iw * ih / all_area\n return overlaps",
"def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False):\r\n\r\n assert mode in ['iou', 'iof']\r\n\r\n if isinstance(bboxes1, np.ndarray):\r\n bboxes1 = torch.from_numpy(bboxes1.copy())\r\n if isinstance(bboxes2, np.ndarray):\r\n bboxes2 = torch.from_numpy(bboxes2.copy())\r\n\r\n rows = bboxes1.size(0)\r\n cols = bboxes2.size(0)\r\n if is_aligned:\r\n assert rows == cols\r\n\r\n if rows * cols == 0:\r\n return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols)\r\n\r\n if is_aligned:\r\n lt = torch.max(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2]\r\n rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2]\r\n\r\n wh = (rb - lt + 1).clamp(min=0) # [rows, 2]\r\n overlap = wh[:, 0] * wh[:, 1]\r\n area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (\r\n bboxes1[:, 3] - bboxes1[:, 1] + 1)\r\n\r\n if mode == 'iou':\r\n area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (\r\n bboxes2[:, 3] - bboxes2[:, 1] + 1)\r\n ious = overlap / (area1 + area2 - overlap)\r\n else:\r\n ious = overlap / area1\r\n else:\r\n lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2]) # [rows, cols, 2]\r\n rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:]) # [rows, cols, 2]\r\n\r\n wh = (rb - lt + 1).clamp(min=0) # [rows, cols, 2]\r\n overlap = wh[:, :, 0] * wh[:, :, 1]\r\n area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (\r\n bboxes1[:, 3] - bboxes1[:, 1] + 1)\r\n\r\n if mode == 'iou':\r\n area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (\r\n bboxes2[:, 3] - bboxes2[:, 1] + 1)\r\n ious = overlap / (area1[:, None] + area2 - overlap)\r\n else:\r\n ious = overlap / (area1[:, None])\r\n\r\n return ious",
"def is_overlapping(self, region):\n if self.x2 < region.x1:\n return False # this box is left the other\n if self.x1 > region.x2:\n return False # this box is right the other\n if self.y2 < region.y1:\n return False # this box is above the other\n if self.y1 > region.y2:\n return False # this box is below the other\n return True",
"def overlap(a: Pos, b: Pos, exact: bool = False) -> bool:\n if a == b:\n return True\n elif exact:\n return False\n s0, e0 = a\n s1, e1 = b\n if in_interval(s1, s0, e0):\n return True\n if in_interval(e1, s0, e0):\n return True\n if in_interval(s0, s1, e1):\n return True\n if in_interval(e0, s1, e1):\n return True\n return False"
] | [
"0.7563125",
"0.72570115",
"0.710988",
"0.701064",
"0.6934993",
"0.677594",
"0.6731343",
"0.66541207",
"0.66268736",
"0.66024",
"0.659357",
"0.65671766",
"0.65535945",
"0.65387017",
"0.6467221",
"0.64572394",
"0.64162743",
"0.6390569",
"0.63497204",
"0.631824",
"0.63000226",
"0.6252903",
"0.622882",
"0.62243235",
"0.6214763",
"0.6192003",
"0.61771154",
"0.61616576",
"0.6107477",
"0.6105982"
] | 0.85588765 | 0 |
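A self-contained sketch of the bbox_intersect record above; the helper is restated in condensed form and the coordinate values are illustrative only:

def is_overlap_sorted_values(v1, v2, w1, w2):
    # condensed form of the helper used by bbox_intersect
    return not ((v2 < w1) or (v1 > w2))

def bbox_intersect(a_ary, b_ary):
    # boxes are [[ll_lon, ll_lat], [ur_lon, ur_lat]]
    if is_overlap_sorted_values(a_ary[0][0], a_ary[1][0], b_ary[0][0], b_ary[1][0]) \
            and is_overlap_sorted_values(a_ary[0][1], a_ary[1][1], b_ary[0][1], b_ary[1][1]):
        return True
    else:
        return False

# Boxes sharing the region around (5, 5) overlap; fully disjoint boxes do not.
assert bbox_intersect([[0, 0], [10, 10]], [[5, 5], [15, 15]]) is True
assert bbox_intersect([[0, 0], [10, 10]], [[20, 20], [30, 30]]) is False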
Takes a degree value deg and returns the equivalent value in radians. | def _deg_to_rad(deg):
return deg * math.pi / 180 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def degree_to_radians(degree):\n return degree * pi / 180",
"def deg2rad(deg: float) -> float:\n return deg * 3.14 / 180.0",
"def deg2rad(deg):\n return deg * sp.pi / 180",
"def deg_to_rad(degrees):\n degrees = Decimal(unicode(degrees))\n radians = degrees * TWO_PI / 360\n radians = radians % (TWO_PI)\n return radians",
"def degrees_to_radians(degrees):\n return degrees * math.pi / 180",
"def degrees_to_radians(value):\n return float(value)*math.pi/180.0",
"def rad_to_deg(radians):\n if isinstance(radians, (Angle,)): #Ephem doesn't output a numerical value with unicode\n radians = Decimal(float(radians))\n else:\n radians = Decimal(unicode(radians))\n degrees = radians * (360/TWO_PI)\n degrees = degrees % (Decimal(\"360\"))\n return degrees",
"def toRadians(self,degrees):\n rad = degrees * (math.pi/180.0)\n return rad",
"def _rad_to_deg(rad):\r\n return rad * 180 / math.pi",
"def AngleDtoR(degree):\n\trad=degree*math.pi/180\n\treturn rad",
"def deg2rad(a):",
"def rad2deg(rad: float) -> float:\n return rad * 180.0 / math.pi()",
"def AngleRtoD(rad):\n\tdegreee=180*rad/math.pi \n\treturn degreee",
"def radians(degree_tensor):\n radian_tensor = degree_tensor/180 * math.pi\n return radian_tensor",
"def radians(self) -> float:\n return math.atan2(self.y, self.x)",
"def rad2deg(a):",
"def deg2rad(x):\r\n # see decorator for function body\r",
"def radians(x):\n return 0.0",
"def deg2rad(self, x):\n\t\treturn x*2*pi/360",
"def degree(x):\n return x*(180.0/math.pi)",
"def rad2deg(x):\r\n # see decorator for function body\r",
"def radians(self):\n self._setDegreesPerAU(2*math.pi)",
"def degrees(rad_angle) :\r\n if rad_angle is None :\r\n return None\r\n angle = rad_angle * 180 / math.pi\r\n while angle > 180 :\r\n angle = angle - 360\r\n while angle < -180 :\r\n angle = angle + 360\r\n return angle",
"def gon2rad(gon):\n return radians(gon2dec(gon))",
"def degrees(rad_angle) :\n angle = rad_angle * 180 / math.pi\n #Note this assume the radians angle is positive as that's what MMTK does\n while angle > 180 :\n angle = angle - 360\n return angle",
"def degrees(self) -> float:\n return math.degrees(self.radians)",
"def rads_to_degrees(angle_in_radians):\n \n angle_in_degrees = angle_in_radians*180/np.pi\n return(angle_in_degrees)",
"def radians(image):\n\n return ee.Image(image).toFloat().multiply(3.1415927).divide(180)",
"def construct_angle_degrees(loader, node):\n value = loader.construct_scalar(node)\n exprvalue = value\n if exprvalue.startswith(\"deg(\"):\n exprvalue = exprvalue.strip()[4:-1]\n try:\n return float(exprvalue) * math.pi / 180.0\n except ValueError:\n raise RosParamException(\"invalid degree value: %s\"%value)",
"def theta_deg(self):\n return self.theta * 180 / np.pi"
] | [
"0.83952487",
"0.8153732",
"0.8061395",
"0.77332145",
"0.7705545",
"0.7689395",
"0.7613848",
"0.75884396",
"0.7573327",
"0.7562161",
"0.75616604",
"0.751927",
"0.7456573",
"0.7348964",
"0.7297646",
"0.72903407",
"0.7284143",
"0.7225561",
"0.72026175",
"0.71694857",
"0.7099037",
"0.7062642",
"0.70370495",
"0.6893328",
"0.6883625",
"0.6871864",
"0.6789824",
"0.6726548",
"0.6691671",
"0.6630393"
] | 0.8162636 | 1 |
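A quick numeric check of the _deg_to_rad record above (standalone sketch):

import math

def _deg_to_rad(deg):
    return deg * math.pi / 180

# 180 degrees is pi radians and 90 degrees is pi/2.
assert math.isclose(_deg_to_rad(180), math.pi)
assert math.isclose(_deg_to_rad(90), math.pi / 2)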
Takes a radian value rad and returns the equivalent value in degrees. | def _rad_to_deg(rad):
return rad * 180 / math.pi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rad2deg(rad: float) -> float:\n return rad * 180.0 / math.pi()",
"def AngleRtoD(rad):\n\tdegreee=180*rad/math.pi \n\treturn degreee",
"def degrees(rad_angle) :\n angle = rad_angle * 180 / math.pi\n #Note this assume the radians angle is positive as that's what MMTK does\n while angle > 180 :\n angle = angle - 360\n return angle",
"def degrees(rad_angle) :\r\n if rad_angle is None :\r\n return None\r\n angle = rad_angle * 180 / math.pi\r\n while angle > 180 :\r\n angle = angle - 360\r\n while angle < -180 :\r\n angle = angle + 360\r\n return angle",
"def rad_to_deg(radians):\n if isinstance(radians, (Angle,)): #Ephem doesn't output a numerical value with unicode\n radians = Decimal(float(radians))\n else:\n radians = Decimal(unicode(radians))\n degrees = radians * (360/TWO_PI)\n degrees = degrees % (Decimal(\"360\"))\n return degrees",
"def rad2deg(a):",
"def deg2rad(a):",
"def deg_to_rad(degrees):\n degrees = Decimal(unicode(degrees))\n radians = degrees * TWO_PI / 360\n radians = radians % (TWO_PI)\n return radians",
"def deg2rad(deg: float) -> float:\n return deg * 3.14 / 180.0",
"def AngleDtoR(degree):\n\trad=degree*math.pi/180\n\treturn rad",
"def _deg_to_rad(deg):\r\n return deg * math.pi / 180",
"def deg2rad(deg):\n return deg * sp.pi / 180",
"def degree_to_radians(degree):\n return degree * pi / 180",
"def deg2rad(self, x):\n\t\treturn x*2*pi/360",
"def degrees_to_radians(value):\n return float(value)*math.pi/180.0",
"def rad2deg(x):\r\n # see decorator for function body\r",
"def degrees_to_radians(degrees):\n return degrees * math.pi / 180",
"def toRadians(self,degrees):\n rad = degrees * (math.pi/180.0)\n return rad",
"def deg2rad(x):\r\n # see decorator for function body\r",
"def rads_to_degrees(angle_in_radians):\n \n angle_in_degrees = angle_in_radians*180/np.pi\n return(angle_in_degrees)",
"def gon2rad(gon):\n return radians(gon2dec(gon))",
"def rad2deg_inplace(a):",
"def rad(x) :#en mm!\r\n return topdia(x)/2.0",
"def radians(degree_tensor):\n radian_tensor = degree_tensor/180 * math.pi\n return radian_tensor",
"def degrees(self) -> float:\n return math.degrees(self.radians)",
"def meters_to_decimal_degrees(value):\n return value * 360.0 / EARTH_RADIUS",
"def rad(area) :\n return sqrt(area/pi)",
"def radians(x):\n return 0.0",
"def mas2rad(x):\n return x * 4.8481368110953599e-09",
"def rad2mas(rad):\n mas = rad * (3600.0 * 180 / np.pi) * 10.0 ** 3\n return mas"
] | [
"0.8273594",
"0.8022791",
"0.79581714",
"0.7873671",
"0.76528996",
"0.7592591",
"0.74427694",
"0.735636",
"0.72945213",
"0.7291209",
"0.72846985",
"0.7222064",
"0.7218808",
"0.70348",
"0.6971813",
"0.6958033",
"0.6890582",
"0.6882272",
"0.68188",
"0.6781663",
"0.6751045",
"0.6735107",
"0.6724798",
"0.66596484",
"0.6635116",
"0.66225743",
"0.6585816",
"0.6423228",
"0.63823724",
"0.63735384"
] | 0.8178221 | 1 |
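And the inverse conversion from the _rad_to_deg record above, checked the same way (standalone sketch):

import math

def _rad_to_deg(rad):
    return rad * 180 / math.pi

# pi radians is 180 degrees; pi/2 is 90 degrees.
assert math.isclose(_rad_to_deg(math.pi), 180.0)
assert math.isclose(_rad_to_deg(math.pi / 2), 90.0)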
Takes a polynomial array poly_ary and a longitude value lon and returns the angle, in radians, of the polynomial's slope at that longitude. | def _angle_from_slope(poly_ary, lon):
# find the first derivative
poly = polyder(poly_ary, 1)
# get the slope at our point of interest
slope = polyval(poly, lon)
# get the angle from the slope
angle = math.atan(slope)
return angle # in radians
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def angle(z, deg=0):\n if deg:\n fact = 180/pi\n else:\n fact = 1.0\n z = asarray(z)\n if (issubclass(z.dtype.type, _nx.complexfloating)):\n zimag = z.imag\n zreal = z.real\n else:\n zimag = 0\n zreal = z\n return arctan2(zimag, zreal) * fact",
"def angle_to(self, latlng):\n y_node, x_node = latlng.lat, latlng.lng\n y_self, x_self = self.lat, self.lng\n return atan2(y_node - y_self, x_node - x_self)",
"def angle(l, m, n):\n q = round(m ** 2 + n ** 2 - l ** 2, 2)\n r = round(2 * m * n, 2)\n return math.acos(q / r)",
"def angle(z):",
"def azimuth(poly):\n num = len(poly) - 1\n vec = unit_normal(poly[0], poly[1], poly[num])\n vec_azi = np.array([vec[0], vec[1], 0])\n vec_n = np.array([0, 1, 0])\n # update by Santosh\n # angle2vecs gives the smallest angle between the vectors\n # so for a west wall angle2vecs will give 90\n # the following 'if' statement will make sure 270 is returned\n x_vector = vec_azi[0]\n if x_vector < 0:\n return 360 - angle2vecs(vec_azi, vec_n)\n else:\n return angle2vecs(vec_azi, vec_n)",
"def get_angel(coordinates):\n x = coordinates[0]\n y = coordinates[1]\n\n if x == 0:\n if y < 0:\n return 0\n else:\n return math.pi\n\n if y == 0:\n if x < 0:\n return (3 * math.pi) / 2\n else:\n return math.pi / 2\n\n if x >= 0:\n if y >= 0:\n return ((math.pi / 2) + math.atan(abs(y)/abs(x)))\n else:\n return math.atan(abs(x)/abs(y))\n else:\n if y >= 0:\n return math.pi + math.atan(abs(x)/abs(y))\n else:\n return (3/2) * math.pi + math.atan(abs(y)/abs(x))",
"def polar_angle(self, p0, p1=None):\n if p1 == None:\n p1 = anchor\n y_span = p0[1] - p1[1]\n x_span = p0[0] - p1[0]\n return atan2(y_span, x_span)",
"def lon360to180(lon):\n\tlon = np.asanyarray(lon)\n\treturn ((lon + 180.) % 360.) - 180.",
"def calcNadirAngle(ele):\n\n nadeg = np.arcsin(6378.0/26378.0 * np.cos(ele/180.*np.pi)) * 180./np.pi\n\n return nadeg",
"def getLarmorAngleMap(self):\n return np.degrees(self.theta_L_array)",
"def construct_angle_degrees(loader, node):\n value = loader.construct_scalar(node)\n exprvalue = value\n if exprvalue.startswith(\"deg(\"):\n exprvalue = exprvalue.strip()[4:-1]\n try:\n return float(exprvalue) * math.pi / 180.0\n except ValueError:\n raise RosParamException(\"invalid degree value: %s\"%value)",
"def construct_angle_radians(loader, node):\n value = loader.construct_scalar(node).strip()\n exprvalue = value.replace('pi', 'math.pi')\n if exprvalue.startswith(\"rad(\"):\n exprvalue = exprvalue[4:-1]\n try:\n return float(eval(exprvalue))\n except SyntaxError as e:\n raise RosParamException(\"invalid radian expression: %s\"%value)",
"def degree_to_radians(degree):\n return degree * pi / 180",
"def calc_angle_of_incidence(g, lat, ha, tilt, teta_z):\n # surface normal vector\n n_E = sin(tilt)*sin(teta_z)\n n_N = sin(tilt)*cos(teta_z)\n n_Z = cos(tilt)\n # solar vector\n s_E = -cos(g)*sin(ha)\n s_N = sin(g)*cos(lat) - cos(g)*sin(lat)*cos(ha)\n s_Z = cos(g)*cos(lat)*cos(ha) + sin(g)*sin(lat)\n\n # angle of incidence\n teta_B = acos(n_E*s_E + n_N*s_N + n_Z*s_Z)\n return teta_B",
"def angle(*args):\n if len(args) < 1:\n return 0.0\n elif len(args) == 1:\n return np.arctan2(args[0][1], args[0][0])\n else:\n v1 = args[0].flatten()\n v2 = args[1].flatten()\n return np.arccos(np.dot(v1, v2) / (norm(v1) * norm(v2)))",
"def get_angle(self, angle_):\n return self.two_pi * angle_",
"def calculate_angle(opp, adjacent):\n return math.degrees(math.atan((opp / adjacent)))",
"def polar_angle(points):\n\n\tpolar_angle = []\n\n\tfor each in points:\n\t\tdy = each[1] - P0[1]\n\t\tdx = each[0] - P0[0]\n\t\tpolar_angle.append(atan2(dy, dx))\n\n\treturn polar_angle",
"def angle_in_degrees(x, y):\n return math.atan2(y, x) / math.pi * 180",
"def get_angle(pf, p0=np.array([0, 0]), pi=None):\n if pi is None:\n pi = p0 + np.array([0, 1])\n v0 = np.array(pf) - np.array(p0)\n v1 = np.array(pi) - np.array(p0)\n\n angle = np.math.atan2(np.linalg.det([v0, v1]), np.dot(v0, v1))\n angle = np.degrees(angle)\n\n return angle",
"def getAngle(p1, p2, p3):\n\tv1 = p1 - p2\n\tv2 = p3 - p2\n\tmag = la.norm(v1) * la.norm(v2)\n\tc = np.dot(v1, v2) / mag\n\tcross = np.cross(v1,v2)\n\ts = la.norm(cross)/mag\n\tatang = math.atan2(s,c)\n\tang = atang * 180 / math.pi\n\treturn ang",
"def scalar_earth_angle( lat1, lon1, lat2, lon2):\n theta1 = lat1 *dtor\n phi1 = lon1 *dtor\n theta2 = lat2 * dtor\n phi2 = lon2 * dtor\n p1 = numpy.vstack((cos(theta1)*cos(phi1),cos(theta1)*sin(phi1),sin( theta1))).T\n p2 = numpy.vstack((cos(theta2)*cos(phi2), cos( theta2)* sin( phi2), sin( theta2))).T\n dsq = ((p1-p2)**2).sum(-1)\n return numpy.arccos((2 -dsq)/2.)/dtor",
"def angle_2D(v):\n len_v=(v[0]**2+v[1]**2)**(0.5)\n if len_v==0:\n return 0\n ret = math.acos(v[0]/len_v)\n if v[1]<0:\n ret=6.283185307179586-ret\n return ret",
"def avl_angle(self):\n dif_height = (self.heights[5] - self.heights[7])\n dif_position = (self.positions[0][7] - self.positions[0][5])\n angle = atan(dif_height / dif_position) / 1.5 * 180 / pi\n return angle",
"def test_degree(poly_equation):\n equation = poly_equation\n A = 1e10\n degree = np.log(equation.flux(A)/equation.flux(1))/np.log(A)\n npt.assert_allclose(equation.degree(), degree)",
"def rotation2D_to_angle(R: np.array) -> float:\n return np.arctan2(R[1, 0], R[0, 0])",
"def angle(x, y, deg=False):\n rad_angle = np.arccos(np.dot(x, y)/ (norm(x)*norm(y)))\n if deg:\n return rad_angle*(180.0/np.pi)\n else:\n return rad_angle",
"def angle(self, vec, unit='rad'):\n raise NotImplementedError('angle not implemented for VectorArrays')",
"def deg2rad(a):",
"def calc_incidence_angle():\n \n Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle = solar_model()\n \n # Beta is equal to angle of tilted surface to horizontal (in radians)\n roof_slopes_west = section_coordinates()\n Beta_r = np.arctan(roof_slopes_west) \n incidence_angles_west = np.zeros(101)\n \n \n for i in range(0,len(roof_slopes_west)):\n incidence_angles_west[i] = np.arccos(np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r[i]) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r[i]) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r[i]) * np.sin(Azimuth_r) * np.sin(Omega_r))"
] | [
"0.60667235",
"0.60559404",
"0.6052086",
"0.604196",
"0.6015926",
"0.5962259",
"0.5858523",
"0.5830844",
"0.58076715",
"0.5771097",
"0.5732114",
"0.57266414",
"0.5722521",
"0.5678085",
"0.56580085",
"0.5646384",
"0.5643149",
"0.5639333",
"0.5612041",
"0.5611166",
"0.55884653",
"0.5585819",
"0.55849814",
"0.55780554",
"0.5574544",
"0.5556461",
"0.55540705",
"0.5511922",
"0.5500524",
"0.548615"
] | 0.8269071 | 0 |
Takes an acute angle B and a distance in kilometers, perp, and returns the distance perpendicular to the equator. Assumes an orbital section with a positive slope. | def _slope_pos_vert_distance(B, perp):
# get the arc length of 'a' at around this latitude
a = _deg_to_rad(perp / ODL_DISTANCE)
# arclength from beam 0110 to first or last beam vertically
arclength = math.atan(math.tan(a) / math.cos(B))
# distance in km from beam 0110 to first or last vertically
beam_distance = _rad_to_deg(arclength) * ODL_DISTANCE
return beam_distance # in km
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def PerpendicularDistanceToFinish(point_b_angle: float,\n point_b: gps_pb2.Point) -> float:\n return math.cos(math.radians(point_b_angle)) * point_b.start_finish_distance",
"def _slope_neg_vert_distance(B, perp):\r\n # get the arc length of 'a' at around this latitude\r\n c = _deg_to_rad(perp / ODL_DISTANCE)\r\n # arclength from beam 0110 to first or last beam vertically\r\n arclength = math.atan(math.cos(B) * math.tan(c))\r\n # distance in km from beam 0110 to first or last vertically.\r\n beam_distance = _rad_to_deg(arclength) * ODL_DISTANCE\r\n return beam_distance # in km\r",
"def perpendicular_distance_2_angle(distance):\n angular_distance = 2. * np.arcsin(distance / 2.)\n return angular_distance",
"def mplane(B, c):\n # for Sun Mg Potential: c=1.6281689374348\n A = np.zeros(shape=4)\n s = A.shape\n phi = 0\n l = 0\n b = 0\n for i in range(s[0]):\n A[0] = (2 / 3) * B[0]\n A[1] = 0.5 * ((2 / sqrt(3)) * B[1] - A[0])\n A[2] = -A[0] - A[1]\n phi = atan(B[1] / B[0])\n b = 0.5 * sqrt(3) / sin(2 * pi / 3 - phi)\n l = sqrt(B[0] ** 2 + B[1] ** 2)\n A[3] = c * B[2] / (l * b)\n return A",
"def SolvePointBAngle(point_b, point_c) -> float:\n dist_b_c = common_lib.PointDelta(point_b, point_c)\n # cos(B) = (c² + a² - b²)/2ca https://rb.gy/pgi7zm\n a = point_b.start_finish_distance\n b = point_c.start_finish_distance\n c = dist_b_c\n return math.degrees(math.acos((c**2 + a**2 - b**2)/(2*c*a)))",
"def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d",
"def get_distance(p1, p2):\n\n deg_rad = math.pi / 180\n\n dphi = p1[1] - p2[1]\n phim = 0.5 * (p1[1] + p2[1])\n dlam = p1[0] - p2[0]\n\n k1 = (111.13209 - 0.56605 * math.cos(2 * phim * deg_rad) + 0.00120 * \n math.cos(4 * phim * deg_rad))\n k2 = (111.41513 * math.cos(phim * deg_rad) - 0.09455 * \n math.cos(3 *phim * deg_rad) + 0.0012 * math.cos(5 * phim * deg_rad))\n\n return numpy.sqrt(k1**2 * dphi**2 + k2**2 * dlam**2)",
"def hypot(a, b):\n\n return calculator.sqrt(calculator.sum(calculator.product(a, a), calculator.product(b, b)))\n\n raise NotImplementedError(\"Problem 3 Incomplete\")",
"def distance(point_a, point_b):\r\n a_to_b = math.hypot(point_b[0] - point_a[0], point_b[1] - point_a[1])\r\n return a_to_b",
"def theta(point_a, point_b):\r\n dx = point_b[0] - point_a[0]\r\n dy = point_b[1] - point_a[1]\r\n\r\n if abs(dx) < 1.e-6 and abs(dy) < 1.e-6:\r\n return 360\r\n else:\r\n t = dy/(abs(dx) + abs(dy))\r\n\r\n if dx < 0:\r\n t = 2 - t\r\n elif dy < 0:\r\n t += 4\r\n\r\n if t == 0:\r\n return 360\r\n\r\n return t*90",
"def calc_dist(self, p):\n p = np.array((p.x, p.y, p.z))\n return LA.norm(p - self.car_pos)",
"def lineseg_dists(p, a, b):\n\t\t# normalized tangent vectors\n\t\td_ba = b - a\n\t\td = np.divide(d_ba, (np.hypot(d_ba[:, 0], d_ba[:, 1])\n\t\t .reshape(-1, 1)))\n\n\t\t# signed parallel distance components\n\t\t# rowwise dot products of 2D vectors\n\t\ts = np.multiply(a - p, d).sum(axis=1)\n\t\tt = np.multiply(p - b, d).sum(axis=1)\n\n\t\t# clamped parallel distance\n\t\th = np.maximum.reduce([s, t, np.zeros(len(s))])\n\n\t\t# perpendicular distance component\n\t\t# rowwise cross products of 2D vectors\n\t\td_pa = p - a\n\t\tc = d_pa[:, 0] * d[:, 1] - d_pa[:, 1] * d[:, 0]\n\n\t\tprint(\"C value:\", c)\n\t\treturn np.hypot(h, c)",
"def proper_motion2vperpendicular(self, distance=\"distance\", pm_long=\"pm_l\", pm_lat=\"pm_b\",\n vl=\"vl\", vb=\"vb\",\n propagate_uncertainties=False,\n radians=False,\n inplace=True):\n df = self.df if inplace else self.df.copy()\n k = 4.74057\n df.add_variable(\"k\", k, overwrite=False)\n df.add_virtual_column(vl, \"k*{pm_long}*{distance}\".format(**locals()))\n df.add_virtual_column(vb, \"k* {pm_lat}*{distance}\".format(**locals()))\n if propagate_uncertainties:\n df.propagate_uncertainties([df[vl], df[vb]])\n return df",
"def point_to_line_dist(P, A, B):\n\tif all(A == P) or all(B == P):\n\t\treturn0\n\tif arccos(dot((P - A) / norm(P - A), (B - A) / norm(B - A))) > pi / 2:\n\t\treturn norm(P - A)\n\tif arccos(dot((P - B) / norm(P - B), (A - B) / norm(A - B))) > pi / 2:\n\t\treturn norm(P - B)\n\treturn norm(cross(A-B, A-P))/norm(B-A)",
"def vincenty_direct_solution(begin_lat, begin_lon, begin_azimuth, distance, a, b, f):\n # Convert latitude, longitude, azimuth of the begining point to radians\n lat1 = math.radians(begin_lat)\n lon1 = math.radians(begin_lon)\n alfa1 = math.radians(begin_azimuth)\n\n sinAlfa1 = math.sin(alfa1)\n cosAlfa1 = math.cos(alfa1)\n\n # U1 - reduced latitude\n tanU1 = (1 - f) * math.tan(lat1)\n cosU1 = 1 / math.sqrt(1 + tanU1 * tanU1)\n sinU1 = tanU1 * cosU1\n\n # sigma1 - angular distance on the sphere from the equator to begining point\n sigma1 = math.atan2(tanU1, math.cos(alfa1))\n\n # sinAlfa - azimuth of the geodesic at the equator\n sinAlfa = cosU1 * sinAlfa1\n cosSqAlfa = 1 - sinAlfa * sinAlfa\n uSq = cosSqAlfa * (a * a - b * b) / (b * b)\n A = 1 + uSq / 16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))\n B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))\n\n sigma = distance / (b * A)\n sigmap = 1\n\n while (math.fabs(sigma - sigmap) > 1e-12):\n cos2sigmaM = math.cos(2 * sigma1 + sigma)\n sinSigma = math.sin(sigma)\n cosSigma = math.cos(sigma)\n dSigma = B * sinSigma * (cos2sigmaM + B / 4 * (\n cosSigma * (-1 + 2 * cos2sigmaM * cos2sigmaM) - B / 6 * cos2sigmaM * (\n -3 + 4 * sinSigma * sinSigma) * (-3 + 4 * cos2sigmaM * cos2sigmaM)))\n sigmap = sigma\n sigma = distance / (b * A) + dSigma\n\n var_aux = sinU1 * sinSigma - cosU1 * cosSigma * cosAlfa1 # Auxiliary variable\n\n # Latitude of the end point in radians\n lat2 = math.atan2(sinU1 * cosSigma + cosU1 * sinSigma * cosAlfa1,\n (1 - f) * math.sqrt(sinAlfa * sinAlfa + var_aux * var_aux))\n\n lamb = math.atan2(sinSigma * sinAlfa1, cosU1 * cosSigma - sinU1 * sinSigma * cosAlfa1)\n C = f / 16 * cosSqAlfa * (4 + f * (4 - 3 * cosSqAlfa))\n L = lamb - (1 - C) * f * sinAlfa * (\n sigma + C * sinSigma * (cos2sigmaM + C * cosSigma * (-1 + 2 * cos2sigmaM * cos2sigmaM)))\n # Longitude of the second point in radians\n lon2 = (lon1 + L + 3 * math.pi) % (2 * math.pi) - math.pi\n\n # Convert to decimal degrees\n lat2_dd = math.degrees(lat2)\n lon2_dd = math.degrees(lon2)\n\n return lat2_dd, lon2_dd",
"def vincenty_direct_solution(begin_lat, begin_lon, begin_azimuth, distance, a, b, f):\n # Convert latitude, longitude, azimuth of the begining point to radians\n lat1 = math.radians(begin_lat)\n lon1 = math.radians(begin_lon)\n alfa1 = math.radians(begin_azimuth)\n\n sinAlfa1 = math.sin(alfa1)\n cosAlfa1 = math.cos(alfa1)\n \n # U1 - reduced latitude\n tanU1 = (1 - f) * math.tan(lat1)\n cosU1 = 1 / math.sqrt(1 + tanU1 * tanU1)\n sinU1 = tanU1 * cosU1\n \n # sigma1 - angular distance on the sphere from the equator to begining point\n sigma1 = math.atan2(tanU1, math.cos(alfa1))\n \n # sinAlfa - azimuth of the geodesic at the equator\n sinAlfa = cosU1 * sinAlfa1\n cosSqAlfa = 1 - sinAlfa * sinAlfa\n uSq = cosSqAlfa * (a * a - b * b) / (b * b)\n A = 1 + uSq/16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))\n B = uSq/1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))\n \n sigma = distance / (b * A)\n sigmap = 1\n \n while (math.fabs(sigma - sigmap) > 1e-12):\n cos2sigmaM = math.cos(2 * sigma1 + sigma)\n sinSigma = math.sin(sigma)\n cosSigma = math.cos(sigma)\n dSigma = B*sinSigma*(cos2sigmaM+B/4*(cosSigma*(-1+2*cos2sigmaM*cos2sigmaM)-B/6*cos2sigmaM*(-3+4*sinSigma*sinSigma)*(-3+4*cos2sigmaM*cos2sigmaM))) \n sigmap = sigma\n sigma = distance / (b * A) + dSigma\n \n var_aux = sinU1 * sinSigma - cosU1 * cosSigma * cosAlfa1\n \n # Latitude of the end point in radians\n lat2 = math.atan2(sinU1 * cosSigma + cosU1 * sinSigma*cosAlfa1, (1 - f)*math.sqrt(sinAlfa * sinAlfa + var_aux*var_aux))\n \n lamb = math.atan2 (sinSigma * sinAlfa1, cosU1 * cosSigma - sinU1 * sinSigma * cosAlfa1)\n C = f / 16 * cosSqAlfa * (4 + f * (4 - 3 * cosSqAlfa))\n L = lamb - (1 - C) * f * sinAlfa *(sigma + C * sinSigma * (cos2sigmaM + C * cosSigma * (-1 + 2 * cos2sigmaM * cos2sigmaM)))\n # Longitude of the second point in radians\n lon2 = (lon1 + L +3*math.pi)%(2*math.pi) - math.pi\n \n # Convert to decimal degrees\n lat2_dd = math.degrees(lat2) \n lon2_dd = math.degrees(lon2)\n \n return lat2_dd, lon2_dd",
"def distance(pa, pb):\n return hypot(pa._x - pb._x, pa._y - pb._y, pa._z - pb._z)",
"def ellipse_dist_ratio(self, theta, lwr):\n\n #clear form of the code\n \"\"\"\n def focal_distance(theta, lwr):\n a = lwr\n b = 1.0\n\n #eccentricity\n # e = math.sqrt( (a**2 - b**2)/a**2)\n e = math.sqrt( (a**2 - 1.0) / a**2)\n\n dist_on_angle = a * (1.0 - e**2) / (1.0 - e*math.cos(theta))\n\n #when theta = 0, the dist formula becomes:\n #dist_forward = a * (1.0 - e**2) / (1.0 - e*math.cos(0))\n #dist_forward = a * (1.0 - e**2) / (1.0 - e*1)\n dist_forward = a * (1.0 - e**2) / (1.0 - e)\n\n return dist_on_angle / dist_forward\n\n \"\"\"\n #TESTED 3-10-16: This function evaluates identically to the above\n e = math.sqrt( (lwr**2 - 1.0) / lwr**2)\n e2 = (1.0 - e**2)\n dist_on_angle = lwr * e2 / (1.0 - e*math.cos(theta))\n dist_forward = lwr * e2 / (1.0 - e)\n\n return dist_on_angle / dist_forward",
"def phi(cylindrical_x: sc.Variable, cylindrical_y: sc.Variable) -> sc.Variable:\n return sc.atan2(y=cylindrical_y, x=cylindrical_x)",
"def theta(a, b):\n \n \n def norm_vec(x):\n norm_out = sqrt(dot(x, x))\n return norm_out\n \n theta = acos(dot(a, b) / (norm_vec(a) * norm_vec(b))) * 180 / pi\n \n print theta",
"def offset_by(lon, lat, posang, distance):\n\n # Calculations are done using the spherical trigonometry sine and cosine rules\n # of the triangle A at North Pole, B at starting point, C at final point\n # with angles A (change in lon), B (posang), C (not used, but negative reciprocal posang)\n # with sides a (distance), b (final co-latitude), c (starting colatitude)\n # B, a, c are knowns; A and b are unknowns\n # https://en.wikipedia.org/wiki/Spherical_trigonometry\n\n cos_a = np.cos(distance)\n sin_a = np.sin(distance)\n cos_c = np.sin(lat)\n sin_c = np.cos(lat)\n cos_B = np.cos(posang)\n sin_B = np.sin(posang)\n\n # cosine rule: Know two sides: a,c and included angle: B; get unknown side b\n cos_b = cos_c * cos_a + sin_c * sin_a * cos_B\n # sin_b = np.sqrt(1 - cos_b**2)\n # sine rule and cosine rule for A (using both lets arctan2 pick quadrant).\n # multiplying both sin_A and cos_A by x=sin_b * sin_c prevents /0 errors\n # at poles. Correct for the x=0 multiplication a few lines down.\n # sin_A/sin_a == sin_B/sin_b # Sine rule\n xsin_A = sin_a * sin_B * sin_c\n # cos_a == cos_b * cos_c + sin_b * sin_c * cos_A # cosine rule\n xcos_A = cos_a - cos_b * cos_c\n\n A = Angle(np.arctan2(xsin_A, xcos_A), u.radian)\n # Treat the poles as if they are infinitesimally far from pole but at given lon\n small_sin_c = sin_c < 1e-12\n if small_sin_c.any():\n # For south pole (cos_c = -1), A = posang; for North pole, A=180 deg - posang\n A_pole = (90*u.deg + cos_c*(90*u.deg-Angle(posang, u.radian))).to(u.rad)\n if A.shape:\n # broadcast to ensure the shape is like that of A, which is also\n # affected by the (possible) shapes of lat, posang, and distance.\n small_sin_c = np.broadcast_to(small_sin_c, A.shape)\n A[small_sin_c] = A_pole[small_sin_c]\n else:\n A = A_pole\n\n outlon = (Angle(lon, u.radian) + A).wrap_at(360.0*u.deg).to(u.deg)\n outlat = Angle(np.arcsin(cos_b), u.radian).to(u.deg)\n\n return outlon, outlat",
"def toBarycentric(self, p: Vec3) -> Vec3:\n abc = triangleArea(self.a.position, self.b.position, self.c.position)\n pbc = triangleArea(p, self.b.position, self.c.position)\n apc = triangleArea(self.a.position, p, self.c.position)\n\n if abc == 0.0:\n return Vec3(0, 0, 0)\n\n x = pbc / abc\n y = apc / abc\n return Vec3(x, y, 1.0 - x - y)",
"def eDouble(P): #adding P + P by using a tangent line\r\n R = point(0, 0, P.c)\r\n i = ( (3 * P.x ** 2) + P.c.a) #the slope equation (i/j)\r\n j = (2 * P.y)\r\n s = (i * modInv(j, P.c.p) ) % P.c.p\r\n R.x = ( (s ** 2) - 2 * P.x) % P.c.p\r\n R.y = (-P.y + s * (P.x - R.x) ) % P.c.p\r\n return R",
"def calc_point_direction_angle(point_a, point_b):\n return direction_diff(point_a[2], point_b[2])",
"def perpendicular_bisector(point_1, point_2):\r\n A = 2 * (point_2.x - point_1.x)\r\n B = 2 * (point_2.y - point_1.y)\r\n C = (point_1.y - point_2.y) * (point_1.y + point_2.y) + \\\r\n (point_1.x - point_2.x) * (point_1.x + point_2.x)\r\n return np.matrix([[A],[B],[C]])",
"def hypot(a, b):\n return math.sqrt(a**2 + b**2)",
"def get_vector(a, b):\n dx = float(b[0] - a[0])\n dy = float(b[1] - a[1])\n\n distance = math.sqrt(dx ** 2 + dy ** 2)\n\n if dy > 0:\n angle = math.degrees(math.atan(-dx / dy))\n elif dy == 0:\n if dx < 0:\n angle = 90.0\n elif dx > 0:\n angle = -90.0\n else:\n angle = 0.0\n else:\n if dx < 0:\n angle = 180 - math.degrees(math.atan(dx / dy))\n elif dx > 0:\n angle = -180 - math.degrees(math.atan(dx / dy))\n else:\n angle = 180.0\n\n return distance, angle",
"def perpendicularTo(self, vector):\n perpendicular = self.subtractVector(self.parallelTo(vector))\n return perpendicular",
"def parang (hourangle, declination, latitude):\n\n return -np.arctan2 (-np.sin (hourangle),\n np.cos (declination) * np.tan (latitude)\n - np.sin (declination) * np.cos (hourangle))",
"def dist(self, p):\n return math.sqrt((p.x - self.x)**2 + (p.y - self.y)**2)"
] | [
"0.7223186",
"0.67502093",
"0.66600615",
"0.6265598",
"0.6012678",
"0.59357464",
"0.5722671",
"0.56781554",
"0.56774014",
"0.5673714",
"0.5669494",
"0.56690276",
"0.5657861",
"0.56260705",
"0.5611562",
"0.5574001",
"0.5554485",
"0.55370617",
"0.5533862",
"0.5531555",
"0.55132526",
"0.54960245",
"0.54833317",
"0.5476788",
"0.54676396",
"0.5461376",
"0.54593766",
"0.5457505",
"0.5453157",
"0.5447044"
] | 0.6905677 | 1 |
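Editor-added illustration for the row above (not part of the dataset): a minimal, self-contained sketch exercising the document function. ODL_DISTANCE and the degree/radian helpers are assumptions standing in for the source module's definitions, and B is assumed to be in radians since the function applies math.cos to it directly.

import math

ODL_DISTANCE = 111.32  # assumed: kilometers per degree of latitude arc

def _deg_to_rad(deg):
    return math.radians(deg)

def _rad_to_deg(rad):
    return math.degrees(rad)

def _slope_pos_vert_distance(B, perp):
    # same body as the document entry in the row above
    a = _deg_to_rad(perp / ODL_DISTANCE)
    arclength = math.atan(math.tan(a) / math.cos(B))
    return _rad_to_deg(arclength) * ODL_DISTANCE

# A 2 km perpendicular offset on a track inclined 30 degrees maps to
# roughly a 2.31 km north-south distance under these assumptions.
print(round(_slope_pos_vert_distance(math.radians(30), 2.0), 2))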
Takes a polynomial array poly_ary, starting longitude lon_min, ending longitude lon_max, and error distance max_error and returns a nested array representing a bounding box for a GEDI swath extent. Return value is in lower-left, upper-right (ll, ur) format: [[lon, lat], [lon, lat]]. | def poly_bbox(poly_ary, lon_min, lon_max, max_error):
# Angle 'B' is also theta
B_lon_min = _angle_from_slope(poly_ary, lon_min)
B_lon_max = _angle_from_slope(poly_ary, lon_max)
# Slope assumptions:
# 1) will never be 0 at orbit major maximum or minimum
# 2) will never change sign because of #1
# Therefore, slopes are assumed continuously increasing or decreasing
slope_is_positive = True if B_lon_min > 0 else False
if slope_is_positive:
# lat min will be at min lon
lat_min_ = polyval(poly_ary, lon_min)
# lat max will be at max lon
lat_max_ = polyval(poly_ary, lon_max)
# find the latitudinal distance using rules for right spherical triangles
distance_below = _slope_pos_vert_distance(B_lon_min, PERPENDICULAR_BELOW_DISTANCE)
distance_above = _slope_pos_vert_distance(B_lon_max, PERPENDICULAR_ABOVE_DISTANCE)
else: # slope is negative
# lat min will be at max lon
lat_min_ = polyval(poly_ary, lon_max)
# lat max will be at min lon
lat_max_ = polyval(poly_ary, lon_min)
# find the latitudinal distance using rules for right spherical triangles
distance_below = _slope_neg_vert_distance(B_lon_min, PERPENDICULAR_BELOW_DISTANCE)
distance_above = _slope_neg_vert_distance(B_lon_max, PERPENDICULAR_ABOVE_DISTANCE)
# subtract distance from 0110 beam to 0111 beam
lat_min = lat_min_ - ((distance_below * STATIC_MULT) - max_error) / ODL_DISTANCE
# add distance from 0110 beam to 0000 beam
lat_max = lat_max_ + ((distance_above * STATIC_MULT) + max_error) / ODL_DISTANCE
# bbox format of [ll, ur]
bbox = np.array([[lon_min, lat_min], [lon_max, lat_max]])
return bbox | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_bounding_box(self, poly=None):\n\n use_poly = poly if poly else self.res_poly\n\n # TODO: Test to comply with future values.\n # Updates the bounds\n if self.bounds_changed:\n # Gets the minimum and maximum value of each bounds.\n self.xmin = float('inf')\n self.ymin = float('inf')\n self.xmax = float('-inf')\n self.ymax = float('-inf')\n\n for points in use_poly:\n x = points[0] - self.x\n y = points[1] - self.y\n\n if x < self.xmin:\n self.xmin = x\n if x > self.xmax:\n self.xmax = x\n if y < self.ymin:\n self.ymin = y\n if y > self.ymax:\n self.ymax = y\n\n # Set bounds changed to be false\n self.bounds_changed = False\n \n return [self.xmin + self.x,\n self.ymin + self.y,\n self.xmax + self.x,\n self.ymax + self.y]",
"def polybbox(a):\n if len(a) == 0:\n return False\n elif len(a) == 1:\n return pointbbox(a[0])\n else:\n minx = maxx = a[0][0]\n miny = maxy = a[0][1]\n for i in range(1,len(a)):\n x=a[i][0]\n y=a[i][1]\n if x < minx:\n minx =x\n elif x > maxx:\n maxx = x\n if y < miny:\n miny = y\n elif y > maxy:\n maxy = y\n return [ point(minx,miny),point(maxx,maxy)]",
"def bounding_box(polyreg):\n if polyreg.bbox is not None:\n return polyreg.bbox\n # For regions, calculate recursively for each\n # convex polytope and take maximum\n if isinstance(polyreg, Region):\n lenP = len(polyreg)\n dimP = polyreg.dim\n alllower = np.zeros([lenP, dimP])\n allupper = np.zeros([lenP, dimP])\n # collect lower and upper bounds\n for ii in range(lenP):\n bbox = polyreg.list_poly[ii].bounding_box\n ll, uu = bbox\n alllower[ii, :] = ll.T\n allupper[ii, :] = uu.T\n l = np.zeros([dimP, 1])\n u = np.zeros([dimP, 1])\n # compute endpoints\n for ii in range(dimP):\n l[ii] = min(alllower[:, ii])\n u[ii] = max(allupper[:, ii])\n polyreg.bbox = l, u\n return l, u\n # For a single convex polytope, solve an optimization problem\n (m, n) = np.shape(polyreg.A)\n In = np.eye(n)\n l = np.zeros([n, 1])\n u = np.zeros([n, 1])\n # lower corner\n for i in range(n):\n c = np.array(In[:, i])\n G = polyreg.A\n h = polyreg.b\n sol = lpsolve(c, G, h)\n if sol['status'] == 0:\n x = sol['x']\n l[i] = x[i]\n else:\n raise RuntimeError((\n '`polytope.solvers.lpsolve` returned: {v}\\n'\n 'its docstring describes return values'\n ).format(\n v=sol))\n # upper corner\n for i in range(n):\n c = np.negative(np.array(In[:, i]))\n G = polyreg.A\n h = polyreg.b\n sol = lpsolve(c, G, h)\n if sol['status'] == 0:\n x = sol['x']\n u[i] = x[i]\n else:\n raise RuntimeError((\n '`polytope.solvers.lpsolve` returned: {v}\\n'\n 'its docstring describes return values'\n ).format(\n v=sol))\n polyreg.bbox = l, u\n return l, u",
"def bounding_box(self):\n box_min = []\n box_max = []\n if self.is_empty():\n raise ValueError('empty polytope is not allowed')\n for i in range(0, self.space_dimension()):\n x = Variable(i)\n coords = [ v.coefficient(x) for v in self.generators() ]\n max_coord = max(coords)\n min_coord = min(coords)\n box_max.append(max_coord)\n box_min.append(min_coord)\n return (tuple(box_min), tuple(box_max))",
"def _polygons_to_bboxes(polygons):\n# Build bounding boxes\n bboxes = np.empty([len(polygons), 4])\n for n, p in enumerate(polygons):\n try:\n left, bottom = np.min(p, axis = 0)\n except:\n import pdb\n pdb.set_trace()\n right, top = np.max(p, axis = 0)\n bboxes[n] = [left, bottom, right, top]\n return bboxes",
"def bisect_rectange(numSplits, minlat, minlong, maxlat, maxlong):\n #initialize function variables\n longpoints = []\n latpoints = []\n extents = []\n\n #Get a list of the split lat/long locations in the rectangle\n for i in range(numSplits+1):\n latpoints.append( (minlat + ((maxlat-minlat)/numSplits)*i) )\n longpoints.append( (minlong + ((maxlong-minlong)/numSplits)*i) )\n\n #Loop through the line locations and create a list of sub-rectangles\n for latindex, latmin in enumerate(latpoints):\n for longindex, longmin in enumerate(longpoints):\n if latindex<(len(latpoints)-1) and longindex<(len(longpoints)-1):\n newextent = [latmin, longmin, latpoints[latindex+1], longpoints[longindex+1]]\n extents.append(newextent)\n return extents",
"def get_bounds(shape, affine):\n adim, bdim, cdim = shape\n adim -= 1\n bdim -= 1\n cdim -= 1\n # form a collection of vectors for each 8 corners of the box\n box = np.array([[0., 0, 0, 1],\n [adim, 0, 0, 1],\n [0, bdim, 0, 1],\n [0, 0, cdim, 1],\n [adim, bdim, 0, 1],\n [adim, 0, cdim, 1],\n [0, bdim, cdim, 1],\n [adim, bdim, cdim, 1]]).T\n box = np.dot(affine, box)[:3]\n return zip(box.min(axis=-1), box.max(axis=-1))",
"def get_bbox_from_geojson(geojson_dictionary):\n geojson_dictionary = json.loads(geojson_dictionary)\n all_lats = []\n all_lngs = []\n\n feature = geojson_dictionary[\"features\"][0]\n coordinates = feature[\"geometry\"][\"coordinates\"][0]\n\n for coord in coordinates:\n\n # [lng, lat]\n lng = coord[0]\n lat = coord[1]\n all_lats.append(lat)\n all_lngs.append(lng)\n\n max_lat = max(all_lats) # north\n min_lat = min(all_lats) # south\n\n max_lng = max(all_lngs) # east\n min_lng = min(all_lngs) # west\n\n bbox = [max_lat, min_lat, max_lng, min_lng]\n print(\"\\n\\n\\nCALCULATED bbox:\", bbox)\n return bbox",
"def get_bound(box_list):\n box_xyxy_list = []\n for box in box_list:\n box_xyxy = xywh2xyxy(box)\n box_xyxy_list.append(box_xyxy)\n\n box_xyxy_list = np.array(box_xyxy_list)\n x1max, y1max, x2max, y2max = np.amax(box_xyxy_list, axis=0)\n x1min, y1min, x2min, y2min = np.amin(box_xyxy_list, axis=0)\n\n boundbox = xyxy2xywh([x1min, y1min, x2max, y2max])\n return boundbox",
"def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)",
"def get_bounding_box(self):\n if len(self.polygons) == 0:\n return None\n return numpy.array(((min(pts[:, 0].min() for pts in self.polygons),\n min(pts[:, 1].min() for pts in self.polygons)),\n (max(pts[:, 0].max() for pts in self.polygons),\n max(pts[:, 1].max() for pts in self.polygons))))",
"def getPolygonBoundaries(self, polygon: Polygon):\n polygon_df = gpd.GeoDataFrame([polygon], columns=['geometry'])\n\n polygon_df.set_crs(epsg=self.output_epsg, inplace=True)\n polygon_df['geometry'] = polygon_df['geometry'].to_crs(epsg=self.input_epsg)\n minx, miny, maxx, maxy = polygon_df['geometry'][0].bounds\n\n polygon_input = 'POLYGON(('\n xcords, ycords = polygon_df['geometry'][0].exterior.coords.xy\n for x, y in zip(list(xcords), list(ycords)):\n polygon_input += f'{x} {y}, '\n polygon_input = polygon_input[:-2]\n polygon_input += '))'\n\n return f\"({[minx, maxx]},{[miny,maxy]})\", polygon_input",
"def _bounding_box_to_polytope(lower, upper):\n intervals = [(a[0], b[0]) for a, b in zip(lower, upper)]\n return box2poly(intervals)",
"def bounding_box(self):\n latlon00 = self.ij_to_latlon(-1,-1)\n latlon01 = self.ij_to_latlon(-1,self.domain_size[1]+1)\n latlon11 = self.ij_to_latlon(self.domain_size[0]+1,self.domain_size[1]+1)\n latlon10 = self.ij_to_latlon(self.domain_size[0]+1,-1)\n return (latlon00,latlon01,latlon11,latlon10)",
"def get_bounds(geo_data):\n return geo_data[\"geometry\"].bounds",
"def bounding_box(self, integral=False):\n box_min = []\n box_max = []\n if self.n_vertices==0:\n raise ValueError('Empty polytope is not allowed')\n for i in range(0,self.ambient_dim()):\n coords = [ v[i] for v in self.Vrep_generator() ]\n max_coord = max(coords)\n min_coord = min(coords)\n if integral:\n box_max.append(ceil(max_coord))\n box_min.append(floor(min_coord))\n else:\n box_max.append(max_coord)\n box_min.append(min_coord)\n return (tuple(box_min), tuple(box_max))",
"def _get_polygon(areasrc):\n\n str = areasrc.geometry.wkt\n str = re.sub('POLYGON\\(\\(', '', str)\n str = re.sub('\\)\\)', '', str)\n aa = re.split('\\,', str)\n lons = []\n lats = []\n for str in aa:\n bb = re.split('\\s+', re.sub('^\\s+', '', str))\n lons.append(float(bb[0]))\n lats.append(float(bb[1]))\n return lons, lats",
"def get_bounds():\n bounds = [\n (0.1, 0.5), # Omega_m\n (0.05, 0.15) # beta\n ]\n return np.array(bounds)",
"def linear_interpolation_of_x_y(georeferenced_array, extent_minimum,\n extent_maximum):\n # Extracts axis 1 (columns) from the input array.\n # This represents the longitude.\n if extent_minimum == x0:\n inn = georeferenced_array[0, :]\n\n # Extracts axis 0 (rows) from the input array.\n # This represents the latitude.\n elif extent_minimum == y0:\n inn = georeferenced_array[:, 0]\n\n #\n linear_interpolation = [((i-0)*(extent_maximum-extent_minimum)/(\n (len(inn)-1)-0)+extent_minimum) for i, r in enumerate(inn)]\n\n # Claculates the difference between the value in front and the value\n # behind in the list\n difference = [y - x for x, y in zip(linear_interpolation,\n linear_interpolation[1:])]\n\n # Calculates the size of each array so to compare it to the size of the\n # input array.\n array_length = [np.size(np.arange(\n extent_minimum, extent_maximum, i)) for i in difference]\n\n # Select values that only match the longitude/latitude length then return\n # the first index in the list of matched values.\n # This list is a list of indexes that correspond to the index in the\n # variable difference.\n index_of_correct_value = [i for i, v in enumerate(\n array_length) if v == len(inn)][0]\n\n #\n interpolated_coordinates = np.arange(extent_minimum,\n extent_maximum,\n difference[index_of_correct_value])\n\n return interpolated_coordinates",
"def _get_clip_loc_in_array(self):\r\n\r\n # coordinates of upperleft and lowerright points of binding box\r\n box_ulx, box_uly, box_lrx, box_lry = self.clip_box[0][0], self.clip_box[0][1], \\\r\n self.clip_box[1][0], self.clip_box[1][1]\r\n\r\n # Get the offsets that correspond to the bounding box corner coordinates.\r\n offsets_ul = gdal.ApplyGeoTransform(self.inv_gt, box_ulx, box_uly)\r\n offsets_lr = gdal.ApplyGeoTransform(self.inv_gt, box_lrx, box_lry)\r\n\r\n # The offsets are returned as floating point, but we need integers.\r\n self.off_ulx, self.off_uly = map(int, offsets_ul)\r\n self.off_lrx, self.off_lry = map(int, offsets_lr)\r\n\r\n # Compute the numbers of rows and columns to extract, based on the offsets.\r\n self.row = self.off_lry - self.off_uly\r\n self.column = self.off_lrx - self.off_ulx",
"def get_bounds(self):\n log.debug(str(inspect.stack()[1][3]) + \"--> OC.get_bounds()\")\n\n # TODO: Move the operation out of here.\n\n xmin = Inf\n ymin = Inf\n xmax = -Inf\n ymax = -Inf\n\n # for obj in self.object_list:\n for obj in self.get_list():\n try:\n gxmin, gymin, gxmax, gymax = obj.bounds()\n xmin = min([xmin, gxmin])\n ymin = min([ymin, gymin])\n xmax = max([xmax, gxmax])\n ymax = max([ymax, gymax])\n except Exception as e:\n log.warning(\"DEV WARNING: Tried to get bounds of empty geometry. %s\" % str(e))\n\n return [xmin, ymin, xmax, ymax]",
"def get_bounds(self):\n\n northing=self.f.variables['y']\n easting=self.f.variables['x']\n\n lat1,lon1 = utm.to_latlon(np.min(easting),np.min(northing),11,northern=True)\n lat2,lon2 = utm.to_latlon(np.max(easting),np.max(northing),11,northern=True)\n\n return (lon1,lon2,lat1,lat2)",
"def get_MultiPolyLists_xy(mpoly):\n # Get the x or y coordinates\n x = []\n y = []\n if isinstance(mpoly,Polygon):\n mpoly = [mpoly]\n for poly in mpoly: # the polygon objects return arrays, it's important they be lists or Bokeh fails\n exterior_coords_x = poly.exterior.coords.xy[0].tolist();\n interior_coords_x = []\n exterior_coords_y = poly.exterior.coords.xy[1].tolist();\n interior_coords_y = []\n\n for interior in poly.interiors:\n if isinstance(interior.coords.xy[0],list):\n interior_coords_x += [interior.coords.xy[0]];\n interior_coords_y += [interior.coords.xy[1]];\n else:\n interior_coords_x += [interior.coords.xy[0].tolist()];\n interior_coords_y += [interior.coords.xy[1].tolist()];\n x.append([exterior_coords_x, *interior_coords_x])\n y.append([exterior_coords_y, *interior_coords_y])\n return (x,y)",
"def test_geometry_collection_get_bounds():\n geojson_data = {\n \"geometries\": [\n {\n \"coordinates\": [\n [\n [-1, 1],\n [0, 2],\n [-3, 4],\n [2, 0],\n ]\n ],\n \"type\": \"Polygon\",\n },\n ],\n \"type\": \"GeometryCollection\",\n }\n assert folium.GeoJson(geojson_data).get_bounds() == [[0, -3], [4, 2]]",
"def _xywh2min_max(box):\n x, y, w, h = box\n return np.array([x, y, x+w, y+h])",
"def bounding_box(coords):\n min_x = min(coords, key = lambda p: p[0])[0]\n min_y = min(coords, key = lambda p: p[1])[1]\n max_x = max(coords, key = lambda p: p[0])[0]\n max_y = max(coords, key = lambda p: p[1])[1]\n print(min_x)\n print(min_y)\n print(max_x)\n print(max_y)\n return (min_x, max_y), (max_x, min_y)",
"def find_max_coords(self):\n all_max_bound = []\n all_min_bound = []\n shape_dict = self.shape_dict\n for zone_id in shape_dict:\n zone_shape = shape_dict[zone_id]\n max_bound_zone = zone_shape.max_bound\n min_bound_zone = zone_shape.min_bound\n all_max_bound.append(max_bound_zone)\n all_min_bound.append(min_bound_zone)\n\n map_max_bound, unused_max = Utils.calculate_boundaries(all_max_bound)\n unused_min, map_min_bound = Utils.calculate_boundaries(all_min_bound)\n\n return (map_max_bound, map_min_bound)",
"def rectpolyctl(xmin,xmax,ymin,ymax):\n pc=[]\n pc.append((xmin,ymin))\n pc.append((xmin,ymax))\n pc.append((xmax,ymax))\n pc.append((xmax,ymin))\n pc.append((xmin,ymin))\n return pc",
"def _get_bounds(x, y, size):\n x = np.array(np.atleast_1d(x))\n y = np.array(np.atleast_1d(y))\n\n lower_x = np.rint(x - size[0]/2)\n lower_y = np.rint(y - size[1]/2)\n\n return np.stack((np.stack((lower_x, lower_x + size[0]), axis=1),\n np.stack((lower_y, lower_y + size[1]), axis=1)), axis=1).astype(int)",
"def get_bounding_box(im):\n coords = np.where(im)\n \n return np.array([np.min(coords[0]), np.max(coords[0]), \n np.min(coords[1]), np.max(coords[1])])"
] | [
"0.6630684",
"0.6445707",
"0.633211",
"0.6272101",
"0.6204608",
"0.6063664",
"0.6033086",
"0.5961396",
"0.5944326",
"0.59405506",
"0.5891103",
"0.58675516",
"0.5803532",
"0.58031636",
"0.57914704",
"0.5761674",
"0.5709238",
"0.5686564",
"0.5606224",
"0.55829823",
"0.5531209",
"0.55096114",
"0.550571",
"0.55056953",
"0.5492108",
"0.5476666",
"0.5475095",
"0.5462166",
"0.54551995",
"0.5427274"
] | 0.7609233 | 0 |
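Editor-added illustration for the poly_bbox row above (not part of the dataset): the returned bounding box is a [[lon_min, lat_min], [lon_max, lat_max]] array, so one hypothetical way to consume it is a simple containment check. The bbox values below are invented for demonstration only.

import numpy as np

bbox = np.array([[10.0, -2.5], [25.0, 4.0]])  # hypothetical poly_bbox output
(ll_lon, ll_lat), (ur_lon, ur_lat) = bbox

def point_in_bbox(lon, lat):
    # lower-left / upper-right convention, as documented in the row above
    return ll_lon <= lon <= ur_lon and ll_lat <= lat <= ur_lat

print(point_in_bbox(15.0, 1.0))  # True
print(point_in_bbox(30.0, 1.0))  # False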
Builds a circle control. | def circle(self):
self._build_name()
if self.ctrl_name:
self.ctrl = pm.circle(n=self.ctrl_name, ch=0, o=1, nr=[1, 0, 0])[0]
self._finalize_ctrl() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def circle(self):\n return circle(self.N, self.o, self.r)",
"def circle(self, x, y, r, cls=None, style=None):\n x, y, r = self._meta.units(x, y, r)\n cls_str = 'class=\"%s\" ' % cls if cls else ''\n style_str = 'style=\"%s\" ' % self._meta.make_style(style) if style else ''\n self.elements.append(\"\"\"\n <circle cx=\"%s\" cy=\"%s\" r=\"%s\" %s%s/>\n \"\"\".strip() % (\n x, y, r, cls_str, style_str\n ))\n return self",
"def circle(self, x, y, r, cls=None, style=None):\n x, y, r = self._meta.units(x, y, r)\n cls_str = 'class=\"%s\" ' % cls if cls else ''\n style_str = 'style=\"%s\" ' % self._meta.make_style(style) if style else ''\n self.elements.append(\"\"\"\n <circle cx=\"%s\" cy=\"%s\" r=\"%s\" %s%s/>\n \"\"\".strip() % (\n x, y, r, cls_str, style_str\n ))\n return self",
"def create_circle(self, cx, cy, radius, style=None, parent=None):\n if parent is None:\n parent = self.current_parent\n if parent is not None:\n attrs = {'r': str(radius), 'cx': str(cx), 'cy': str(cy)}\n if style:\n attrs['style'] = style\n return etree.SubElement(parent, svgns('circle'), attrs)",
"def draw_circle(self, color, center, radius, width):\n _c = self.T.itrans(center)\n pg.draw.circle(self.screen, color, _c(), radius, width)",
"def __drawCircle(self, center, radius, color, drawwidth=1):\n radius *= self.viewZoom\n if radius < 1: radius = 1\n else: radius = int(radius)\n\n pygame.draw.circle(self.screen, color, center, radius, drawwidth)",
"def _generate_circle(self, center, radius):\n assert len(center) in [2, 3], 'Center of circle must have 2 or 3 elements'\n assert radius > 0, 'Radius must be greater than zero'\n return Point(*center).buffer(radius)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def create_circle(self, x, y, r, **kwargs):\n return self.create_oval(*self.circ_to_oval(x, y, r), **kwargs)",
"def DrawCircle(self, center, radius, color, drawwidth=1):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, drawwidth)",
"def draw_circle(self, color, position, radius, width=0, anchor='topleft'):\n offset = self._calculate_offset(anchor)\n pygame.draw.circle(self._surf, color, (position + offset).floor(),\n radius, width)\n self._version += 1\n spyral.util.scale_surface.clear(self._surf)\n return self",
"def _circle(i, r=.05):\n\treturn Circle((i, 0), r, fill=True, color='black')",
"def circle(self, center, radius, color=(255, 255, 255), width=0):\n center = self._transform(center)\n pygame.draw.circle(self.screen, color, center, radius, width)",
"def _generate_circle_mask(center_y, center_x, radius):\n\n circle = draw.circle(center_y, center_x, radius)\n\n return circle",
"def circle(self, center, rad):\n self.gc.show_circles(center[0], center[1], rad, facecolor='none', edgecolor=self.color, linewidth=0.5)",
"def showCircle(self, window, color=None, radius=None, fill=None, conversion=None):\n if not color: color = self.color\n if not radius: radius = self.radius\n if not fill: fill = self.fill\n if not conversion: conversion = self.conversion\n window.draw.circle(window.screen, color, [self.x, self.y], radius, fill, conversion)",
"def make_circle(x, y, r):\n\tnew_circle = Circle()\n\tnew_circle.x = x\n\tnew_circle.y = y\n\tnew_circle.r = r\n\treturn new_circle",
"def createCircle(self, x, y, radius):\n # TODO (#2398) fix this to be top left coordinates, width, height\n return QtCore.QRectF(\n int(x - radius), int(y - radius), int(radius * 2), int(radius * 2)\n )",
"def draw_circle(centerx, centery, radius):\r\n global _canvas\r\n global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n circle = Circle()\r\n circle.move(centerx, centery)\r\n circle.setRadius(radius)\r\n _set_not_filled(circle)\r\n _canvas.add(circle)",
"def __init__(self, ax, onselect, minspan=None, useblit=False, circprops=None):\n if circprops is None:\n circprops = dict(fc='w', alpha=0.5) \n \n self.ax = ax\n self.visible = True\n self.canvas = ax.figure.canvas\n self.canvas.mpl_connect('motion_notify_event', self.onmove)\n self.canvas.mpl_connect('button_press_event', self.press)\n self.canvas.mpl_connect('button_release_event', self.release)\n self.canvas.mpl_connect('draw_event', self.update_background)\n\n self.circ = None\n self.background = None\n\n self.circprops = circprops\n self.onselect = onselect\n self.useblit = useblit\n self.minspan = minspan\n\n self.circ = Circle( (0,0), 1, **self.circprops)\n \n\tself.unit_verts = [v for v in self.circ.verts]\n\tself.circ.set_visible(False)\n\n if not self.useblit: self.ax.add_patch(self.circ)\n self.pressx = None",
"def draw_neuron(self, center, radius, color):\r\n self.pen.up()\r\n self.pen.color(color)\r\n self.pen.goto(center)\r\n\r\n self.pen.setheading(0)\r\n self.pen.forward(radius)\r\n self.pen.setheading(90)\r\n\r\n # draw circle\r\n self.pen.begin_fill()\r\n self.pen.pendown()\r\n self.pen.circle(radius)\r\n self.pen.end_fill()\r\n\r\n self.pen.color('black')\r\n self.pen.up()\r\n self.pen.goto(center)\r\n self.pen.setheading(0)",
"def drawCircle(t, x, y, radius):\r\n t.up()\r\n t.goto(x + radius, y)\r\n t.setheading(90)\r\n t.down()\r\n for count in range(120):\r\n t.left(3)\r\n t.forward(2.0 * math.pi * radius / 120.0)",
"def buildProfile(radius=1, spans=8):\n crv = cmds.circle(c=[0, 0, 0], nr=[0, 0, 1], sw=360, r=radius, s=spans, d=3, ch=False)\n return crv",
"def draw_circle(self, color, position, radius, width = 0, anchor= 'topleft'):\n color = spyral.color._determine(color)\n offset = self._calculate_offset(anchor)\n pygame.draw.circle(self._surf, color, position + offset, radius, width)",
"def draw_circle(self, center, radius, line_width, line_color, fill_color=\"\"):\n line_color, fill_color = check_color(line_color), check_color(fill_color)\n SToval.oval(self.canvas, center, radius, line_width, line_color, fill_color)",
"def DrawCircle(*args, **kwargs):\n return _gdi_.PseudoDC_DrawCircle(*args, **kwargs)",
"def DrawCircle(*args, **kwargs):\n return _gdi_.DC_DrawCircle(*args, **kwargs)",
"def wdraw_circle(self, wx, wy, dradius, fill, outline):\r\n dx, dy = self.w_to_d(wx, wy)\r\n self.canvas.create_oval(dx - dradius, dy - dradius, dx + dradius, dy + dradius, fill=fill, outline=outline)",
"def __init__( self , center , radius ):\r\n self.center = center\r\n self.radius = radius"
] | [
"0.70402366",
"0.67007273",
"0.67007273",
"0.6687724",
"0.6502949",
"0.6500092",
"0.64748067",
"0.64442",
"0.64442",
"0.64380133",
"0.63871896",
"0.63791484",
"0.6370651",
"0.63555884",
"0.6327178",
"0.6325744",
"0.6300299",
"0.62929326",
"0.6208691",
"0.61940867",
"0.61614597",
"0.614061",
"0.6116669",
"0.6081951",
"0.6080429",
"0.60783374",
"0.6051911",
"0.60458434",
"0.60426205",
"0.6009848"
] | 0.72939414 | 0 |
Builds the name of the control. | def _build_name(self):
self.ctrl_name = NameUtils.get_unique_name(self.asset,
self.side,
self.part,
"ctrl") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_name(self, name):\n return \"{}/{}.{}\".format(self.name, self._layer_counter, name)",
"def _get_name(self):\n return '%s (%d)' % (\n self.panel_template.panel_name,\n self.implementation)",
"def _build_name(name_id):\n return \"xp_%08d\" % name_id",
"def _get_name(self):\n name_string = '%s_%s' % (\n self.parameter_type,\n self.parameter_value_type)\n if self.paneltemplateparametermarker_set.count() > 0:\n marker_string = \"_\".join(sorted([m.marker.marker_abbreviation for m in self.paneltemplateparametermarker_set.all()]))\n name_string += '_' + marker_string\n if self.fluorochrome:\n name_string += '_' + self.fluorochrome.fluorochrome_abbreviation\n return name_string",
"def _get_name(self):\n return '%s: %s-%s' % (\n self.fcs_number,\n self.parameter_type,\n self.parameter_value_type)",
"def _build_name(name_idx):\n return \"explored%s.set_%05d.xa_%08d\" % (\n ArrayParameter.IDENTIFIER,\n name_idx // 1000,\n name_idx,\n )",
"def name(self):\n return '{} {}'.format(self.client_name, self.variable)",
"def name(self, value):\n\t\tself.form.setObjectName(value)",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def name(self) -> str:",
"def form_name(self):\n \n # Unpack\n request = self.request\n context = request.context\n \n # Prepare\n first_btn = len(self.buttons) and self.buttons[0]\n btn_is_create = first_btn and first_btn.lower() == u'create'\n action = u'Create new' if btn_is_create else u'Edit'\n name = self.__class__.__name__\n \n # Try to use the ``request.context``.\n if context:\n name = getattr(context, 'name', context.__name__)\n \n # Return concatenated, e.g. `Edit Foo`.\n return u'{0} {1}'.format(action, name)",
"def _make_display_name(cls, key: str) -> str:\n return f\"{cls._temp_prefix}-{key}-{uuid.uuid4()}\"",
"def name(self) -> str:\n ...",
"def name(self) -> str:\n ...",
"def get_name():",
"def get_name() -> str:",
"def Name(self) -> str:",
"def Name(self) -> str:",
"def Name(self) -> str:",
"def Name(self) -> str:",
"def name(self):\n return f\"{self.client_name} {self.variable}\"",
"def get_name(self) -> str:\n pass",
"def init_name(self):\r\n try:\r\n rval = self.name\r\n except AttributeError:\r\n if 0:\r\n l = []\r\n for n in self.fgraph.toposort():\r\n if hasattr(n.op, \"name\") and n.op.name is not None:\r\n v = n.op.name\r\n if v.startswith(\"Composite\"):\r\n v = v[len(\"Composite\"):]\r\n else:\r\n v = n.op.__class__.__name__\r\n l.append(v)\r\n rval = \"Composite{\" + \",\".join(l) + \"}\"\r\n else:\r\n for i, r in enumerate(self.fgraph.inputs):\r\n r.name = 'i%i' % i\r\n for i, r in enumerate(self.fgraph.outputs):\r\n r.name = 'o%i' % i\r\n io = set(self.fgraph.inputs + self.fgraph.outputs)\r\n for i, r in enumerate(self.fgraph.variables):\r\n if r not in io and len(r.clients) > 1:\r\n r.name = 't%i' % i\r\n rval = \"Composite{%s}\" % str(self.fgraph)\r\n self.name = rval",
"def name(self) -> str: # pragma: no cover",
"def name(self):\n return \"{} {}\".format(self._clientname, self._name)",
"def getname(self):\n if self.instance is not None:\n return '%s_%s' % (self.name,self.instance)\n else:\n return self.name",
"def get_name() -> str:\n pass"
] | [
"0.6769557",
"0.669109",
"0.6648812",
"0.65889513",
"0.65886915",
"0.64992493",
"0.64962196",
"0.648181",
"0.6439064",
"0.6439064",
"0.6439064",
"0.6439064",
"0.6439064",
"0.64387935",
"0.6416379",
"0.64004415",
"0.64004415",
"0.6391087",
"0.63670844",
"0.6349884",
"0.6349884",
"0.6349884",
"0.6349884",
"0.633943",
"0.63139033",
"0.6312202",
"0.6282268",
"0.6280519",
"0.6276772",
"0.6224177"
] | 0.85207385 | 0 |
Aims the control along the provided aim axis. | def _aim_ctrl(self):
y = 0
z = 0
if self.aim_axis == "y":
z = 90
elif self.aim_axis == "z":
y = -90
for shape in self.ctrl.getShapes():
pm.rotate(shape.cv, 0, y, z, r=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_guards_to_axes(self, axis: plt.Axes, color: str = \"g\") -> None:\n if self.orientation == Orientation.UP_DOWN:\n length = self.image.shape[0]\n else:\n length = self.image.shape[1]\n x_data = np.arange(length)\n left_y_data = self.left_guard_separated\n right_y_data = self.right_guard_separated\n for left, right in zip(left_y_data, right_y_data):\n if self.orientation == Orientation.UP_DOWN:\n axis.plot(left(x_data), x_data, color=color)\n axis.plot(right(x_data), x_data, color=color)\n else:\n axis.plot(x_data, left(x_data), color=color)\n axis.plot(x_data, right(x_data), color=color)",
"def draw_aim(self):\n polygon(screen, self.color, [(self.x, self.y), (self.x + self.r * 1.71 / 2, self.y - self.r / 2),\n (self.x + self.r * 1.71, self.y), (self.x + self.r * 1.71, self.y + self.r),\n (self.x + self.r * 1.71 / 2, self.y + 3 * self.r / 2), (self.x, self.y + self.r)])",
"def exp_focus_track(self, axis):\n\n # Update step distance\n if self.arm.position(axis) == 0:\n self.step = self.first_step\n elif self.arm.position(axis)+self.step*2 > self.maxdist:\n self.step = self.maxdist-self.arm.position(axis)\n else:\n self.step *= 2.\n\n # Move the arm\n self.arm.step_move(self.step, axis)\n\n # Move the platform to center the tip\n for i in range(3):\n self.microscope.step_move(self.mat[i, axis] * self.step, i)\n\n # Waiting for motors to stop\n self.arm.wait_motor_stop(axis)\n self.microscope.wait_motor_stop([0, 1, 2])\n\n # Focus around the estimated focus height\n try:\n _, _, loc = self.focus()\n except ValueError:\n raise EnvironmentError('Could not focus on the tip')\n\n # Move the platform for compensation\n delta = np.array([[(self.x_init - loc[0]) * self.um_px], [(self.y_init - loc[1]) * self.um_px], [0]])\n move = self.rot_inv * delta\n for i in range(2):\n self.microscope.step_move(move[i, 0], i)\n\n self.microscope.wait_motor_stop([0, 1])\n\n # Update the transform matrix\n for i in range(3):\n self.mat[i, axis] = self.microscope.position(i) / self.arm.position(axis)\n\n pass",
"def aimConstraint(*args, aimVector: Union[List[float, float, float], bool]=None, layer:\n AnyStr=\"\", maintainOffset: bool=True, name: Union[AnyStr, bool]=\"\", offset:\n Union[List[float, float, float], bool]=None, remove: bool=True, skip:\n Union[AnyStr, List[AnyStr]]=\"\", targetList: bool=True, upVector:\n Union[List[float, float, float], bool]=None, weight: Union[float, bool]=0.0,\n weightAliasList: bool=True, worldUpObject: Union[name, bool]=None,\n worldUpType: Union[AnyStr, bool]=\"\", worldUpVector: Union[List[float, float,\n float], bool]=None, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def aim(cmd:str):\n if (cmd is \"FIRE\"):\n fire()\n else:\n move(cmd)",
"def show(self, q , x_axis = 0 , y_axis = 1 ):\n \n ani = self.get_animator()\n ani.x_axis = x_axis\n ani.y_axis = y_axis\n \n ani.show( q )",
"def reflect(self, axis):\n if axis == \"x\":\n self.y = - self.y\n elif axis == \"y\":\n self.x = - self.x\n else:\n print(\"The argument axis only accepts values 'x' and 'y'!\")",
"def interact(self):\n x, mu = self.update_position_direction(self.l_int)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_int)\n self.update_estimators(self.l_int, mu_mean)\n\n self.is_absorbed = True\n self.is_active = False",
"def on_cam_base_adjust_btn_clicked(self):\n pitch = self.cam_base_pitch_hSlider.value()\n yaw = self.cam_base_yaw_hSlider.value()\n len = self.cam_processing_len_edit.text()\n wid = self.cam_processing_width_edit.text()\n self.baseCamThread.cam.set_cam_parameters(int(len),float(wid))\n pitch, yaw = self.control1.device.cmd_cam_adjust(pitch, yaw)\n status = \"goint to angles as, pitch: \" + str(pitch) + \", yaw: \" + str(yaw)\n self.cam_set_status_txt(status)",
"def move_aim(self):\n self.color = random.choice(COLORS)\n self.x += 3 * self.speed_x / FPS\n self.y += 3 * self.speed_y / FPS\n self.r -= 1\n self.draw_aim()\n if self.r <= 10:\n self.color = random.choice(COLORS)\n self.x = randint(100, 1000)\n self.y = randint(100, 800)\n self.r = randint(50, 100)\n self.speed_x = randint(-200, 200)\n self.speed_y = randint(-200, 200)\n if self.x >= 1100:\n self.speed_x = randint(-100, -10)\n if self.x <= 50:\n self.speed_x = randint(10, 100)\n if self.y >= 800:\n self.speed_y = randint(-100, -10)\n if self.y <= 50:\n self.speed_y = randint(10, 100)",
"def adjust_axes(axis):\r\n x_lim = axis.get_xlim()\r\n y_lim = axis.get_ylim()\r\n new_lim = (min(x_lim[0], y_lim[0]), max(x_lim[1], y_lim[1]))\r\n axis.set_xlim(new_lim)\r\n axis.set_ylim(new_lim)\r\n axis.set_aspect('equal')",
"def setup_anime(self, xmin_off=0, ymin_off=0, xmax_off=0, ymax_off=0):\n xtremes = [(min(x), min(y), max(x), max(y)) for x, y in self.artists]\n xmin = min(map(lambda lst: lst[0], xtremes)) + xmin_off\n ymin = min(map(lambda lst: lst[1], xtremes)) + ymin_off\n xmax = max(map(lambda lst: lst[2], xtremes)) + xmax_off\n ymax = max(map(lambda lst: lst[3], xtremes)) + ymax_off\n print(\"Xtremes:\", xmin, xmax, ymin, ymax)\n\n self.fig = plt.figure()\n self.ax = plt.axes(xlim=(xmin, xmax), ylim=(ymin, ymax),\n autoscale_on=False)\n self.ax.set_facecolor('k')\n self.ax.set(xlabel='x [a.u.]', ylabel='y [a.u.]',\n title='Projectile motion')\n self.ax.set_aspect('equal')\n self.ax.grid()\n\n for a in range(self.art_num):\n ln, = self.ax.plot([], [], '--')\n ln.set_clip_on(False)\n self.lines.append(ln)\n\n plt.gca().set_prop_cycle(None)\n\n for a in range(self.art_num):\n pt, = self.ax.plot([], [], 'o')\n pt.set_clip_on(False)\n self.points.append(pt)\n\n self.time_template = 'time = %d a.u.'\n self.time_text = self.ax.text(.5, .5, '', color='c',\n transform=self.ax.transAxes,\n horizontalalignment='center',\n verticalalignment='center')",
"def offset_aim(self, offset, aim):\n # eight possible orientations\n row, col = offset\n if aim == 0:\n return offset\n elif aim == 1:\n return -row, col\n elif aim == 2:\n return row, -col\n elif aim == 3:\n return -row, -col\n elif aim == 4:\n return col, row\n elif aim == 5:\n return -col, row\n elif aim == 6:\n return col, -row\n elif aim == 7:\n return -col, -row",
"def calibrate_arm(self, axis):\n\n while self.arm.position(axis) < self.maxdist:\n\n # calibrate arm axis using exponential moves:\n # moves the arm, recenter the tip and refocus.\n try:\n self.exp_focus_track(axis)\n except EnvironmentError:\n self.update_message('Could not track the tip.')\n return 0\n\n # When calibration is finished:\n\n # Resetting position of arm and microscope so no error gets to the next axis calibration\n self.go_to_zero()\n time.sleep(2)\n\n return 1",
"def give_direct_input(self, x_axis, y_axis, esc):\n self._set_axis_vals(x_axis, y_axis)\n self.esc = esc",
"def front_wheel_from_axis():",
"def _rotate_about_origin(self, angle, axis):\n print 'Invoked abstract {}._rotate_about_origin({}, {})'.format(\n self, angle, axis)\n return",
"def auto_adjust_axes(self, *args):\n\n xmin, xmax = self.axes.get_xlim()\n ymin, ymax = self.axes.get_ylim()\n self.adjust_axes(xmin, ymin, xmax, ymax)",
"def aim_in_plane(positions, aim_vector=(1, 0, 0), up_vector=(0, 1, 0)):\n # pylint: disable=too-many-locals\n\n # create nulls and snap them to given positions\n nulls = []\n for pos in positions:\n null = pm.createNode(\"transform\")\n pm.xform(null, translation=pos, worldSpace=True)\n nulls.append(null)\n\n locator = pm.spaceLocator()\n locator.setMatrix(nulls[0].getMatrix(worldSpace=True))\n\n # reverse vectors if we're on the right side (YZ plane)\n x_axis = locator.getTranslation(space=\"world\")[0]\n if x_axis < 0:\n aim_vector = [-1 * x for x in aim_vector]\n up_vector = [-1 * x for x in up_vector]\n\n # aim to nulls[2]\n pm.delete(\n pm.aimConstraint(\n nulls[-1],\n locator,\n maintainOffset=False,\n aimVector=aim_vector,\n upVector=up_vector,\n worldUpObject=nulls[1],\n worldUpType=\"object\",\n ),\n )\n\n # find AH distance\n index = len(nulls) // 2\n pt_a = pm.datatypes.Point(nulls[0].getTranslation(space=\"world\"))\n pt_b = pm.datatypes.Point(nulls[index].getTranslation(space=\"world\"))\n pt_c = pm.datatypes.Point(nulls[-1].getTranslation(space=\"world\"))\n\n c_side = pt_b - pt_a\n b_side = pt_c - pt_a\n height = sin(c_side.angle(b_side)) * c_side.length()\n ah_dist = sqrt(pow(c_side.length(), 2) - pow(height, 2))\n\n # offset by ah_dist along aim axis\n ah_values = [ah_dist * x for x in aim_vector]\n pm.move(\n locator,\n *ah_values,\n relative=True,\n objectSpace=True,\n worldSpaceDistance=True\n )\n\n # re-orient properly\n pm.delete(\n pm.aimConstraint(\n nulls[index],\n locator,\n maintainOffset=False,\n aimVector=aim_vector,\n upVector=up_vector,\n worldUpObject=nulls[0],\n worldUpType=\"object\",\n ),\n )\n\n # move forward by half of AC\n ac_values = [b_side.length() * x for x in aim_vector]\n pm.move(\n locator,\n *ac_values,\n relative=True,\n objectSpace=True,\n worldSpaceDistance=True\n )\n\n # orient the base locator\n for i, each in enumerate(nulls, 1):\n if i < len(nulls):\n tmp = pm.spaceLocator()\n tmp.setMatrix(each.getMatrix(worldSpace=True))\n aim = pm.aimConstraint(\n nulls[i],\n tmp,\n maintainOffset=False,\n aimVector=aim_vector,\n upVector=up_vector,\n worldUpObject=locator,\n worldUpType=\"object\",\n )\n orientation = pm.xform(\n tmp, query=True, worldSpace=True, rotation=True\n )\n pm.delete(aim, tmp)\n pm.xform(each, rotation=orientation, worldSpace=True)\n else:\n tmp = pm.spaceLocator()\n pm.parent(tmp, nulls[-2])\n tmp.resetFromRestPosition()\n orientation = pm.xform(\n tmp, query=True, worldSpace=True, rotation=True\n )\n pm.xform(each, rotation=orientation, worldSpace=True)\n pm.delete(tmp)\n\n # cleanup and return\n matrices = [\n cmds.xform(x.name(), query=True, matrix=True, worldSpace=True)\n for x in nulls\n ]\n pm.delete(locator, nulls)\n\n return matrices",
"def set_axis_limits(*args):\n robots = get_robot_roots()\n if not robots:\n raise MimicError('Nothing Selected; Select a valid robot')\n return\n\n current_tab = pm.tabLayout('limits_tab_layout',\n query=True,\n selectTab=True)\n\n if current_tab == 'position_limits_tab':\n set_position_limits()\n elif current_tab == 'velocity_limits_tab':\n set_deriv_limits('Velocity')\n elif current_tab == 'accel_limits_tab':\n set_deriv_limits('Accel')\n elif current_tab == 'jerk_limits_tab':\n set_deriv_limits('Jerk')",
"def plot_autocorrs(self, axis=0, n_rows=4, n_cols=8):\n self.current_plot = 'multi'\n self.ax_zoomed = False\n \n bls = self.uv.d_uv_data['BASELINE']\n\n # Extract the relevant baselines using a truth array\n # bls = bls.tolist()\n bl_ids = set([256*i + i for i in range(1, n_rows * n_cols + 1)])\n bl_truths = np.array([(b in bl_ids) for b in bls])\n \n #print self.uv.d_uv_data['DATA'].shape\n #x_data = self.d_uv_data['DATA'][bl_truths,0,0,:,0,axis] # Baselines, freq and stokes\n #x_cplx = x_data[:,:,0] + 1j * x_data[:,:,1]\n\n x_cplx = self.stokes[axis][bl_truths]\n\n\n \n # Plot the figure\n #print self.uv.n_ant\n fig = self.sp_fig\n figtitle = '%s %s: %s -- %s'%(self.uv.telescope, self.uv.instrument, self.uv.source, self.uv.date_obs)\n for i in range(n_rows):\n for j in range(n_cols):\n ax = fig.add_subplot(n_rows, n_cols, i*n_cols + j +1)\n ax.set_title(self.uv.d_array_geometry['ANNAME'][i*n_cols + j], fontsize=10)\n #ax.set_title(\"%s %s\"%(i, j))\n \n x = x_cplx[i*n_cols+j::self.uv.n_ant]\n \n if self.scale_select.currentIndex() == 0 or self.scale_select.currentIndex() == 1:\n if x.shape[0] == self.uv.n_ant:\n self.plot_spectrum(ax, x, label_axes=False)\n else:\n self.plot_spectrum(ax, x, stat='max', label_axes=False)\n self.plot_spectrum(ax, x, stat='med', label_axes=False)\n self.plot_spectrum(ax, x, stat='min', label_axes=False)\n else:\n self.plot_spectrum(ax, x, label_axes=False)\n self.updateFreqAxis(ax)\n \n if i == n_rows-1:\n ax.set_xlabel('Freq')\n if j == 0:\n ax.set_ylabel('Amplitude')\n \n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.tick_params(axis='both', which='major', labelsize=10)\n plt.tick_params(axis='both', which='minor', labelsize=8)\n plt.xticks(rotation=30)\n \n plt.subplots_adjust(left=0.05, right=0.98, top=0.95, bottom=0.1, wspace=0.3, hspace=0.45)\n return fig, ax",
"def effect(self):\n AxisType = self.options.AxisType\n AxisDescription = self.options.AxisDescription\n AxisUnit = self.options.AxisUnit\n AxisLabel = self.options.AxisLabel\n AxisMaxValue = self.options.AxisMaxValue\n AxisMinValue = self.options.AxisMinValue\n AxisScale = self.options.AxisScale\n \n \n for id, node in self.selected.iteritems():\n axis = node #TODO: This selection should be further tested\n axis.set(inkex.addNS(\"Type\",\"TimeAnalysis\"), \"Axis\")\n axis.set(inkex.addNS(\"AxisType\",\"TimeAnalysis\"), AxisType)\n axis.set(inkex.addNS(\"AxisDescription\",\"TimeAnalysis\"), AxisDescription)\n #TODO: The label should be unique.\n axis.set(inkex.addNS(\"AxisLabel\",\"TimeAnalysis\"), AxisLabel) \n axis.set(inkex.addNS(\"AxisUnit\",\"TimeAnalysis\"), AxisUnit)\n axis.set(inkex.addNS(\"AxisMaxValue\",\"TimeAnalysis\"), AxisMaxValue)\n axis.set(inkex.addNS(\"AxisMinValue\",\"TimeAnalysis\"), AxisMinValue)\n axis.set(inkex.addNS(\"AxisScale\",\"TimeAnalysis\"), AxisScale)\n # sys.stderr.write(\"The max value of the axis is: \" + str(axis.get(inkex.addNS(\"AxisMaxValue\",\"TimeAnalysis\"))))",
"def gonio_axis_align():\n \n # Invert camera image, so dark pin on light image becomes a peak\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # High threshold, so AD centroid doesn't interpret background\n cam_8ThresholdOld = cam_8.stats4.centroid_threshold.get()\n cam_8.stats4.centroid_threshold.put(150)\n cam_7ThresholdOld = cam_7.stats4.centroid_threshold.get()\n cam_7.stats4.centroid_threshold.put(150)\n \n # HiMag\n # Copy ROI2 geometry (HiMag Mag3) to ROI4 and use ROI4 centroid plugin\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get())\n cam_8.roi4.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get())\n cam_8.roi4.size.x.put(cam_8.roi2.size.x.get() * 0.20)\n cam_8.roi4.size.y.put(cam_8.roi2.size.y.get())\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get() + cam_8.roi2.size.x.get()/2 - cam_8.roi4.size.x.get()/2)\n \n # LoMag\n # Copy ROI2 geometry (LoMag Mag1) to ROI4 and use ROI4 centroid plugin\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get())\n cam_7.roi4.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get())\n cam_7.roi4.size.x.put(cam_7.roi2.size.x.get() * 0.05)\n cam_7.roi4.size.y.put(cam_7.roi2.size.y.get())\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get() + cam_7.roi2.size.x.get()/2 - cam_7.roi4.size.x.get()/2)\n \n centerPinYHiMag0 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag0 = centroid_avg(cam_7.stats4)[1]\n yield from bps.mvr(gonio.o,180)\n time.sleep(2)\n centerPinYHiMag180 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag180 = centroid_avg(cam_7.stats4)[1]\n centerPinYHiMag = (centerPinYHiMag0 + centerPinYHiMag180)/2\n centerPinYLoMag = (centerPinYLoMag0 + centerPinYLoMag180)/2\n\n centerPinOffsYHiMag = centerPinYHiMag - cam_8.roi4.size.y.get() / 2\n centerPinOffsYLoMag = centerPinYLoMag - cam_7.roi4.size.y.get() / 2\n \n # Correct Mag 3 (cam_8 ROI2)\n cam_8.roi2.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + centerPinOffsYHiMag)\n # Correct Mag 4 (cam_8 ROI1)\n cam_8.roi1.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + (cam_8.roi2.size.y.get()-cam_8.roi1.size.y.get())/2)\n \n # Correct Mag 1 (cam_7 ROI2)\n cam_7.roi2.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + centerPinOffsYLoMag)\n # Correct Mag 2 (cam_7 ROI3)\n cam_7.roi3.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + (cam_7.roi2.size.y.get()-cam_7.roi3.size.y.get())/2)\n\n # De-invert image\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # Set thresold to previous value\n cam_8.stats4.centroid_threshold.put(cam_8ThresholdOld)\n cam_7.stats4.centroid_threshold.put(cam_7ThresholdOld)\n \n return",
"def aim_chief_ray(opt_model, fld, wvl=None):\n seq_model = opt_model.seq_model\n if wvl is None:\n wvl = seq_model.central_wavelength()\n stop = seq_model.stop_surface\n aim_pt = iterate_ray(opt_model, stop, np.array([0., 0.]), fld, wvl)\n return aim_pt",
"def T_from_approach_axis_center(approach, axis, center):\n T = np.eye(4)\n T[0:3, 0] = approach\n T[0:3, 1] = np.cross(approach, axis)\n T[0:3, 2] = axis\n T[0:3, 3] = center\n return T",
"def rescale_axes(self, x=True, y=True, xlim=None, ylim=None, \n tighten_up=0): \n \n # First, figure out what limits should be\n col_xlim = [[1e10, -1e10] for i in range(self.dims[0])]\n row_ylim = [[1e10, -1e10] for i in range(self.dims[1])]\n \n # Loop over axes\n for i in range(self.N):\n if self.grid[i] is None:\n continue\n \n # column, row\n j, k = self.axis_position(i)\n \n if self.above_diagonal(i):\n continue\n \n if x and xlim is None:\n col_xlim[j][0] = min(col_xlim[j][0], self.grid[i].dataLim.min[0])\n col_xlim[j][1] = max(col_xlim[j][1], self.grid[i].dataLim.max[0]) \n elif x:\n col_xlim[j][0] = xlim[0]\n col_xlim[j][1] = xlim[1]\n \n if self.diagonal is not None and i in self.diag:\n continue\n \n if y and (ylim is None): \n row_ylim[k][0] = min(row_ylim[k][0], self.grid[i].dataLim.min[1])\n row_ylim[k][1] = max(row_ylim[k][1], self.grid[i].dataLim.max[1]) \n elif y:\n row_ylim[k][0] = ylim[0]\n row_ylim[k][1] = ylim[1] \n \n # Apply limits \n for i in range(self.N):\n if self.grid[i] is None:\n continue\n \n # column, row \n j, k = self.axis_position(i)\n \n col_tmp = [col_xlim[j][0] * (1. + tighten_up * np.sign(col_xlim[j][0])),\n col_xlim[j][1] * (1. - tighten_up * np.sign(col_xlim[j][1]))]\n \n row_tmp = [row_ylim[k][0] * (1. + tighten_up * np.sign(row_ylim[k][0])),\n row_ylim[k][1] * (1. - tighten_up * np.sign(row_ylim[k][1]))]\n\n # Kludge\n if np.all(np.isfinite(col_tmp)):\n self.grid[i].set_xlim(col_tmp)\n \n if self.diagonal and i in self.diag:\n continue\n\n if np.all(np.isfinite(row_tmp)):\n self.grid[i].set_ylim(row_tmp)\n\n pl.draw()",
"def set_axes(self, a):\r\n self.axes = a",
"def set_axis(axis_number):\n robots = get_robot_roots()\n if not robots:\n pm.warning('Nothing Selected; Select a valid robot')\n return\n\n # These are specific to how the robots are rigged in relation to Maya's\n # coordinate system\n rotation_axes = ['Y', 'X', 'X', 'Z', 'X', 'Z']\n\n try: # if the text field is empty, or not a float value, skip it\n rotation_axis = rotation_axes[axis_number - 1]\n val = float(pm.textField('t_a{}'.format(axis_number),\n query=True,\n text=True))\n\n for robot in robots:\n ns = robot.namespace()\n pm.setAttr('{0}|{1}robot_GRP|{1}FK_CTRLS|{1}a{2}FK_CTRL.rotate{3}'.format(robot, ns, axis_number, rotation_axis), val)\n except:\n pass",
"def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._center = matrix.dot(self._center)",
"def automove_to(self, x: int, y: int) -> None:\n self.cpu_controlled = True\n self.end_cinematic_x_pos = x\n self.end_cinematic_y_pos = y"
] | [
"0.5673403",
"0.55739117",
"0.5537915",
"0.55028373",
"0.5464646",
"0.5432632",
"0.5375639",
"0.532435",
"0.52990675",
"0.529851",
"0.5267174",
"0.52602017",
"0.5174245",
"0.5149552",
"0.5135988",
"0.5101093",
"0.50228465",
"0.50111693",
"0.4996139",
"0.49835455",
"0.4960167",
"0.4925929",
"0.49125645",
"0.4884109",
"0.48615262",
"0.48112318",
"0.48104668",
"0.48024058",
"0.47990045",
"0.47985888"
] | 0.6640851 | 0 |
Plot a price chart marking where long and short positions would be held, given the values of a signal. | def chart_price(price_series, signal_series, threshold=0):
chart_data = pd.DataFrame()
chart_data["out"] = price_series
chart_data["long"] = (signal_series > threshold) * price_series
chart_data["short"] = (signal_series < -threshold) * price_series
chart_data.replace(0, np.nan, inplace=True)
return chart_data.plot(figsize=(20, 10), grid=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_plot(x,y):",
"def value_line(self):\n marks = self._get_marks(False)\n marks['Val'] = self.data['Value']\n fig = plt.figure(figsize=(4,2), dpi=200)\n fig.patch.set_facecolor('#ececec')\n ax = fig.add_subplot(111)\n ax.plot(self.data['Value'], alpha=0.8, lw=1.2, color=\"green\", label='Value')\n ax.scatter([x for x in marks[marks['Marks']>0].index], marks[marks['Marks']>0]['Val'], marker='^', s=20, c=\"b\", label=\"Buy\")\n ax.scatter([x for x in marks[marks['Marks']<0].index], marks[marks['Marks']<0]['Val'], marker='v', s=20, c=\"r\", label=\"Sell\")\n ax.set_xlabel('Time')\n ax.set_ylabel('Portfolio\\'s Value (€)')\n ax.set_title('Portfolio\\'s Value (€) - Daily')\n ax.xaxis.set_major_locator(dates.MonthLocator())\n ax.xaxis.set_major_formatter(dates.DateFormatter('%b-%Y'))\n for x, y, mark in zip(marks.index, marks['Val'], marks['Marks']):\n a = ax.get_ylim()\n if x == marks.index[0]:\n ax.annotate(str(mark) + \" €\", xy=(x + timedelta(abs((self.data.index[0] - self.data.index[-1]).days) / 80), y + (a[1]-a[0])/35), fontsize=5)\n else:\n if mark > 0:\n ax.annotate(str(mark) + \" €\", xy=(x + timedelta(abs((self.data.index[0] - self.data.index[-1]).days) / 60), y - (a[1]-a[0])/35), fontsize=5)\n else:\n ax.annotate(str(mark) + \" €\", xy=(x - timedelta(abs((self.data.index[0] - self.data.index[-1]).days) / 15), y + (a[1]-a[0])/35), fontsize=5)\n ax.grid(True)\n fig.autofmt_xdate()\n ax.legend()\n return fig, ax",
"def __plot(data, days: int = None):\n if days is not None:\n points = days * 144\n else:\n points = len(data)\n\n temp = data[-points:, 1]\n\n plt.plot(range(points), temp)\n plt.grid()\n plt.show()",
"def plot(axes, axis, values, c='chartreuse'):\n a = axes[axis]\n a.set_xlabel('time (s)')\n x = np.array(range(len(values))) / 1000\n dim = 'x' if axis == 0 else 'y' if axis == 1 else 'z'\n a.set_title('-'.join([dim, 'acceleration']))\n a.plot(x, values / 1000, c=c)",
"def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')",
"def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')",
"def plot_n_oscs_bubbles(dat, save_fig=False):\n\n fig = plt.figure(figsize=[6, 6])\n ax = plt.gca()\n for ke, va in dat.items():\n plt.plot(ke[0], ke[1], '.', markersize=va/10, color='blue')\n plt.xticks(list(range(0, 5)), list(range(0, 5)));\n\n # Titles & Labels\n ax.set_title('Multiple Peak Fits', fontsize=16)\n ax.set_xlabel('Number of Simulated Peaks', fontsize=14)\n ax.set_ylabel('Number of Fit Peaks', fontsize=14)\n\n # Set the top and right side frame & ticks off\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n # Set linewidth of remaining spines\n ax.spines['left'].set_linewidth(2)\n ax.spines['bottom'].set_linewidth(2)\n\n if save_fig:\n\n save_name = 'plts/MultiplePeakFits.pdf'\n plt.savefig(save_name, bbox_inches='tight', dpi=300)",
"def plot_xy(x, y, ax=None, xlabel='Energy [keV]', **kwargs):\n\n if not ax:\n new_plot = True\n plt.figure()\n ax = plt.axes()\n else:\n new_plot = False\n\n plt.semilogy(x, y, axes=ax, drawstyle='steps-mid', **kwargs)\n\n if new_plot:\n plt.xlabel(xlabel)\n plt.ylabel('Counts')\n\n if 'label' in kwargs:\n plt.legend()\n plt.show()\n\n return ax",
"def cli(sample, title, dpi, out):\n click.echo('\\n' + '.' * 50)\n\n # reading the CDT file.\n try:\n signalData = pd.read_csv(sample, sep='\\t', index_col=0)\n except IOError:\n print(\"\\nUnable to OPEN input files !\\n\")\n sys.exit(1)\n\n # prepare PlotData, remove extra decimal values\n signalData = signalData.round(decimals=3)\n\n # General DEBUG\n print(signalData.index)\n print(signalData.shape)\n\n # retrieve the row index from the dataframe\n rowIndex = list(signalData.index)\n\n # retrieve data for Sense strand\n sx = list(signalData.loc[rowIndex[0]])\n\n # retrieve values for y axis and convert them to float\n sy = list(signalData.columns)\n sy = list(map(float, sy))\n\n # prepare PlotData for antisense strand\n cx = list(signalData.loc[rowIndex[1]])\n\n # convert antisense data values to negative, to plot it below the sense data.\n x1 = [-i for i in cx]\n\n fig, ax = plt.subplots()\n # ax = plt.axes([0, 0, 1, 1])\n\n plt.plot(sy, sx, 'b', sy, x1, 'r') # plotting the graph\n\n # adding the fill color for both the strands.\n d = numpy.zeros(len(sx))\n d1 = numpy.zeros(len(sx))\n plt.fill_between(sy, sx, where=sx >= d, interpolate=False, color=\"blue\")\n plt.fill_between(sy, x1, where=sx >= d1, interpolate=False, color=\"red\")\n\n # Option to draw a vertical line at origin on x-axis\n # plt.axvline(x=0, color='black', linestyle='--')\n\n # creating the grid lines\n # plt.grid(linestyle='--', linewidth=0.5)\n\n plt.gca().xaxis.grid(True, linestyle='--', linewidth=0.5)\n\n # adding custom xticks and yticks\n plt.xticks(range(-100, 150, 50), fontsize=14)\n\n # retrieve the yticks\n my_yticks = ax.get_yticks()\n # pprint.pprint(my_yticks)\n lastTick = int(len(my_yticks) - 1)\n\n # Handle edge cases, not to round off to -0.0\n if my_yticks[0] <= -1.0:\n # setting the ylim for the y-axis\n ax.set_ylim(math.ceil(my_yticks[0]), math.ceil(my_yticks[lastTick]))\n # setting the ticks for y-axis\n plt.yticks([math.ceil(my_yticks[0]), 0, math.ceil(\n my_yticks[lastTick])], fontsize=14)\n else:\n # setting the ylim for the y-axis\n ax.set_ylim(my_yticks[0], math.ceil(my_yticks[lastTick]))\n # setting the ticks for y-axis\n plt.yticks([my_yticks[0], 0, math.ceil(\n my_yticks[lastTick])], fontsize=14)\n\n plt.ylabel('Tags', fontsize=18)\n\n # setting the padding space between the y-axis label and the y-axis\n if math.ceil(my_yticks[lastTick]) < 10:\n ax.yaxis.labelpad = -10\n else:\n ax.yaxis.labelpad = -15\n\n # to increase the width of the plot borders and tick width\n plt.setp(ax.spines.values(), linewidth=2)\n plt.tick_params(length=8, width=2)\n\n # if you chose to not include the xticks , since they are similar to heatmap x-axis ticks\n # plt.xticks([-100,0,100])\n ax.xaxis.set_major_formatter(NullFormatter())\n ax.xaxis.set_ticks_position('none')\n\n # plt.yticks(range(-10,12,2))\n # plt.xticks([-500,0,500])\n\n # start,end=ax.get_ylim()\n # ax.set_ylim(start-1,end+1)\n\n # Customizing the border/ spines on each side of the plot.\n # frame1 = plt.gca()\n # frame1.axes.xaxis.set_ticklabels([])\n # frame1.axes.yaxis.set_ticklabels([])\n # frame1.axes.spines['top'].set_visible(False)\n # frame1.axes.spines['right'].set_visible(False)\n # frame1.axes.spines['bottom'].set_visible(False)\n # frame1.axes.spines['left'].set_visible(False)\n\n # plt.show()\n plt.title(title, fontsize=25)\n # setting the margins\n plt.margins(0.01)\n\n # saving the image at 300dpi , web standard for printing images.\n plt.savefig(out, facecolor=None, dpi=dpi, pad_inches=0)\n click.echo('\\n' + '.' * 50)",
"def graph(stock):\n output=stock_price(stock)\n return plt.plot(output)",
"def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')",
"def plot_data(self):",
"def plot(self, values=None):\r\n plt.cla()\r\n plt.xlim([0, self.pond_size[0]])\r\n plt.ylim([0, self.pond_size[1]])\r\n plt.xticks(np.arange(self.pond_size[0]), [])\r\n for i in range(self.pond_size[0]):\r\n plt.text(i+0.4, -0.5, str(i))\r\n plt.yticks(np.arange(self.pond_size[1]), [])\r\n for i in range(self.pond_size[1]):\r\n plt.text(-0.5, i+0.4, str(i))\r\n\r\n # Draw the trajectory\r\n t_x = np.array([t[0] for t in self.trajectory])\r\n t_y = np.array([t[1] for t in self.trajectory])\r\n plt.plot(t_x+0.5, t_y+0.5, 'r-o')\r\n\r\n # Draw currents and values\r\n for x in range(self.pond_size[0]):\r\n for y in range(self.pond_size[1]):\r\n if values is not None:\r\n plt.text(x, y, '%.1f'%values[y, x])\r\n c = self.currents[y][x]\r\n assert len(c)==4\r\n for i in range(4):\r\n if c[i] != '0':\r\n head_size = 0.15 if c[i] == '1' else 0.3\r\n d = self.current_directions[i]\r\n plt.arrow(x+0.5-0.4*d[0], y+0.5-0.4*d[1], (0.8-head_size)*d[0], (0.8-head_size)*d[1],\r\n head_width=head_size, head_length=head_size, overhang=1.0)\r\n\r\n # Draw start and end states\r\n plt.gcf().gca().add_artist(plt.Circle((self.start_state[0]+0.5, self.start_state[1]+0.5), 0.4, color='r', alpha=0.5))\r\n plt.gcf().gca().add_artist(plt.Circle((self.end_state[0]+0.5, self.end_state[1]+0.5), 0.4, color='g', alpha=0.5))\r\n plt.gcf().gca().add_artist(plt.Circle((self.current_state[0]+0.5, self.current_state[1]+0.5), 0.25, color='b', alpha=0.5))\r\n plt.grid(True)\r\n plt.pause(0.2)",
"def view(self, lo_en: Quantity = Quantity(0.0, \"keV\"), hi_en: Quantity = Quantity(30.0, \"keV\"),\n figsize: Tuple = (8, 6)):\n if lo_en > hi_en:\n raise ValueError(\"hi_en cannot be greater than lo_en\")\n else:\n lo_en = lo_en.to(\"keV\").value\n hi_en = hi_en.to(\"keV\").value\n\n if len(self._plot_data.keys()) != 0:\n # Create figure object\n plt.figure(figsize=figsize)\n\n # Set the plot up to look nice and professional.\n ax = plt.gca()\n ax.minorticks_on()\n ax.tick_params(axis='both', direction='in', which='both', top=True, right=True)\n\n # Set the title with all relevant information about the spectrum object in it\n plt.title(\"{n} - {o}{i} Spectrum\".format(n=self.src_name, o=self.obs_id, i=self.instrument.upper()))\n for mod_ind, mod in enumerate(self._plot_data):\n x = self._plot_data[mod][\"x\"]\n # If the defaults are left, just update them to the min and max of the dataset\n # to avoid unsightly gaps at the sides of the plot\n if lo_en == 0.:\n lo_en = x.min()\n if hi_en == 30.0:\n hi_en = x.max()\n\n # Cut the x dataset to just the energy range we want\n plot_x = x[(x > lo_en) & (x < hi_en)]\n\n if mod_ind == 0:\n # Read out the data just for line length reasons\n # Make the cuts based on energy values supplied to the view method\n plot_y = self._plot_data[mod][\"y\"][(x > lo_en) & (x < hi_en)]\n plot_xerr = self._plot_data[mod][\"x_err\"][(x > lo_en) & (x < hi_en)]\n plot_yerr = self._plot_data[mod][\"y_err\"][(x > lo_en) & (x < hi_en)]\n plot_mod = self._plot_data[mod][\"model\"][(x > lo_en) & (x < hi_en)]\n\n plt.errorbar(plot_x, plot_y, xerr=plot_xerr, yerr=plot_yerr, fmt=\"k+\", label=\"data\", zorder=1)\n else:\n # Don't want to re-plot data points as they should be identical, so if there is another model\n # only it will be plotted\n plot_mod = self._plot_data[mod][\"model\"][(x > lo_en) & (x < hi_en)]\n\n # The model line is put on\n plt.plot(plot_x, plot_mod, label=mod, linewidth=1.5)\n\n # Generate the legend for the data and model(s)\n plt.legend(loc=\"best\")\n\n # Ensure axis is limited to the chosen energy range\n plt.xlim(lo_en, hi_en)\n\n plt.xlabel(\"Energy [keV]\")\n plt.ylabel(\"Normalised Counts s$^{-1}$ keV$^{-1}$\")\n\n ax.set_xscale(\"log\")\n ax.xaxis.set_major_formatter(ScalarFormatter())\n ax.xaxis.set_minor_formatter(FuncFormatter(lambda inp, _: '{:g}'.format(inp)))\n ax.xaxis.set_major_formatter(FuncFormatter(lambda inp, _: '{:g}'.format(inp)))\n\n plt.tight_layout()\n # Display the spectrum\n plt.show()\n\n # Wipe the figure\n plt.close(\"all\")\n\n else:\n warnings.warn(\"There are no XSPEC fits associated with this Spectrum, you can't view it.\")",
"def plot_vals(df, colors, events):\n #to do: take df.Datum.loc[0] und df.Datum.loc[-1], datetime.strftime() back to str, set as start&end\n fig, ax = plt.subplots(figsize=(16,5))\n ax.axis([0, 1440, 0, 5])\n start, end = ax.get_xlim()\n formatter = FuncFormatter(my_ticks)\n plt.xticks(rotation=70)\n ax.xaxis.set_major_locator(ticker.MultipleLocator(60.00))\n ax.xaxis.set_major_formatter(formatter)\n plt.yticks((1,2,3,4), events)\n ax.set_title(\"Jakobs Tag\") # to do: add dates (siehe oben)\n for ind, ev in enumerate(events):\n (xs,ys,dotsizes) = create_scatter_vals(df, ev, (ind+1))\n plt.scatter(xs, ys, s=dotsizes, marker=\"o\", alpha=0.5, color=colors[ind])\n plt.savefig(\"test1.pdf\")\n return fig,ax",
"def plot_equity_prices(ticker, prices):\n\n # define x-axis data points\n x = np.linspace(0, prices.shape[0], prices.shape[0])\n\n plt.plot(x, prices[ticker], linewidth=1, color='b', label=ticker)\n plt.legend(loc='upper left')\n plt.xlabel('Time (days)')\n plt.ylabel('Price')\n plt.title('Price vs Time: ' + ticker)\n plt.show()",
"def make_plot(counts):\n # YOUR CODE HERE\n posX=[]\n posY=[]\n negX=[]\n negY=[]\n\t\n count=1\n for i in counts:\n\tif len(i)!=0:\t\n\t\tposX.append(count)\n\t posY.append(i[0][1])\n\t\tnegX.append(count)\n\t negY.append(i[1][1])\n\t count=count+1\n\t\n line1, =plt.plot(posX,posY,marker=\"o\",label=\"Positive\",color=\"g\")\n line2, =plt.plot(negX,negY,marker=\"o\",label=\"Negative\",color=\"r\")\n plt.xlabel('Time Step')\n plt.ylabel('Word Count')\n plt.title('Basic Twitter Sentiment Analytics')\n plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)\n plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})\n plt.show()",
"def make_plot(counts):\n cn1 = []\n cn2 = []\n time = []\n\n for x in counts:\n y1 = x[0]\n cn1.append(y1[1])\n y2 = x[1]\n cn2.append(y2[1])\n\n for i in range(len(counts)):\n time.append(i)\n\n posLine = plt.plot(time, cn1,'bo-', label='Positive')\n negLine = plt.plot(time, cn2,'go-', label='Negative')\n plt.axis([0, len(counts), 0, max(max(cn1), max(cn2))+50])\n plt.xlabel('Time step')\n plt.ylabel('Word count')\n plt.legend(loc = 'upper left')\n plt.show()\n plt.savefig(\"plot.png\", format=\"png\")",
"def volatility_factor_plot(prices: list, dates: list, vf_data: VFStopsResultType,\n green_zone_x_values: List[list], red_zone_x_values: List[list],\n yellow_zone_x_values: List[list], y_range: float, minimum: float,\n text_str: str = \"\", str_color: str = \"\", **kwargs):\n # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n register_matplotlib_converters()\n\n title = kwargs.get('title', '')\n save_fig = kwargs.get('save_fig', False)\n filename = kwargs.get('filename', 'temp_candlestick.png')\n\n stop_loss_objects = vf_data.data_sets\n\n shown_stop_loss = f\"VF: {np.round(vf_data.vf.curated, 3)}\\n\"\n if vf_data.current_status.status.value != 'stopped_out':\n shown_stop_loss += f\"Stop Loss: ${np.round(vf_data.stop_loss.curated, 2)}\"\n else:\n shown_stop_loss += \"Stop Loss: n/a\"\n\n fig, ax_handle = plt.subplots()\n\n date_indexes = [datetime.strptime(date, '%Y-%m-%d').date() for date in dates]\n ax_handle.plot(date_indexes, prices, color='black')\n\n # Set the tick spacing (this is because dates crowd easily)\n mid_tick_size = int(len(date_indexes) / 4)\n ax_handle.xaxis.set_ticks([\n date_indexes[0], date_indexes[mid_tick_size], date_indexes[mid_tick_size * 2],\n date_indexes[mid_tick_size * 3], date_indexes[-1]\n ])\n\n y_start = minimum - (y_range * 0.05)\n height = y_range * 0.02\n\n for stop in stop_loss_objects:\n sub_dates = [date_indexes[index] for index in stop.time_index_list]\n ax_handle.plot(sub_dates, stop.caution_line, color='gold')\n ax_handle.plot(sub_dates, stop.stop_loss_line, color='red')\n\n for green_zone in green_zone_x_values:\n start = mdates.date2num(date_indexes[green_zone[0]])\n end = mdates.date2num(date_indexes[green_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='green',\n facecolor='green',\n fill=True\n )\n )\n\n for red_zone in red_zone_x_values:\n start = mdates.date2num(date_indexes[red_zone[0]])\n end = mdates.date2num(date_indexes[red_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='red',\n facecolor='red',\n fill=True\n )\n )\n\n for yellow_zone in yellow_zone_x_values:\n start = mdates.date2num(date_indexes[yellow_zone[0]])\n end = mdates.date2num(date_indexes[yellow_zone[-1]])\n width = end - start\n ax_handle.add_patch(\n Rectangle(\n (start, y_start),\n width,\n height,\n edgecolor='yellow',\n facecolor='yellow',\n fill=True\n )\n )\n\n ax_handle.set_title(title)\n\n if len(text_str) > 0 and len(str_color) > 0:\n new_start = minimum - (y_range * 0.2)\n new_end = minimum + (y_range * 1.02)\n ax_handle.set_ylim(new_start, new_end)\n props = dict(boxstyle='round', facecolor='white', alpha=0.25)\n ax_handle.text(\n 0.02,\n 0.02,\n text_str,\n color=str_color,\n transform=ax_handle.transAxes,\n bbox=props\n )\n\n if len(shown_stop_loss) > 0:\n props = dict(boxstyle='round', facecolor='white', alpha=0.25)\n ax_handle.text(\n 0.02,\n 0.90,\n shown_stop_loss,\n transform=ax_handle.transAxes,\n bbox=props\n )\n\n try:\n if save_fig:\n temp_path = os.path.join(\"output\", \"temp\")\n if not os.path.exists(temp_path):\n # For functions, this directory may not exist.\n plt.close(fig)\n plt.clf()\n return\n\n filename = os.path.join(temp_path, filename)\n if os.path.exists(filename):\n os.remove(filename)\n plt.savefig(filename)\n\n else:\n plt.show()\n\n except: # pylint: disable=bare-except\n print(\n f\"{utils.WARNING}Warning: plot failed to render in 'volatility factor plot' of \" +\n 
f\"title: {title}{utils.NORMAL}\")\n\n plt.close('all')\n plt.clf()",
"def add_price_flag(fig, axis, series, color, last_index=None):\n\n series = series.dropna()\n value = series.tail(1)\n\n try:\n index = value.index.tolist()[0]\n if last_index is not None:\n axis.plot(\n [index, last_index], [value.values[0], value.values[0]],\n color=color, linewidth=0.6, linestyle='--', alpha=0.6\n )\n else:\n last_index = index\n\n trans_offset = mtrans.offset_copy(\n axis.transData, fig=fig,\n x=0.05, y=0.0, units='inches'\n )\n\n # Add price text box for candlestick\n value_clean = format(value.values[0], '.6f')\n axis.text(\n last_index, value.values, value_clean,\n size=7, va=\"center\", ha=\"left\",\n transform=trans_offset,\n color=config['colors']['price_flag'],\n bbox=dict(\n boxstyle=\"angled,pad=0.2\",\n alpha=0.6, color=color\n )\n )\n\n except IndexError:\n pass",
"def graph(self):\n seq_obj = MultiSequence(self.symbol, self.__best_model.window_size,1)\n test_predict = self.__best_model.model.predict(seq_obj.X)\n\n #our data is scaled between -1 and 1 so lets scale it back up\n scaler = MinMaxScaler(feature_range=(self.__min_price ,self.__max_price))\n orig_data = seq_obj.original_data.reshape(-1,1)\n orig_prices = scaler.fit_transform(orig_data).flatten()\n \n # plot actual prices\n plt.plot(orig_prices, color='k')\n \n # plot test set prediction after scaling back up\n length = len(seq_obj.X) + self.__best_model.window_size \n test_in = np.arange(self.__best_model.window_size,length,1)\n pred_prices = scaler.fit_transform(test_predict.reshape(-1,1)).flatten()\n plt.plot(test_in,pred_prices,color = 'b')\n \n # pretty up graph\n plt.xlabel('day')\n plt.ylabel('Closing price of stock')\n plt.title(\"Price prediction for {}\".format(self.symbol))\n plt.legend(['Actual','Prediction'],loc='center left', bbox_to_anchor=(1, 0.5))\n plt.show()",
"def band_plot(N=400,a=1.0):\n foot_step=2*np.pi/N\n x=np.arange(0.0,2*np.pi/a,foot_step)\n y=band_energy(x)\n plt.plot(x,y)",
"def _plot_series(series, ax_id, linewidth, label_name, **kwargs):\n\n for i, c in enumerate(series._xa.component[:10]):\n comp = series._xa.sel(component=c)\n\n if comp.sample.size > 1:\n central_series = comp.mean(dim=\"sample\")\n low_series = comp.quantile(q=0.05, dim=\"sample\")\n high_series = comp.quantile(q=0.95, dim=\"sample\")\n else:\n central_series = comp\n\n label_to_use = (\n (label_name + (\"_\" + str(i) if len(series.components) > 1 else \"\"))\n if label_name != \"\"\n else \"\" + str(str(c.values))\n )\n\n central_series.plot(ax=ax_id, linewidth=linewidth, label=label_to_use, **kwargs)\n\n if comp.sample.size > 1:\n ax_id.fill_between(\n series.time_index, low_series, high_series, alpha=0.25, **kwargs\n )",
"def plot_data(df, title=\"normalized Stock prices\", ylabel=\"Price\", xlabel=\"Date\" ):\n plt.clf()\n ax = df.plot(title=title, fontsize=12)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.savefig('files/output/'+title+'.png')",
"def plot_rsi(stocks_data, stock):\n stock = stocks_data[stock]\n closes = stock['Close'].values\n plt.subplot(211)\n plt.plot(closes)\n\n labels, ifr = find_rsi(stock)\n plt.subplot(212)\n plt.plot(ifr)\n # plt.xlabel(labels)\n\n plt.show()",
"def making_plot(sample_points_x_y_nonZero, gauge_volume, y_upper_imit, y_lower_limit,\n sample_height=10, sample_width=5., min_color=None, max_color = None):\n if sample_points_x_y_nonZero.size==0:\n print \"the array does not have a non zero gauge volume\"\n\n\n else:\n\n xS, yS=sample_points_x_y_nonZero\n X,Y= np.meshgrid(xS,yS)\n\n gauge_volume=np.array(gauge_volume)\n\n Z = griddata((xS,yS), gauge_volume, (X,Y), method='nearest')\n\n plt.figure()\n # r=plt.contour( X, Y,Z)\n # plt.clabel(r, inline=1, fontsize=10)\n plt.pcolormesh(X, Y, Z, cmap = plt.get_cmap('rainbow'),vmin=min_color, vmax=max_color )\n plt.xlabel('points along sample width (mm)')\n plt.ylabel('points along sample height (mm)')\n plt.ylim(y_lower_limit,y_upper_imit)\n plt.colorbar()\n plt.axhline(y=-sample_height/2., color='r', linestyle='-')\n plt.axhline(y=sample_height/2., color='r', linestyle='-')\n plt.axvline(x=- sample_width/2., color='r', linestyle='-')\n plt.axvline(x= sample_width/2., color='r', linestyle='-')\n # plt.scatter(xS,yS ,marker = 'o', c = 'b', s = 5, zorder = 10)\n plt.savefig(os.path.join(thisdir, '../figures/{sample}.png'.format(sample='gauge_volume')))\n plt.show()",
"def plot(self, **kwargs):\n if self.order != None:\n name = str(_constructModelName(self.teff, self.logg, \n self.metal, self.en, self.order, self.path))\n output = kwargs.get('output', str(name) + '.pdf')\n ylim = kwargs.get('yrange', [min(self.flux)-.2, max(self.flux)+.2])\n title = kwargs.get('title')\n save = kwargs.get('save', False)\n \n plt.figure(figsize=(16,6))\n plt.plot(self.wave, self.flux, color='k', \n alpha=.8, linewidth=1, label=name)\n plt.legend(loc='upper right', fontsize=12)\n plt.ylim(ylim) \n \n minor_locator = AutoMinorLocator(5)\n #ax.xaxis.set_minor_locator(minor_locator)\n # plt.grid(which='minor') \n \n plt.xlabel(r'$\\lambda$ [$\\mathring{A}$]', fontsize=18)\n plt.ylabel(r'$Flux$', fontsize=18)\n #plt.ylabel(r'$F_{\\lambda}$ [$erg/s \\cdot cm^{2}$]', fontsize=18)\n if title != None:\n plt.title(title, fontsize=20)\n plt.tight_layout()\n\n if save == True:\n plt.savefig(output)\n plt.show()\n plt.close()\n\n else:\n output = kwargs.get('output'+ '.pdf')\n ylim = kwargs.get('yrange', [min(self.flux)-.2, max(self.flux)+.2])\n title = kwargs.get('title')\n save = kwargs.get('save', False)\n \n plt.figure(figsize=(16,6))\n plt.plot(self.wave, self.flux, color='k', alpha=.8, linewidth=1)\n plt.legend(loc='upper right', fontsize=12)\n plt.ylim(ylim)\n \n minor_locator = AutoMinorLocator(5)\n #ax.xaxis.set_minor_locator(minor_locator)\n # plt.grid(which='minor') \n \n plt.xlabel(r'$\\lambda$ [$\\mathring{A}$]', fontsize=18)\n plt.ylabel(r'$Flux$', fontsize=18)\n #plt.ylabel(r'$F_{\\lambda}$ [$erg/s \\cdot cm^{2}$]', fontsize=18)\n if title != None:\n plt.title(title, fontsize=20)\n plt.tight_layout()\n\n if save == True:\n plt.savefig(output)\n plt.show()\n plt.close()",
"def plot_data(df, title=\"normalized Stock prices\", ylabel=\"Price\", xlabel=\"Date\"):\n plt.clf()\n ax = df.plot(title=title, fontsize=12)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.savefig('files/output/' + title + '.png')",
"def stock_volume_history(stock_values):\n ticker = stock_values.name\n dates = stock_values.index\n \n # stock volume plot \n p2hover = HoverTool(tooltips=[(\"volume\", \"$y\"),])\n\n p = figure(x_axis_type = \"datetime\")\n\n p.title = \"{} Daily Volume\".format(ticker)\n p.title_text_font_size = '12'\n p.title_text_font_style = 'bold'\n\n # x axis\n p.xaxis.axis_label = 'Date'\n p.xaxis.axis_label_text_font_size = '9'\n\n # y axis\n p.yaxis.axis_label = 'Kilo Transactions'\n p.yaxis.axis_label_text_font_size = '9'\n p.yaxis[0].formatter = PrintfTickFormatter(format=\"%3d\")\n\n p.quad(top=stock_values['Volume'], bottom=0, left=dates, right=dates,\n fill_color=\"#036564\", line_color=\"#033649\")\n\n p.line(np.array(dates, 'M64'), stock_values['Volume 30'],\n color='#dfbd4d', **line_style)\n\n p.line(np.array(dates, 'M64'), stock_values['Volume 300'],\n color='#df1b06', **line_style)\n\n # set plot style\n p.plot_width = 800\n p.plot_height = 200\n p.grid.grid_line_alpha=0.3\n\n # set grid\n # change just some things about the x-grid\n p.xgrid.grid_line_color = None\n\n # change just some things about the y-grid\n p.ygrid.grid_line_alpha = 0.5\n p.ygrid.grid_line_dash = [6, 4]\n\n return p",
"def plot_equity_prices(ticker, prices):\n\n # define x-axis data points\n x = np.linspace(0, prices.shape[0], prices.shape[0])\n\n figure = plt.figure()\n axis = figure.add_subplot(111)\n\n axis.plot(x, prices[ticker], linewidth=1, color='b', label=ticker)\n axis.legend(loc='upper left')\n axis.set_xlabel('Time (days)')\n axis.set_ylabel('Price')\n axis.set_title('Price vs Time: ' + ticker)\n\n return figure"
] | [
"0.6332631",
"0.62353545",
"0.61054397",
"0.60435236",
"0.5956085",
"0.5932323",
"0.58881456",
"0.5884435",
"0.5882181",
"0.58787453",
"0.58743924",
"0.5874154",
"0.5873469",
"0.58398116",
"0.5824793",
"0.57651365",
"0.57198536",
"0.57072663",
"0.5705869",
"0.5703248",
"0.56924915",
"0.5673311",
"0.56678456",
"0.56617475",
"0.5661031",
"0.564426",
"0.56279576",
"0.5617431",
"0.5613078",
"0.560168"
] | 0.7166306 | 0 |
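A minimal usage sketch for the chart_price document above — the price and signal series here are made-up illustrations, and pandas, numpy and matplotlib are assumed to be importable alongside the function:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# hypothetical business-day price path and a +1/-1 position signal
idx = pd.date_range("2020-01-01", periods=250, freq="B")
price = pd.Series(100 + np.random.randn(250).cumsum(), index=idx)
signal = pd.Series(np.sign(np.random.randn(250)), index=idx)  # +1 long, -1 short, 0 flat

# long/short legs are plotted over the price; flat periods become NaN and are skipped
ax = chart_price(price, signal, threshold=0)
plt.show()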
Given a Series with daily returns (simple, non-log, i.e. P1/P0 - 1), return indicators comparing the simplified Sharpe ratio to the 'true' Sharpe ratio (defined by different calculation conventions). | def true_sharpe(ret):
r = pd.Series()
df = pd.DataFrame({"returns": ret})
df["cummulative_return"] = (df["returns"] + 1).cumprod()
df["log_returns"] = np.log(df["returns"] + 1)
r["cummulative_return"] = df["cummulative_return"][-1] - 1
r["annual_return"] = ((r["cummulative_return"] + 1) ** (252 / len(df.index))) - 1
r["mean"] = df["returns"].mean() * 252
r["mean_log"] = df["log_returns"].mean() * 252
r["vol"] = df["returns"].std() * np.sqrt(252)
r["vol_log"] = df["log_returns"].std() * np.sqrt(252)
r["sharpe"] = r["mean"] / r["vol"]
r["sharpe_log"] = r["mean_log"] / r["vol_log"]
return r | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __check_single_sign_value(series, log=False):\n # gets useful values\n negative_values_unique, positive_values_unique = set(series[series < 0]), \\\n set(series[series > 0])\n if len(negative_values_unique) == 1 and len(positive_values_unique) > 1:\n series = series.replace(to_replace=list(negative_values_unique), value=np.nan)\n elif len(positive_values_unique) == 1 and len(negative_values_unique) > 1:\n series = series.replace(to_replace=list(positive_values_unique), value=np.nan)\n\n return series",
"def stationarity_test(df, differenced):\n if differenced:\n\n result = adfuller(df['STU'])\n print(f'ADF Statistic: {result[0]}')\n print(f'p-value: {result[1]}')\n if result[1] <= 0.05:\n print(\"Evidence against the null-hypothesis, series look stationary!\")\n else:\n print(\"Weak evidence against the null-hypothesis, showing that the series is likely to be non-stationary\")\n else:\n result = adfuller(df['STU'])\n print(f'ADF Statistic: {result[0]}')\n print(f'p-value: {result[1]}')\n if result[1] <= 0.05:\n print(\"Evidence against the null-hypothesis, series look stationary!\")\n return True\n else:\n print(\"Weak evidence against the null-hypothesis, showing that the series is likely to be non-stationary!\")\n return False",
"def get_sharpe(self,df, df_type = \"returns\"):\n if df_type == \"price\":\n df = df.pct_change()\n sharpe = (df.mean() * 252) / (df.std() * np.sqrt(252))\n return sharpe",
"def compare_series(series_a, series_b):\n return {\n 'rmse': ((series_a - series_b) ** 2).mean() ** 0.5,\n 'mbe': (series_b - series_a).mean(),\n 'mae': abs(series_b - series_a).mean(),\n 'rsqr': stats.linregress(series_a, series_b).rvalue ** 2,\n }",
"def test_stationarity(\n timeseries, plot=False, plt=None, method='ADF', print_results=True):\n\n # Rolling statistics. We select one day: 24 hours\n rolmean = timeseries.rolling(window=24).mean()\n rolstd = timeseries.rolling(window=24).std()\n\n if plot:\n # Plot rolling statistics of orginal, mean and std:\n plt.plot(timeseries, color='blue', label='Original')\n plt.plot(rolmean, color='red', label='Rolling Mean')\n plt.plot(rolstd, color='black', label='Rolling Std')\n plt.legend(loc='best')\n plt.title('Rolling Mean & Standard Deviation')\n\n if method == 'ADF':\n test = adfuller(timeseries, autolag='AIC')\n test = [t for t in test]\n del test[3]\n if method == 'KPSS':\n test = kpss(timeseries, regression='ct')\n\n output = pd.Series(\n test[0:3],\n index=[\n 'Test Statistic',\n 'p-value',\n '#Lags Used',\n # 'Number of Observations Used'\n ]\n )\n for key, value in test[3].items():\n output['Critical Value (%s)' % key] = value\n\n if print_results:\n print('Results of {} Test:'.format(method))\n print(output)\n\n return output",
"def sharpe(s, d1, d2, libor=0):\n libor_daily_return = (1 + libor * 0.01) ** (1 / 252) - 1\n s = s.loc[d1:d2]\n return (252 ** 0.5) * (s.mean() - libor_daily_return) / s.std()",
"def _analyze_series(self, series):\n # bin series by analysis time\n # only analyze the last bin\n ts = array([si['timestamp'] for si in series])\n ds = diff(ts)\n\n # tolerance_seconds = 60 * 60 * self._bin_hours\n # ds = diff(ts) > tolerance_seconds\n # bounds = where(ds)[0]\n # itemidx = bounds[-1] if bounds else 0\n # series = series[itemidx:]\n\n for ci in self._conditionals:\n ret = self._execute_conditional(ci, series, ds)\n if ret:\n return ret",
"def stability_of_timeseries(returns):\n\n cum_log_returns = np.log1p(returns).cumsum()\n rhat = sp.stats.linregress(np.arange(len(cum_log_returns)),\n cum_log_returns.values)[2]\n\n return rhat",
"def series_are_equivalent(series_1: Series, series_2: Series) -> bool:\n d1 = series_1.copy().reset_index()\n cols_1 = sorted([c for c in d1.columns if c != 'p'])\n cols_p = cols_1 + ['p']\n s1 = d1[cols_p].set_index(cols_1)['p']\n d2 = series_2.copy().reset_index()\n cols_2 = sorted([c for c in d2.columns if c != 'p'])\n if cols_1 != cols_2:\n return False\n s2 = d2[cols_p].set_index(cols_2)['p']\n for k, v in s1.iteritems():\n if v == 0:\n continue\n if k not in s2.keys() or abs(s2[k] - v) > 1e-10:\n return False\n return True",
"def calc_returns(se, remove_na=True):\r\n ser = se.shift(-1) / se - 1\r\n if remove_na:\r\n ser.dropna(inplace=True)\r\n return ser",
"def quality_data(self, s):\n known_symbols = np.mod(range(176),48)>=32\n print('quality_data',np.sum(np.real(s[known_symbols])<0))\n success = np.sum(np.real(s[known_symbols])<0) < 20\n return success,0 ## no doppler estimate for data frames",
"def sharpe(returns):\n return returns.mean() / returns.std()",
"def test_adf(self):\n\n dftest = adfuller(self.ts_df['y'], autolag='AIC')\n dfoutput = pd.Series(dftest[0:4],\n index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])\n for key, value in dftest[4].items():\n dfoutput['Critical Value (%s)' % key] = value\n print(dfoutput)\n if dftest[0] > dftest[4]['5%']:\n print(\n \"Test statistic greater than critical value at 5% --> series seems to be not stationary. \"\n \"Look at critical values at 1% and 10% too, ideally they also should be less than test statistic.\")\n else:\n print(\n \"Test statistic less than critical value at 5% --> series seems to be stationary. \"\n \"Look at critical values at 1% and 10% too, ideally they also should be greater than test statistic.\")",
"def getSharpe(l, days):\n df = pd.DataFrame(data=l)\n df['dd'] = df[0].diff()\n avg = df['dd'].mean()\n std = df['dd'].std()\n try:\n sharpe = (avg / std) * (365 / days) ** 0.5\n return sharpe\n except:\n return None",
"def is_series_like(self):\n return len(self.columns) == 1 or len(self.index) == 1",
"def extract_seasonal_component(original_ts, ppy):\n \"\"\"\n # === get in-sample data\n original_ts = original_ts[:-out_of_sample]\n \"\"\"\n if seasonality_test(original_ts, ppy):\n # print(\"seasonal\")\n # ==== get moving averages\n ma_ts = moving_averages(original_ts, ppy)\n\n # ==== get seasonality indices\n le_ts = original_ts * 100 / ma_ts\n le_ts = np.hstack((le_ts, np.full((ppy - (len(le_ts) % ppy)), np.nan)))\n le_ts = np.reshape(le_ts, (-1, ppy))\n si = np.nanmean(le_ts, 0)\n norm = np.sum(si) / (ppy * 100)\n si = si / norm\n else:\n # print(\"NOT seasonal\")\n si = np.full(ppy, 100)\n return si",
"def get_series(series):\n if series == 'acs1':\n return census.acs1dp\n elif series == 'acs5':\n return census.acs5\n elif series == 'sf1':\n return census.sf1\n elif series == 'sf3':\n return census.sf3\n else:\n return None",
"def clean_series(y,smooth = False,p = 6.25,logsmooth = True):\n\n # Remove null values in the middle of the series using interpolate\n # First null values are not interpolated but later filled by 0.0\n y = y.replace(0.0,np.NaN).interpolate().fillna(0.0)\n\n # Smooth using Hodrick Prescott filter with parameter p\n if smooth:\n y = smooth_series(y,p)\n y.loc[(y < 1) & (y > 0)] = 1\n\n if logsmooth:\n y = y.map(lambda x : np.log(1+x))\n y = smooth_series(y,p)\n y = y.map(lambda x : np.exp(x) - 1)\n y.loc[(y < 1) & (y > 0)] = 1\n y.loc[y < 0] = 0\n\n return y",
"def __in_spring__(series):\n isinspring = lambda x: (((x.month == 3) and (x.day >= 14)) or\n ((x.month == 4) and (x.day < 15)))\n return Series(series.index.map(isinspring), index=series.index)",
"def is_not_constant(series: np.ndarray) -> bool:\n #print(\"enter bartpy/bartpy/data.py is_not_constant\")\n \n if len(series) <= 1:\n #print(\"-exit bartpy/bartpy/data.py is_not_constant\")\n return False\n first_value = None\n for i in range(1, len(series)):\n # if not series.mask[i] and series.data[i] != first_value:\n if series[i] != first_value:\n if first_value is None:\n first_value = series.data[i]\n else:\n #print(\"-exit bartpy/bartpy/data.py is_not_constant\")\n return True\n #print(\"-exit bartpy/bartpy/data.py is_not_constant\")\n return False",
"def _hpfilter_one_return(series, lamb=1600, part=\"trend\"):\n hp_cycle, hp_trend = hpfilter(series, lamb)\n if part == \"cycle\":\n return hp_cycle\n else:\n return hp_trend",
"def __in_spring__(series):\n isinspring = lambda x: (((x.month == 3) and (x.day >= 14)) or\n ((x.month == 4) and (x.day < 15)))\n return pd.Series(series.index.map(isinspring), index=series.index)",
"def test_stationarity(series, print_vals=True):\n\n result = adfuller(series)\n print('ADF Statistic: %f' % result[0])\n print('p-value: %f' % result[1])\n print('Critical Values:')\n\n for key, value in result[4].items():\n print('\\t%s: %.3f' % (key, value))\n return result",
"def adfuller_test(series, signif=0.05, name='', verbose=False):\n \n r = adfuller(series, autolag='AIC')\n output = {'test_statistic':round(r[0], 4), 'pvalue':round(r[1], 4), 'n_lags':round(r[2], 4), 'n_obs':r[3]}\n p_value = output['pvalue'] \n def adjust(val, length= 6): return str(val).ljust(length)\n\n # Print Summary\n print(f' Augmented Dickey-Fuller Test on \"{name}\"', \"\\n \", '-'*47)\n print(f' Null Hypothesis: Data has unit root. Non-Stationary.')\n print(f' Significance Level = {signif}')\n print(f' Test Statistic = {output[\"test_statistic\"]}')\n print(f' No. Lags Chosen = {output[\"n_lags\"]}')\n\n for key,val in r[4].items():\n print(f' Critical value {adjust(key)} = {round(val, 3)}')\n\n if p_value <= signif:\n print(f\" => P-Value = {p_value}. Rejecting Null Hypothesis.\")\n print(f\" => Series is Stationary.\")\n else:\n print(f\" => P-Value = {p_value}. Weak evidence to reject the Null Hypothesis.\")\n print(f\" => Series is Non-Stationary.\")",
"def SweepSeries(*args, **kwargs):\n if args or kwargs:\n underride(kwargs, dtype=float)\n series = pd.Series(*args, **kwargs)\n else:\n series = pd.Series([], dtype=np.float64)\n\n series.index.name = 'Parameter'\n if 'name' not in kwargs:\n series.name = 'Metric'\n return series",
"def SMAPE(y_true, y_pred):\n return smape(y_true, y_pred) / 2",
"def get_benchmark_returns(symbol, first_date, last_date):\n if symbol == '^GSPC':\n symbol = 'spy'\n\n data = pd_reader.DataReader(\n symbol,\n 'google',\n first_date,\n last_date\n )\n\n data = data['Close']\n\n data[pd.Timestamp('2008-12-15')] = np.nan\n data[pd.Timestamp('2009-08-11')] = np.nan\n data[pd.Timestamp('2012-02-02')] = np.nan\n\n data = data.fillna(method='ffill')\n\n return data.sort_index().tz_localize('UTC').pct_change(1).iloc[1:]",
"def basic_series() -> pd.Series:\n series = pd.Series(range(1,6), name=\"Fred\")\n return series",
"def test_load_time_series (self):\n self.dm._load_time_series()\n df = self.dm.df_final\n # Define expected results.\n dt_1 = pd.to_datetime('3/1/2018')\n dt_2 = pd.to_datetime('2/10/2018')\n\n expected = [('SP500', dt_1, -0.013324),\n ('btc', dt_1, 0.053193),\n ('btc_volume', dt_1, 0.05040065),\n ('eth', dt_2, -0.026542),\n ('eth_volume', dt_2, -0.209714)]\n for (col, idx, value) in expected:\n msg = '{0} value not what expected on {1}'.format(\n col, cryp.fmt_date(idx))\n actual_value = df.loc[idx, col]\n self.assertAlmostEqual(value, actual_value, DEC_ACCY, msg)\n\n ##############################\n # Test non-crypto-asset (SPX) to ensure returns properly rolled forward.\n ##############################\n # Return on 1/13/2018 (Saturday), 1/14 (Sunday), and 1/15 (MLK day)\n # should all be equal to the return on 1/12 (Friday).\n # Next \"new\" return should be on 1/16 (price change from 1/12 to 1/16).\n expected_return_1 = 0.006750 # index change from 1/11 -> 1/12.\n expected_return_2 = -0.003524 # index change from 1/12 -> 1/16.\n expected_spx = [(pd.to_datetime('1/12/2018'), expected_return_1),\n (pd.to_datetime('1/13/2018'), expected_return_1),\n (pd.to_datetime('1/14/2018'), expected_return_1),\n (pd.to_datetime('1/15/2018'), expected_return_1),\n (pd.to_datetime('1/16/2018'), expected_return_2)]\n\n for (idx, value) in expected_spx:\n msg = 'SP500 return on {} not what expected.'.format(\n cryp.fmt_date(idx))\n actual_value = df.loc[idx, 'SP500']\n self.assertAlmostEqual(value, actual_value, DEC_ACCY, msg)",
"def snaive_exp_smoothing_method_pred(training_data,HORIZON,METHOD=\"simple\",smoothing_level=.3,optimized=True,smoothing_slope=.05):\n \n \n exp_smoothing_type = METHOD #\"simple\"\n data_predictions = pd.DataFrame(index=training_data.index[-HORIZON:]+timedelta(days=HORIZON))\n data_predictions.astype(np.float)\n for i in range(7):\n for series_name in training_data.filter(regex='^series').columns:\n try:\n data_predictions[series_name].shape\n except:\n data_predictions[series_name]=0.0\n if exp_smoothing_type == \"holt\":\n model = Holt(training_data[series_name][training_data.index.dayofweek==i])\n elif exp_smoothing_type == \"simple\":\n model = SimpleExpSmoothing(training_data[series_name][training_data.index.dayofweek==i])\n \n model._index = training_data[training_data.index.dayofweek==i].index\n \n if exp_smoothing_type == \"holt\":\n if optimized:\n fit = model.fit(optimized=True)\n else:\n fit = model.fit(smoothing_level=smoothing_level, smoothing_slope=smoothing_slope)\n elif exp_smoothing_type == \"simple\":\n fit = model.fit(smoothing_level=smoothing_level)\n \n \n #pred = fit.forecast(HORIZON)\n data_predictions[series_name][data_predictions.index.dayofweek==i] = fit.forecast(HORIZON//7)\n \n return data_predictions"
] | [
"0.58642346",
"0.5710043",
"0.5479896",
"0.54715127",
"0.54500556",
"0.5381189",
"0.5379159",
"0.52815366",
"0.52155226",
"0.51980346",
"0.51967704",
"0.519325",
"0.51814866",
"0.518038",
"0.517456",
"0.51569456",
"0.5140055",
"0.5135205",
"0.5126213",
"0.5102445",
"0.5099832",
"0.50934714",
"0.5089398",
"0.5084615",
"0.50736475",
"0.50616235",
"0.50446844",
"0.50150603",
"0.50041085",
"0.5000499"
] | 0.6321988 | 0 |
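A minimal sketch exercising the true_sharpe document above on simulated data — the return series is hypothetical and the numpy/pandas imports are assumed to be available where the function is defined:

import numpy as np
import pandas as pd

# 252 simulated simple daily returns (P1/P0 - 1) on a business-day index
rng = np.random.default_rng(0)
daily_returns = pd.Series(rng.normal(0.0005, 0.01, 252),
                          index=pd.date_range("2020-01-01", periods=252, freq="B"))

stats = true_sharpe(daily_returns)
print(stats[["sharpe", "sharpe_log"]])  # simple-return vs. log-return Sharpe conventions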
Run func on every element of dfs in multiple processes; dfs is a list of DataFrames. | def m_proc(dfs, func):
pool = Pool(processes=cpu_count())
results = [pool.apply_async(func, args=(df,)) for df in dfs]
output = [p.get() for p in results]
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def groupby_apply_parallel(grouped_df, func, *args):\n with Pool(cpu_count()) as p:\n return_list = p.starmap(func, [(group, *args) for name, group in grouped_df])\n return pd.concat(return_list)",
"def apply_parallel(df, func, **kwargs):\n num_workers = cpu_count()\n\n if (df.shape[0] == 1) or (num_workers == 1):\n return apply_df((df, func, kwargs))\n\n retLst = Parallel(n_jobs=num_workers)(delayed(apply_df)(\n input_args=(d, func, kwargs)) for d in np.array_split(df, num_workers))\n return pd.concat(retLst)",
"def _apply_parallel(grouped_df, func, neg_compound, compound, f_cols, n_jobs,\n method):\n n_cpu = multiprocessing.cpu_count()\n output = Parallel(n_jobs=n_jobs)(delayed(func)(\n group, neg_compound, compound, f_cols, method) for _, group in grouped_df)\n return pd.concat(output)",
"def main(save_dir, img_dir, df, fname_col):\n\tpool = mp.Pool(mp.cpu_count())\n\tresult = pool.map(multi_run_wrapper,[(save_dir, img_dir, \n\t\t\t\t\t\tfname) for fname in df[fname_col].values[0:4]])",
"def parallelize_dataframe(df, func, num_partitions):\n\n df_split = np.array_split(df, num_partitions)\n pool = mp.Pool(num_partitions)\n df = pd.concat(pool.map(func, df_split))\n pool.close()\n pool.join()\n\n return df",
"def apply_parallel(df_grouped, func, n_jobs=16, backend='loky', as_index=False, **kwargs):\n\n names = []\n groups = []\n for name, group in df_grouped:\n names.append(name)\n groups.append(group)\n\n results = Parallel(n_jobs=n_jobs, verbose=5, backend=backend, batch_size='auto') \\\n (delayed(func)(group, **kwargs) for group in groups)\n\n return pd.concat(results, keys=names if as_index else None)",
"def sequential(self, func, args_dict=None):\n for uri, cf in self._cfs.items():\n args = self._process_args_dict(cf, uri, args_dict)\n func(*args)",
"def load_all_dfs(clf_list = ['test_small','rt_small','test2_small']):\n \n start = time.clock()\n print('loading data')\n first_clf = clf_list[0]\n df = pd.read_csv('Pikki'+first_clf+'.csv')\n df['df'] = first_clf\n\n df = df.set_index(['id','df'])\n\n for clf in clf_list[1:]:\n file_name = 'Pikki' + clf + '.csv'\n df_tmp = pd.read_csv(file_name)\n df_tmp['df'] = clf\n\n df_tmp = df_tmp.set_index(['id','df'])\n\n df = pd.concat([df,df_tmp])\n\n \n df['std'] = df.apply(np.std,axis=1,raw = True)\n end = time.clock()\n print(end-start)\n return df#.swaplevel(0,1)",
"def apply(df, f):\n return [f(row) for row in df]",
"def self_map(self, func: Callable[[dd.Series], Any], **kwargs: Any) -> List[Any]:\n return [func(df, **kwargs) for df in self.data]",
"def process_frame(data, index, resultsq, functions):\n results_list = []\n failed = False\n for function_id in functions:\n function = function_mapper[function_id]\n result = function(data.slice, functions[function_id])\n results_list.append(result)\n if not result.res:\n failed = True\n\n results = ct.Results(index, failed, results_list)\n resultsq.put(results)",
"def controller(df, func):\n # Initialization: Generate computational graph for each attribute which will be on RHS\n schema = df.columns\n computational_graph = dict()\n FDs = []\n for RHS in schema:\n computational_graph[RHS] = generate_computational_graph(RHS, schema)\n\n for level in range(3):\n # Get current level candidates\n current_level_candidates = dict()\n for RHS in computational_graph.keys():\n current_level_candidates[RHS] = get_candidates(level, computational_graph[RHS])\n\n # print('candidates:',current_level_candidates)\n # Use current_level candidates as an input to FD-functions for each level, func will return discovered (soft/delta)functional dependencies\n tFDs = func(level, df, current_level_candidates)\n # print('FDs:',tFDs)\n # print(tFDs)\n FDs.extend(tFDs)\n # Transform res into a dictionary where key: RHS value: a list of LHS where candidates are in the form of sets\n current_level_result = transform_res(tFDs)\n # print(current_level_result)\n\n # Prune graphs according to feedback of FD-functions\n # print(f\"level:{level}, computatioanl_graph_key:{computational_graph.keys()},current_level_result_key:{current_level_result.keys()}\")\n for RHS in computational_graph.keys():\n if RHS in current_level_result.keys():\n computational_graph[RHS] = prune_graph(level, current_level_result[RHS], computational_graph[RHS])\n\n return FDs",
"def concat_all_dfs(dflist):\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n # reduced\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem','Minutes','GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n # add col for function name\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n # reorder cols\n dfa = dfa[['Air cargo problem','id','search_fn','Searcher','Actions',\n 'PlanLength', 'NewNodes','Expansions','ElapsedSeconds']]\n\n # complete runs only:\n return dfa[dfa['Actions'].values > 0]",
"def map(self, fcn, incols, outcols, dview=None):\n # Design notes:\n # - Things go the fastest and work the best when we send numpy arrays to the engines.\n # In other words, avoid pickling at all costs.\n # - Pandas objects are slow. The algorithm should work with plain numpy arrays;\n # we convert back to DataFrames at the end.\n alldata = self.frametracks[incols].values\n allresults = np.ones((alldata.shape[0], len(outcols))) * np.nan\n def worker(fcn, data, loopindices, coords, nncutoff, n_output_cols):\n # This runs only once on each engine so it's ok to have all this setup code\n import numpy as np\n import scipy.spatial.ckdtree\n tree = scipy.spatial.ckdtree.cKDTree(coords, 5)\n results = np.ones((len(loopindices), n_output_cols)) * np.nan\n neighborlist = tree.query_ball_point(coords[loopindices], nncutoff)\n for i, (pindex, neighbors) in enumerate(zip(loopindices, neighborlist)):\n neighbors.remove(pindex)\n results[i] = fcn(data[pindex], data[neighbors])\n return results\n if dview is None:\n allresults[self.loopindices] = worker(fcn, alldata, self.loopindices, self.coords, self.nncutoff, len(outcols))\n else:\n from IPython.parallel.util import interactive\n dview.execute('''import numpy as np''')\n # To send function to engines, its parent namespace must be the global namespace. \n dview['worker'] = interactive(worker)\n dview['fcn'] = interactive(fcn)\n dview['data'] = alldata\n dview.scatter('loopindices', self.loopindices)\n dview['coords'] = self.coords\n dview['nncutoff'] = self.nncutoff\n dview['n_output_cols'] = len(outcols)\n dview.execute('''results = worker(fcn, data, loopindices, coords, nncutoff, n_output_cols)''')\n allresults[self.loopindices] = dview.gather('results', block=True)\n rtr = self.frametracks.copy()\n for i, name in enumerate(outcols):\n rtr[name] = allresults[:,i]\n return rtr",
"def applyParallel(df_grouped, func, n_jobs, backend='multiprocessing'): \n results = Parallel(n_jobs=n_jobs, verbose=4, backend=backend)(\n delayed(func)(name, group) for name, group in df_grouped)\n\n return {k: v for k, v, _ in results}, {k:v for k, _, v in results}",
"def process_dfs(df_10s, df_hour):\n columns_to_ffill = ['DD', 'DR', 'FF', 'FX', 'N', 'P', 'Q', 'RG', 'SQ', 'T', 'T10', 'TD', 'U', 'VV', 'WW']\n columns_to_interpolate = ['eMeter', 'gasMeter']\n\n print('Processing df_10s')\n df_10s[columns_to_ffill] = df_10s[columns_to_ffill].fillna(method='ffill')\n df_10s[columns_to_interpolate] = df_10s[columns_to_interpolate].interpolate(method='time')\n df_10s_processed = df_10s\n print('Amount of NaNs left in df_10s_processed: %s' % df_10s_processed.isnull().sum().sum())\n\n print('Processing df_hour')\n df_hour[columns_to_ffill] = df_hour[columns_to_ffill].fillna(method='ffill')\n df_hour[columns_to_interpolate] = df_hour[columns_to_interpolate].interpolate(method='time')\n df_hour_processed = df_hour\n print('Amount of NaNs left in df_hour_processed: %s' % df_hour_processed.isnull().sum().sum())\n\n return df_10s_processed, df_hour_processed",
"def export_data_parallel(dataframes, args):\n\n pool = Pool(args.procs)\n \n for df in dataframes:\n # parse name\n out_file_name = f'{args.output_prefix}_{generate_datetime_str(df.index)}.csv'\n \n out_file_path = path.join(args.output, out_file_name)\n \n if args.verbose: print(f'* exporting data file: {out_file_path}')\n\n pool.apply(write_frame, (df, out_file_path))\n\n pool.close()\n pool.join()",
"def multiple_eval_for_loops_v1():",
"def _doMap(self, func, iterable):\n name = \"Mapper\"\n sys.stderr.write(\"Master[%s phase]: starting\\n\" % name)\n pipes = [mp.Pipe() for _ in range(self.num_workers)]\n proc = [mp.Process(target=spawn_mapper(func), name=name, args=(q,)) for q in pipes]\n for p in proc:\n p.daemon = True\n p.start()\n for output_p, input_p in pipes:\n input_p.close() # we don't need to read from the pipes\n qi = 0\n for item in iterable:\n pipes[qi][0].send(item)\n qi = (qi+1) % self.num_workers\n for q,_ in pipes:\n q.send(None) # add termination tokens\n q.close()\n for p in proc:\n p.join()\n sys.stderr.write(\"Master[%s phase]: ended..\\n\" % name)",
"def process_frame_seq(data, index, functions):\n results_list = []\n failed = False\n for function_id in functions:\n function = function_mapper[function_id]\n result = function(data.slice, functions[function_id])\n results_list.append(result)\n if not result.res:\n failed = True\n\n results = ct.Results(index, failed, results_list)\n return results",
"def apply(func, path, proc=1, only=None):\n peps = get_items(path, only=only)\n total = len(peps)\n if proc < 1:\n proc = os.cpu_count()\n proc = min(total, proc)\n with mp.Pool(proc) as pool:\n return pool.starmap(partial(_apply, func, total), enumerate(peps, 1))",
"def processing_handler(\n datasets: list, load: Callable[[dict], None], cores: int, threads: int\n) -> None:\n\n # Data output\n output = []\n\n # Multi-core processing\n if cores > 1 and len(datasets) > 1:\n\n # Create process pool\n with Pool(cores) as pool:\n\n # Process datasets in pool\n output = pool.starmap(load, datasets)\n\n # Wait for Pool to finish\n pool.close()\n pool.join()\n\n # Multi-thread processing\n elif threads > 1 and len(datasets) > 1:\n\n # Create process pool\n with ThreadPool(threads) as pool:\n\n # Process datasets in pool\n output = pool.starmap(load, datasets)\n\n # Wait for Pool to finish\n pool.close()\n pool.join()\n\n # Single-thread processing\n else:\n\n for dataset in datasets:\n output.append(load(*dataset))\n\n # Remove empty DataFrames\n filtered = list(filter(lambda df: df.index.size > 0, output))\n\n return pd.concat(filtered) if len(filtered) > 0 else output[0]",
"def runStats(df):\n\tpass",
"def store_dfs_in_HANA(df_filenames,table_name,multiprocessing=False):\r\n\r\n for index,df_filename in enumerate(df_filenames):\r\n df = pd.read_csv(df_filename, compression='gzip', header=0, sep=',', quotechar='\"')\r\n del df[\"Unnamed: 0\"]\r\n colnames = list(df.columns.values)\r\n #REMOVE before flight\r\n drop_table_in_HANA(colnames, table_name)\r\n create_table_in_HANA(colnames, table_name)\r\n number_of_parts = math.ceil(len(df.index)/settings['chunksize'])\r\n number_of_parts = settings['num_cores']\r\n\r\n if multiprocessing:\r\n with multiprocessing.Pool(settings['num_cores']) as pool:\r\n pool.imap_unordered(partial(store_partial_df,table_name=table_name), numpy.array_split(df,number_of_parts))\r\n pool.close()\r\n pool.join()\r\n else:\r\n store_partial_df(df, table_name)\r\n\r\n logging.info(\"Finished storing {0} df\".format(index))\r\n\r\n # dont forget to close the connestion otherwise we may run into\r\n # connect issues.\r\n hana.close()",
"def apply_fn(self,fn):\r\n \r\n self.check_Data()\r\n for split,data_ in self.processed_data.items():\r\n x = data_['x']\r\n x = np.array([fn(xi) for xi in x])\r\n data_['x'] = x",
"def multiple_eval_for_loops_v2():",
"def split_calculation_to_threads(iterable, func, args):\n args_list = []\n batches = list(split_iterable_to_batches(iterable))\n for batch in batches:\n temp = list(args)\n temp.insert(0, batch)\n args_list.append(tuple(temp))\n with Pool(NUM_THREADS) as p:\n results = p.starmap(func, args_list)\n return results",
"def run_alg_parallel(self, text_df, col_names, init_states):\n # type: (DataFrame, ColumnNames, list) -> list\n assert isinstance(init_states, list)\n # return map(lambda sent: self.run_alg(text_df, col_names, sent), init_states)\n if len(init_states) == 1:\n return map(lambda sent: self.run_alg(text_df, col_names, sent), init_states)\n return parmap(lambda sent: self.run_alg(text_df, col_names, sent), init_states, nprocs=3)",
"def run_functions(self):\n for function in self.functions:\n try:\n function()\n except Exception as err:\n logger.exception(\n f\"[red]Failed running and collecting data for function: {function.__name__}[/red]\"\n )\n logger.error(traceback.format_exc())\n logger.error(f\"[red]{err}[/red]\")\n logger.error(\"Continuing..\")",
"def run_multiprocessing(args, function):\n vcf_fn = args.data_file\n num_processes = args.num_threads\n if num_processes > 1:\n # Split the VCF into chunks\n callset = allel.read_vcf(vcf_fn, fields=[\"variants/CHROM\", \"variants/POS\"])\n pos_list = callset[\"variants/POS\"]\n chroms = callset[\"variants/CHROM\"]\n assert np.all(chroms == chroms[0])\n chrom = str(chroms[0])\n\n def get_chromosome_chunks(lst, num_processes):\n length = len(lst)\n n = math.ceil(length / num_processes)\n chunks = list()\n for index, i in enumerate(range(0, length, n)):\n if index != num_processes - 1:\n chunks.append(\n (\n args,\n args.output_file + str(index),\n (chrom + \":\" + str(lst[i]) + \"-\" + str(lst[i + n])),\n )\n )\n else:\n chunks.append(\n (\n args,\n args.output_file + str(index),\n (chrom + \":\" + str(lst[i]) + \"-\" + str(lst[-1])),\n )\n )\n return chunks\n\n chunks = get_chromosome_chunks(pos_list, num_processes)\n chunks_iter = iter(chunks)\n reports = list()\n completed_files = list()\n with multiprocessing.Pool(processes=num_processes, maxtasksperchild=10) as pool:\n for index, row in enumerate(pool.map(function, chunks_iter)):\n reports.append(row)\n print(\n \"Processed Chunk {}: {} with {} sites added.\".format(\n index, chunks[index][2], row[\"num_sites\"]\n )\n )\n if row[\"num_sites\"] > 0:\n completed_files.append(index)\n else:\n os.remove(args.output_file + str(index) + \"-lock\")\n\n # Combine reports and print\n master_report = reports[0]\n for report in reports[1:]:\n for var_type, val in report.items():\n master_report[var_type] += val\n print(master_report)\n\n # Combine sampledata files\n filenames = completed_files\n all_samples = []\n for name in filenames:\n all_samples.append(tsinfer.load(args.output_file + str(name)))\n os.remove(args.output_file + str(name))\n\n samples = all_samples[0].copy(args.output_file)\n samples.append_sites(*all_samples[1:])\n samples.finalise()\n assert np.all(np.diff(samples.sites_position[:]) > 0)\n\n else:\n raise ValueError"
] | [
"0.64947915",
"0.64410365",
"0.62881696",
"0.61850566",
"0.6010216",
"0.59700817",
"0.5891619",
"0.5734096",
"0.57302344",
"0.56988317",
"0.56210876",
"0.55779684",
"0.55649483",
"0.5563111",
"0.55065495",
"0.54993975",
"0.5448303",
"0.5382057",
"0.53642577",
"0.53566223",
"0.5352934",
"0.53515655",
"0.5274754",
"0.52650857",
"0.52629673",
"0.521658",
"0.52163196",
"0.52162296",
"0.52051795",
"0.51939654"
] | 0.7830786 | 0 |
Series2 is a filter. If the input signals disagree, no signal is output. If they agree, the series1 signal is the output. | def combine_signals(series1: pd.Series, series2: pd.Series) -> pd.Series:
return ((np.sign(series1) == np.sign(series2)) * series1).astype(int, copy=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def same_emitter(track_1, track_2):\n alternate_consistency = False\n start_consistency = False\n start_1_index = 0\n start_2_index = 0\n\n # First of all, check if both tracks use the same frequence to communicate\n freq_consistency = False\n f_1 = track_1.itr_measurement.central_freq_hz\n f_2 = track_2.itr_measurement.central_freq_hz\n if f_1 > 0.99*f_2 and f_1 < 1.01*f_2:\n freq_consistency = True\n\n # Then, check if the bandwidth of both tracks is the same\n bandwidth_consistency = False\n bw_1 = track_1.itr_measurement.bandwidth_hz\n bw_2 = track_2.itr_measurement.bandwidth_hz\n if bw_1 > 0.99*bw_2 and bw_1 < 1.01*bw_2:\n bandwidth_consistency = True\n\n # Is the emission type the same for both tracks ?\n type_consistency = False\n t_1 = track_1.itr_measurement.type\n t_2 = track_2.itr_measurement.type\n if t_1 == t_2:\n type_consistency = True\n\n # If all three criteria above have been fulfilled, check if alternates sequences are similar\n if freq_consistency and type_consistency and bandwidth_consistency:\n # logger.debug(\n # \"\\tFreq and type consistency found : \\n\\t\\t1° Freq - %s - Type - %s \\n\\t\\t2° Freq - %s - Type - %s\" % (f_1, t_1, f_2, t_2))\n alternate_consistency = True\n alternates_1 = track_1.alternates\n alternates_2 = track_2.alternates\n\n alt_duration_1 = [alt.duration_us for alt in alternates_1]\n alt_start_1 = [alt.start.date_ms for alt in alternates_1]\n alt_duration_2 = [alt.duration_us for alt in alternates_2]\n alt_start_2 = [alt.start.date_ms for alt in alternates_2]\n\n # Both tracks may not have been recorded at exactly the same time. Therefore,\n # we only analyse alternates that have finished. Not ongoing alternates.\n n = min(len(alternates_1), len(alternates_2)) - 1\n\n for start_1 in alt_start_1:\n if start_1 in alt_start_2:\n start_1_index = alt_start_1.index(start_1)\n start_2_index = alt_start_2.index(start_1)\n start_consistency = True\n break\n if not start_consistency:\n for start_2 in alt_start_2:\n if start_2 in alt_start_1:\n start_1_index = alt_start_1.index(start_2)\n start_2_index = alt_start_2.index(start_2)\n start_consistency = True\n break\n\n if start_consistency and track_1.itr_measurement.type != 1:\n if start_1_index == 0 or start_2_index == 0:\n start_1_index += 1\n start_2_index += 1\n while start_1_index < len(alt_start_1) and start_2_index < len(alt_start_2):\n # If there is more than a single alternate, we check if the duration of the alternates is consistent\n if alt_duration_1[start_1_index] != alt_duration_2[start_2_index]:\n alternate_consistency = False\n break\n\n # Always check that the start-dates of all alternates are the same.\n if alt_start_1[start_1_index] != alt_start_2[start_2_index]:\n alternate_consistency = False\n break\n\n start_1_index += 1\n start_2_index += 1\n\n # if alternate_consistency:\n # logger.debug(\n # \"\\tBoth tracks are from the same emitter !\")\n bool_response = freq_consistency and bandwidth_consistency and type_consistency and start_consistency and alternate_consistency\n\n track_id = get_track_id(track_1)\n return bool_response, track_id",
"def GetSecondOutput(self):\n return self.__output_signal2",
"def bandpass_filter_raw_plot(data, fs, f1, f2):\n b, a = sp.butter(N=2, Wn=np.array([f1, f2]) / fs * 2, btype='bandpass') # build a bandpass butterworth filter of order 4, with cut-off frequencies 1 and 45\n w, h = sp.freqz(b, a) # compute the frequency response of the filter\n f = w / np.pi * fs / 2\n plt.figure()\n plt.plot(f, 10 * np.log10(abs(h)))\n plt.xlabel('frequency (Hz)')\n plt.ylabel('Magnitude (dB)')\n plt.title('frequency response of butterworth bandpass [1, 45]Hz')\n plt.grid()\n\n data1 = sp.filtfilt(b, a, data)\n return data1",
"def sin_transition2(freq1, freq2, frames, start_idx=0, samplerate=SAMPLERATE):\n t = (start_idx + np.arange(frames)) / samplerate\n t = t.reshape(-1, 1)\n return np.sin(2 * np.pi * (freq1*(t[-1]-t) + freq2*(t-t[0]))/(t[-1]-t[0]) * t).reshape(-1,1)",
"def outliers_solid_tsds():\n timestamp1 = np.arange(np.datetime64(\"2021-01-01\"), np.datetime64(\"2021-02-10\"))\n target1 = [np.sin(i) for i in range(len(timestamp1))]\n target1[10] += 10\n\n timestamp2 = np.arange(np.datetime64(\"2021-01-01\"), np.datetime64(\"2021-02-10\"))\n target2 = [np.sin(i) for i in range(len(timestamp2))]\n target2[8] += 8\n target2[15] = 2\n target2[26] -= 12\n\n df1 = pd.DataFrame({\"timestamp\": timestamp1, \"target\": target1, \"segment\": \"1\"})\n df2 = pd.DataFrame({\"timestamp\": timestamp2, \"target\": target2, \"segment\": \"2\"})\n\n df = pd.concat([df1, df2], ignore_index=True)\n\n df = df.pivot(index=\"timestamp\", columns=\"segment\")\n df = df.reorder_levels([1, 0], axis=1)\n df = df.sort_index(axis=1)\n df.columns.names = [\"segment\", \"feature\"]\n tsds = TSDataset(df, \"1d\")\n return tsds",
"def filtfilt(self, b, a, in_ts=None):\r\n # Switch in the new in_ts:\r\n if in_ts is not None:\r\n data = in_ts.data\r\n Fs = in_ts.sampling_rate\r\n else:\r\n data = self.data\r\n Fs = self.sampling_rate\r\n\r\n #filtfilt only operates channel-by-channel, so we need to loop over the\r\n #channels, if the data is multi-channel data:\r\n if len(data.shape) > 1:\r\n out_data = np.empty(data.shape, dtype=data.dtype)\r\n for i in range(data.shape[0]):\r\n out_data[i] = signal.filtfilt(b, a, data[i])\r\n #Make sure to preserve the DC:\r\n dc = np.mean(data[i])\r\n out_data[i] -= np.mean(out_data[i])\r\n out_data[i] += dc\r\n else:\r\n out_data = signal.filtfilt(b, a, data)\r\n #Make sure to preserve the DC:\r\n dc = np.mean(data)\r\n out_data -= np.mean(out_data)\r\n out_data += dc\r\n\r\n return ts.TimeSeries(out_data,\r\n sampling_rate=Fs,\r\n time_unit=self.time_unit)",
"def _sanity_check_two_series(\n series_1: TimeSeries,\n series_2: TimeSeries,\n):\n\n _assert_timeseries(series_1)\n _assert_timeseries(series_2)\n\n # check if the two inputs time series have the same number of components\n raise_if_not(\n series_1.width == series_2.width,\n \"Series must have the same number of components,\"\n + f\" found {series_1.width} and {series_2.width}.\",\n )\n\n # check if the time intersection between the two inputs time series is not empty\n raise_if_not(\n len(series_1.time_index.intersection(series_2.time_index)) > 0,\n \"Series must have a non-empty intersection timestamps.\",\n )",
"def SetSecondInput(self, input_signal2):\n self.__input_signal2 = input_signal2\n self._changelength()",
"def highpass_sine2(f, fc=1.0, df=0.1):\n filt = np.zeros(len(f))\n filt[np.abs(f) > fc + df/2] = 1\n sel = (np.abs(f) > fc - df/2)*(np.abs(f) < fc + df/2)\n filt[sel] = np.sin(np.pi/2/df*(np.abs(f[sel]) - fc + df/2))**2\n return filt",
"def highpass_filter(self, data, reset=False):\n data = np.asarray(data)\n if self._highpass_sos is not None:\n if self._highpass_state is None or reset:\n self.highpass_filter_reset(data)\n data, self._highpass_state = scipy.signal.sosfilt(\n self._highpass_sos, data, zi=self._highpass_state, axis=0)\n return data\n return data",
"def linear_filter1D(sin, sout, lag=0, debug=0):\n \n assert len(sin) == len(sout), \"Signals must be same length! len(sin)=%d, len(sout)=%d\" % (len(sin), len(sout))\n assert np.sum(np.isnan(sin)) == 0, \"There are NaNs in sin\"\n assert np.sum(np.isnan(sout)) == 0, \"There are NaNs in sout\"\n \n lags = np.asarray(range(-lag, lag+1, 1)) \n corrSinSout = correlation_function(sin, sout, lags, mean_subtract=True, normalize=False)\n corrSinSin = correlation_function(sin, sin, lags, mean_subtract=True, normalize=False)\n corrSoutSout = correlation_function(sout, sout, lags, mean_subtract=True, normalize=False)\n win = hann(2*lag+1)\n\n \n if lag == 0:\n h = corrSinSout/corrSinSin\n fvals = 0\n gf = corrSinSout**2/(corrSinSin*corrSoutSout)\n else:\n # Normalize in the frequency domain\n corrSinSoutF = fft(corrSinSout*win)\n corrSinSinF = fft(corrSinSin*win)\n corrSoutSoutF = fft(corrSoutSout*win)\n hF = corrSinSoutF/np.abs(corrSinSinF)\n gf = np.abs(corrSinSoutF*corrSinSoutF.conj())/(np.abs(corrSinSinF)*np.abs(corrSoutSoutF))\n fvals = fftfreq(len(corrSinSout))\n h = ifft(hF)\n\n# Plots for debugging/analyzing\n if debug: \n # Time domain plots\n plt.figure()\n plt.subplot(141)\n plt.plot(lags, corrSinSout*win)\n plt.title('Cross-Corr')\n plt.subplot(142)\n plt.plot(lags, corrSinSin*win)\n plt.title('Auto-Corr Input')\n plt.subplot(143)\n plt.plot(lags, corrSoutSout*win)\n plt.title('Auto-Corr Output')\n plt.subplot(144)\n plt.plot(lags, h)\n plt.title('Filter')\n \n # Frequency domain plots\n plt.figure()\n fmid = len(fvals)//2\n plt.subplot(131)\n plt.plot(fvals[0:fmid], abs(corrSinSinF[0:fmid]) )\n plt.title('Input Power')\n plt.subplot(132)\n plt.plot(fvals[0:fmid], abs(corrSoutSoutF[0:fmid]) )\n plt.title('Output Power')\n plt.subplot(133)\n plt.plot(fvals[0:fmid], gf[0:fmid])\n plt.title('Coherence')\n \n return h, lags, gf, fvals",
"def sum_series(input,input1=0,input2=1):\n\n if input == 0:\n\n return input1\n\n elif input == 1:\n\n return input2\n\n else:\n return sum_series(input-1,input1,input2) + sum_series(input-2,input1,input2)",
"def lowpass_sine2(f, fc=1.0, df=0.1):\n filt = np.zeros(len(f))\n filt[np.abs(f) < fc - df/2] = 1.0\n sel = (np.abs(f) > fc - df/2)*(np.abs(f) < fc + df/2)\n filt[sel] = np.sin(np.pi/2*(1 - 1/df*(np.abs(f[sel]) - fc + df/2)))**2\n return filt",
"def FilterFXSeries(self):\r\n filtFX=self.data[self.data.columns[0]].tolist()\r\n return filtFX",
"def filter_freq(self, low_freq=None, high_freq=None, axes=None, win_fcn='boxcar'):\n axes = self._get_axes_numbers(axes)\n fdomain = self.fft(axes=axes)\n low_freq = self._cook_args(low_freq, axes)\n high_freq = self._cook_args(high_freq, axes)\n\n if low_freq is None:\n low_freq = [0]*len(axes)\n if high_freq is None:\n high_freq = [self.ts[ax]/2. for ax in axes]\n\n fupper, flower = fdomain.copy(), fdomain.copy()\n for ax in axes:\n fupper = fupper.select(lambda x: x >= 0, axis=ax)\n flower = flower.select(lambda x: x < 0, axis=ax)\n\n fupper = fupper.window(index1=low_freq, index2=high_freq, axes=axes, win_fcn=win_fcn)\n flower = flower.window(index1=-np.array(high_freq), index2=-np.array(low_freq),\n axes=axes, win_fcn=win_fcn)\n fdomain.update(fupper)\n fdomain.update(flower)\n vals = fftshift(fdomain.values, axes=axes)\n ift = ifft2(vals, axes=axes, shape=np.array(self.shape)[axes])\n return Signal2D(np.real(ift), index=self.index, columns=self.columns)",
"def bandpass(filename,f1,f2,Q,wout=True,plot=True):\n start=time.time()\n n, data, data_dB,sr,ch=inputwav(filename)\n b, a = butter(Q,Wn=(f1/sr,f2/sr),btype='bandpass')\n data_filtered=lfilter(b,a,data,axis=0)\n print('Applying FFT...')\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_BP.wav',data_filtered,sr,'PCM_16')\n if plot==True:\n print('Plotting...')\n py.close()\n w, h = freqz(b,a,worN=16384)\n fig, (ax1, ax2) = py.subplots(nrows=2)\n ax1.semilogx(0.5*sr*w/np.pi,abs(h),'k-')\n ax1.set_xlabel('Frequency (Hz)')\n ax1.set_ylabel('Rel. Amplitude')\n ax1.grid()\n ax1.set_ylim(0,1.1)\n ax1.set_xlim(1,20000)\n ax2.plot(data,'k-',label='Raw data')\n ax2.plot(data_filtered,'m-',lw=1,label='Filtered data')\n ax2.set_xlim(0,10000)\n ax2.set_ylim(-1,1)\n ax2.set_ylabel('Amplitude (Norm Bits)')\n ax2.set_xlabel('Samples')\n ax2.legend(loc=2,frameon=False,ncol=2)\n py.subplots_adjust(hspace=0.35) \n print('Done!')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.')\n return data_filtered",
"def test_asymmetric_noise_signal(self):\n np.random.seed(0)\n test_ts = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n ts1 = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n ts2 = self.create_ts(length=100 * 24, freq=\"1h\", signal_to_noise_ratio=0)\n\n noise = (np.random.rand(100 * 24) - 0.5) * (np.random.rand(100 * 24) > 2 / 3)\n noise *= noise > 0\n\n # add strictly positive noise to ts1 and strictly negative noise to ts2\n ts1.value += abs(ts1.value * noise)\n ts2.value -= abs(ts2.value * noise)\n\n ts1.value[93 * 24] += 20\n ts1.value[96 * 24] -= 20\n ts2.value[93 * 24] += 20\n ts2.value[96 * 24] -= 20\n\n model = ProphetDetectorModel(score_func=\"z_score\")\n response1 = model.fit_predict(test_ts[90 * 24 :], ts1[: 90 * 24])\n response2 = model.fit_predict(test_ts[90 * 24 :], ts2[: 90 * 24])\n\n self.assertGreater(\n response2.scores.value[3 * 24], response1.scores.value[3 * 24]\n )\n self.assertGreater(\n response2.scores.value[6 * 24], response1.scores.value[6 * 24]\n )",
"def synchronise_signals(in_signal_1, in_signal_2, time_interval = -1, fs = 100):\n\n # signal segmentation\n in_signal_1 = in_signal_1[:time_interval*fs]\n in_signal_2 = in_signal_2[:time_interval*fs]\n\n #in_signal_2 = in_signal_2 - gravitational_filter(in_signal_2, fs)\n in_signal_1 = in_signal_1 * (-1)\n\n #in_signal_1[time_array[0] * fs:time_array[1] * fs] = in_signal_1[time_array[0] * fs:time_array[1] * fs] + 200\n #in_signal_2[time_array[4] * fs:time_array[5] * fs] = in_signal_2[time_array[4] * fs:time_array[5] * fs] + 200\n #in_signal_1[time_array[2] * fs:time_array[3] * fs] = in_signal_1[time_array[2] * fs:time_array[3] * fs] + 200\n #in_signal_2[time_array[6] * fs:time_array[7] * fs] = in_signal_2[time_array[6] * fs:time_array[7] * fs] + 200\n\n\n # signal normalisation\n mean_1, std_1, mean_2, std_2 = [np.mean(in_signal_1), np.std(in_signal_1), np.mean(in_signal_2),\n np.std(in_signal_2)]\n signal_1 = in_signal_1 - mean_1\n signal_1 /= std_1\n signal_2 = in_signal_2 - mean_2\n signal_2 /= std_2\n\n\n # zero padding signals so that they are of same length, this facilitates the calculation because\n # then the delay between both signals can be directly calculated\n # zero padding only if needed\n #if (len(signal_1) != len(signal_2)):\n\n # check which signal has to be zero padded\n # if (len(signal_1) < len(signal_2)):\n\n # pad first signal\n # signal_1 = np.append(signal_1, np.zeros(len(signal_2) - len(signal_1)))\n\n # else:\n\n # pad second signal\n # signal_2 = np.append(signal_2, np.zeros(len(signal_1) - len(signal_2)))\n\n\n N = len(signal_1) + len(signal_2) - 1\n # Calculate the cross-correlation between the two signals.\n #correlation = np.correlate(signal_1, signal_2, 'full')\n f1 = fft(signal_1, N)\n f2 = np.conj(fft(signal_2, N))\n correlation = np.real(ifft(f1 * f2))\n #correlation = fftshift(cc)\n\n\n # calculate tau / shift between both signals\n #tau = int(np.argmax(correlation) - (len(correlation)) / 2)\n tau = np.argmax(correlation)\n print(tau)\n if tau > len(correlation) // 2:\n tau = np.argmax(correlation) - len(correlation)\n print(tau)\n\n # crop signals to original length (removing zero padding)\n #signal_1 = signal_1[:len(in_signal_1)]\n #signal_2 = signal_2[:len(in_signal_2)]\n\n\n # check which signal has to be sliced\n if (tau < 0):\n # tau negative --> second signal lags\n signal_2 = signal_2[np.abs(tau):]\n\n elif (tau > 0):\n # tau positive ---> firs signal lags\n signal_1 = signal_1[np.abs(tau):]\n\n\n # revert signals to orignal scale\n result_signal_1 = signal_1 * std_1 + mean_1\n result_signal_2 = signal_2 * std_2 + mean_2\n\n return tau, result_signal_1, result_signal_2",
"def compare_series(series_a, series_b):\n return {\n 'rmse': ((series_a - series_b) ** 2).mean() ** 0.5,\n 'mbe': (series_b - series_a).mean(),\n 'mae': abs(series_b - series_a).mean(),\n 'rsqr': stats.linregress(series_a, series_b).rvalue ** 2,\n }",
"def raw_signal_even():\n signal_x = np.linspace(0, 2 * np.pi, 1000)\n signal_y = (\n np.sin(10 * signal_x)\n + np.sin(50 * signal_x)\n + np.sin(60 * signal_x)\n + np.sin(100 * signal_x)\n + 2\n )\n return signal_y",
"def raw_signal_odd():\n signal_x = np.linspace(0, 2 * np.pi, 1001)\n signal_y = np.sin(10 * signal_x) + np.sin(50 * signal_x) + 2\n return signal_y",
"def cross_timeseries(series1, series2):\n\n ts_new1 = []\n val_new1 = []\n\n ts_new2 = []\n val_new2 = []\n\n for i in range(len(series1[1])):\n # for j in range(len(series2[1])):\n if series1[1][i] in series2[1]:\n ts_new1.append(series1[1][i])\n val_new1.append(series1[0][i])\n ts_new2.append(series2[1][series2[1].index(series1[1][i])])\n val_new2.append(series2[0][series2[1].index(series1[1][i])])\n\n return [val_new1, ts_new1], [val_new2, ts_new2]",
"def analyze2(ys, freqs, ts):",
"def itkPeakSignalToNoiseRatioCalculatorIF2_cast(*args):\n return _itkPeakSignalToNoiseRatioCalculatorPython.itkPeakSignalToNoiseRatioCalculatorIF2_cast(*args)",
"def test_filtfilt_filter_sinusoid(self):\n T = 1.0\n samples = 1000\n\n waveform_k0 = get_sinusoid(frequency=5, sample_rate=samples // T, dtype=self.dtype, device=self.device).squeeze(\n 0\n )\n waveform_k1 = get_sinusoid(\n frequency=200,\n sample_rate=samples // T,\n dtype=self.dtype,\n device=self.device,\n ).squeeze(0)\n waveform = waveform_k0 + waveform_k1\n\n # Transfer function numerator and denominator polynomial coefficients\n # corresponding to 8th-order Butterworth filter with 100-cycle/T cutoff.\n # Generated with\n # >>> from scipy import signal\n # >>> b_coeffs, a_coeffs = signal.butter(8, 0.2)\n b_coeffs = torch.tensor(\n [\n 2.39596441e-05,\n 1.91677153e-04,\n 6.70870035e-04,\n 1.34174007e-03,\n 1.67717509e-03,\n 1.34174007e-03,\n 6.70870035e-04,\n 1.91677153e-04,\n 2.39596441e-05,\n ],\n dtype=self.dtype,\n device=self.device,\n )\n a_coeffs = torch.tensor(\n [\n 1.0,\n -4.78451489,\n 10.44504107,\n -13.45771989,\n 11.12933104,\n -6.0252604,\n 2.0792738,\n -0.41721716,\n 0.0372001,\n ],\n dtype=self.dtype,\n device=self.device,\n )\n\n # Extend waveform in each direction, preserving periodicity.\n padded_waveform = torch.cat((waveform[:-1], waveform, waveform[1:]))\n\n output_waveform = F.filtfilt(padded_waveform, a_coeffs, b_coeffs)\n\n # Remove padding from output waveform; confirm that result\n # closely matches waveform_k0.\n self.assertEqual(\n output_waveform[samples - 1 : 2 * samples - 1],\n waveform_k0,\n atol=1e-3,\n rtol=1e-3,\n )",
"def waveform_loss(self, y0, y1):\n assert (self.sess is not None) and (not self.sess._closed)\n feed_dict={self.tensor_wave0: y0, self.tensor_wave1: y1}\n return self.sess.run(self.loss_waveform, feed_dict=feed_dict)",
"def facet_show_other_series(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"facet_show_other_series\")",
"def facet_show_other_series(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"facet_show_other_series\")",
"def facet_show_other_series(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"facet_show_other_series\")",
"def facet_show_other_series(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"facet_show_other_series\")"
] | [
"0.5420677",
"0.53641653",
"0.5269363",
"0.5264427",
"0.525368",
"0.5248671",
"0.5222031",
"0.5170227",
"0.5146615",
"0.506948",
"0.50103337",
"0.4943461",
"0.49222016",
"0.4921278",
"0.4906378",
"0.48961374",
"0.4856847",
"0.48558632",
"0.48525333",
"0.48479262",
"0.48258182",
"0.47895092",
"0.47800702",
"0.47707686",
"0.47622764",
"0.47525835",
"0.47497135",
"0.47497135",
"0.47497135",
"0.47497135"
] | 0.66925484 | 0 |
Generate blips only at points where the indicator goes above/below the threshold. | def crosser(ind: pd.Series, threshold: float) -> pd.Series:
df = pd.DataFrame({"ind": ind})
df["above_below"] = (df["ind"] >= threshold) * 1 - (df["ind"] < threshold) * 1
df["blip"] = ((df["above_below"].shift() + df["above_below"]) == 0) * df[
"above_below"
]
df = df.dropna()
return df["blip"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def range_blip(\n indicator: pd.Series,\n threshold: float = 0,\n inout: Literal[\"inside\", \"outside\"] = \"inside\",\n) -> pd.Series:\n\n indicator = indicator.dropna()\n\n r = inout_range(indicator, threshold, inout)\n return _signed_range_entry(_range_entry(r), indicator)",
"def _thresh_clip(self, xmin, ymin, zmin, xmax, ymax, zmax):\n\n for p in self.points:\n if p.y > ymax or p.y < ymin:\n print p, 1\n self.raster = False\n break\n elif p.x > xmax or p.x < xmin:\n print p, 2\n self.raster = False\n break\n elif p.z > zmax or p.z < zmin:\n print p, 3\n self.raster = False\n break",
"def pred_from_prob(a,threshold):\n bin_preds = np.zeros((np.size(a,0),))\n bin_preds[np.where(a[:,1]>threshold)]=1.0\n return bin_preds",
"def binarize(X, *, threshold=..., copy=...):\n ...",
"def get_binarized_and_belief(array, threshold=0.5):\r\n \r\n # check assumption above\r\n if (np.amax(array) > 1.0) or (np.amin(array) < 0.0):\r\n raise ValueError('Voxel value fed to lambda in converting to original labels was out of range.')\r\n \r\n # obtain binarized output\r\n binarized = binarize(array=array, threshold=threshold)\r\n \r\n # we will sort from least to greatest, so least suspicion is what we will believe\r\n raw_suspicion = np.absolute(array - binarized)\r\n \r\n belief = np.argsort(raw_suspicion, axis=-1)\r\n \r\n return binarized, belief",
"def para_lower_than(threshold):\n\n return lambda step, curr_obj, curr_optimized_obj, extra_para: extra_para<threshold",
"def bin_thresh(img: np.ndarray, thresh: Number) -> np.ndarray:\n res = img >= thresh\n return res",
"def iou_suppression(cnt_box, yolo_box, max_threshold, min_threshold):\n all_boxes = []\n pre_bboxes = yolo_box\n bboxes = cnt_box\n for i in range(len(pre_bboxes)):\n max_flag = 0\n min_flag = 0\n for j in range(len(bboxes)):\n\n (pre_x1, pre_y1) = (pre_bboxes[i][0], pre_bboxes[i][1])\n (pre_x2, pre_y2) = (pre_bboxes[i][2], pre_bboxes[i][3])\n (cur_x1, cur_y1) = (bboxes[j][0], bboxes[j][1])\n (cur_x2, cur_y2) = (bboxes[j][2], bboxes[j][3])\n origin_w = pre_x2 - pre_x1\n origin_h = pre_y2 - pre_y1\n current_w = cur_x2 - cur_x1\n current_h = cur_y2 - cur_y1\n prime_area = origin_h * origin_w\n current_area = current_h*current_w\n\n if pre_x1 > cur_x1:\n if pre_y1 > cur_y1:\n if cur_x2 - pre_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = cur_y2 - pre_y1\n if width > origin_w:\n width = origin_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n\n else:\n if cur_x2 - pre_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = pre_y2 - cur_y1\n if width > origin_w:\n width = origin_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n else:\n if pre_y1 > cur_y1:\n if pre_x2 - cur_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = cur_y2 - pre_y1\n if width > current_w:\n width = current_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n else:\n if pre_x2 - cur_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = pre_y2 - cur_y1\n if width > current_w:\n width = current_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n\n if lap_area != 0:\n sum_area = (prime_area + current_area - lap_area)\n iou_score = lap_area/sum_area\n if iou_score > max_threshold: # set the threshold of the iou scores, in line with the sort\n max_flag = 1\n elif iou_score > min_threshold:\n min_flag = 1\n\n if max_flag == 1 or min_flag == 0:\n all_boxes.append(pre_bboxes[i])\n\n if cnt_box != []:\n for index_box in range(cnt_box.shape[0]):\n all_boxes.append(cnt_box[index_box])\n\n return np.asarray(all_boxes)",
"def isolate_burned_pixels(array, upper, lower):\n not_burned = numpy.logical_or(array <= lower,\n array >= upper)\n array[not_burned] = 0\n return array",
"def bin_data(y, num_bins, std_away):\n mean = np.mean(y)\n std = np.std(y)\n pitch_shifts = np.arange(-num_bins, num_bins + 1)\n thresholds = (std * std_away) * pitch_shifts + mean\n\n result = []\n for point in y:\n if point < thresholds[0]:\n result.append(pitch_shifts[0] - 1)\n elif point > thresholds[-1]:\n result.append(pitch_shifts[-1] + 1)\n else:\n for i in range(len(thresholds) - 1):\n if point >= thresholds[i] and point < thresholds[i + 1]:\n result.append(i - num_bins)\n return np.array(result)",
"def get_regions_above_threshold(self, threshold, values):\n\n xlocs = arange(0, len(values))\n\n # finds all turns, between above and below threshold\n # and generate areas to call peaks in, also\n # makes sure starting and stopping above maxima is caught\n # threshold is at or equal to values, need to correct this\n starts = xlocs[r_[True, diff(values >= threshold)] & (values >= threshold)]\n stops = xlocs[r_[diff(values >= threshold), True] & (values >= threshold)]\n # add to fix off by one bug\n stops += + 1\n\n # error correction incase my logic is wrong here, assuming that starts\n # and stops are always paired, and the only two cases of not being\n # pared are if the spline starts above the cutoff or the spline starts\n # below the cutoff\n assert len(starts) == len(stops)\n\n ### important note: for getting values x->y [inclusive]\n # you must index an array as ar[x:(y+1)]|\n # or else you end up with one-too-few values, the second\n # index is non-inclusive\n\n # gets all local minima, function taken from:\n # http://stackoverflow.com/questions/4624970/finding-local-maxima-minima-with-numpy-in-a-1d-numpy-array\n # Can't have local minima at start or end, that would get caught by\n # previous check, really need to think about that more\n\n local_minima = self.find_local_minima(values)\n\n # append to list any local minima above threshold\n for i, minima in enumerate(local_minima):\n if minima and values[i] >= threshold:\n starts = append(starts, i)\n stops = append(stops, i)\n\n starts = array(sorted(set(starts)))\n stops = array(sorted(set(stops)))\n starts_and_stops = []\n\n # making sure we aren't in some strange state\n assert len(starts) == len(stops)\n\n # get all contigous start and stops pairs\n while len(starts) > 0:\n stop_list = stops[stops > starts[0]]\n\n # if there are no more stops left exit the loop and return the\n # currently found starts and stops\n if len(stop_list) == 0:\n break\n stop = stop_list[0]\n starts_and_stops.append((starts[0], stop))\n starts = starts[starts >= stop]\n\n starts = array([x[0] for x in starts_and_stops])\n stops = array([x[1] for x in starts_and_stops])\n return starts_and_stops, starts, stops",
"def get_segments(weights, threshold):\n marker_list = [True if i >= threshold else False for i in weights]\n i = 0\n final_pairs = []\n while i < len(weights):\n if marker_list[i]:\n start = i\n while i < len(weights) and marker_list[i]:\n i = i + 1\n end = i - 1\n if end-start > 1:\n final_pairs.append(start)\n final_pairs.append(end)\n i = i + 1\n return np.array(final_pairs)",
"def bout_detect(raw_motion_data):\n import numpy as np\n window_length = 50 # Most bouts last much lesser than 20 frames,\n # so this is a really safe window to search for bouts.\n nonzero_indices = np.flatnonzero(raw_motion_data)\n all_bout_indices = [] # A list of all the indices that belong to all bouts.\n bout_indices = [] # A list of tuples of bout indices.\n bouts = [] # A list of bouts, stored as tuples.\n # Run through the list of nonzero indices and look for the first zero value\n # after it.\n do_not_append = False\n for i in nonzero_indices:\n if i not in all_bout_indices:\n bout_start = i # Bout starting point\n try:\n first_zero_after_start = np.flatnonzero(\n raw_motion_data[bout_start:bout_start+window_length+1] == 0)[0]\n except IndexError:\n try:\n first_zero_after_start = np.flatnonzero(\n raw_motion_data[bout_start:len(raw_motion_data)] == 0)[0]\n except IndexError:\n first_zero_after_start = len(raw_motion_data)\n do_not_append = True\n if not do_not_append:\n # Adjusted to start and end at 0\n bout = tuple(raw_motion_data[bout_start-1:bout_start+first_zero_after_start+1])\n bout_inds = tuple(range(bout_start-1, bout_start+first_zero_after_start+1))\n bout_indices.append(bout_inds)\n bouts.append(bout)\n for j in range(bout_start, bout_start+first_zero_after_start):\n all_bout_indices.append(j)\n\n # Filter out the bouts list to remove tuples which are smaller than 5 entries long.\n \"\"\"This is not really needed, because the fish can make some tiny movements\n that need to be detected and counted as bouts. In any case, the decision for this\n can be made much later. It is not really important to figure this out right now.\"\"\"\n # bouts[:] = [tup for tup in bouts if len(tup) > 5]\n # bout_indices[:] = [x for x in bout_indices if len(x) > 5]\n return bouts, bout_indices",
"def overlay_thresholding_function(threshold, positive=True):\n # from the interface class definition above, there will be 3 values\n # for the thresh type: inactive, less than, greater than\n t = threshold[0]\n if threshold[-1] == 'inactive':\n if positive:\n return lambda x: np.ones(x.shape, 'B')\n return lambda x: np.zeros(x.shape, 'B')\n elif threshold[-1] == 'less than':\n if positive:\n return lambda x: np.less(x,t)\n return lambda x: np.greater_equal(x,t)\n elif threshold[-1] == 'greater than':\n if positive:\n return lambda x: np.greater(x,t)\n return lambda x: np.less_equal(x,t)\n else:\n print 'unrecognized thresholding parameters:', threshold",
"def binarize(self, image, threshold):\n\n bin_img = image.copy()\n [h, w] = bin_img.shape\n opt_threshold = threshold\n print(opt_threshold)\n for row in range(h):\n for col in range(w):\n if bin_img[row, col] > opt_threshold: #greater than threshld white(general)\n bin_img[row, col] = 255 #0 instead of 1\n else: #less than threshold black(general)\n bin_img[row, col] = 0 #0 instead of 1\n\n\n #reverse the cases\n\n return bin_img",
"def cut_bonds(BL, xy, thres):\n i2cut = (xy[BL[:, 0], 0] - xy[BL[:, 1], 0]) ** 2 + (xy[BL[:, 0], 1] - xy[BL[:, 1], 1]) ** 2 < thres ** 2\n BLtrim = BL[i2cut]\n return BLtrim",
"def binarize(array, threshold=0.5):\r\n \r\n if (np.amax(array) > 1.0) or (np.amin(array) < 0.0):\r\n raise ValueError('Voxel value fed to lambda in converting to original labels was out of range.')\r\n \r\n # obtain binarized output\r\n binarized = array.copy()\r\n zero_mask = (binarized <= threshold)\r\n binarized[zero_mask] = 0.0\r\n binarized[~zero_mask] = 1.0\r\n \r\n return binarized",
"def filter_blinks(self):\n true_saccades = []\n for i, s in self.saccades.iterrows():\n blink = ((self.blinks.start > s.start)\n & (self.blinks.end < s.end)\n ).any()\n\n if blink:\n self.samples.loc[s.start:s.end, [\"x\", \"y\"]] = np.nan\n else:\n true_saccades.append(i)\n\n self.saccades = self.saccades.loc[true_saccades].reset_index(drop=True)\n\n return self",
"def boundary(gap, min_tags_in_window, average):\n\tassert min_tags_in_window >= 1;\n\ttemp = 0;\n\tfor i in range(0, min_tags_in_window): temp += poisson(i, average);\n\ttemp = pow(temp, gap+1); \n\treturn temp*temp; # start & end ",
"def check_bonuses(self, hit_index):\n result = []\n if hit_index >= 1 and self.balls[hit_index - 1].type < 0:\n result.append(hit_index - 1)\n if hit_index < len(self.balls) - 1 and \\\n self.balls[hit_index + 1].type < 0:\n result.append(hit_index + 1)\n return result",
"def threshold_mask(mask, threshold=0.5):\n mask[np.where(mask >= threshold)] = 1.\n mask[np.where(mask < threshold)] = 0.\n return mask",
"def MakeVenetianBlinds(self):\r\n\r\n amount = 128\r\n size = self.GetClientSize()\r\n region = wx.Region(0, 0, size.x, 1)\r\n\r\n for y in xrange(size.y):\r\n\r\n # Reverse the order of the bottom 4 bits\r\n j = (y & 8 and [1] or [0])[0] | (y & 4 and [2] or [0])[0] | \\\r\n (y & 2 and [4] or [0])[0] | (y & 1 and [8] or [0])[0]\r\n \r\n if 16*j+8 < amount:\r\n region.Union(0, y, size.x, 1)\r\n \r\n self.SetShape(region)",
"def binarize(self, image, threshold):\n bin_img = image.copy()\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n if image[i, j] >= threshold:\n bin_img[i, j] = 0\n else:\n bin_img[i, j] = 255\n return bin_img",
"def binary_predict(probs, threshold = 0.5):\n return (probs >= threshold) * np.ones(len(probs))",
"def zeroCrossingNegSlope(self, evap_threshold):\r\n\t\tself.splitBaseline = np.mean(self.splitData[0:10])\r\n\t\ttry:\r\n\t\t\tsplit_min_index = np.argmin(self.splitData)\r\n\t\t\tsplit_max_index = np.argmax(self.splitData[0:split_min_index])\r\n\t\t\tsplit_max_value = self.splitData[split_max_index]\r\n\t\t\tsplit_min_value = self.splitData[split_min_index]\r\n\t\texcept:\r\n\t\t\tzero_crossing = -3\r\n\t\t\treturn zero_crossing\r\n\t\t#print 'split',\tsplit_min_index, (self.splitBaseline-split_min_value), split_max_index,(split_max_value-self.splitBaseline)\r\n\t\tif (self.splitBaseline-split_min_value) >= evap_threshold and (split_max_value-self.splitBaseline) >= evap_threshold: #avoid particles evaporating before the notch position can be properly determined (details in Taylor et al. 10.5194/amtd-7-5491-2014)\r\n\t\t\ttry:\r\n\t\t\t\tfor index in range(split_max_index, split_min_index+1): #go to max +1 because 'range' function is not inclusive\r\n\t\t\t\t\tif self.splitData[index] > self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_pos = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_pos = index\r\n\t\t\t\t\tif self.splitData[index] <= self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_neg = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_neg = index\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tzero_crossing = index+((value_zero_cross_pos-self.splitBaseline)*(index_zero_cross_pos-index_zero_cross_neg))/(value_zero_cross_pos-value_zero_cross_neg) \r\n\t\t\texcept:\r\n\t\t\t\tzero_crossing = -1\r\n\t\t\r\n\t\telse: \r\n\t\t\tzero_crossing = -2\r\n\r\n\t\tself.zeroCrossingPos = zero_crossing\r\n\t\treturn zero_crossing",
"def threshold(X, thresh):\n Y = np.array(X)\n Y[Y >= thresh] = 1\n Y[Y < thresh] = 0\n return Y",
"def zeroCrossingPosSlope(self, evap_threshold):\r\n\t\tself.splitBaseline = np.mean(self.splitData[0:10])\r\n\t\tsplit_max_index = np.argmax(self.splitData)\r\n\t\tsplit_min_index = np.argmin(self.splitData[0:split_max_index])\r\n\t\tsplit_max_value = self.splitData[split_max_index]\r\n\t\tsplit_min_value = self.splitData[split_min_index]\r\n\r\n\t\tif (self.splitBaseline-split_min_value) >= evap_threshold and (split_max_value-self.splitBaseline) >=evap_threshold: #avoid particles evaporating before the notch position can be properly determined (details in Taylor et al. 10.5194/amtd-7-5491-2014)\r\n\t\t\ttry:\r\n\t\t\t\tfor index in range(split_min_index, split_max_index+1): #go to max +1 because 'range' function is not inclusive\r\n\t\t\t\t\tif self.splitData[index] < self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_neg = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_neg = index\r\n\t\t\t\t\tif self.splitData[index] >= self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_pos = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_pos = index\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tzero_crossing = index+((value_zero_cross_pos-self.splitBaseline)*(index_zero_cross_pos-index_zero_cross_neg))/(value_zero_cross_pos-value_zero_cross_neg) \r\n\t\t\texcept:\r\n\t\t\t\tzero_crossing = -1 \r\n\t\t\t\t\r\n\t\telse:\r\n\t\t\tzero_crossing = -2 \r\n\t\t\r\n\t\tself.zeroCrossingPos = zero_crossing\r\n\t\treturn zero_crossing",
"def iron_threshold(self, threshold=0.02):\n portfolio = self.copy()\n for yesterday, today in zip(self.index[:-2], self.index[1:-1]):\n if (portfolio.weights.loc[today] - portfolio.weights.loc[yesterday]).abs().max() <= threshold:\n portfolio.forward(today, yesterday=yesterday)\n\n return portfolio",
"def get_predict(prediction, threshold):\n\n prediction[prediction < threshold] = 0\n prediction[prediction >= threshold] = 1\n \n return prediction",
"def non_max_suppression(bboxes, iou_threshold, threshold, box_format=\"corners\"):\n\n # 49 x 6 \n assert type(bboxes) == list\n # print(bboxes)\n bboxes = [box for box in bboxes if box[1] > threshold]\n bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)\n bboxes_after_nms = []\n # print(bboxes)\n while bboxes:\n chosen_box = bboxes.pop(0)\n bbox_temp = bboxes.copy()\n bboxes = []\n for box in bbox_temp: # not the same class or not overlap a lot \n if box[0] != chosen_box[0] or intersection_over_union(torch.tensor(chosen_box[2:]),torch.tensor(box[2:]), box_format=box_format,) < iou_threshold:\n bboxes.append(box)\n\n bboxes_after_nms.append(chosen_box)\n # print(\"NMS: \" + str(len(bboxes_after_nms)))\n return bboxes_after_nms"
] | [
"0.6095413",
"0.5878129",
"0.5754887",
"0.5733245",
"0.570352",
"0.56719947",
"0.5633101",
"0.5514651",
"0.5503244",
"0.5477215",
"0.54693437",
"0.54594445",
"0.543474",
"0.5420535",
"0.5395379",
"0.5358498",
"0.53468525",
"0.53458107",
"0.5283447",
"0.52825373",
"0.5280514",
"0.5277893",
"0.52770096",
"0.5260791",
"0.5260625",
"0.5210438",
"0.52012104",
"0.51626265",
"0.5155918",
"0.515196"
] | 0.71472293 | 0 |
Blips should not be propagated. If any column name contains the word 'blip', warn the user that they may be making a mistake. | def warn_blip(columns: Union[Set[str], List[str]]) -> None:
blip_columns = [b for b in columns if "blip" in b]
if blip_columns:
print(f"Warning: blip is being propagated: {blip_columns}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __warn_missing_col(col_name, action):\n msg = (\"Skipping {} values for {!r}: Unable to find column \"\n \"in hybrid meta. Did you forget to prefix with \"\n \"{!r} or {!r}? \")\n w = msg.format(action, col_name, SOLAR_PREFIX, WIND_PREFIX)\n logger.warning(w)\n warn(w, InputWarning)",
"def _suppress(self, key):\n return key in self.SUPPRESS",
"def _these_columns_cannot_annotate_exp_cons(self):\n _cols = set([]) #\n for param_name, req_cols in self.required_columns.items():\n _cols |= req_cols\n\n return _cols | self.other_useful_columns",
"def violated(self) -> bool:\n ...",
"def is_blip(self) -> bool:\n return self.proto.is_blip",
"def _force_drop(self, msg) -> bool:\n return self.ALWAYSDROP_TEXT in msg",
"def is_ignored(self):",
"def warnings(self, d):\n\n if d['filter_nu'] == 220e9:\n if d['beam_shape'] == 'gaussian':\n warnings.warn('The nu dependency of the gausian beam FWHM '\n 'is not a good approximation in the 220 GHz band.')\n elif d['beam_shape'] == 'fitted_beam':\n warnings.warn('Beam and solid angle frequency dependence implementation '\n 'in the 220 GHz band for the fitted beam does not correctly describe '\n 'the true behavior')",
"def bad_column_positions(self, x):\n return x.is_null()",
"def bad(self):\n raise NotImplementedError",
"def bad(self):\n raise NotImplementedError",
"def warning(self, warning):\n pass",
"def ignored_columns(self):\n return self._parms.get(\"ignored_columns\")",
"def negations(self) -> str:",
"def warning(self, *args, **kwargs):",
"def _allowed_delta_reasons(self, conn, tblname):\n with conn.cursor() as cursor:\n cursor.execute(\"\"\"SELECT consrc\n FROM pg_constraint\n WHERE conrelid = %s::regclass\n AND consrc LIKE '(delta_reason%%'\"\"\",\n [tblname])\n result = cursor.fetchall()\n assert len(result) == 1\n constraint_src = result[0].consrc\n constraint_array_vals = re.sub(r'.*ARRAY\\[(.*)\\].*', r'\\1', constraint_src)\n split_array_vals = constraint_array_vals.split(',')\n return [re.sub(r\"\\s*.*\\'(.*)\\'::text\", r'\\1', v) for v in split_array_vals]",
"def test_columns_not_in_raw_var(self):\n\n self.validator.adata.raw = self.validator.adata\n self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\"ERROR: Column 'feature_is_filtered' must not be present in 'raw.var'.\"],\n )",
"def _should_ignore(self, name):\n _name = name.lower()\n return (_name.startswith(\"deprecated\") or\n _name.startswith(\"_\") or\n _name in (\"remote\", \"reserved\",\n \"dialogs_py\", \"dialogs_ipy\", \"dialogs_jy\"))",
"def _resluggable_column(self, column, labels_to_slugs, dframe):\n return (column in labels_to_slugs.keys() and (\n not column in labels_to_slugs.values() or (\n labels_to_slugs[column] != column and\n labels_to_slugs[column] not in dframe.columns)))",
"def warn():\n pass",
"def clean_table(self):\n return False",
"def _must_skip(self):\n if not self.magento_record :\n return \"Product attribute can not imported because it is not importable.\"\n apply_to = self.magento_record.get('apply_to')\n if apply_to and len(apply_to) > 0 and 'simple' not in apply_to:\n return \"Product attribute can not imported because it not for simple product.\"\n return",
"def checkIfColumnControlledVocab(self, column_name):\n try:\n con = self.getMetadataDatabaseConnection()\n valid_controlled_column=0\n db_output=con.cursor().callproc('check_if_column_controlled',\n [column_name.upper(),\\\n valid_controlled_column])\n if db_output[1]==0:\n return False\n else:\n return True\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False",
"def warning(self, msg, transfers):\n self.validation_exceptions.extend(self._create_exceptions(msg, transfers, ValidationType.WARNING))",
"def verify(self):\n for col in self.columns:\n if col not in self.table_obj.columns.keys():\n raise Exception('{} column not found in {}'.format(\n col, self.table_obj))",
"def __str__(self):\n return \"IgnoreWhite(%s)\" % str(self.__rule)",
"def warning(self, msg, *args, **kwargs):\n pass",
"def _fix_bias(self, op, attrs, num_inputs):\n if op not in [mx.sym.Convolution, mx.sym.Deconvolution, mx.sym.FullyConnected]:\n return attrs\n if num_inputs == 3:\n attrs['no_bias'] = False\n elif num_inputs == 2:\n attrs['no_bias'] = True\n else:\n raise ValueError(\"Unexpected number of inputs for: {}\".format(op))\n return attrs",
"def strip_warnings(self, line):\n if line[0] == \"|\":\n return \"\"\n else:\n return line",
"def migrate_hidden_warnings_to_notes(apps, schema_editor):\n Infraction = apps.get_model('api', 'Infraction')\n\n for infraction in Infraction.objects.filter(type=\"warning\", hidden=True):\n infraction.type = \"note\"\n infraction.save()"
] | [
"0.59633464",
"0.5629486",
"0.5324889",
"0.52932507",
"0.5251429",
"0.5170503",
"0.5128109",
"0.51264954",
"0.51006246",
"0.50926596",
"0.50926596",
"0.5080148",
"0.5069726",
"0.50693744",
"0.50359195",
"0.50021553",
"0.5000355",
"0.49801546",
"0.4968183",
"0.49676052",
"0.49370918",
"0.49049893",
"0.48953333",
"0.48953265",
"0.48901004",
"0.48897135",
"0.48601422",
"0.4854011",
"0.48162004",
"0.4804034"
] | 0.82263887 | 0 |
Before joining the two dataframes, ensure that they are correctly aligned. Signal and blip should be shifted on dfg BEFORE upsampling. | def join(
df: pd.DataFrame,
dfg: pd.DataFrame,
upsampled_columns: List[str],
label: Literal["left", "right"],
) -> pd.DataFrame:
if label == "left":
joined_df = df.join(dfg[upsampled_columns])
elif label == "right":
dfg = dfg.shift(-1)
joined_df = df.join(dfg[upsampled_columns])
joined_df[upsampled_columns] = joined_df[upsampled_columns].shift(1)
else:
raise ValueError(f"label must be 'left' or 'right', '{label}' given")
return joined_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upsample(\n df: pd.DataFrame,\n dfg: pd.DataFrame,\n *,\n label: Literal[\"left\", \"right\"] = \"left\",\n keep: Optional[Union[str, Sequence[str]]] = None,\n propagate: Optional[Union[str, Sequence[str]]] = None,\n) -> pd.DataFrame:\n\n def warn_blip(columns: Union[Set[str], List[str]]) -> None:\n \"\"\"Blips should not be propagated. If any column name contains\n word 'blip' warn user that they may be making a mistake.\"\"\"\n blip_columns = [b for b in columns if \"blip\" in b]\n if blip_columns:\n print(f\"Warning: blip is being propagated: {blip_columns}\")\n\n def verify(data: Union[Sequence[str], str, Set]) -> List[str]:\n if isinstance(data, (str, int, float)):\n data = [\n data,\n ]\n data = set(data) # ensure no double entries inserted by user\n if not data.issubset(upsampled_columns):\n raise ValueError(\n f\"Cannot upsample: {list(data.difference(upsampled_columns))} \"\n f\"- not in columns.\"\n )\n return list(data)\n\n def join(\n df: pd.DataFrame,\n dfg: pd.DataFrame,\n upsampled_columns: List[str],\n label: Literal[\"left\", \"right\"],\n ) -> pd.DataFrame:\n \"\"\"\n Before joining the two dataframes, ensure that they are\n correctly aligned. Signal and blip should be shifted on dfg\n BEFORE upsampling.\n\n \"\"\"\n if label == \"left\":\n joined_df = df.join(dfg[upsampled_columns])\n elif label == \"right\":\n dfg = dfg.shift(-1)\n joined_df = df.join(dfg[upsampled_columns])\n joined_df[upsampled_columns] = joined_df[upsampled_columns].shift(1)\n else:\n raise ValueError(f\"label must be 'left' or 'right', '{label}' given\")\n\n return joined_df\n\n upsampled_columns = list(set(dfg.columns) - set(df.columns))\n # preserve types to be able to cast back into them\n types = dfg[upsampled_columns].dtypes.to_dict()\n\n # ffill and subsequent dropnas depend on dfg not having n/a's\n if len(dfg[dfg.isna().any(axis=1)]) != 0:\n raise ValueError(\"Lower frequency dataframe (dfg) must not have n/a values.\")\n\n joined_df = join(df, dfg, upsampled_columns, label)\n\n if not (keep or propagate):\n warn_blip(upsampled_columns)\n return joined_df.ffill().dropna()\n elif keep and propagate:\n keep = verify(keep)\n propagate = verify(propagate)\n assert not set(keep).intersection(\n propagate\n ), \"Columns in keep and propagate must not overlap.\"\n propagate.extend(list(set(upsampled_columns) - set(keep) - set(propagate)))\n else:\n if keep:\n keep = verify(keep)\n propagate = list(set(upsampled_columns) - set(keep))\n else:\n assert propagate is not None\n propagate = verify(propagate)\n keep = list(set(upsampled_columns) - set(propagate))\n joined_df[keep] = joined_df[keep].fillna(0)\n joined_df[propagate] = joined_df[propagate].ffill()\n warn_blip(propagate)\n return joined_df.dropna().astype(types) # type: ignore",
"def test_two_pd_alignment():\n out_dir = _TempDir()\n raw, _, events, _ = pd_parser.simulate_pd_data(prop_corrupted=0.)\n fname = op.join(out_dir, 'test-raw.fif')\n raw.save(fname)\n events2 = events[::2]\n events3 = events[1:][::2]\n # make behavior data\n np.random.seed(12)\n beh_events2 = events2[:, 0].astype(float) / raw.info['sfreq']\n offsets2 = np.random.random(len(beh_events2)) * 0.05 - 0.025\n beh_events2 += offsets2\n # make next one\n beh_events3 = events3[:, 0].astype(float) / raw.info['sfreq']\n offsets3 = np.random.random(len(beh_events3)) * 0.05 - 0.025\n beh_events3 += offsets3\n n_na = abs(len(beh_events2) - len(beh_events3))\n if len(beh_events2) > len(beh_events3):\n beh_events3 = list(beh_events3) + ['n/a'] * n_na\n elif len(beh_events3) > len(beh_events2):\n beh_events2 = list(beh_events2) + ['n/a'] * n_na\n beh = dict(trial=np.arange(len(beh_events2)),\n fix_onset_time=beh_events2,\n response_onset_time=beh_events3)\n behf = op.join(out_dir, 'behf-test.tsv')\n _to_tsv(behf, beh)\n pd_parser.parse_pd(fname, pd_event_name='Fixation', beh=beh,\n pd_ch_names=['pd'], beh_key='fix_onset_time',\n zscore=20, exclude_shift=0.05)\n pd_parser.parse_pd(fname, pd_event_name='Response', beh=beh,\n pd_ch_names=['pd'], beh_key='response_onset_time',\n zscore=20, add_events=True, exclude_shift=0.05)\n raw = _read_raw(fname)\n annot, pd_ch_names, beh2 = _load_data(raw)\n raw.set_annotations(annot)\n events4, event_id = mne.events_from_annotations(raw)\n np.testing.assert_array_equal(events4[events4[:, 2] == 1, 0],\n events2[:, 0])\n np.testing.assert_array_equal(events4[events4[:, 2] == 2, 0],\n events3[:, 0])\n assert pd_ch_names == ['pd']\n np.testing.assert_array_equal(beh2['pd_parser_sample'], events2[:, 0])",
"def sjoin(left_df, right_df, how=..., op=..., lsuffix=..., rsuffix=...):\n ...",
"def precheck_align(a_mat, b_mat, a_cast, b_cast):\n\n # cast to DataFrame in case either is a Series\n a_mat = pd.DataFrame(a_mat)\n b_mat = pd.DataFrame(b_mat)\n\n # drop samples with all missing values\n a_mat = a_mat.dropna(how=\"all\", axis=0)\n b_mat = b_mat.dropna(how=\"all\", axis=0)\n\n # align samples\n a_mat, b_mat = a_mat.align(b_mat, axis=0, join=\"inner\")\n\n # check sample sizes\n num_samples = a_mat.shape[0] # number of samples for each variable\n if num_samples < 2:\n raise ValueError(\"x and y must have length at least 2.\")\n\n return a_mat, b_mat",
"def test_concatenate(self):\n header = BDFHeader.from_path(TestData.bdf_256)\n header2 = BDFHeader.from_path(TestData.bdf_256)\n assert header.nb_data_records == 60\n assert header.data_duration == 1\n assert header2.nb_data_records == 60\n assert header2.data_duration == 1\n assert (header.nb_channels + 1) * 256 == header.bytes_in_header\n header.concatenate(header2)\n assert (header.nb_channels + 1) * 256 == header.bytes_in_header\n assert header.nb_data_records == 120\n assert header.data_duration == 2\n assert header2.nb_data_records == 60\n assert header2.data_duration == 1\n header2.max_dimensions = [99999999] * header2.nb_channels\n header2.min_dimensions = [-9999999] * header2.nb_channels\n header2.max_digital = [99999999] * header2.nb_channels\n header2.min_digital = [-9999999] * header2.nb_channels\n header.concatenate(header2)\n assert header.nb_data_records == 180\n assert header.data_duration == 3\n assert header.max_dimensions == [99999999] * header2.nb_channels\n assert header.min_dimensions == [-9999999] * header2.nb_channels\n assert header.max_digital == [99999999] * header2.nb_channels\n assert header.min_digital == [-9999999] * header2.nb_channels\n assert (header.nb_channels + 1) * 256 == header.bytes_in_header",
"def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)",
"def dummy_join_fastq(inputs, outputs, log_files, batch_size=10000, gap=20, **kwargs):\n if gap >= 0:\n gap_seq = \"N\" * gap\n gap_qual = [0] * gap\n\n counts = {\n \"join_count\": 0,\n \"fwd_count\": 0,\n \"rev_count\": 0,\n \"total_joined\": 0,\n \"total_written\": 0,\n }\n faked_joins = []\n\n fwd_records = SeqIO.parse(get_file_name(inputs.fwd), \"fastq\")\n rev_records = SeqIO.parse(get_file_name(inputs.rev), \"fastq\")\n with open(get_file_name(outputs), \"w\") as out_fastq_stream:\n for frec, rrec in merge_record_iters(fwd_records, rev_records, **kwargs):\n # join seqs\n new_records = []\n if frec is None and rrec is None:\n logger.warning(\"Both ends missing from input\") # this shouldn't\n continue\n if frec is None:\n logger.debug(\"Forward seq trimmed to oblivion\")\n new_records.append(rev_comp_rec(rrec, qual=True, suffix=\".rev\"))\n counts[\"rev_count\"] += 1\n elif rrec is None:\n logger.debug(\"Reverse seq trimmed to oblivion\")\n new_records.append(frec)\n counts[\"fwd_count\"] += 1\n elif gap >= 0:\n counts[\"join_count\"] += 1\n # join sequence\n new_seq = (\n frec.seq\n + Seq.Seq(gap_seq, frec.seq.alphabet)\n + rrec.seq.reverse_complement()\n )\n new_record = SeqRecord.SeqRecord(\n new_seq, id=frec.id, name=frec.name, description=\"Faked join\"\n )\n # join quality\n new_record.letter_annotations[\"phred_quality\"] = (\n frec.letter_annotations[\"phred_quality\"]\n + gap_qual\n + list(reversed(rrec.letter_annotations[\"phred_quality\"]))\n )\n new_records.append(new_record)\n else:\n # gap < 0 means don't join...add separately\n new_records.append(frec)\n new_records.append(rev_comp_rec(rrec, qual=True, suffix=\".rev\"))\n\n faked_joins.extend(new_records)\n if len(faked_joins) >= batch_size:\n n_written = SeqIO.write(faked_joins, out_fastq_stream, format=\"fastq\")\n if n_written != len(faked_joins):\n logger.warning(\n \"Only %d of %d faked joins written!\"\n % (n_written, len(faked_joins))\n )\n counts[\"total_joined\"] += len(faked_joins)\n counts[\"total_written\"] += n_written\n del faked_joins[:]\n\n # at end of loop, write remaining cached records\n n_written = SeqIO.write(faked_joins, out_fastq_stream, format=\"fastq\")\n if n_written != len(faked_joins):\n logger.warning(\n \"Only %d of %d faked joins written!\" % (n_written, len(faked_joins))\n )\n counts[\"total_joined\"] += len(faked_joins)\n counts[\"total_written\"] += n_written\n\n # Report some counts\n msg = \"\"\"\n#======================\n# Faked joins\n# Total written: {total_written} of {total_joined}\n# Dummy Joins: {join_count}\n# FwdOnly: {fwd_count}\n# RevOnly: {rev_count}\n#======================\n\"\"\".format(\n **counts\n )\n\n with open(log_files[0], \"a\") as log_out_stream:\n log_out_stream.write(msg)\n\n logger.debug(msg)",
"def merge_both_tables():\n old = Table.read('data/data_table_cartesian_including_tims_stars_with_bg_ols_and_component_overlaps.fits')\n wanted = Table.read('data/scocen_candidates_300k_only_spatial_cut.fits')\n additional = Table.read('data/scocen_candidates_300k_only_spatial_cut_200k_to_determine_bg_ols.fits')\n\n d_old = dict(zip(old['source_id'], old['background_log_overlap']))\n d_add = dict(zip(additional['source_id'], additional['background_log_overlap']))\n d_old.update(d_add)\n dct = d_old\n\n ln_bg_ols = [dct[source_id] for source_id in wanted['source_id']]\n print\n len(ln_bg_ols), len(wanted)\n\n wanted['background_log_overlap'] = ln_bg_ols\n print\n wanted\n\n wanted.write('data/scocen_candidates_300k_only_spatial_cut.fits', overwrite=True, format='fits')",
"def downsampling(x_train, y_train, random_state=42):\n sampling = pd.concat([x_train, y_train], axis=1)\n big = sampling[y_train == y_train.value_counts().index[0]]\n small = sampling[y_train == y_train.value_counts().index[1]]\n\n downsampled = resample(big,\n replace=False,\n n_samples=len(small),\n random_state=random_state)\n downsampled = pd.concat([downsampled, small])\n x_train_bal = downsampled[downsampled.columns.values[:-1]]\n y_train_bal = downsampled[downsampled.columns.values[-1]]\n\n del sampling, big, small, downsampled\n return x_train_bal, y_train_bal",
"def test_merge_simplex_duplex(self):\n df_merge = small_variants.create_duplex_simplex_maf(self.s_maf, self.d_maf)\n df_merge = df_merge.sort_index()\n expected = pd.read_csv('tests/test_data/expected.maf', sep='\\t')\n expected = expected.set_index(self.mutation_key, drop=False)\n expected = expected.sort_index()\n\n pd.testing.assert_frame_equal(df_merge, expected)\n\n # SNP\n snp_index = (1, 8080157, 8080157, 'T', 'A')\n assert df_merge.loc[snp_index]['t_ref_count_fragment'] == 1549\n assert df_merge.loc[snp_index]['t_alt_count_fragment'] == 1\n assert df_merge.loc[snp_index]['t_total_count_fragment'] == 1550\n # INS\n insertion_index = (18, 48584855, 48584855, 'A', 'TTT')\n assert df_merge.loc[insertion_index]['t_ref_count_fragment'] == 694\n assert df_merge.loc[insertion_index]['t_alt_count_fragment'] == 4\n assert df_merge.loc[insertion_index]['t_total_count_fragment'] == 698\n # DEL\n deletion_index = (18, 57571783, 57571783, 'T', '-')\n assert df_merge.loc[deletion_index]['t_ref_count_fragment'] == 514\n assert df_merge.loc[deletion_index]['t_alt_count_fragment'] == 6\n assert df_merge.loc[deletion_index]['t_total_count_fragment'] == 520",
"def mix(self, other: \"DiscreteFactorTable\"):\n if (len(self.support) == 0):\n return other\n if (len(other.support) == 0):\n return self\n\n # NOTE: can this be relaxed?\n assert type(self.support[0]) == type(other.support[0])\n\n jsupport = []\n jlogits = []\n matchedrows = []\n unmatchedrows = []\n\n #check that all entries have same keys\n if isinstance(self.support[0], (dict, frozendict)):\n s_keys = tuple(self.support[0].keys())\n for si in self.support:\n assert tuple(si.keys()) == s_keys\n if isinstance(other.support[0], (dict, frozendict)):\n o_keys = tuple(other.support[0].keys())\n for oi in self.support:\n assert tuple(oi.keys()) == o_keys\n\n #first get inner join rows, tracking ones that don't match\n for si, oi in product(self.support, other.support):\n if isinstance(si, (dict, frozendict)) and isinstance(oi, (dict, frozendict)):\n if dict_match(si, oi): #not efficient if the cartesian product is large\n matchedrows.extend([si, oi])\n soi = dict_merge(si, oi)\n if soi in jsupport:\n continue\n jprob = np.exp(self.logit(si)) + np.exp(other.logit(oi))\n jlogit = np.log(jprob)\n\n if jlogit == -np.inf:\n continue\n jsupport.append(soi)\n jlogits.append(jlogit)\n else:\n unmatchedrows.extend([si, oi])\n else:\n soi = (si, oi)\n jprob = np.exp(self.logit(si)) + np.exp(other.logit(oi))\n jlogit = np.log(jprob)\n jsupport.append(soi)\n jlogits.append(jlogit)\n\n #add in the left and right outer join rows, ensuring that they were never matched\n for i in unmatchedrows:\n if (i in matchedrows) or (i in jsupport):\n continue\n logit = np.log(np.exp(self.logit(i)) + np.exp(other.logit(i)))\n if logit == -np.inf:\n continue\n jsupport.append(i)\n jlogits.append(logit)\n return DiscreteFactorTable(support=jsupport, logits=jlogits)",
"def _fix_austrian_lpis(self, eopatch):\n eopatch.vector_timeless[self.feature] = pd.merge(eopatch.vector_timeless[self.feature],\n self.mapping,\n on='SNAR_BEZEI')",
"def synchronize_data(self, is_master):\n\n if is_master:\n self.data_mapping.append(self.invalid_map)\n\n #\n # Default offsets before synchronization begins\n #\n if self.first_sync:\n if is_master and (len(self.band_1.timestamps) > 0):\n self.first_offset = len(self.band_1.timestamps) - 1\n elif (not is_master) and (len(self.band_2.timestamps) > 0):\n self.second_offset = len(self.band_2.timestamps) - 1\n\n #\n # Data needs to be synchronized\n #\n if (self.first_offset is not None) and (self.second_offset is not None):\n\n # Find minimum (timestamp) distance from first device, relative to second device's first timestamp\n # Note: Timestamps from both devices are enforced to be strictly increasing (elsewhere)\n #\n if self.first_sync:\n self.first_sync = False\n first_timestamp = self.band_1.timestamps[self.first_offset]\n sec_timestamp = self.band_2.timestamps[self.second_offset]\n min_distance = abs(first_timestamp - sec_timestamp)\n\n while self.first_offset > 0:\n temp_idx = self.first_offset - 1\n temp_timestamp = self.band_1.timestamps[temp_idx]\n new_distance = abs(temp_timestamp - sec_timestamp)\n\n if new_distance > min_distance:\n break\n else:\n self.first_offset = temp_idx\n min_distance = new_distance\n\n self.data_mapping[self.first_offset] = self.second_offset\n\n else:\n\n #\n # Need to wait for new data to arrive\n #\n if ((self.first_offset + 1 >= len(self.band_1.timestamps) - 1) or\n (self.second_offset + 1 >= len(self.band_2.timestamps) - 1)):\n return\n\n self.first_offset += 1\n self.second_offset += 1\n first_timestamp = self.band_1.timestamps[self.first_offset]\n sec_timestamp = self.band_2.timestamps[self.second_offset]\n min_distance = first_timestamp - sec_timestamp\n in_sync = False\n\n if abs(min_distance) < COPY_THRESHOLD:\n in_sync = True\n else:\n\n if min_distance > 0:\n while self.second_offset < len(self.band_2.timestamps) - 1:\n temp_timestamp = self.band_2.timestamps[self.second_offset]\n temp_distance = first_timestamp - temp_timestamp\n\n if abs(temp_distance) > abs(min_distance):\n break\n else:\n self.second_offset += 1\n min_distance = temp_distance\n\n if abs(min_distance) < COPY_THRESHOLD:\n in_sync = True\n\n # min_distance <= (-1) * COPY_THRESHOLD\n else:\n while self.first_offset < len(self.band_1.timestamps) - 1:\n temp_timestamp = self.band_1.timestamps[self.first_offset]\n temp_distance = temp_timestamp - sec_timestamp\n\n if abs(temp_distance) > abs(min_distance):\n break\n else:\n self.first_offset += 1\n min_distance = temp_distance\n\n if abs(min_distance) < COPY_THRESHOLD:\n in_sync = True\n\n # Data from two devices is (now) in sync\n if in_sync:\n self.data_mapping[self.first_offset] = self.second_offset",
"def cross_join(left, right, suffixes=(\"_left\", \"_right\")):\n left[\"_TMP_KEY\"] = 1\n right[\"_TMP_KEY\"] = 1\n res = pd.merge(left, right, on=\"_TMP_KEY\", suffixes=suffixes).drop(\"_TMP_KEY\", axis=1)\n left.drop(\"_TMP_KEY\", axis=1, inplace=True)\n right.drop(\"_TMP_KEY\", axis=1, inplace=True)\n return res",
"def remerge_subset():\n import wbia\n\n ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n ibs2 = wbia.opendb('PZ_Master1')\n\n gids1, gids2 = ibs1.images(), ibs2.images()\n idxs1, idxs2 = ut.isect_indices(gids1.uuids, gids2.uuids)\n isect_gids1, isect_gids2 = gids1.take(idxs1), gids2.take(idxs2)\n\n assert all(\n set.issubset(set(a1), set(a2))\n for a1, a2 in zip(isect_gids1.annot_uuids, isect_gids2.annot_uuids)\n )\n\n annot_uuids = ut.flatten(isect_gids1.annot_uuids)\n # aids1 = ibs1.annots(ibs1.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n # aids2 = ibs2.annots(ibs2.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n aids1 = ibs1.annots(uuids=annot_uuids, asarray=True)\n aids2 = ibs2.annots(uuids=annot_uuids, asarray=True)\n import numpy as np\n\n to_aids2 = dict(zip(aids1, aids2))\n # to_aids1 = dict(zip(aids2, aids1))\n\n # Step 1) Update individual annot properties\n # These annots need updates\n # np.where(aids1.visual_uuids != aids2.visual_uuids)\n # np.where(aids1.semantic_uuids != aids2.semantic_uuids)\n\n annot_unary_props = [\n # 'yaws', 'bboxes', 'thetas', 'qual', 'species', 'unary_tags']\n 'yaws',\n 'bboxes',\n 'thetas',\n 'qual',\n 'species',\n 'case_tags',\n 'multiple',\n 'age_months_est_max',\n 'age_months_est_min', # 'sex_texts'\n ]\n to_change = {}\n for key in annot_unary_props:\n prop1 = getattr(aids1, key)\n prop2 = getattr(aids2, key)\n diff_idxs = set(np.where(prop1 != prop2)[0])\n if diff_idxs:\n diff_prop1 = ut.take(prop1, diff_idxs)\n diff_prop2 = ut.take(prop2, diff_idxs)\n logger.info('key = {!r}'.format(key))\n logger.info('diff_prop1 = {!r}'.format(diff_prop1))\n logger.info('diff_prop2 = {!r}'.format(diff_prop2))\n to_change[key] = diff_idxs\n if to_change:\n changed_idxs = ut.unique(ut.flatten(to_change.values()))\n logger.info('Found %d annots that need updated properties' % len(changed_idxs))\n logger.info('changing unary attributes: {!r}'.format(to_change))\n if False and ut.are_you_sure('apply change'):\n for key, idxs in to_change.items():\n subaids1 = aids1.take(idxs)\n subaids2 = aids2.take(idxs)\n prop1 = getattr(subaids1, key)\n # prop2 = getattr(subaids2, key)\n setattr(subaids2, key, prop1)\n else:\n logger.info('Annot properties are in sync. 
Nothing to change')\n\n # Step 2) Update annotmatch - pairwise relationships\n infr1 = wbia.AnnotInference(aids=aids1.aids, ibs=ibs1, verbose=3, autoinit=False)\n\n # infr2 = wbia.AnnotInference(aids=ibs2.annots().aids, ibs=ibs2, verbose=3)\n aids2 = ibs2.get_valid_aids(is_known=True)\n infr2 = wbia.AnnotInference(aids=aids2, ibs=ibs2, verbose=3)\n infr2.reset_feedback('annotmatch', apply=True)\n\n # map feedback from ibs1 onto ibs2 using ibs2 aids.\n fb1 = infr1.read_wbia_annotmatch_feedback()\n fb1_t = {(to_aids2[u], to_aids2[v]): val for (u, v), val in fb1.items()}\n fb1_df_t = infr2._pandas_feedback_format(fb1_t).drop('am_rowid', axis=1)\n\n # Add transformed feedback into ibs2\n infr2.add_feedback_from(fb1_df_t)\n\n # Now ensure that dummy connectivity exists to preserve origninal names\n # from wbia.algo.graph import nx_utils\n # for (u, v) in infr2.find_mst_edges('name_label'):\n # infr2.draw_aids((u, v))\n # cc1 = infr2.pos_graph.connected_to(u)\n # cc2 = infr2.pos_graph.connected_to(v)\n # logger.info(nx_utils.edges_cross(infr2.graph, cc1, cc2))\n # infr2.neg_redundancy(cc1, cc2)\n # infr2.pos_redundancy(cc2)\n\n infr2.relabel_using_reviews(rectify=True)\n infr2.apply_nondynamic_update()\n\n if False:\n infr2.wbia_delta_info()\n infr2.wbia_name_group_delta_info()\n\n if len(list(infr2.inconsistent_components())) > 0:\n raise NotImplementedError('need to fix inconsistencies first')\n # Make it so it just loops until inconsistencies are resolved\n infr2.prioritize()\n infr2.qt_review_loop()\n else:\n infr2.write_wbia_staging_feedback()\n infr2.write_wbia_annotmatch_feedback()\n infr2.write_wbia_name_assignment()\n\n # if False:\n # # Fix any inconsistency\n # infr2.start_qt_interface(loop=False)\n # test_nodes = [5344, 5430, 5349, 5334, 5383, 2280, 2265, 2234, 5399,\n # 5338, 2654]\n # import networkx as nx\n # nx.is_connected(infr2.graph.subgraph(test_nodes))\n # # infr = wbia.AnnotInference(aids=test_nodes, ibs=ibs2, verbose=5)\n\n # # randomly sample some new labels to verify\n # import wbia.guitool as gt\n # from wbia.gui import inspect_gui\n # gt.ensure_qapp()\n # ut.qtensure()\n # old_groups = ut.group_items(name_delta.index.tolist(), name_delta['old_name'])\n # del old_groups['____']\n\n # new_groups = ut.group_items(name_delta.index.tolist(), name_delta['new_name'])\n\n # from wbia.algo.hots import simulate\n # c = simulate.compare_groups(\n # list(new_groups.values()),\n # list(old_groups.values()),\n # )\n # ut.map_vals(len, c)\n # for aids in c['pred_splits']:\n # old_nids = ibs2.get_annot_nids(aids)\n # new_nids = ut.take_column(infr2.gen_node_attrs('name_label', aids), 1)\n # split_aids = ut.take_column(ut.group_items(aids, new_nids).values(), 0)\n # aid1, aid2 = split_aids[0:2]\n\n # if False:\n # inspect_gui.show_vsone_tuner(ibs2, aid1, aid2)\n # infr2.start_qt_interface(loop=False)\n\n # if False:\n # # import wbia\n # ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n # infr1 = wbia.AnnotInference(aids='all', ibs=ibs1, verbose=3)\n # infr1.initialize_graph()\n # # infr1.reset_feedback('staging')\n # infr1.reset_feedback('annotmatch')\n # infr1.apply_feedback_edges()\n # infr1.relabel_using_reviews()\n # infr1.apply_review_inference()\n # infr1.start_qt_interface(loop=False)\n # delta = infr2.match_state_delta()\n # logger.info('delta = %r' % (delta,))\n\n # infr2.ensure_mst()\n # infr2.relabel_using_reviews()\n # infr2.apply_review_inference()\n\n # mst_edges = infr2.find_mst_edges()\n # set(infr2.graph.edges()).intersection(mst_edges)\n\n return\n \"\"\"\n TODO:\n Task 2:\n 
Build AnnotInfr for ibs2 then add all decision from\n ibs1 to the internal feedback dict.\n\n Ensure that all other (esp old name-id related) edges are correctly\n placed, then overrite with new vals (\n make sure implicit vals do not cuase conflicts with new\n explicit vals, but old explicit vals should cause a conflict).\n Then just commit to staging and then commit to annotmatch and\n re-infer the names.\n \"\"\"\n\n # Print some info about the delta\n # def _to_tup(x):\n # return tuple(x) if isinstance(x, list) else x\n # changetype_list = list(zip(\n # delta['old_decision'], delta['new_decision'],\n # map(_to_tup, delta['old_tags']),\n # map(_to_tup, delta['new_tags'])))\n # changetype_hist = ut.dict_hist(changetype_list, ordered=True)\n # logger.info(ut.align(ut.repr4(changetype_hist), ':'))\n\n # import pandas as pd\n # pd.options.display.max_rows = 20\n # pd.options.display.max_columns = 40\n # pd.options.display.width = 160\n # pd.options.display.float_format = lambda x: '%.4f' % (x,)\n\n # a, b = 86, 6265\n # c, d = to_aids1[a], to_aids1[b]\n # inspect_gui.show_vsone_tuner(ibs2, a, b)\n # inspect_gui.show_vsone_tuner(ibs1, to_aids1[a], to_aids1[b])\n # am1 = ibs1.get_annotmatch_rowids_between([to_aids1[a]],\n # [to_aids1[b]])\n # am2 = ibs2.get_annotmatch_rowids_between([a], [b])\n # logger.info(ibs1.db.get_table_csv('annotmatch', rowids=am1))\n # logger.info(ibs2.db.get_table_csv('annotmatch', rowids=am2))\n\n # inspect_gui.show_vsone_tuner(ibs2, 8, 242)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 103)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 6265)",
"def merge_overwrap(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n for j in range(Ly):\n cff = z_u_w[j,N] - z_u_w[j,0]\n if self.hbls[j] + self.hbbl[j] > cff:\n self.hbls[j] = cff\n self.hbbl[j] = cff",
"def _compare_pre_post_sampling(X_train, y_train, X_new, y_new):\n train_data_info = _basic_data_info(X_train, y_train)\n new_data_info = _basic_data_info(X_new, y_new)\n\n print(\"\\nNum samples increased from {} to {} samples\\n\".format(train_data_info[\"Num_samples\"], new_data_info[\"Num_samples\"]))\n\n # Create pandas Dataframe\n df = pd.DataFrame(np.nan, index = train_data_info['classes'], columns = ['og_dist', 'og_prop', 'new_dist', 'new_prop'])\n df.iloc[:, 0] = train_data_info[\"counts\"]\n df.iloc[:, 1] = train_data_info[\"percs\"]\n df.iloc[:, 2] = new_data_info[\"counts\"]\n df.iloc[:, 3] = new_data_info[\"percs\"]\n\n df.index.name = \"classes\"\n\n # Difference in distributions\n print(\"Count comparison is as follows: \\n\", df)",
"def merge(self, other):\r\n self._train_datas = np.concatenate(\r\n [self._train_datas, other._train_datas], 0)\r\n self._train_labels = np.concatenate(\r\n [self._train_labels, other._train_labels], 0)",
"def combine_frames (data0, data1, datatype) :\n data0.n_processed += 1\n if (data1 is None) :\n return data0\n if (isinstance(data1, null_data)) :\n if (data1.file_error) :\n data0.n_file_error += 1\n elif (data1.low_signal) :\n data0.n_low_signal += 1\n elif (data1.wrong_bravais) :\n data0.n_wrong_bravais += 1\n elif (data1.wrong_cell) :\n data0.n_wrong_cell += 1\n elif (getattr(data1,\"reason\",None) is not None):\n if str(data1.reason)!=\"\":\n data0.failure_modes[str(data1.reason)] = data0.failure_modes.get(str(data1.reason),0) + 1\n elif repr(type(data1.reason))!=\"\":\n data0.failure_modes[repr(type(data1.reason))] = data0.failure_modes.get(repr(type(data1.reason)),0) + 1\n else:\n data0.failure_modes[\"other reasons\"] = data0.failure_modes.get(\"other reasons\",0) + 1\n return data0\n if (data1.accept) :\n data0.n_accepted += 1\n data0.completeness += data1.completeness\n data0.completeness_predictions += data1.completeness_predictions\n data0.summed_N += data1.summed_N\n data0.summed_weight += data1.summed_weight\n data0.summed_wt_I += data1.summed_wt_I\n data0.ISIGI.extend(data1.ISIGI)\n else :\n data0.n_low_corr += 1\n data0.uc_values.add_cell(data1.indexed_cell,\n rejected=(not data1.accept))\n if not data0.params.short_circuit:\n data0.observations.append(data1.n_obs)\n if (data1.n_obs > 0) :\n frac_rejected = data1.n_rejected / data1.n_obs\n data0.rejected_fractions.append(frac_rejected)\n data0.d_min_values.append(data1.d_min)\n data0.corr_values.append(data1.corr)\n data0.wavelength.append(data1.wavelength)\n data0.finished_db_mgr.sequencer += data1.finished_db_mgr.sequencer\n return data0",
"def test_process_barcode_paired_end_data_orientation_no_match(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"GGTTCCAA\", np.arange(3, 11, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'AYA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'ATA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n fastq2_out_not_oriented = FakeOutFile()\r\n\r\n # With no matches, should write to the not_oriented files, and keep\r\n # in the same order of file 1 and file 2\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=False, rev_comp_bc2=False,\r\n attempt_read_orientation=True, forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq1_out_not_oriented=fastq1_out_not_oriented,\r\n fastq2_out_not_oriented=fastq2_out_not_oriented)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'ATCGAGGT', '+', \"$%&'($%&\", '']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCGATCGATCGATCG', '+',\r\n ')*+,-./01234567', '']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)\r\n\r\n actual_reads_not_oriented = fastq2_out_not_oriented.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCCAA', '+', \"'()*+\", '']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def synchronise_signals(in_signal_1, in_signal_2, time_interval = -1, fs = 100):\n\n # signal segmentation\n in_signal_1 = in_signal_1[:time_interval*fs]\n in_signal_2 = in_signal_2[:time_interval*fs]\n\n #in_signal_2 = in_signal_2 - gravitational_filter(in_signal_2, fs)\n in_signal_1 = in_signal_1 * (-1)\n\n #in_signal_1[time_array[0] * fs:time_array[1] * fs] = in_signal_1[time_array[0] * fs:time_array[1] * fs] + 200\n #in_signal_2[time_array[4] * fs:time_array[5] * fs] = in_signal_2[time_array[4] * fs:time_array[5] * fs] + 200\n #in_signal_1[time_array[2] * fs:time_array[3] * fs] = in_signal_1[time_array[2] * fs:time_array[3] * fs] + 200\n #in_signal_2[time_array[6] * fs:time_array[7] * fs] = in_signal_2[time_array[6] * fs:time_array[7] * fs] + 200\n\n\n # signal normalisation\n mean_1, std_1, mean_2, std_2 = [np.mean(in_signal_1), np.std(in_signal_1), np.mean(in_signal_2),\n np.std(in_signal_2)]\n signal_1 = in_signal_1 - mean_1\n signal_1 /= std_1\n signal_2 = in_signal_2 - mean_2\n signal_2 /= std_2\n\n\n # zero padding signals so that they are of same length, this facilitates the calculation because\n # then the delay between both signals can be directly calculated\n # zero padding only if needed\n #if (len(signal_1) != len(signal_2)):\n\n # check which signal has to be zero padded\n # if (len(signal_1) < len(signal_2)):\n\n # pad first signal\n # signal_1 = np.append(signal_1, np.zeros(len(signal_2) - len(signal_1)))\n\n # else:\n\n # pad second signal\n # signal_2 = np.append(signal_2, np.zeros(len(signal_1) - len(signal_2)))\n\n\n N = len(signal_1) + len(signal_2) - 1\n # Calculate the cross-correlation between the two signals.\n #correlation = np.correlate(signal_1, signal_2, 'full')\n f1 = fft(signal_1, N)\n f2 = np.conj(fft(signal_2, N))\n correlation = np.real(ifft(f1 * f2))\n #correlation = fftshift(cc)\n\n\n # calculate tau / shift between both signals\n #tau = int(np.argmax(correlation) - (len(correlation)) / 2)\n tau = np.argmax(correlation)\n print(tau)\n if tau > len(correlation) // 2:\n tau = np.argmax(correlation) - len(correlation)\n print(tau)\n\n # crop signals to original length (removing zero padding)\n #signal_1 = signal_1[:len(in_signal_1)]\n #signal_2 = signal_2[:len(in_signal_2)]\n\n\n # check which signal has to be sliced\n if (tau < 0):\n # tau negative --> second signal lags\n signal_2 = signal_2[np.abs(tau):]\n\n elif (tau > 0):\n # tau positive ---> firs signal lags\n signal_1 = signal_1[np.abs(tau):]\n\n\n # revert signals to orignal scale\n result_signal_1 = signal_1 * std_1 + mean_1\n result_signal_2 = signal_2 * std_2 + mean_2\n\n return tau, result_signal_1, result_signal_2",
"def join_columns(self, other: \"MultiRegionTimeseriesDataset\") -> \"MultiRegionTimeseriesDataset\":\n if not other.latest_data.empty:\n raise NotImplementedError(\"No support for joining other with latest_data\")\n other_df = other.data_with_fips.set_index([CommonFields.LOCATION_ID, CommonFields.DATE])\n self_df = self.data_with_fips.set_index([CommonFields.LOCATION_ID, CommonFields.DATE])\n other_geo_columns = set(other_df.columns) & set(GEO_DATA_COLUMNS)\n other_ts_columns = (\n set(other_df.columns) - set(GEO_DATA_COLUMNS) - set(TimeseriesDataset.INDEX_FIELDS)\n )\n common_ts_columns = other_ts_columns & set(self.data_with_fips.columns)\n if common_ts_columns:\n # columns to be joined need to be disjoint\n raise ValueError(f\"Columns are in both dataset: {common_ts_columns}\")\n common_geo_columns = list(set(self.data_with_fips.columns) & other_geo_columns)\n # TODO(tom): fix geo columns check, no later than when self.data is changed to contain only\n # timeseries\n # self_common_geo_columns = self_df.loc[:, common_geo_columns].fillna(\"\")\n # other_common_geo_columns = other_df.loc[:, common_geo_columns].fillna(\"\")\n # try:\n # if (self_common_geo_columns != other_common_geo_columns).any(axis=None):\n # unequal_rows = (self_common_geo_columns != other_common_geo_columns).any(axis=1)\n # _log.info(\n # \"Geo data unexpectedly varies\",\n # self_rows=self_df.loc[unequal_rows, common_geo_columns],\n # other_rows=other_df.loc[unequal_rows, common_geo_columns],\n # )\n # raise ValueError(\"Geo data unexpectedly varies\")\n # except Exception:\n # _log.exception(f\"Comparing df {self_common_geo_columns} to {other_common_geo_columns}\")\n # raise\n combined_df = pd.concat([self_df, other_df[list(other_ts_columns)]], axis=1)\n return MultiRegionTimeseriesDataset.from_timeseries_df(\n combined_df.reset_index()\n ).append_latest_df(self.latest_data_with_fips.reset_index())",
"def Merge(self, other):\n\n # Logging just in case\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: before\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: deleted\", %s);'\n %(other.persistant['id'], \n sql.FormatSqlValue('details',\n repr(other.persistant))))\n\n # Fields which can be summed\n for f in ['plays', 'skips']:\n self.persistant[f] = (self.persistant.get(f, 0) +\n other.persistant.get(f, 0))\n\n # Date fields where we take the newest\n for f in ['last_played', 'last_skipped', 'last_action']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a > b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Date fields where we take the oldest\n for f in ['creation_time']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a < b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Fields where we only clobber ours if we don't have a value\n for f in ['artist', 'album', 'song']:\n if not self.persistant.has_key(f) or not self.persistant[f]:\n self.persistant[f] = other.persistant.get(f, None)\n\n # Sometimes the number is a placeholder\n if self.persistant.has_key('number') and self.persistant['number'] == -1:\n self.persistant['number'] = other.persistant.get('number', -1)\n if not self.persistant.has_key('number'):\n self.persistant['number'] = other.persistant.get('number', -1)\n\n # Update the id in the tags table\n tags = self.db.GetRows('select tag from tags where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: tags: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(tags))))\n\n try:\n self.db.ExecuteSql('update tags set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n except:\n # This can happen if the is already a matching tag for the first track\n pass\n\n # Update the id in the paths table\n paths = self.db.GetRows('select path from paths where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: paths: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(paths))))\n \n self.db.ExecuteSql('update paths set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: after\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('commit;')",
"def _associate_unconnected_records(eia_generators_merged):\n # we're associating on the plant/pm level... but we only want to associated\n # these unassocaited records w/ the primary fuel type from stack_generators\n # so we're going to merge on energy_source_code_num and\n idx_pm = ['plant_id_eia', 'prime_mover_code',\n 'energy_source_code_num', 'report_date', ]\n # we're going to only associate these unconnected fuel records w/\n # the primary fuel so we don't have to deal w/ double counting\n connected_mask = eia_generators_merged.generator_id.notnull()\n eia_generators_connected = (\n eia_generators_merged[connected_mask]\n )\n eia_generators_unconnected = (\n eia_generators_merged[~connected_mask]\n .rename(columns={'fuel_type': 'fuel_type_unconnected'})\n .assign(energy_source_code_num='energy_source_code_1')\n .groupby(by=idx_pm).sum(min_count=1)\n .reset_index()\n )\n eia_generators = (\n pd.merge(\n eia_generators_connected,\n eia_generators_unconnected[\n idx_pm + ['net_generation_mwh_gf_tbl', 'fuel_consumed_mmbtu']],\n on=idx_pm,\n suffixes=('', '_unconnected'),\n how='left'\n )\n .assign(\n # we want the main and the unconnected net gen to be added together\n # but sometimes there is no main net gen and sometimes there is no\n # unconnected net gen\n net_generation_mwh_gf_tbl=lambda x: np.where(\n x.net_generation_mwh_gf_tbl.notnull()\n | x.net_generation_mwh_gf_tbl_unconnected.notnull(),\n x.net_generation_mwh_gf_tbl.fillna(0)\n + x.net_generation_mwh_gf_tbl_unconnected.fillna(0),\n np.nan\n ),\n fuel_consumed_mmbtu=lambda x: np.where(\n x.fuel_consumed_mmbtu.notnull()\n | x.fuel_consumed_mmbtu_unconnected.notnull(),\n x.fuel_consumed_mmbtu.fillna(0)\n + x.fuel_consumed_mmbtu_unconnected.fillna(0),\n np.nan\n ),\n ) # we no longer need these _unconnected columns\n .drop(columns=['net_generation_mwh_gf_tbl_unconnected',\n 'fuel_consumed_mmbtu_unconnected'])\n )\n return eia_generators",
"def cross(df1, df2, **kwargs):\r\n df1['_tmpkey'] = 1\r\n df2['_tmpkey'] = 1\r\n\r\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\r\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\r\n\r\n df1.drop('_tmpkey', axis=1, inplace=True)\r\n df2.drop('_tmpkey', axis=1, inplace=True)\r\n return res",
"def setup_merged_df(obs_df):\n obs_df = obs_df.assign(height=obs_df[\"measurement\"], weight=obs_df[\"measurement\"])\n obs_df.loc[obs_df.param == \"WEIGHTKG\", \"height\"] = np.NaN\n obs_df.loc[obs_df.param == \"HEIGHTCM\", \"weight\"] = np.NaN\n heights = obs_df[obs_df.param == \"HEIGHTCM\"]\n weights = obs_df[obs_df.param == \"WEIGHTKG\"]\n merged = heights.merge(\n weights, on=[\"subjid\", \"agedays\", \"ageyears\", \"sex\"], how=\"outer\"\n )\n only_needed_columns = merged.drop(\n columns=[\n \"param_x\",\n \"measurement_x\",\n \"clean_value_x\",\n \"weight_x\",\n \"id_y\",\n \"param_y\",\n \"measurement_y\",\n \"clean_value_y\",\n \"height_y\",\n ]\n )\n clean_column_names = only_needed_columns.rename(\n columns={\n \"clean_cat_x\": \"height_cat\",\n \"include_x\": \"include_height\",\n \"height_x\": \"height\",\n \"clean_cat_y\": \"weight_cat\",\n \"include_y\": \"include_weight\",\n \"weight_y\": \"weight\",\n \"reason_y\": \"reason\",\n \"id_x\": \"id\",\n }\n )\n clean_column_names[\"bmi\"] = clean_column_names[\"weight\"] / (\n (clean_column_names[\"height\"] / 100) ** 2\n )\n clean_column_names[\"rounded_age\"] = np.around(clean_column_names.ageyears)\n clean_column_names[\"include_both\"] = (\n clean_column_names[\"include_height\"] & clean_column_names[\"include_weight\"]\n )\n return clean_column_names",
"def cross(df1, df2, **kwargs):\r\n df1['_tmpkey'] = 1\r\n df2['_tmpkey'] = 1\r\n\r\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\r\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\r\n\r\n df1.drop('_tmpkey', axis=1, inplace=True)\r\n df2.drop('_tmpkey', axis=1, inplace=True)\r\n\r\n return res",
"def combine_stack_and_label(filesource_dataset_1,filesource_dataset_2,num_sample):\n\n x = filesource_dataset_1[0]\n x_utterances = len(filesource_dataset_1)\n for idx in tqdm(range(1, x_utterances)):\n x = np.hstack((x, filesource_dataset_1[idx]))\n #print(x.shape)\n y = filesource_dataset_2[0]\n y_utterances = len(filesource_dataset_2)\n for idx in tqdm(range(1, y_utterances)):\n y = np.hstack((y, filesource_dataset_2[idx]))\n X = np.hstack((x,y))\n Y = np.hstack((np.ones((x.shape[1])),np.zeros((y.shape[1]))))\n\n if (X.shape[1] > num_sample):\n idx = np.random.choice(X.shape[1], num_sample)\n X = X[:, idx]\n Y = Y[idx]\n return X, Y",
"def downsample_sam(self, factor):",
"def stitch_unw2frames(unw_data1: NDArray, conn_data1: NDArray, rdict1: dict,\n unw_data2: NDArray, conn_data2: NDArray, rdict2: dict,\n correction_method: Optional[str] = 'cycle2pi',\n range_correction: Optional[bool] = False,\n verbose: Optional[bool] = False) -> \\\n Tuple[NDArray, NDArray, dict]:\n\n # Adjust connected Component in Frame 2 to start\n # with last component number in Frame-1\n\n conn_data2 = conn_data2 + np.nanmax(conn_data1)\n conn_data2[conn_data2 == np.nanmax(conn_data1)] = 0.0\n\n # GET FRAME OVERLAP\n box_1, box_2 = frame_overlap(\n rdict1['SNWE'], rdict2['SNWE'],\n [rdict1['LAT_SPACING'], rdict1['LON_SPACING']],\n [rdict2['LAT_SPACING'], rdict2['LON_SPACING']])\n\n # LOOP OVER COMPONENTS WITHIN THE OVERLAP\n # Get connected component pairs\n if verbose:\n print('\\nGetting overlapping components')\n\n # Forward correction\n conn_pairs = get_overlapping_conn(conn_data1[box_1],\n conn_data2[box_2])\n\n for pair in conn_pairs:\n diff, cycles2pi, range_corr = _integer_2pi_cycles(\n unw_data1[box_1],\n conn_data1[box_1],\n np.float32(pair[1]),\n unw_data2[box_2],\n conn_data2[box_2],\n np.float32(pair[0]),\n range_correction=range_correction,\n print_msg=verbose)\n\n # Correction methods: mean difference, 2pi integer cycles\n if correction_method == 'cycle2pi':\n correction = cycles2pi\n elif correction_method == 'meanoff':\n correction = diff\n range_correction = False\n else:\n raise ValueError(f'Wrong correction method {correction_method}, ',\n 'Select one of available: \"cycle2pi\", \"meanoff\"')\n\n # add range correction\n if range_correction:\n correction += range_corr\n ik = conn_data2 == np.float32(pair[0])\n unw_data2[ik] += correction\n conn_data2[ik] = np.float32(pair[1])\n\n # Backward correction\n conn_reverse = get_overlapping_conn(conn_data2[box_2],\n conn_data1[box_1])\n\n # Keep only different componentes in pairing\n ik = np.where(conn_reverse[:, 0] != conn_reverse[:, 1])\n conn_reverse = conn_reverse[ik]\n\n for pair in conn_reverse:\n print('Going backward!') if verbose else None\n diff, cycles2pi, range_corr = _integer_2pi_cycles(\n unw1=unw_data1[box_1],\n concom1=conn_data1[box_1],\n ix1=np.float32(pair[0]),\n unw2=unw_data2[box_2],\n concom2=conn_data2[box_2],\n ix2=np.float32(pair[1]),\n range_correction=range_correction,\n print_msg=verbose)\n\n # Correction methods: mean difference, 2pi integer cycles\n if correction_method == 'cycle2pi':\n correction = cycles2pi\n elif correction_method == 'meanoff':\n correction = diff\n range_correction = False\n else:\n raise ValueError(f'Wrong correction method {correction_method}, ',\n 'Select one of available: \"cycle2pi\", \"meanoff\"')\n\n # add range correction\n if range_correction:\n correction += range_corr\n\n ik = conn_data1 == np.float32(pair[0])\n unw_data1[ik] -= correction\n conn_data1[ik] = np.float32(pair[1])\n\n # Update connected component frame 2 naming\n idx1 = np.max(conn_data1)\n idx = np.unique(conn_data2[conn_data2 > idx1]).compressed()\n conn_data2 = update_connect_components(conn_data2, idx, idx1+1)\n\n # Combine corrected unwrappedPhase and connectedComponents arrays\n comb_snwe = [rdict1['SNWE'], rdict2['SNWE']]\n comb_latlon = [[rdict1['LAT_SPACING'], rdict1['LON_SPACING']],\n [rdict2['LAT_SPACING'], rdict2['LON_SPACING']]]\n\n (combined_unwrap,\n combined_snwe,\n combined_latlon_spacing) = combine_data_to_single(\n [_nan_filled_array(unw_data1),\n _nan_filled_array(unw_data2)],\n comb_snwe, comb_latlon,\n method='mean')\n combined_conn, _, _ = combine_data_to_single(\n 
[_nan_filled_array(conn_data1),\n _nan_filled_array(conn_data2)],\n comb_snwe, comb_latlon,\n method='min')\n # combined dict\n combined_dict = dict(SNWE=combined_snwe,\n LAT_SPACING=combined_latlon_spacing[0],\n LON_SPACING=combined_latlon_spacing[1])\n\n combined_unwrap = np.ma.masked_invalid(combined_unwrap)\n np.ma.set_fill_value(combined_unwrap, 0.)\n combined_conn = np.ma.masked_invalid(combined_conn)\n np.ma.set_fill_value(combined_conn, -1.)\n\n return combined_unwrap, combined_conn, combined_dict"
] | [
"0.6805154",
"0.58178794",
"0.56193626",
"0.54164696",
"0.52387846",
"0.52243114",
"0.5218583",
"0.52179146",
"0.5215392",
"0.52110416",
"0.52046835",
"0.51700354",
"0.5165345",
"0.51637405",
"0.51528937",
"0.51515895",
"0.51488835",
"0.5132604",
"0.51253265",
"0.5119885",
"0.5115748",
"0.5095478",
"0.5081523",
"0.5051182",
"0.50435674",
"0.5043098",
"0.5023684",
"0.5012056",
"0.5002782",
"0.49976453"
] | 0.60301346 | 1 |
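Several of the negatives in the row above (cross_join, cross) build a cartesian product by merging on a temporary constant key. A minimal, self-contained sketch of that pattern follows; the frames and column names are made up for illustration and are not part of the dataset.

import pandas as pd

# Two small frames with no shared key; names are illustrative only.
left = pd.DataFrame({"a": [1, 2]})
right = pd.DataFrame({"b": ["x", "y"]})

# Cartesian product via a temporary constant join key, mirroring the
# cross_join/cross helpers listed among the negatives above.
left["_TMP_KEY"] = 1
right["_TMP_KEY"] = 1
product = pd.merge(left, right, on="_TMP_KEY").drop("_TMP_KEY", axis=1)
left.drop("_TMP_KEY", axis=1, inplace=True)
right.drop("_TMP_KEY", axis=1, inplace=True)
print(product)  # 4 rows: every row of left paired with every row of right

The temporary key is dropped from both inputs afterwards, so helpers written this way leave their arguments unchanged.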
Given a threshold, return a True/False series indicating whether s prices are inside/outside the (-threshold, threshold) range. | def inout_range(
s: pd.Series, threshold: float = 0, inout: Literal["inside", "outside"] = "inside"
) -> pd.Series:
if threshold == 0:
raise ValueError("theshold cannot be zero, use: <zero_crosser>")
threshold = abs(threshold)
excess = s.abs() - threshold
if inout == "outside":
result = excess > 0
elif inout == "inside":
result = excess < 0
else:
raise ValueError("'inout' parameter must be either 'inside' or 'outside'")
result.name = inout
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isPrice(price, high, low):\n return (price >= low) and (price <= high)",
"def is_in_interval(self, low, high, value):\n return low <= value and value <= high",
"def chart_price(price_series, signal_series, threshold=0):\n chart_data = pd.DataFrame()\n chart_data[\"out\"] = price_series\n chart_data[\"long\"] = (signal_series > threshold) * price_series\n chart_data[\"short\"] = (signal_series < -threshold) * price_series\n chart_data.replace(0, np.nan, inplace=True)\n return chart_data.plot(figsize=(20, 10), grid=True)",
"def in_interval(value: float, s: float, e: float) -> bool:\n lower = value >= s\n upper = value <= e\n return lower and upper",
"def is_in_range(self, price):\r\n return price <= self.pmax and price >= self.pmin",
"def on(series, on_power_threshold=DEFAULT_ON_POWER_THRESHOLD):\n # TODO: replace this evil hack to handle dataframes(!)\n if isinstance(series, pd.DataFrame):\n series = series.icol(0)\n\n when_on = series >= on_power_threshold\n return when_on",
"def _in_interval(value, low, up):\n if low <= value <= up:\n return True\n else:\n return False",
"def _is_out_of_range(self, signal, y_range, threshold):\n out_of_range = [s for s in signal if s < y_range.min or s > y_range.max]\n out_of_range_percentage = len(out_of_range) / len(signal)\n\n return out_of_range_percentage > threshold",
"def thresholdInterval(self, threshold, interval):\n assert type(threshold) == float\n\n try:\n return float(interval.text.strip('\"')) > threshold\n except:\n sys.exit(\"thresholdInterval(): Unable to compare \" +\n interval.text + \" and \" + str(threshold))",
"def SpikesBetween(self, t_start, t_end):\n sp_bool = np.logical_and(np.array(self.spikes) >= t_start, np.array(self.spikes) < t_end)\n return np.sum(sp_bool)",
"def a_include_b(a: pd.Series, b: pd.Series) -> bool:\n return (a['high'] >= b['high']) and (a['low'] <= b['low'])",
"def crosser(ind: pd.Series, threshold: float) -> pd.Series:\n df = pd.DataFrame({\"ind\": ind})\n df[\"above_below\"] = (df[\"ind\"] >= threshold) * 1 - (df[\"ind\"] < threshold) * 1\n df[\"blip\"] = ((df[\"above_below\"].shift() + df[\"above_below\"]) == 0) * df[\n \"above_below\"\n ]\n df = df.dropna()\n return df[\"blip\"]",
"def _threshold_mask(data, mask, rms=None, threshold=0.0):\n if rms is None or threshold <= 0.0:\n return mask.astype('bool')\n rms = np.atleast_1d(rms)\n if rms.ndim == 2:\n sigma_mask = abs(data) >= (threshold * rms)[None, :, :]\n else:\n sigma_mask = abs(data) >= threshold * rms\n return np.logical_and(mask, sigma_mask).astype('bool')",
"def in_range(data, minval=-np.inf, maxval=np.inf):\n return (minval <= data) & (data <= maxval)",
"def overlay_thresholding_function(threshold, positive=True):\n # from the interface class definition above, there will be 3 values\n # for the thresh type: inactive, less than, greater than\n t = threshold[0]\n if threshold[-1] == 'inactive':\n if positive:\n return lambda x: np.ones(x.shape, 'B')\n return lambda x: np.zeros(x.shape, 'B')\n elif threshold[-1] == 'less than':\n if positive:\n return lambda x: np.less(x,t)\n return lambda x: np.greater_equal(x,t)\n elif threshold[-1] == 'greater than':\n if positive:\n return lambda x: np.greater(x,t)\n return lambda x: np.less_equal(x,t)\n else:\n print 'unrecognized thresholding parameters:', threshold",
"def get_regions_above_threshold(self, threshold, values):\n\n xlocs = arange(0, len(values))\n\n # finds all turns, between above and below threshold\n # and generate areas to call peaks in, also\n # makes sure starting and stopping above maxima is caught\n # threshold is at or equal to values, need to correct this\n starts = xlocs[r_[True, diff(values >= threshold)] & (values >= threshold)]\n stops = xlocs[r_[diff(values >= threshold), True] & (values >= threshold)]\n # add to fix off by one bug\n stops += + 1\n\n # error correction incase my logic is wrong here, assuming that starts\n # and stops are always paired, and the only two cases of not being\n # pared are if the spline starts above the cutoff or the spline starts\n # below the cutoff\n assert len(starts) == len(stops)\n\n ### important note: for getting values x->y [inclusive]\n # you must index an array as ar[x:(y+1)]|\n # or else you end up with one-too-few values, the second\n # index is non-inclusive\n\n # gets all local minima, function taken from:\n # http://stackoverflow.com/questions/4624970/finding-local-maxima-minima-with-numpy-in-a-1d-numpy-array\n # Can't have local minima at start or end, that would get caught by\n # previous check, really need to think about that more\n\n local_minima = self.find_local_minima(values)\n\n # append to list any local minima above threshold\n for i, minima in enumerate(local_minima):\n if minima and values[i] >= threshold:\n starts = append(starts, i)\n stops = append(stops, i)\n\n starts = array(sorted(set(starts)))\n stops = array(sorted(set(stops)))\n starts_and_stops = []\n\n # making sure we aren't in some strange state\n assert len(starts) == len(stops)\n\n # get all contigous start and stops pairs\n while len(starts) > 0:\n stop_list = stops[stops > starts[0]]\n\n # if there are no more stops left exit the loop and return the\n # currently found starts and stops\n if len(stop_list) == 0:\n break\n stop = stop_list[0]\n starts_and_stops.append((starts[0], stop))\n starts = starts[starts >= stop]\n\n starts = array([x[0] for x in starts_and_stops])\n stops = array([x[1] for x in starts_and_stops])\n return starts_and_stops, starts, stops",
"def in_period(self, value, lower_bound, upper_bound, func=lambda _x: _x):\n if ((lower_bound==None) and (upper_bound==None)):\n return True\n if (lower_bound==None):\n return func(value) <= upper_bound\n if (upper_bound==None):\n return func(value) >= lower_bound\n return ((func(value) >= lower_bound) and (func(value) <= upper_bound))",
"def _single_value_min(data, threshold):\r\n amin = np.min(data)\r\n amax = np.max(data)\r\n limit = amin + (amax - amin) * threshold\r\n return data < limit",
"def is_close(x, y, thresh=1e-8):\n\n diff = x - y\n return diff > (-thresh) and diff < thresh",
"def __isSupport(self, df, i):\n\n c1 = df['low'][i] < df['low'][i - 1]\n c2 = df['low'][i] < df['low'][i + 1]\n c3 = df['low'][i + 1] < df['low'][i + 2]\n c4 = df['low'][i - 1] < df['low'][i - 2]\n support = c1 and c2 and c3 and c4\n return support",
"def within_value(v1, v2):\n percentage = 0.1\n error_allowed = percentage * v1\n high = v1 + error_allowed\n low = v1 - error_allowed\n\n return low <= v2 <= high",
"def in_range(cls, lhs, rhs):\n return rhs[0] <= lhs <= rhs[1]",
"def g_in_bounds(x, lo, hi):\n\n return (x >= lo) and (x <= hi)",
"def within(vals, extr, edges=True, all=False, inv=False):\n\n extr_bnds = minmax(extr)\n\n # Include edges for WITHIN bounds (thus not including is outside)\n if(edges): retval = np.asarray(((vals >= extr_bnds[0]) & (vals <= extr_bnds[1])))\n # Don't include edges for WITHIN (thus include them for outside)\n else: retval = np.asarray(((vals > extr_bnds[0]) & (vals < extr_bnds[1])))\n\n # Convert to single return value\n if(all): retval = np.all(retval)\n\n # Invert results\n if(inv): retval = np.invert(retval)\n\n return retval",
"def discrete_potential(function, threshold):\n\n return np.where(function >= threshold, 1, 0)",
"def is_in_interval(self, x):\r\n \r\n bool_intval = False\r\n\r\n if np.logical_and(x > self.intval[0], x < self.intval[1]):\r\n bool_intval = True\r\n \r\n return bool_intval",
"def round_using_t(prediction, threshold):\n return (prediction >= threshold).astype('int')",
"def check_threshold(data, threshold, above=True, flexibility=.02, cushion=3):\n if above:\n across = (data > threshold) * 1\n across_secondary = (data > (threshold * (1-flexibility))) * 1\n else:\n across = (data < threshold) * 1\n across_secondary = (data < (threshold * (1+flexibility))) * 1\n\n index_backdown = [i + 1 for i, x in enumerate(np.diff(across_secondary)) if x == -1]\n step_down = np.diff(np.concatenate(([0.], np.cumsum(across)[index_backdown])))\n across[index_backdown] = -step_down\n test_across = np.cumsum(across)\n times_across = sum(test_across == cushion)\n\n return across, test_across, times_across",
"def in_range_bev(\n self, point_range: Union[Tensor, np.ndarray,\n Sequence[float]]) -> Tensor:\n in_range_flags = ((self.bev[:, 0] > point_range[0])\n & (self.bev[:, 1] > point_range[1])\n & (self.bev[:, 0] < point_range[2])\n & (self.bev[:, 1] < point_range[3]))\n return in_range_flags",
"def _single_value_max(data, threshold):\r\n amin = np.min(data)\r\n amax = np.max(data)\r\n limit = amax - (amax - amin) * threshold\r\n return data > limit"
] | [
"0.6682226",
"0.6377357",
"0.618865",
"0.6125125",
"0.5998406",
"0.5900148",
"0.58637464",
"0.5804012",
"0.5767467",
"0.574018",
"0.5720942",
"0.56339824",
"0.5613235",
"0.5606552",
"0.5596668",
"0.5575905",
"0.5566276",
"0.5561228",
"0.55224955",
"0.5517698",
"0.55140156",
"0.55099803",
"0.5506471",
"0.5500499",
"0.54959357",
"0.54800284",
"0.5453001",
"0.541418",
"0.5382468",
"0.5355741"
] | 0.66740066 | 1 |
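The inout_range definition in the row above returns a boolean series named after the chosen mode. A small usage sketch, with the definition repeated from the document field and a made-up price series, illustrates the inside/outside behaviour.

from typing import Literal

import pandas as pd


def inout_range(
    s: pd.Series, threshold: float = 0, inout: Literal["inside", "outside"] = "inside"
) -> pd.Series:
    # Definition repeated from the document field of the row above.
    if threshold == 0:
        raise ValueError("threshold cannot be zero, use: <zero_crosser>")
    threshold = abs(threshold)
    excess = s.abs() - threshold
    if inout == "outside":
        result = excess > 0
    elif inout == "inside":
        result = excess < 0
    else:
        raise ValueError("'inout' parameter must be either 'inside' or 'outside'")
    result.name = inout
    return result


# Made-up prices for illustration only.
prices = pd.Series([-1.5, -0.2, 0.3, 2.0])
print(inout_range(prices, threshold=1.0, inout="inside").tolist())   # [False, True, True, False]
print(inout_range(prices, threshold=1.0, inout="outside").tolist())  # [True, False, False, True]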
entry is the output of _range_entry; entry will be signed the same as price when entering range. | def _signed_range_entry(entry: pd.Series, sign: pd.Series) -> pd.Series:
return (_range_entry(entry) * np.sign(sign)).astype(int) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_range_entry(var, win):\n\tentry = Gtk.Scale.new(Gtk.Orientation.HORIZONTAL,\n\t\tGtk.Adjustment(var.get(), var.type.low, var.type.up))\n\tentry.set_value(var.get())\n\tentry.set_digits(0)\n\tentry.set_hexpand(True)\n\tobs = RangeEntryObserver(var)\n\tentry.connect(\"change-value\", obs.on_change_value)\n\treturn entry",
"def checkEntryInRange(self, entry, min=(-1.0 * float('inf')), max=float('inf'), inclusive=True, errorTitle='Entry Out Of Range', errorMessage='', errorDescription=None):\n\n # if user does not provide an error description generate one automatically\n if not errorDescription:\n errorDescription = 'relevant entry name: ' + str(entry.objectName())\n\n # check to make sure the entry is populated\n if entry.text() != '':\n val = float(int(float(entry.text())))\n else:\n raise ValueError, (errorTitle, errorMessage, errorDescription)\n\n # check to make sure value is in range\n if inclusive:\n if val < min or val > max:\n raise ValueError, (errorTitle, errorMessage, errorDescription)\n else:\n if val <= min or val >= max:\n raise ValueError, (errorTitle, errorMessage, errorDescription)\n\n return int(val)",
"def provider_range_lookup(self, record):\n pass",
"def entrycalc(self, lows, o):\n price = float(self.price)\n \n #print(nextTrade==price,nextTradeSeller==price)\n for i in range(2, self.entries + 1):\n if len(self.entryprices) > 0:\n avgentryprice = sum(self.entryprices) / len(self.entryprices)\n #if previous entry has been placed and current hasn't and other args are met\n if self.dentry[\"placedOrder\" + str(i - 1) + self.chartnumber] and price < avgentryprice and float(price) < lows[-2] and float(price) < float(o) and not self.dentry[\"placedOrder\" + str(i) + self.chartnumber]:\n self.dentry[\"placedOrder\" + str(i) + self.chartnumber] = True\n #add these to dict\n print(\"trade number\",str(i))\n self.dentry[\"tradeEntries\" + str(i) + self.chartnumber] += 1\n #self.totalentries += 1\n \n #I changed these from price to nextTrade\n self.dentry[\"orderPrice\" + str(i) + self.chartnumber] = price\n #self.dentry[\"orderPrice\" + str(i) + chartnumber] = self.nextTrade\n \n #altbuy = int(self.dentry[\"buy\" + str(i) + chartnumber] / price)\n altbuy = int(self.dentry[\"buy\" + str(i) + self.chartnumber] / self.nextTrade)\n \n #self.availablebase -= altbuy * price\n self.availablebase -= altbuy * self.nextTrade\n altbuy -= altbuy * .001\n self.amtofalt += altbuy\n ###HOW LONG TO WE WANT ENTRYPRICES TO BE??\n \n #self.entryprices.append(price)\n self.entryprices.append(self.nextTrade)\n if self.graphics:\n self.graph.buy(self.masterDick[\"currentPrice\" + self.chartnumber], self.masterDick[\"count\" + self.chartnumber], self.chartnumber, i)\n #print(\"Fun:\",self.amtofalt)\n print(\"Buy\" + str(i),self.dentry[\"buy\" + str(i) + self.chartnumber])\n break",
"def get_range_value(self, key):\n pass",
"def on_entry():\r\n try: # try to parse the entry string as a couple of integer values\r\n minvalue, maxvalue = win.entry.state.split(',')\r\n minvalue, maxvalue = int(minvalue), int(maxvalue)\r\n win.min, win.max = min(minvalue, maxvalue), max(minvalue, maxvalue)\r\n except Exception:\r\n pass # keep previous values if the parsing fails\r\n win.entry.state = f\"{win.min}, {win.max}\"",
"def range(self, value):\n self.value_range = tuple([float(x) for x in value.split(':')])",
"def adjust_entry(entry):\n if 'address2' not in entry:\n entry['address2'] = ''\n entry['hours_txt'] = ''\n if 'hours' in entry:\n strhours = []\n for elem in entry['hours']:\n strhours.append(elem['days']+' '+(' , '.join(elem['hours'])))\n entry['hours_txt'] = ('\\n').join(strhours)\n else:\n entry['hours'] = []\n entry['hours_parsed'] = get_hours_dict(entry['hours'])\n try:\n entry['cuisines_txt'] = ', '.join(entry['cuisines'])\n except KeyError:\n entry['cuisines_txt'] = ''\n if 'tags' not in entry:\n entry['tags'] = []\n entry['tags_txt'] = ', '.join(entry['tags'])\n\n\n if 'weighted_rating' not in entry:\n entry['weighted_rating'] = '0.0'\n\n\n entry['rating_parsed'] = int(round(float(entry['weighted_rating'])))\n\n if not 'veg_level' in entry:\n entry['veg_level'] = 5\n\n if int(entry['veg_level']) == 0:\n entry['color_txt'] = '#b00257'\n elif int(entry['veg_level']) == 1:\n entry['color_txt'] = '#97a509'\n elif int(entry['veg_level']) == 2:\n entry['color_txt'] = '#155196'\n elif int(entry['veg_level']) == 3:\n entry['color_txt'] = '#fab20a'\n elif int(entry['veg_level']) == 4:\n entry['color_txt'] = '#97a509'\n elif int(entry['veg_level']) == 5:\n entry['color_txt'] = '#16ac48'\n return entry",
"def is_in_range(self, price):\r\n return price <= self.pmax and price >= self.pmin",
"def parse_range(option):\n return {\"range\": timedelta(days=option)}",
"def range_validator_advice(validator_args):\n \n a_type, lb, ub, allow_none, error_msg = validator_args\n if lb == None and ub == None:\n return \"\"\n adv_str = 'x'\n if lb != None:\n adv_str = str(lb) + ' <= ' + adv_str\n if ub != None:\n adv_str += ' <= ' + str(ub)\n if allow_none:\n adv_str += ', None'\n return ' {' + adv_str + '}'",
"def ed(entries, cut):\n if not isinstance(entries, numbers.Real) and entries not in (\"nan\", \"inf\", \"-inf\"):\n raise TypeError(\"entries ({0}) must be a number\".format(entries))\n if not isinstance(cut, Container):\n raise TypeError(\"cut ({0}) must be a Container\".format(cut))\n if entries < 0.0:\n raise ValueError(\"entries ({0}) cannot be negative\".format(entries))\n out = Select(None, cut)\n out.entries = float(entries)\n return out.specialize()",
"def getAFeRange(brand):\n return afe_range[brand]",
"def _range_entry(s: pd.Series) -> pd.Series:\n\n return -((s.shift() - s) * s).fillna(0)",
"def test_getEntryByTerm(self):\n self.g.entryFormat = ['term', 'tags', 'value']\n origEntry = {'term': 'foo', 'tags': 'a', 'value': '1'}\n b = self.g.add_entry(origEntry)\n self.assertTrue(b)\n retrievedEntry = self.g.get(origEntry['term'])\n self.assertEqual(retrievedEntry, origEntry)",
"def centry(self, e: int) -> float:\n result = self._read_inline(f\"centry({e})\")\n return result",
"def filter_range_e(self, name, field, start, end):\n self.__names[name] = {\n 'filter': {\n 'range': {\n field: {\n 'gte': start,\n 'lte': end\n }\n }\n }\n }\n self.__last_name = name\n return self",
"def cb_f1_entry_1_activate(self, entry_widget):\n # print(\"Got here activate\")\n # print(entry_field.get_text())\n radius = float(entry_widget.get_text())\n print(radius)",
"def add_entry(self, number: int, entry: Entry) -> None:\n raise NotImplementedError",
"def test_sell_ticket_price_range(self, *_):\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"testticket\")\n self.type(\"#quantity_sell\", \"1\")\n self.type(\"#price_sell\", \"101\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown.\n self.assert_text(\"Ticket price outside of valid range\", \"#message\")\n\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"testticket\")\n self.type(\"#quantity_sell\", \"1\")\n self.type(\"#price_sell\", \"9\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown.\n self.assert_text(\"Ticket price outside of valid range\", \"#message\")",
"def bucket_boundaries(self, bucket):\n\n if bucket < 0 or bucket >= self.total_buckets:\n raise IndexError('bucket %d out of range' % bucket)\n if bucket == self.total_buckets - 1:\n return (self._lower_bounds[bucket], float('Inf'))\n return (self._lower_bounds[bucket], self._lower_bounds[bucket + 1])",
"def filter_range(self, name, field, start, end):\n self.__names[name] = {\n 'filter': {\n 'range': {\n field: {\n 'gt': start,\n 'lt': end\n }\n }\n }\n }\n self.__last_name = name\n return self",
"def test_get_range(self):\n pass",
"def add(self, attr):\n self.validate_type(attr)\n value = attr.value\n if not self.range:\n self.range = (value, value)\n else:\n self.range = min(self.range[0], value), max(self.range[1], value)",
"def _adjustRange(self, start, end):\n adjusted_start = start\n if self._start:\n if end < self._start:\n return None\n adjusted_start = max(self._start, start)\n \n adjusted_end = end\n if self._end:\n if self._end < start:\n return None\n adjusted_end = min(self._end, end)\n \n return (adjusted_start, adjusted_end)",
"def key_func(entry):\n return (entry[1].name, entry[1].number, entry[1].price)",
"def _get_slt_entry(self, entry):\n # Slt ptr is the second word in the SLT region (first is fingerprint)\n # Note how we deliberately don't use any debug information here (e.g.\n # finding the location of $_audio_slt_table).\n if Arch.addr_per_word == 4:\n sltptr_addr = Arch.pRegions['SLT'][0] + Arch.addr_per_word\n slt_entry_addr = self.get_data_pm(sltptr_addr) # index 0 of slt\n else:\n sltptr_addr = Arch.dRegions['SLT'][0] + Arch.addr_per_word\n slt_entry_addr = self.get_data(sltptr_addr) # index 0 of slt\n # Run through the slt looking for the entry we want, if we can't find\n # the one we're looking for maybe it's not in the list\n while entry > self.get_data(slt_entry_addr):\n slt_entry_addr += 2 * Arch.addr_per_word\n\n if entry == self.get_data(slt_entry_addr):\n return self.get_data(slt_entry_addr + Arch.addr_per_word)\n\n return None",
"def pp_entry(self, entry):\n self.separator()\n print('Type: {}'.format(self.TRANSACTION_CODES[entry['Transaction Code']]))\n for item in entry:\n print(item.ljust(25, ' ') + ': {}'.format(entry[item]))\n self.separator()",
"def GetTRange(self):\n ...",
"def preprocess_entry(self, entry):\r\n raise NotImplementedError('BaseDataSource::preprocess_entry not implemented.')"
] | [
"0.61373144",
"0.6032945",
"0.54929155",
"0.5479415",
"0.515154",
"0.5149273",
"0.5042822",
"0.50140834",
"0.5003269",
"0.49909687",
"0.4943643",
"0.48825723",
"0.48644677",
"0.4860608",
"0.4840631",
"0.48357874",
"0.48082027",
"0.4736602",
"0.47233704",
"0.46627074",
"0.46605542",
"0.46382508",
"0.4628499",
"0.46249574",
"0.46239823",
"0.46229237",
"0.46029386",
"0.45817405",
"0.45723644",
"0.4572133"
] | 0.6240598 | 0 |
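The _signed_range_entry document in the row above composes with the _range_entry helper that appears among its own negatives. A short end-to-end sketch, assuming both definitions as shown and using a made-up indicator, yields a signed blip at the bar where the indicator enters the band.

import numpy as np
import pandas as pd


def _range_entry(s: pd.Series) -> pd.Series:
    # Repeated from the negative listing above: +1 where s flips from 0 to 1.
    return -((s.shift() - s) * s).fillna(0)


def _signed_range_entry(entry: pd.Series, sign: pd.Series) -> pd.Series:
    # Repeated from the document field of the row above.
    return (_range_entry(entry) * np.sign(sign)).astype(int)


# Made-up indicator; the in-range flag is cast to int so the shift/subtract
# arithmetic in _range_entry stays numeric.
indicator = pd.Series([-1.5, -0.2, 0.3, 2.0])
in_range = (indicator.abs() < 1.0).astype(int)
print(_signed_range_entry(in_range, indicator).tolist())  # [0, -1, 0, 0]

Casting the in-range flag to int is a precaution for this sketch; passing a raw boolean series would rely on object-dtype arithmetic in the shift/subtract step.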
Blip when the indicator enters or leaves the range. The blip is signed the same as the sign of the indicator. | def range_blip(
indicator: pd.Series,
threshold: float = 0,
inout: Literal["inside", "outside"] = "inside",
) -> pd.Series:
indicator = indicator.dropna()
r = inout_range(indicator, threshold, inout)
return _signed_range_entry(_range_entry(r), indicator) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clip(self):\n if self._value < self.lowBound:\n self._value = self.lowBound\n elif self._value > self.upBound:\n self._value = self.upBound",
"def crosser(ind: pd.Series, threshold: float) -> pd.Series:\n df = pd.DataFrame({\"ind\": ind})\n df[\"above_below\"] = (df[\"ind\"] >= threshold) * 1 - (df[\"ind\"] < threshold) * 1\n df[\"blip\"] = ((df[\"above_below\"].shift() + df[\"above_below\"]) == 0) * df[\n \"above_below\"\n ]\n df = df.dropna()\n return df[\"blip\"]",
"def process_pain(x, lb, ub):\n x = x.abs()\n x.loc[(x > ub)] = 8\n x.loc[(x < lb) | (x > ub)] = np.nan\n return x",
"def clip_by_bound(self) -> None:\n\n pass",
"def zeroCrossingNegSlope(self, evap_threshold):\r\n\t\tself.splitBaseline = np.mean(self.splitData[0:10])\r\n\t\ttry:\r\n\t\t\tsplit_min_index = np.argmin(self.splitData)\r\n\t\t\tsplit_max_index = np.argmax(self.splitData[0:split_min_index])\r\n\t\t\tsplit_max_value = self.splitData[split_max_index]\r\n\t\t\tsplit_min_value = self.splitData[split_min_index]\r\n\t\texcept:\r\n\t\t\tzero_crossing = -3\r\n\t\t\treturn zero_crossing\r\n\t\t#print 'split',\tsplit_min_index, (self.splitBaseline-split_min_value), split_max_index,(split_max_value-self.splitBaseline)\r\n\t\tif (self.splitBaseline-split_min_value) >= evap_threshold and (split_max_value-self.splitBaseline) >= evap_threshold: #avoid particles evaporating before the notch position can be properly determined (details in Taylor et al. 10.5194/amtd-7-5491-2014)\r\n\t\t\ttry:\r\n\t\t\t\tfor index in range(split_max_index, split_min_index+1): #go to max +1 because 'range' function is not inclusive\r\n\t\t\t\t\tif self.splitData[index] > self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_pos = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_pos = index\r\n\t\t\t\t\tif self.splitData[index] <= self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_neg = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_neg = index\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tzero_crossing = index+((value_zero_cross_pos-self.splitBaseline)*(index_zero_cross_pos-index_zero_cross_neg))/(value_zero_cross_pos-value_zero_cross_neg) \r\n\t\t\texcept:\r\n\t\t\t\tzero_crossing = -1\r\n\t\t\r\n\t\telse: \r\n\t\t\tzero_crossing = -2\r\n\r\n\t\tself.zeroCrossingPos = zero_crossing\r\n\t\treturn zero_crossing",
"def clip(data,clip):\n data[data > clip] = clip\n data[data < -clip] = -clip\n return data",
"def lower_bri(self):\r\n for light in self.lights:\r\n bri = self.b.get_light(light,'bri')\r\n bri = bri - 50\r\n if bri < 0:\r\n bri = 1\r\n self.b.set_light(light,'bri',bri)",
"def zeroCrossingPosSlope(self, evap_threshold):\r\n\t\tself.splitBaseline = np.mean(self.splitData[0:10])\r\n\t\tsplit_max_index = np.argmax(self.splitData)\r\n\t\tsplit_min_index = np.argmin(self.splitData[0:split_max_index])\r\n\t\tsplit_max_value = self.splitData[split_max_index]\r\n\t\tsplit_min_value = self.splitData[split_min_index]\r\n\r\n\t\tif (self.splitBaseline-split_min_value) >= evap_threshold and (split_max_value-self.splitBaseline) >=evap_threshold: #avoid particles evaporating before the notch position can be properly determined (details in Taylor et al. 10.5194/amtd-7-5491-2014)\r\n\t\t\ttry:\r\n\t\t\t\tfor index in range(split_min_index, split_max_index+1): #go to max +1 because 'range' function is not inclusive\r\n\t\t\t\t\tif self.splitData[index] < self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_neg = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_neg = index\r\n\t\t\t\t\tif self.splitData[index] >= self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_pos = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_pos = index\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tzero_crossing = index+((value_zero_cross_pos-self.splitBaseline)*(index_zero_cross_pos-index_zero_cross_neg))/(value_zero_cross_pos-value_zero_cross_neg) \r\n\t\t\texcept:\r\n\t\t\t\tzero_crossing = -1 \r\n\t\t\t\t\r\n\t\telse:\r\n\t\t\tzero_crossing = -2 \r\n\t\t\r\n\t\tself.zeroCrossingPos = zero_crossing\r\n\t\treturn zero_crossing",
"def clip(self, *args, **kwargs):\n return _uhd_swig.meta_range_t_clip(self, *args, **kwargs)",
"def __neg__(self):\n return Intervalo(-self.hi, -self.lo)",
"def zeroCrossing(self,evap_threshold):\r\n\t\tself.splitBaseline =(np.mean(self.splitData[0:10]))\t\r\n\t\tsplit_max_index = np.argmax(self.splitData)\r\n\t\tsplit_min_index = np.argmin(self.splitData)\r\n\r\n\t\tif split_max_index >= split_min_index:\r\n\t\t\treturn self.zeroCrossingPosSlope(evap_threshold)\r\n\t\t\r\n\t\tif split_max_index < split_min_index:\r\n\t\t\treturn self.zeroCrossingNegSlope(evap_threshold)",
"def clip(lo, x, hi):\n x = max(lo,x)\n x = min(x,hi)\n return x",
"def indicator(x_value, y_bar):\n if y_bar <= x_value:\n out = 1\n else:\n out = 0\n return out",
"def clip(self, x):\n return self.min_value if x<self.min_value else self.max_value if x > self.max_value else x",
"def indicator(self):\n return (~self.mask).astype(numpy.int_)",
"def upper_covers(self, x):",
"def updateIndicator(self):\n\t\tnewIndicatorX = self.getPosFromPitch(self.listener.pitch)\n\t\t\n\t\tself.triTip = (newIndicatorX, self.triTip[1])\n\t\tself.triLeft = (self.triTip[0] - self.width*0.01, self.height*.3)\n\t\tself.triRight = (self.triTip[0] + self.width*0.01, self.height*.3)\n\t\tself.indicatorCoords = ( self.triLeft, self.triTip, self.triRight)\n\t\tself.indicator.points = self.indicatorCoords\n\t\tself.indicator.fill = self.indicatorColor[self.inTune]",
"def clip(val):\n if val > 4.0:\n return 4.0\n elif val < -4.0:\n return -4.0\n else:\n return val",
"def ctrl_b(self):\n if self.index > 0:\n self.index -= 1",
"def sli(self, indicator=0):\n self.indicator = indicator\n return indicator",
"def __init__(self,\n low,\n high,\n clipping_lower_bound=-np.inf,\n clipping_upper_bound=np.inf):\n super().__init__()\n self._low = low\n self._high = high\n self._clipping_lower_bound = clipping_lower_bound\n self._clipping_upper_bound = clipping_upper_bound",
"def sidebounce(self):\r\n self.dx=-self.dx",
"def change_sign(self):\n if self._is_zero():\n return\n\n if self._digits[0] != self.Symbols.NEGATIVE.value:\n self._digits.appendleft(self.Symbols.NEGATIVE.value)\n else:\n self._digits.popleft()",
"def signal_hammer(icu, icu_slope, hammer_icu, hammer_slope):\n\n return (icu > hammer_icu and icu_slope > 0) or (icu_slope > hammer_slope)",
"def clip(x, min, max):\r\n # see decorator for function body\r\n # for grep: clamp, bound\r",
"def zero_crosser(indicator: pd.Series) -> pd.Series:\n indicator = indicator.fillna(0)\n return (((indicator.shift() * indicator) <= 0) * np.sign(indicator)).astype(int)",
"def cb_minus(event):\n delta_alpha = pm_rate\n # Decrease Alpha \n sAlpha0.set_val( np.clip(sAlpha0.val - delta_alpha, alpha_min[0], alpha_max[0]) )\n sAlpha1.set_val( np.clip(sAlpha1.val - delta_alpha, alpha_min[1], alpha_max[1]) )\n sAlpha2.set_val( np.clip(sAlpha2.val - delta_alpha, alpha_min[2], alpha_max[2]) )\n print(\"---\")",
"def test_outside_bottom_range(self):\n input_ = [\n self.indicator_record(date=datetime.date(2000, 2, 1), value=0.13),\n self.indicator_record(date=datetime.date(2000, 3, 1), value=0.22),\n ]\n output = self.expander._ipca_from_15_expander(input_)\n expected = self.indicator_record(date=datetime.date(2000, 4, 1), value=0.22)\n actual = output[-1]\n\n self.assertEqual(expected, actual)",
"def Clip(x):\n return math_ops.maximum(math_ops.minimum(x, 1.), -1.)",
"def check_boundary(self):\n\n\t\tif self.Bubble_initial_pos[0] <= self.Bubble_radius or self.Bubble_initial_pos[0] >= self.tk_pic.width - self.Bubble_radius:\n\t\t\tself.Bubble_vel[0] = -self.Bubble_vel[0]"
] | [
"0.6277009",
"0.5953873",
"0.58876336",
"0.56213623",
"0.5533034",
"0.5480362",
"0.53790057",
"0.53341526",
"0.5326615",
"0.5307674",
"0.5303355",
"0.51896703",
"0.5184564",
"0.5129037",
"0.51242024",
"0.51144993",
"0.5104742",
"0.5090696",
"0.50842917",
"0.50722456",
"0.50697845",
"0.5058684",
"0.4999564",
"0.49993327",
"0.49979967",
"0.4993431",
"0.4975952",
"0.49623233",
"0.495636",
"0.49479532"
] | 0.68230635 | 0 |
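A minimal usage sketch for the range_blip document above, in Python. It assumes range_blip and its helpers (inout_range, _range_entry, _signed_range_entry, none of which are shown here) are importable from the same module; the series values are illustrative only.

import pandas as pd

# hypothetical oscillator moving into and out of the (-1, 1) band
ind = pd.Series([-2.0, -0.4, 0.3, 1.5, 0.2, -1.8])
blips = range_blip(ind, threshold=1.0, inout="inside")
# non-zero entries mark bars where the indicator enters or leaves the band,
# signed like the indicator itself; all other bars are 0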
Blip when indicator crosses zero. Blip is signed the same as sign of the indicator. When indicator value is exactly zero at some point, next value will be treated as having crossed zero. | def zero_crosser(indicator: pd.Series) -> pd.Series:
indicator = indicator.fillna(0)
return (((indicator.shift() * indicator) <= 0) * np.sign(indicator)).astype(int) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def zero_crossings(x):\n return np.array(np.where(np.diff(np.sign(x)))[0])",
"def crosser(ind: pd.Series, threshold: float) -> pd.Series:\n df = pd.DataFrame({\"ind\": ind})\n df[\"above_below\"] = (df[\"ind\"] >= threshold) * 1 - (df[\"ind\"] < threshold) * 1\n df[\"blip\"] = ((df[\"above_below\"].shift() + df[\"above_below\"]) == 0) * df[\n \"above_below\"\n ]\n df = df.dropna()\n return df[\"blip\"]",
"def zeroCrossingNegSlope(self, evap_threshold):\r\n\t\tself.splitBaseline = np.mean(self.splitData[0:10])\r\n\t\ttry:\r\n\t\t\tsplit_min_index = np.argmin(self.splitData)\r\n\t\t\tsplit_max_index = np.argmax(self.splitData[0:split_min_index])\r\n\t\t\tsplit_max_value = self.splitData[split_max_index]\r\n\t\t\tsplit_min_value = self.splitData[split_min_index]\r\n\t\texcept:\r\n\t\t\tzero_crossing = -3\r\n\t\t\treturn zero_crossing\r\n\t\t#print 'split',\tsplit_min_index, (self.splitBaseline-split_min_value), split_max_index,(split_max_value-self.splitBaseline)\r\n\t\tif (self.splitBaseline-split_min_value) >= evap_threshold and (split_max_value-self.splitBaseline) >= evap_threshold: #avoid particles evaporating before the notch position can be properly determined (details in Taylor et al. 10.5194/amtd-7-5491-2014)\r\n\t\t\ttry:\r\n\t\t\t\tfor index in range(split_max_index, split_min_index+1): #go to max +1 because 'range' function is not inclusive\r\n\t\t\t\t\tif self.splitData[index] > self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_pos = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_pos = index\r\n\t\t\t\t\tif self.splitData[index] <= self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_neg = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_neg = index\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tzero_crossing = index+((value_zero_cross_pos-self.splitBaseline)*(index_zero_cross_pos-index_zero_cross_neg))/(value_zero_cross_pos-value_zero_cross_neg) \r\n\t\t\texcept:\r\n\t\t\t\tzero_crossing = -1\r\n\t\t\r\n\t\telse: \r\n\t\t\tzero_crossing = -2\r\n\r\n\t\tself.zeroCrossingPos = zero_crossing\r\n\t\treturn zero_crossing",
"def sign(v):\n return np.where(v < 0, -1.0, 1.0)",
"def Clip(x):\n return math_ops.maximum(math_ops.minimum(x, 1.), -1.)",
"def zeroCrossingPosSlope(self, evap_threshold):\r\n\t\tself.splitBaseline = np.mean(self.splitData[0:10])\r\n\t\tsplit_max_index = np.argmax(self.splitData)\r\n\t\tsplit_min_index = np.argmin(self.splitData[0:split_max_index])\r\n\t\tsplit_max_value = self.splitData[split_max_index]\r\n\t\tsplit_min_value = self.splitData[split_min_index]\r\n\r\n\t\tif (self.splitBaseline-split_min_value) >= evap_threshold and (split_max_value-self.splitBaseline) >=evap_threshold: #avoid particles evaporating before the notch position can be properly determined (details in Taylor et al. 10.5194/amtd-7-5491-2014)\r\n\t\t\ttry:\r\n\t\t\t\tfor index in range(split_min_index, split_max_index+1): #go to max +1 because 'range' function is not inclusive\r\n\t\t\t\t\tif self.splitData[index] < self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_neg = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_neg = index\r\n\t\t\t\t\tif self.splitData[index] >= self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_pos = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_pos = index\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tzero_crossing = index+((value_zero_cross_pos-self.splitBaseline)*(index_zero_cross_pos-index_zero_cross_neg))/(value_zero_cross_pos-value_zero_cross_neg) \r\n\t\t\texcept:\r\n\t\t\t\tzero_crossing = -1 \r\n\t\t\t\t\r\n\t\telse:\r\n\t\t\tzero_crossing = -2 \r\n\t\t\r\n\t\tself.zeroCrossingPos = zero_crossing\r\n\t\treturn zero_crossing",
"def zeroCrossing(self,evap_threshold):\r\n\t\tself.splitBaseline =(np.mean(self.splitData[0:10]))\t\r\n\t\tsplit_max_index = np.argmax(self.splitData)\r\n\t\tsplit_min_index = np.argmin(self.splitData)\r\n\r\n\t\tif split_max_index >= split_min_index:\r\n\t\t\treturn self.zeroCrossingPosSlope(evap_threshold)\r\n\t\t\r\n\t\tif split_max_index < split_min_index:\r\n\t\t\treturn self.zeroCrossingNegSlope(evap_threshold)",
"def slide(line):\n none_zero_list = get_none_zero_list(line)\n zero_list = get_zero_list(line)\n return none_zero_list + zero_list",
"def clip(self):\n if self._value < self.lowBound:\n self._value = self.lowBound\n elif self._value > self.upBound:\n self._value = self.upBound",
"def invert0(x):\n return 0 if x > 0 else 1",
"def indicator(self):\n return (~self.mask).astype(numpy.int_)",
"def sign(a) :\n return (a>0) - (a<0)",
"def bias_clipping(self):\n max_bias = np.amax(np.abs(self._bias))\n self._bias = self._clipping*self._bias/max_bias",
"def pos(x):\r\n\r\n x[x < 0.] = 0.\r\n return x",
"def replace_lowest_one_with_zero(x):\n return x & (x-1)",
"def clip(val):\n if val > 4.0:\n return 4.0\n elif val < -4.0:\n return -4.0\n else:\n return val",
"def convex_conj(self):\n return IndicatorZero(self.domain, -self.constant)",
"def sign(a):\n return (a > 0) - (a < 0)",
"def signal(x):\r\n if x >= 0.0:\r\n return 1.0\r\n return -1.0",
"def nozero(arr):\n vals=sorted(list(set(np.array(arr).flatten())))\n if vals[0]<0:\n print(\"correcting for div/zero by replacing 0 with\",vals[1])\n arr[arr==0]=vals[1]\n return arr",
"def positive(x):\n return np.maximum(x, 0.0)",
"def zero_to_neg(array):\n ret = np.ones(len(array))\n for i in range(len(array)):\n if(array[i] == 0):\n ret[i] = -1\n return ret",
"def bar(self, value):\r\n if value < 0:\r\n raise ValueError(\"Must be >= 0\")\r\n self.x = value",
"def change_sign(self):\n if self._is_zero():\n return\n\n if self._digits[0] != self.Symbols.NEGATIVE.value:\n self._digits.appendleft(self.Symbols.NEGATIVE.value)\n else:\n self._digits.popleft()",
"def shift_down(line, result):\n \n for index in range(len(line)):\n current = index\n next_greater_zero = -1\n if line[index] == 0:\n #while the next value is still zero move right\n while current + 1 < len(line) and line[current] == 0:\n current +=1\n #if value is not equal to zero save index\n #of the next >0 value to assign current index that value\n if line[current] != 0:\n next_greater_zero = current\n break\n #assign result[next_greater_zero] to line[next_greater_zero]\n #change line[next_greater_zero] to zero\n next_value = line[next_greater_zero]\n line[next_greater_zero] = 0\n result[index] = next_value\n else:\n result[index] = line[index]\n return result",
"def sum_gt_zero(x):\r\n s = x[0] + x[1]\r\n if s > 0.0:\r\n return 1.0\r\n return 0.0",
"def min_zero_crossings(self):\n return self._MIN_ZERO_CROSSINGS",
"def replace_negative(arr, value):\n arr[arr < 0] = value\n return arr",
"def process_pain(x, lb, ub):\n x = x.abs()\n x.loc[(x > ub)] = 8\n x.loc[(x < lb) | (x > ub)] = np.nan\n return x",
"def sign(x):\n if x >= 0:\n return 1\n return -1"
] | [
"0.63800055",
"0.5952263",
"0.5914088",
"0.58691347",
"0.58293945",
"0.56045574",
"0.5598274",
"0.5479052",
"0.5435027",
"0.5390016",
"0.5358857",
"0.5321166",
"0.52676576",
"0.52672845",
"0.52667016",
"0.52649635",
"0.5256902",
"0.52521056",
"0.5209373",
"0.5192771",
"0.517935",
"0.51351595",
"0.5120547",
"0.5117379",
"0.51107603",
"0.5100953",
"0.50959694",
"0.5071035",
"0.50611156",
"0.5055692"
] | 0.706241 | 0 |
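The zero_crosser document above depends only on pandas and numpy, so (assuming zero_crosser is importable) a small worked example can show the signed blips directly; the sample values are made up.

import numpy as np
import pandas as pd

ind = pd.Series([-1.0, -0.5, 0.5, 1.0, -2.0])
print(zero_crosser(ind))
# expected values by position: 0, 0, 1, 0, -1
# position 2 blips +1 on the first bar after crossing from negative to positive,
# position 4 blips -1 on crossing back below zero; position 0 stays 0 because
# the shifted value is NaN and NaN * x <= 0 evaluates to False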
Weighted zscore. Can be used to test whether price is within/outside Bollinger Bands. | def weighted_zscore(df: pd.DataFrame, lookback: int) -> pd.Series:
wmean = rolling_weighted_mean(df["close"], df["volume"], lookback)
wstd = rolling_weighted_std(df["close"], df["volume"], lookback, wmean)
return ((df["close"] - wmean) / wstd).dropna() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_batter_z_score(batter_list, players_over_zero_dollars, one_dollar_players,\n dollar_per_fvaaz, player_pool_multiplier, add_original_value=False):\n player_pool = int(players_over_zero_dollars * player_pool_multiplier)\n # Standard Calculations\n run_list = []\n hr_list = []\n rbi_list = []\n sb_list = []\n ops_list = []\n avg_list = []\n # weighted_batter_list = []\n batter_dict_list = []\n if not isinstance(batter_list[0], dict):\n for batter in batter_list:\n b = model_to_dict(batter)\n batter_dict_list.append(b)\n else:\n batter_dict_list = batter_list\n for batter in batter_dict_list:\n if add_original_value:\n batter['original_value'] = batter['dollarValue']\n\n run_list.append(batter['r'])\n hr_list.append(batter['hr'])\n rbi_list.append(batter['rbi'])\n sb_list.append(batter['sb'])\n ops_list.append(batter['ops'])\n avg_list.append(batter['avg'])\n run_list_nlargest = heapq.nlargest(player_pool, run_list)\n hr_list_nlargest = heapq.nlargest(player_pool, hr_list)\n rbi_list_nlargest = heapq.nlargest(player_pool, rbi_list)\n sb_list_nlargest = heapq.nlargest(player_pool, sb_list)\n ops_list_nlargest = heapq.nlargest(player_pool, ops_list)\n avg_list_nlargest = heapq.nlargest(player_pool, avg_list)\n # Average Calculation\n r_avg = avg_calc(run_list_nlargest)\n hr_avg = avg_calc(hr_list_nlargest)\n rbi_avg = avg_calc(rbi_list_nlargest)\n sb_avg = avg_calc(sb_list_nlargest)\n ops_avg = avg_calc(ops_list_nlargest)\n avg_avg = avg_calc(avg_list_nlargest)\n # Standard Deviation Calculation\n r_std_dev = std_dev_calc(run_list_nlargest, r_avg)\n hr_std_dev = std_dev_calc(hr_list_nlargest, hr_avg)\n rbi_std_dev = std_dev_calc(rbi_list_nlargest, rbi_avg)\n sb_std_dev = std_dev_calc(sb_list_nlargest, sb_avg)\n ops_std_dev = std_dev_calc(ops_list_nlargest, ops_avg)\n avg_std_dev = std_dev_calc(avg_list_nlargest, avg_avg)\n # zScore Calculation\n for batter in batter_dict_list:\n batter['zScoreR'] = z_score_calc(batter['r'], r_avg, r_std_dev)\n batter['weightedR'] = batter['zScoreR'] * float(batter['ab'])\n batter['zScoreHr'] = z_score_calc(batter['hr'], hr_avg, hr_std_dev)\n batter['weightedHr'] = batter['zScoreHr'] * float(batter['ab'])\n batter['zScoreRbi'] = z_score_calc(batter['rbi'], rbi_avg, rbi_std_dev)\n batter['weightedRbi'] = batter['zScoreRbi'] * float(batter['ab'])\n batter['zScoreSb'] = z_score_calc(batter['sb'], sb_avg, sb_std_dev)\n batter['weightedSb'] = batter['zScoreSb'] * float(batter['ab'])\n batter['zScoreOps'] = z_score_calc(batter['ops'], ops_avg, ops_std_dev)\n batter['weightedOps'] = batter['zScoreOps'] * float(batter['ab'])\n batter['zScoreAvg'] = z_score_calc(batter['avg'], ops_avg, ops_std_dev)\n batter['weightedAvg'] = batter['zScoreAvg'] * float(batter['ab'])\n # weighted_batter_list.append(batter)\n # Weighted Calculations\n weighted_run_list = []\n weighted_hr_list = []\n weighted_rbi_list = []\n weighted_sb_list = []\n weighted_ops_list = []\n weighted_avg_list = []\n # for batter in weighted_batter_list:\n for batter in batter_dict_list:\n weighted_run_list.append(batter['weightedR'])\n weighted_hr_list.append(batter['weightedHr'])\n weighted_rbi_list.append(batter['weightedRbi'])\n weighted_sb_list.append(batter['weightedSb'])\n weighted_ops_list.append(batter['weightedOps'])\n weighted_avg_list.append(batter['weightedOps'])\n weighted_run_list_nlargest = heapq.nlargest(player_pool, weighted_run_list)\n weighted_hr_list_nlargest = heapq.nlargest(player_pool, weighted_hr_list)\n weighted_rbi_list_nlargest = heapq.nlargest(player_pool, 
weighted_rbi_list)\n weighted_sb_list_nlargest = heapq.nlargest(player_pool, weighted_sb_list)\n weighted_ops_list_nlargest = heapq.nlargest(player_pool, weighted_ops_list)\n weighted_avg_list_nlargest = heapq.nlargest(player_pool, weighted_avg_list)\n # Weighted Average Calculation\n weighted_r_avg = avg_calc(weighted_run_list_nlargest)\n weighted_hr_avg = avg_calc(weighted_hr_list_nlargest)\n weighted_rbi_avg = avg_calc(weighted_rbi_list_nlargest)\n weighted_sb_avg = avg_calc(weighted_sb_list_nlargest)\n weighted_ops_avg = avg_calc(weighted_ops_list_nlargest)\n weighted_avg_avg = avg_calc(weighted_avg_list_nlargest)\n # Weighted Standard Deviation Calculation\n weighted_r_std_dev = std_dev_calc(weighted_run_list_nlargest, weighted_r_avg)\n weighted_hr_std_dev = std_dev_calc(weighted_hr_list_nlargest, weighted_hr_avg)\n weighted_rbi_std_dev = std_dev_calc(weighted_rbi_list_nlargest, weighted_rbi_avg)\n weighted_sb_std_dev = std_dev_calc(weighted_sb_list_nlargest, weighted_sb_avg)\n weighted_ops_std_dev = std_dev_calc(weighted_ops_list_nlargest, weighted_ops_avg)\n weighted_avg_std_dev = std_dev_calc(weighted_avg_list_nlargest, weighted_avg_avg)\n # Weighted zScore Calculation\n for batter in batter_dict_list:\n batter['weightedZscoreR'] = z_score_calc(batter['weightedR'], weighted_r_avg,\n weighted_r_std_dev)\n batter['weightedZscoreHr'] = z_score_calc(batter['weightedHr'], weighted_hr_avg,\n weighted_hr_std_dev)\n batter['weightedZscoreRbi'] = z_score_calc(batter['weightedRbi'], weighted_rbi_avg,\n weighted_rbi_std_dev)\n batter['weightedZscoreSb'] = z_score_calc(batter['weightedSb'], weighted_sb_avg,\n weighted_sb_std_dev)\n batter['weightedZscoreOps'] = z_score_calc(batter['weightedOps'], weighted_ops_avg,\n weighted_ops_std_dev)\n batter['weightedZscoreAvg'] = z_score_calc(batter['weightedAvg'], weighted_avg_avg,\n weighted_avg_std_dev)\n # Calculate Values\n fvaaz_list = []\n for batter in batter_dict_list:\n # TODO: how to handle an avg version of this?\n batter['fvaaz'] = (batter['zScoreR'] + batter['zScoreHr'] + batter['zScoreRbi'] + batter['zScoreSb'] +\n batter['weightedZscoreOps'])\n fvaaz_list.append(batter['fvaaz'])\n players_over_one_dollar = players_over_zero_dollars - one_dollar_players\n fvaaz_list_over_zero = heapq.nlargest(players_over_zero_dollars, fvaaz_list)\n fvaaz_list_over_one = heapq.nlargest(players_over_one_dollar, fvaaz_list)\n for batter in batter_dict_list:\n if batter['fvaaz'] >= fvaaz_list_over_one[players_over_one_dollar - 1]:\n # TODO: dollar_per_fvaaz seems to be a circular reference, how to resolve this?\n batter['dollarValue'] = batter['fvaaz'] * dollar_per_fvaaz\n elif batter['fvaaz'] >= fvaaz_list_over_zero[players_over_zero_dollars - 1]:\n batter['dollarValue'] = 1.0\n else:\n batter['dollarValue'] = 0.0\n return sorted(batter_dict_list, key=operator.itemgetter('fvaaz'), reverse=True)\n # sorts by fvaaz (largest to smallest)",
"def get_weight(self):\n return self.W * self.get_z_mean()",
"def run_zbtest(self): # Unweighted z-test\r\n n = reduce(lambda x, y: x+(y.bwstats.mean > 0), self.sorted_r, 0)\r\n if n == 0: return (0, 0)\r\n avg = reduce(lambda x, y: x+y.bwstats.mean, self.sorted_r, 0)/float(n)\r\n def notlambda(x, y):\r\n if y.bwstats.mean <= 0: return x+0\r\n else: return x+(y.bwstats.mean-avg)*(y.bwstats.mean-avg)\r\n stddev = math.sqrt(reduce(notlambda, self.sorted_r, 0)/float(n))\r\n if not stddev: return (avg, stddev)\r\n for r in self.sorted_r:\r\n if r.bwstats.mean > 0:\r\n r.z_bw = abs((r.bwstats.mean-avg)/stddev)\r\n r.prob_zb = TorUtil.zprob(-r.z_bw)\r\n return (avg, stddev)",
"def z_score(self) -> float:\n return float((self.tsdf.pct_change().iloc[-1] - self.tsdf.pct_change().mean()) / self.tsdf.pct_change().std())",
"def z_score(self, x):\n return (x - self.n) / self.p",
"def w(self, xtest, ztest):\n if ztest > self._zi:\n return 0.0\n\n R = numpy.linalg.norm(\n (self._x - xtest) * numpy.array([1.0, 1.0, 0.0]))\n w = numpy.exp(-R / self._r) * self._w\n z_scale = numpy.clip(ztest / self._zi - 1.0, 0.0, numpy.inf)\n w *= numpy.exp(-z_scale * 4.0)\n return w",
"def get_zscore_data(self):\n self.update_filter_inds()\n return _z_score(self)",
"def z_score(num, mean, std_dev):\n\treturn (num - mean) / std_dev",
"def zscore(vals):",
"def get_weight(self):\n # FIXME: BELUM ADA KEPUTUSAN\n return 0",
"def z_score(self, x):\n\n mean = self.mean\n stddev = self.stddev\n\n z = (x - mean) / stddev\n\n return z",
"def getWeight(self) -> float:\n ...",
"def calcweight( self ):\n weight = 0\n zeroval = 0\n for sensor in ('right_top', 'right_bottom', 'left_top', 'left_bottom'):\n\t\treading = self.readings[sensor]\n\t\tcalibration = self.named_calibration[sensor]\n if sensor == 'right_top':\n zeroval = self.rtzv\n elif sensor == 'right_bottom':\n zeroval = self.rbzv\n elif sensor == 'left_top':\n zeroval = self.ltzv\n else:\n zeroval = self.lbzv\n\t\tif reading > calibration[2]:\n\t\t\tprint \"Warning, %s reading above upper calibration value\" % sensor\n\t\tif reading < calibration[1]:\n\t\t\tweight += 1700 * (reading + zeroval - calibration[0]) / (calibration[1] - calibration[0])\n\t\telse:\n\t\t\tweight += 1700 * (reading + zeroval - calibration[1]) / (calibration[2] - calibration[1]) + 1700\n\n if self.debug == 1:\n print \"weight calculated pre-conversion\", weight\n print \"return val\", self.converttolbs( weight / 100.0 )\n\n # return self.converttolbs( weight / 100.0 )\n return weight / 100.0",
"def brain_weight_oz(self):\r\n return Heart.heart_weight_oz(self) # Used method from Heart Class\r",
"def run_zrtest(self): # Unweighted z-test\r\n n = reduce(lambda x, y: x+(y.bw_ratio() > 0), self.sorted_r, 0)\r\n if n == 0: return (0, 0)\r\n avg = reduce(lambda x, y: x+y.bw_ratio(), self.sorted_r, 0)/float(n)\r\n def notlambda(x, y):\r\n if y.bw_ratio() <= 0: return x+0\r\n else: return x+(y.bw_ratio()-avg)*(y.bw_ratio()-avg)\r\n stddev = math.sqrt(reduce(notlambda, self.sorted_r, 0)/float(n))\r\n if not stddev: return (avg, stddev)\r\n for r in self.sorted_r:\r\n if r.bw_ratio() > 0:\r\n r.z_ratio = abs((r.bw_ratio()-avg)/stddev)\r\n r.prob_zr = TorUtil.zprob(-r.z_ratio)\r\n return (avg, stddev)",
"def calc_tolerance(wt):\n return 1 - wt",
"def compute_weight(self, model, random_score, cv=None):\n\n # compute the benefit (with cross-validation or not)\n score = super().compute_score_crossvalidation(model=model, cv=cv)\n\n # w = b_i - b_r\n return score - random_score",
"def Weighted_Average_Coupon(clo_df,col):\n #mask = (clo_df['Asset Type']=='Bond')\n mask = (clo_df[col] > 0) & (clo_df['Spread'].isna())\n WAC_stat = weighted_average(clo_df.loc[mask],[col,'All In Rate'])\n \n return WAC_stat",
"def z_score(x: np.ndarray) -> np.ndarray:\n return (x - np.mean(x)) / np.std(x)",
"def weight(self) -> float:\r\n return self._weight",
"def betaW(self):\n if self.maCruise > 1:\n return 0\n else:\n return sqrt(1 - self.maCruise**2)",
"def buy_indicator(self, feats):\n return (\n feats[\"z-score\"] < -1.5 or\n feats[\"z-score\"] > 1.5\n )",
"def calculate_weighted_results():\n pass",
"def heart_weight_oz(self):\r\n grams_to_oz = self.organ_weight_grams*.035\r\n return (float(grams_to_oz))",
"def taumBday(n_b, n_w, cost_b, cost_w, z):\n # 3 cases for how much it would cost to buy black or white present\n case1 = n_w * cost_w + n_b * cost_b\n case2 = n_w * (cost_b + z) + n_b * cost_b\n case3 = n_w * cost_w + n_b * (cost_w + z)\n return min(case1, case2, case3)",
"def compute_z_score(stats, columns, col_name):\n if stats[col_name]['data_type'] != DATA_TYPES.NUMERIC:\n return {}\n\n z_scores = list(map(abs,(st.zscore(columns[col_name]))))\n threshold = 3\n z_score_outlier_indexes = [i for i in range(len(z_scores)) if z_scores[i] > threshold]\n data = {\n 'z_score_outliers': z_score_outlier_indexes\n ,'mean_z_score': round(10 * (1 - np.mean(z_scores)))\n ,'z_test_based_outlier_score': round(10 * (1 - len(z_score_outlier_indexes)/len(columns[col_name])))\n ,'z_test_based_outlier_score_description':\"\"\"\n This score indicates the amount of data that are 3 STDs or more away from the mean. That is to say, the amount of data that we consider to be an outlir. A hgih z socre means your data contains a large amount of outliers.\n \"\"\"\n }\n return data",
"def calc_pitcher_z_score(pitcher_list, players_over_zero_dollars, one_dollar_players,\n dollar_per_fvaaz, player_pool_multiplier, add_original_value=False):\n player_pool = int(players_over_zero_dollars * player_pool_multiplier)\n # max_ip = max(pitcher['ips ']for pitcher in pitcher_list)\n # Standard Calculations\n win_list = []\n sv_list = []\n k_list = []\n era_list = []\n whip_list = []\n # weighted_pitcher_list = []\n pitcher_dict_list = []\n if not isinstance(pitcher_list[0], dict):\n for pitcher in pitcher_list:\n p = model_to_dict(pitcher)\n pitcher_dict_list.append(p)\n else:\n pitcher_dict_list = pitcher_list\n for pitcher in pitcher_dict_list:\n if add_original_value:\n pitcher['original_value'] = pitcher['dollarValue']\n # if pitcher['w'] < 0 or pitcher['sv'] < 0 or pitcher['k'] < 0 or pitcher['era'] <= 0 or pitcher['whip'] <= 0:\n # continue\n win_list.append(pitcher['w'])\n sv_list.append(pitcher['sv'])\n k_list.append(pitcher['k'])\n # TODO: is dividing by 15 the best route here?\n era_list.append(pitcher['era'])\n whip_list.append(pitcher['whip'])\n # era_list.append(pitcher['era'] * (pitcher['ip'] / 15))\n # whip_list.append(pitcher['whip'] * (pitcher['ip'] / 15))\n win_list_nlargest = heapq.nlargest(player_pool, win_list)\n sv_list_nlargest = heapq.nlargest(player_pool, sv_list)\n k_list_nlargest = heapq.nlargest(player_pool, k_list)\n era_list_nsmallest = heapq.nsmallest(player_pool, era_list)\n whip_list_nsmallest = heapq.nsmallest(player_pool, whip_list)\n # Average Calculation\n w_avg = avg_calc(win_list_nlargest)\n sv_avg = avg_calc(sv_list_nlargest)\n k_avg = avg_calc(k_list_nlargest)\n era_avg = avg_calc(era_list_nsmallest)\n whip_avg = avg_calc(whip_list_nsmallest)\n # Standard Deviation Calculation\n w_std_dev = std_dev_calc(win_list_nlargest, w_avg)\n sv_std_dev = std_dev_calc(sv_list_nlargest, sv_avg)\n k_std_dev = std_dev_calc(k_list_nlargest, k_avg)\n era_std_dev = std_dev_calc(era_list_nsmallest, era_avg)\n whip_std_dev = std_dev_calc(whip_list_nsmallest, whip_avg)\n # zScore Calculation\n for pitcher in pitcher_dict_list:\n pitcher['zScoreW'] = z_score_calc(pitcher['w'], w_avg, w_std_dev)\n pitcher['weightedW'] = pitcher['zScoreW'] * float(pitcher['ip'])\n pitcher['zScoreSv'] = z_score_calc(pitcher['sv'], sv_avg, sv_std_dev)\n pitcher['weightedSv'] = pitcher['zScoreSv'] * float(pitcher['ip'])\n pitcher['zScoreK'] = z_score_calc(pitcher['k'], k_avg, k_std_dev)\n pitcher['weightedK'] = pitcher['zScoreK'] * float(pitcher['ip'])\n pitcher['zScoreEra'] = z_score_calc_era_whip(pitcher['era'], era_avg, era_std_dev)\n pitcher['weightedEra'] = pitcher['zScoreEra'] * float(pitcher['ip'])\n pitcher['zScoreWhip'] = z_score_calc_era_whip(pitcher['whip'], whip_avg,\n whip_std_dev)\n pitcher['weightedWhip']= pitcher['zScoreWhip'] * float(pitcher['ip'])\n # weighted_pitcher_list.append(pitcher)\n # Weighted Calculations\n weighted_win_list = []\n weighted_sv_list = []\n weighted_k_list = []\n weighted_era_list = []\n weighted_whip_list = []\n # for pitcher in weighted_pitcher_list:\n for pitcher in pitcher_dict_list:\n weighted_win_list.append(pitcher['weightedW'])\n weighted_sv_list.append(pitcher['weightedSv'])\n weighted_k_list.append(pitcher['weightedK'])\n weighted_era_list.append(pitcher['weightedEra'])\n weighted_whip_list.append(pitcher['weightedWhip'])\n weighted_win_list_nlargest = heapq.nlargest(player_pool, weighted_win_list)\n weighted_sv_list_nlargest = heapq.nlargest(player_pool, weighted_sv_list)\n weighted_k_list_nlargest = heapq.nlargest(player_pool, 
weighted_k_list)\n weighted_era_list_nlargest = heapq.nlargest(player_pool, weighted_era_list)\n weighted_whip_list_nlargest = heapq.nlargest(player_pool, weighted_whip_list)\n # Weighted Average Calculation\n weighted_w_avg = avg_calc(weighted_win_list_nlargest)\n weighted_sv_avg = avg_calc(weighted_sv_list_nlargest)\n weighted_k_avg = avg_calc(weighted_k_list_nlargest)\n weighted_era_avg = avg_calc(weighted_era_list_nlargest)\n weighted_whip_avg = avg_calc(weighted_whip_list_nlargest)\n # Weighted Standard Deviation Calculation\n weighted_w_std_dev = std_dev_calc(weighted_win_list_nlargest, weighted_w_avg)\n weighted_sv_std_dev = std_dev_calc(weighted_sv_list_nlargest, weighted_sv_avg)\n weighted_k_std_dev = std_dev_calc(weighted_k_list_nlargest, weighted_k_avg)\n weighted_era_std_dev = std_dev_calc(weighted_era_list_nlargest, weighted_era_avg)\n weighted_whip_std_dev = std_dev_calc(weighted_whip_list_nlargest,\n weighted_whip_avg)\n # Weighted zScore Calculation\n for pitcher in pitcher_dict_list:\n pitcher['weightedZscoreW'] = z_score_calc(pitcher['weightedW'], weighted_w_avg,\n weighted_w_std_dev)\n pitcher['weightedZscoreSv'] = z_score_calc(pitcher['weightedSv'], weighted_sv_avg,\n weighted_sv_std_dev)\n pitcher['weightedZscoreK'] = z_score_calc(pitcher['weightedK'], weighted_k_avg,\n weighted_k_std_dev)\n pitcher['weightedZscoreEra'] = z_score_calc(pitcher['weightedEra'], weighted_era_avg,\n weighted_era_std_dev)\n pitcher['weightedZscoreWhip'] = z_score_calc(pitcher['weightedWhip'],\n weighted_whip_avg,\n weighted_whip_std_dev)\n # Calculate Values\n fvaaz_list = []\n for pitcher in pitcher_dict_list:\n # TODO: is 0.06 the best cutoff?\n if \"SP\" not in pitcher['pos'] or (\"RP\" in pitcher['pos'] and pitcher['winsip'] < 0.06):\n pitcher['fvaaz'] = (pitcher['zScoreSv'] + pitcher['zScoreK'] +\n pitcher['weightedZscoreEra'] + pitcher['weightedZscoreWhip'])\n else:\n pitcher['fvaaz'] = (pitcher['zScoreW'] + pitcher['zScoreSv'] +\n pitcher['zScoreK'] + pitcher['weightedZscoreEra'] +\n pitcher['weightedZscoreWhip'])\n # pitcher['fvaaz'] = (pitcher['weightedZscoreSv'] + pitcher['weightedZscoreK'] +\n # pitcher['weightedZscoreEra'] + pitcher['weightedZscoreWhip'])\n # else:\n # pitcher['fvaaz'] = (pitcher['weightedZscoreW'] + pitcher['weightedZscoreSv'] +\n # pitcher['weightedZscoreK'] + pitcher['weightedZscoreEra'] +\n # pitcher['weightedZscoreWhip'])\n fvaaz_list.append(pitcher['fvaaz'])\n players_over_one_dollar = players_over_zero_dollars - one_dollar_players\n fvaaz_list_over_zero = heapq.nlargest(players_over_zero_dollars, fvaaz_list)\n fvaaz_list_over_one = heapq.nlargest(players_over_one_dollar, fvaaz_list)\n for pitcher in pitcher_dict_list:\n if pitcher['fvaaz'] >= fvaaz_list_over_one[players_over_one_dollar - 1]:\n pitcher['dollarValue'] = pitcher['fvaaz'] * dollar_per_fvaaz\n elif pitcher['fvaaz'] >= fvaaz_list_over_zero[players_over_zero_dollars - 1]:\n pitcher['dollarValue'] = 1.0\n else:\n pitcher['dollarValue'] = 0.0\n return sorted(pitcher_dict_list, key=operator.itemgetter('fvaaz'), reverse=True)\n # sorts by fvaaz (largest to smallest)",
"def getZScorePvalue(zscore=None, twoSided=False):\n\timport scipy.stats as stats\n\tpvalue = stats.norm.sf(zscore)\n\tif twoSided:\n\t\tpvalue = pvalue* 2\n\treturn pvalue",
"def test_weight_is_positive(self):\n nt.assert_greater(self.herb.weight, 0)",
"def lz (inlist, score):\r\n z = (score-mean(inlist))/samplestdev(inlist)\r\n return z"
] | [
"0.64195716",
"0.6404542",
"0.623077",
"0.622927",
"0.61641794",
"0.6028003",
"0.5923462",
"0.5905907",
"0.5891175",
"0.5872673",
"0.58546215",
"0.5809704",
"0.57485",
"0.5692656",
"0.56759715",
"0.5666355",
"0.5651114",
"0.56005275",
"0.5595664",
"0.5595595",
"0.5575899",
"0.55734694",
"0.5561339",
"0.5541902",
"0.5535107",
"0.5530745",
"0.55198973",
"0.5505137",
"0.5473685",
"0.5439813"
] | 0.71250975 | 0 |
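A usage sketch for the weighted_zscore document above. rolling_weighted_mean and rolling_weighted_std are helpers from the same, unshown module, so only the expected inputs and the Bollinger-style reading of the output are illustrated; the frame below is hypothetical.

import pandas as pd

df = pd.DataFrame(
    {"close": [100.0, 101.5, 99.8, 102.3, 101.1], "volume": [1200, 800, 1500, 900, 1100]},
    index=pd.date_range("2023-01-02", periods=5, freq="D"),
)
z = weighted_zscore(df, lookback=3)
# |z| > 2 roughly corresponds to the close sitting outside two volume-weighted
# standard deviations of the volume-weighted rolling mean (Bollinger-style bands)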
Wrapper to allow applying stop loss to any signal function. Args are passed to function, kwargs to stop loss. THIS SHOULDN'T BE USED WITHOUT FURTHER REVIEW. IT'S PROBABLY WRONG. | def stop_signal(
df, signal_func: Callable, /, *func_args, **stop_kwargs
) -> pd.DataFrame:
_df = df.copy()
_df["position"] = sig_pos(signal_func(_df["close"], *func_args))
stopped = stop_loss(_df, **stop_kwargs)
assert isinstance(stopped, pd.DataFrame)
return stopped["position"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def event_stop(self, **kwargs):\n del kwargs\n self.stop()",
"def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):\n\n def cond(scalar_stopping_signal):\n return math_ops.logical_not(\n _StopSignals.should_stop(scalar_stopping_signal))\n\n def computation(unused_scalar_stopping_signal):\n return_value = op_fn()\n execute_ops = return_value['ops']\n signals = return_value['signals']\n with ops.control_dependencies(execute_ops):\n return _StopSignals.as_scalar_stopping_signal(signals)\n\n # By setting parallel_iterations=1, the parallel execution in while_loop is\n # basically turned off.\n with ops.device(device):\n return control_flow_ops.while_loop(\n cond,\n computation, [_StopSignals.NON_STOPPING_SIGNAL],\n parallel_iterations=1)",
"def after_worker_stop(func):\n _func_only(func)\n worker_methods_db.register_after_stop(func)\n return func",
"def stop(self, signal):\n pass",
"def stop(self, signal):\n pass",
"def _no_grad_(func):\n\n def __impl__(*args, **kwargs):\n with _switch_tracer_mode_guard_(is_train=False):\n return func(*args, **kwargs)\n\n return __impl__",
"def Stop(self, *_):\n self.Log('Stopping...')\n self._stop = True",
"def skip_signal():\n def _skip_signal(signal_func):\n @wraps(signal_func)\n def _decorator(sender, instance, **kwargs):\n if hasattr(instance, 'skip_signal'):\n del instance.skip_signal\n return None\n return signal_func(sender, instance, **kwargs)\n return _decorator\n return _skip_signal",
"def Stop(self, *args, **kwargs):\r\n\t\tpayload = { \"Arg1\": self }\r\n\t\tfor i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\r\n\t\tfor item in kwargs.items(): payload[item[0]] = item[1]\r\n\t\treturn self._execute('stop', payload=payload, response_object=None)",
"def handle_stop(_):\n loop.force_unmute()",
"def StopStatelessTrafficBlocking(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('stopStatelessTrafficBlocking', payload=payload, response_object=None)",
"def stop_run(arn=None):\n pass",
"def Stop(self, *args, **kwargs):\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('stop', payload=payload, response_object=None)",
"def get_train_stop_fun(num_iter):\n def train_stop_fun(*args):\n count = args[3]\n return tf.less(count, num_iter)\n return train_stop_fun",
"def masked_loss_func(loss_function):\n def masked_loss_fn(predictions, targets): \n assert targets.ndim == 1\n target_mask = T.neq(targets, -1) \n valid_inds = T.nonzero(target_mask)\n return loss_function(predictions[valid_inds], targets[valid_inds])\n return masked_loss_fn",
"def Stop(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {\"Arg1\": self}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 2)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"stop\", payload=payload, response_object=None)",
"def Stop(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = { \"Arg1\": self }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('stop', payload=payload, response_object=None)",
"def _stop(self):",
"def stop(self, **kwargs):\n self.turn_off()",
"def weight_decay_loss_wrapper(\n loss_fn = gin.REQUIRED,\n factor = gin.REQUIRED,\n exclude = (),\n):\n traversal = traverse_util.ModelParamTraversal(\n lambda path, _: all([e not in path for e in exclude]))\n\n def wrapped_loss(outputs, *args, params, **kwargs):\n losses = loss_fn(outputs, *args, **kwargs)\n weight_decay_params = list(traversal.iterate(params))\n weight_l2 = sum([jnp.sum(x**2) for x in weight_decay_params])\n weight_penalty = factor * 0.5 * weight_l2\n\n if isinstance(losses, dict):\n if 'model_loss' not in losses:\n raise ValueError(\n 'Losses must contain `model_loss` key as total model loss.')\n losses['pre_weight_penalty_model_loss'] = losses['model_loss']\n losses['model_loss'] += weight_penalty\n losses['l2_regularization_loss'] = weight_penalty\n elif isinstance(losses, jnp.ndarray):\n losses += weight_penalty\n else:\n raise ValueError('Encountered invalid loss type: ', type(losses))\n\n return losses\n\n return wrapped_loss",
"def disable(func):\n return func",
"def Stop(self):\n raise NotImplementedError",
"def stop(self, *args, **kwargs):\n return self(AbilityId.STOP, *args, **kwargs)",
"def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass",
"def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass",
"def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass",
"def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass",
"def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass",
"def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass",
"def stop_emission(self, detailed_signal): # reliably restored by inspect\n pass"
] | [
"0.58743846",
"0.5674448",
"0.56246555",
"0.55227876",
"0.55227876",
"0.5483376",
"0.5476944",
"0.54393923",
"0.54230165",
"0.5420852",
"0.541993",
"0.54120153",
"0.5409945",
"0.53929704",
"0.538413",
"0.5374481",
"0.5367693",
"0.5366256",
"0.5332575",
"0.53154844",
"0.5302389",
"0.5294792",
"0.5269419",
"0.52647454",
"0.52647454",
"0.52647454",
"0.52647454",
"0.52647454",
"0.52647454",
"0.52647454"
] | 0.5998561 | 0 |
Return df with log returns of long and short positions in r | def long_short_returns(r: Results) -> pd.DataFrame:
pos = r.positions
pos["return"] = np.log(pos["pnl"] / pos["open"].abs())
pos = pos.set_index("date_c")
long = pos[pos["open"] > 0]
short = pos[pos["open"] < 0]
combined = pd.DataFrame({"long": long["return"], "short": short["return"]})
combined = (combined + 1).fillna(1).cumprod()
return combined | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def log(df, cols, base=2, invert=None):\r\n if base == 2:\r\n for c in cols:\r\n df[f\"log2_{c}\"] = np.log2(df[c])\r\n elif base==10:\r\n for c in cols:\r\n df[f\"log10_{c}\"] = np.log10(df[c])\r\n else:\r\n print(\"This base is not implemented!\")\r\n if invert is not None:\r\n lcols = df.filter(regex=\"^log\").columns\r\n df[lcols] = df[lcols] * invert\r\n return df",
"def log_transform(df, cols=None):\n\n if not cols:\n small_df = only_positive_values(df)\n else:\n cols = cols_in_df(df, partial_col_names=columns_to_transform_log,\n not_present=['MACD'])\n small_df = df[cols]\n small_df = only_positive_values(df[cols])\n result = np.log(small_df)\n result.columns = ['log ({})'.format(col) for col in small_df]\n # if replace_infs:\n # # result = result.replace(-np.inf, np.nan)\n # for col in result:\n # result[col] = result[col].fillna(result[col].min())\n return out(SETTINGS, df, result)",
"def log10_inplace(a):",
"def generate_log_df(log_columns, log_values):\n return pd.DataFrame(dict(zip(log_columns, log_values)), index=[0])",
"def logrels(rets):\n return np.log(rets + 1)",
"def nloglikeobs(self, params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx",
"def log_column(data: DataFrame, column: str):\n return data[column].map(lambda x: np.log(np.absolute(x)))",
"def compute_log(tx, index_log, mean=[], std=[]):\n tx_new = np.log10(3+abs(tx[:,index_log]))\n return standardize(tx_new,mean,std)",
"def log(amount, start, stop, truncated, sequence):\n ratio = 10 ** (len(str(start)) + 1)\n for x in range(start, amount):\n # y = abs(round(math.log(x, 1)))\n y = abs(round(math.log1p(x) * ratio * 5))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def log_Schechter_log(self, logl, alpha, logls, logl0):\n phi = (logl - logls) * (alpha+1) * np.log(10.) - np.power(10., logl-logls)\n lik = phi.copy()\n lik [logl < logl0] = -1e99\n return lik",
"def log2_inplace(a):",
"def loglf2py(store):\n loglike=0.0\n return loglinear.logl(store['xb'],store['xmatf'], store['beta'],store['yvec'],loglike)",
"def log_inplace(a):",
"def loglog(self, **kwargs):\n return self.plot(plot=pylab.loglog, **kwargs)",
"def compute_log_likelihoods(df, error_rate=1e-3):\n df['log_likelihood_absent'] = df.apply(calculate_likelihood_absent, axis=1, args=(error_rate,))\n df['log_likelihood_present'] = df.apply(calculate_likelihood_present, axis=1, args=(error_rate,))\n\n return df",
"def log_shift(data):\n result = [np.log(1 + np.abs(d.copy())) for d in data]\n return result",
"def value_to_log(self, reverse: bool = False):\n if reverse:\n self.tsdf = np.exp(self.tsdf)\n self.valuetype = 'Price(Close)'\n self.tsdf.columns = pd.MultiIndex.from_product([[self.label], [self.valuetype]])\n else:\n self.tsdf = np.log(self.tsdf / self.tsdf.iloc[0])\n self.valuetype = 'Return(Total)'\n self.tsdf.columns = pd.MultiIndex.from_product([[self.label], [self.valuetype]])\n return self",
"def __convert_to_log(self):\n for i in range(self.nStates):\n if self.pi[i]>0:\n self.pi[i]=log(self.pi[i])\n else:\n self.pi[i]=float('-inf')\n for j in range(self.nStates):\n if self.t[i][j]>0:\n self.t[i][j]=log(self.t[i][j])\n else:\n self.t[i][j]=float('-inf')\n for j in range(self.nObs):\n if self.e[i][j]>0:\n self.e[i][j]=log(self.e[i][j])\n else:\n self.e[i][j]=float('-inf')\n self.logdomain=True",
"def log_trans(data, test=False):\n logs = ['Administrative', 'Administrative_Duration', 'Informational',\n 'Informational_Duration', 'ProductRelated',\n 'ProductRelated_Duration', 'BounceRates', 'ExitRates',\n 'PageValues']\n if test:\n data_test = data.loc[data['Train'] == 0]\n data = data.loc[data['Train'] == 1]\n\n for col in logs:\n zero_val = float((min(i for i in list(data[col]) if i > 0))/2)\n data[col] = data[col].apply(lambda x: zero_val if x == 0 else x)\n data[col] = data[col].apply(lambda x: np.log(x))\n if test:\n data_test[col] = data_test[col].apply(lambda x:\n zero_val\n if x == 0 else x)\n data_test[col] = data_test[col].apply(lambda x: np.log(x))\n if test:\n data = pd.concat([data, data_test])\n return data",
"def LogSp(start,stop,num=50,**kwargs):\n assert 'base' not in kwargs, \"The base is irrelevant.\"\n return np.logspace(log10(start),log10(stop),num=num,base=10)",
"def get_log_likelihoods(self, short=False):\n if short:\n return self.memory.get('log_likelihoods', self.s, self.e)\n else:\n return np.concatenate(\n (\n self.memory.get('log_likelihoods', self.s, self.e),\n self.tail_batch.log_likelihoods\n ), axis=0\n )",
"def test_loglike(dlm,Cl,noise,beam):\n lmax = Cl.shape[0]\n tt_exp = -1./2 * np.real(np.vdot(dlm.T,hp.almxfl(dlm,1/(beam[:lmax]**2*Cl[:,1]+noise[:lmax]))))\n #plt.plot(Cl[:,1])\n tt_det = - 1./2 *(np.arange(1,lmax+1)*np.log((noise[:lmax]+Cl[:,1]*beam[:lmax]**2))).sum() \n tt_f = tt_exp + tt_det\n return tt_exp,tt_det,tt_f#,Cl[:,1]",
"def wrap_log(to_wrap):\r\n with np.errstate(divide='ignore'):\r\n result = np.log(to_wrap)\r\n return result",
"def logtomo(self, psi):\n return -1j / self.wavenumber() * self.mlog(psi) / self.voxelsize",
"def log2FC_data(data):\n log2FC_df = pd.DataFrame()\n for i in range(0,len(data.columns),10):\n i = i\n data_subset = data[data.columns[i:i+10]]\n log_data = data_subset.apply(np.log2)\n \n new_df = pd.DataFrame()\n for j in range(len(log_data.columns)):\n tmp_col = log_data.iloc[:, j].name\n tmp_df = log_data.iloc[:,0] - log_data.iloc[:,j]\n new_df[tmp_col] = tmp_df\n \n log2FC_df = log2FC_df.append(new_df.T)\n log2FC_df = log2FC_df.T\n return log2FC_df",
"def get_log(self, logs, ref=None):\n log_list = list()\n output_list = list()\n if isinstance(logs, str):\n log_list.append(logs)\n elif isinstance(logs, list):\n log_list = logs\n for name in log_list:\n new_log = Log()\n new_log.name = name.lower()[:3] + '_' + \\\n self.well_name.lower().replace('-', '_')\n new_log.units = self.unit_dict[name]\n new_log.descr = name\n new_log.depth = np.array(self.data_frame['Depth(m)'].values)\n new_log.data = np.array(self.data_frame[\n '{}({})'.format(name, self.unit_dict[name])].values)\n if ref == 'sea':\n shift = int(self.kelly_bushing // 0.1)\n shift_data = np.full_like(new_log.data, np.nan, dtype=np.double)\n shift_data[:-shift] = new_log.data[shift:]\n new_log.data = shift_data\n output_list.append(new_log)\n if isinstance(logs, str):\n return output_list[0]\n else:\n return output_list",
"def create_log_column(self, df, name):\n # print(f\"mean of name {name} is {df[name].mean()}\")\n df['log_'+name] = np.log10(df[name].replace(0, df[name].mean()))\n df = self.map_values_to_color(df, 'log_'+name)\n return df",
"def _convert_normlogprice(self, series):\n try:\n return np.log(series.div(series[0]))\n except:\n raise TypeError('ERROR: Could not transform prices to log function. Check price history data.')",
"def log_likelihoods(self):\n return self.__data_frame.loc[:, \"ll\":\"ll\"].values[:-1]",
"def logarithm(requestContext, seriesList, base=10):\n results = []\n for series in seriesList:\n newValues = []\n for val in series:\n if val is None:\n newValues.append(None)\n elif val <= 0:\n newValues.append(None)\n else:\n newValues.append(math.log(val, base))\n newName = \"log(%s, %s)\" % (series.name, base)\n newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)\n newSeries.pathExpression = newName\n results.append(newSeries)\n return results"
] | [
"0.6381855",
"0.63051254",
"0.5932502",
"0.5920916",
"0.5877851",
"0.5838608",
"0.5811746",
"0.5811098",
"0.5806278",
"0.5783296",
"0.57466453",
"0.5741185",
"0.57286114",
"0.5686589",
"0.5680549",
"0.566111",
"0.5657998",
"0.5602356",
"0.55857456",
"0.5583285",
"0.5571597",
"0.55306375",
"0.5499071",
"0.548745",
"0.5468484",
"0.54664284",
"0.5414307",
"0.5408716",
"0.5404725",
"0.53994757"
] | 0.704547 | 0 |
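A sketch of the inputs long_short_returns expects. Results is whatever object the unshown backtest code returns; a stand-in namespace with a positions frame carrying the three columns the function actually uses (date_c, open, pnl) is assumed here, with made-up numbers.

import pandas as pd
from types import SimpleNamespace

positions = pd.DataFrame(
    {
        "date_c": pd.to_datetime(["2023-01-05", "2023-01-09"]),
        "open": [1000.0, -1500.0],  # positive open -> long position, negative -> short
        "pnl": [50.0, 30.0],
    }
)
r = SimpleNamespace(positions=positions)
curves = long_short_returns(r)
# DataFrame indexed by date_c with cumulative 'long' and 'short' columns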
Alternative (volume) grouper. Vector based. Difference with numba_tools grouper is about the treatment of the first/last bar in a grouped candle. This method has a small lookahead bias, i.e. is not usable for anything serious. | def vector_grouper(
df: pd.DataFrame,
number: int,
field: str = "volume",
label: Literal["left", "right"] = "left",
) -> pd.DataFrame:
df = df.copy()
df = df.reset_index(drop=False)
df["index_"] = (df[field].cumsum() // number).shift().fillna(0)
return (
df.groupby("index_")
.agg(
{
"date": "first",
"open": "first",
"high": "max",
"low": "min",
"close": "last",
"volume": "sum",
"barCount": "sum",
}
)
.set_index("date")
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def grouped_bins(self):\n # Load the vector version #\n df = self.grouped_vectors\n # Empty data frame to contain result #\n result = pandas.DataFrame()\n # Iterate #\n for i, row in df.iterrows():\n # Compute a data frame containing the recreated bins #\n current = binner(row[self.sum_col], self.sum_col, self.bin_width)\n # Keep the current values of the group columns as an index #\n col_values = [row[col] for col in self.group_cols]\n current = current.assign(**dict(zip(self.group_cols, col_values)))\n current = current.set_index(self.group_cols)\n # Append #\n result = result.append(current)\n # Return #\n return result",
"def group(seq):\n pass # replace with your solution",
"def Group(self) -> _n_5_t_0:",
"def Group(self) -> _n_5_t_0:",
"def group_by(self, *args) -> B[B, E]:",
"def group(df, dvmin, dvmax, step):\n\tr = step/2\n\tres = []\n\n\tfor ticker in range(dvmin, dvmax, step):\n\t\t#select values by left-right difference in sum in range (x-r, x+r). x is the middle value of a bucket. \n\t\tsubgroup = df.loc[(df['diff']>ticker-r) & (df['diff']<ticker+r)\n\t\t\t& (df['choice'] != 0.5)]\n\t\t#count frequency of choosing left\n\t\tnum = subgroup['choice'].sum()\n\t\t#total number of datapoints in the bucket\n\t\tdenom = subgroup.shape[0]\n\t\t#calculate and append the prob. append 0 if empty bucket\n\t\tres.append(num/denom) if denom else res.append(0)\n\treturn res",
"def test_grouped(self):\n\n grouped_avp = avp.GroupedAVP(\n 260, [\n avp.UTF8StringAVP(1, 'Hello'),\n avp.UTF8StringAVP(1, 'World'),\n ],\n )\n self._compare_avp(\n grouped_avp,\n (\n b'\\x00\\x00\\x01\\x04\\x00\\x00\\x00(\\x00\\x00'\n b'\\x00\\x01\\x00\\x00\\x00\\rHello\\x00\\x00\\x00'\n b'\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\rWorld\\x00'\n b'\\x00\\x00'\n ),\n )\n\n self._compare_avp(\n avp.GroupedAVP(260, [grouped_avp, grouped_avp]),\n (\n b'\\x00\\x00\\x01\\x04\\x00\\x00\\x00X\\x00\\x00\\x01\\x04'\n b'\\x00\\x00\\x00(\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\rHello'\n b'\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\rWorld'\n b'\\x00\\x00\\x00\\x00\\x00\\x01\\x04\\x00\\x00\\x00(\\x00\\x00'\n b'\\x00\\x01\\x00\\x00\\x00\\rHello\\x00\\x00\\x00\\x00\\x00'\n b'\\x00\\x01\\x00\\x00\\x00\\rWorld\\x00\\x00\\x00'\n ),\n )\n\n # Test filtering\n self.assertEqual(len(list(grouped_avp.filter_avps(0, 1))), 2)\n self.assertEqual(len(list(grouped_avp.filter_avps(0, 2))), 0)\n\n # Test find returns first\n self.assertEqual(grouped_avp.find_avp(0, 1).value, 'Hello')\n self.assertEqual(grouped_avp.find_avp(0, 2), None)",
"def _split_groups(self, ge_vec, mask):\n functional_grp = ge_vec[mask]\n diff = set(ge_vec) - set(functional_grp)\n control_grp = np.array(list(diff))\n return control_grp, functional_grp",
"def data_group():\n ...",
"def imprint_merge_each_group():\r\n \r\n G = cubit.get_entities(\"group\")\r\n for gid in G:\r\n vid = cubit.get_group_volumes(gid)\r\n if len(vid)>1:\r\n cubit.cmd(f\"imprint vol {list_to_str(vid)}\")\r\n cubit.cmd(f\"merge vol {list_to_str(vid)}\")",
"def data_for_grouping():\n return RaggedArray(\n [[1, 0], [1, 0], [], [], [0, 0], [0, 0], [1, 0], [2, 0]])",
"def squeeze_grouped_nb(a, group_lens, reduce_func_nb, *args):\n out = np.empty((a.shape[0], len(group_lens)), dtype=np.float_)\n from_col = 0\n for group in range(len(group_lens)):\n to_col = from_col + group_lens[group]\n for i in range(a.shape[0]):\n out[i, group] = reduce_func_nb(i, group, a[i, from_col:to_col], *args)\n from_col = to_col\n return out",
"def reduce_grouped_nb(a, group_lens, reduce_func_nb, *args):\n out = np.empty(len(group_lens), dtype=np.float_)\n from_col = 0\n for group in range(len(group_lens)):\n to_col = from_col + group_lens[group]\n out[group] = reduce_func_nb(group, a[:, from_col:to_col], *args)\n from_col = to_col\n return out",
"def setBarGroups(ngroups, gap):\n dislin.bargrp(ngroups, gap)",
"def extrude_balcony_grouped(bm, group, depth):\r\n\r\n def splitones(num):\r\n \"\"\"Return a list of numbers that add up to num where the largest value is one\"\"\"\r\n fract, intr = math.modf(num)\r\n result = [1 for _ in range(int(intr))]\r\n if fract > 0.0:\r\n result.append(fract)\r\n return result\r\n\r\n result = []\r\n inset_faces = group[:]\r\n valid_normals = [f.normal.to_tuple(3) for f in group]\r\n for num in splitones(depth):\r\n res = bmesh.ops.inset_region(\r\n bm, faces=inset_faces, depth=num, use_even_offset=True, use_boundary=True)[\"faces\"]\r\n bmesh.ops.dissolve_degenerate(\r\n bm, dist=0.001, edges=list({e for f in inset_faces for e in f.edges}))\r\n inset_faces = validate(inset_faces)\r\n inset_faces.extend([f for f in res if f.normal.to_tuple(3) in valid_normals])\r\n result.extend(res)\r\n return [f for f in validate(result) if f.normal.z > 0]",
"def additional_splitting(y_grouped, tX_grouped, ids_grouped, unwanted_value):\n y_grouped_new, tX_grouped_new, ids_grouped_new, masks_new, counts_new = [], [], [], [], []\n for i in range(len(tX_grouped)):\n y, tX, ids, masks, counts = split_in_groups_1(y_grouped[i], tX_grouped[i], ids_grouped[i], unwanted_value)\n for j in range(len(tX)):\n y_grouped_new.append(y[j])\n tX_grouped_new.append(tX[j])\n ids_grouped_new.append(ids[j])\n masks_new.append(masks[j])\n counts_new.append(counts[j])\n return y_grouped_new, tX_grouped_new, ids_grouped_new, masks_new, counts_new",
"def treat_volume(volume):\n labels = measure.label(volume.dataobj, background=0, connectivity=2)\n new_volume = np.asarray(volume.dataobj)\n new_volume[labels > 1] = 0\n new_volume = nib.Nifti1Image(new_volume, volume.affine)\n return new_volume",
"def full_groupby(iterable, key=None):\n return groupby(sorted(iterable, key=key), key=key)",
"def obv_custom_nb(close_ts, volume_ts):\n obv = generic_nb.set_by_mask_mult_nb(volume_ts, close_ts < generic_nb.fshift_nb(close_ts, 1), -volume_ts)\n obv = generic_nb.cumsum_nb(obv)\n return obv",
"def determine_axes_to_group_by(is_volume: bool) -> Set[Axes]:\n if is_volume:\n return {Axes.ROUND, Axes.CH}\n else:\n return {Axes.ROUND, Axes.CH, Axes.ZPLANE}",
"def reduce_grouped_to_array_nb(a, group_lens, reduce_func_nb, *args):\n out_inited = False\n from_col = 0\n for group in range(len(group_lens)):\n to_col = from_col + group_lens[group]\n group_out = reduce_func_nb(group, a[:, from_col:to_col], *args)\n if not out_inited:\n out = np.full((group_out.shape[0], len(group_lens)), np.nan, dtype=np.float_)\n out_inited = True\n out[:, group] = group_out\n from_col = to_col\n return out",
"def gallery_groups(self):\n\n \"Collect data into fixed-length chunks or blocks\"\n # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\n n = 3\n iterable = self.context['gallery'].values()\n args = [iter(iterable)] * 3\n return izip_longest(fillvalue=None, *args)",
"def split_volume(image_name, output_name):\n nim = nib.load(image_name)\n Z = nim.header['dim'][3]\n affine = nim.affine\n image = nim.get_data()\n\n for z in range(Z):\n image_slice = image[:, :, z]\n image_slice = np.expand_dims(image_slice, axis=2)\n affine2 = np.copy(affine)\n affine2[:3, 3] += z * affine2[:3, 2]\n nim2 = nib.Nifti1Image(image_slice, affine2)\n nib.save(nim2, '{0}{1:02d}.nii.gz'.format(output_name, z))",
"def to_bar(self):\n group = GroupData()\n return group",
"def data_for_grouping() -> NoReturn:\n raise NotImplementedError",
"def data_for_grouping() -> NoReturn:\n raise NotImplementedError",
"def grouped_vectors(self):\n # Group #\n grouped = self.simulated.groupby(self.group_cols)\n # Iterate #\n result = []\n for col_values, df in grouped:\n # Keep the current values of the group columns #\n current = dict(zip(self.group_cols, col_values))\n # Compute a discrete numpy vector #\n current[self.sum_col] = aggregator(df, self.sum_col, self.bin_col)\n # Make a series and append #\n result.append(pandas.Series(current))\n # Put all series into a data frame #\n result = pandas.DataFrame(result)\n # Return #\n return result",
"def group_hook(self, accumulation, group):\n group_hook_directions(accumulation, group)\n group_hook_filter_directions(accumulation, group)\n if compute_gammas:\n group_hook_gammas(accumulation, group)\n if compute_lambdas:\n group_hook_lambdas(accumulation, group)\n group_hook_memory_cleanup(accumulation, group)",
"def test_group(self):\n obs_group, obs_nogroup = group(self.seqstruct, 0.75)\n exp_group = {'cluster_337': ['cluster_343', 'cluster_345',\n 'cluster_339'],\n 'cluster_347': ['cluster_338'],\n 'cluster_344': ['cluster_340']}\n exp_nogroup = [self.seqstruct[6], self.seqstruct[8]]\n\n self.assertEqual(obs_group, exp_group)\n self.assertEqual(obs_nogroup, exp_nogroup)",
"def group_and_vote_fractions():\n group_share = np.array([0, 0.2, 0.4, 0.6, 0.8])\n vote_share = np.array([0, 0.2, 0.4, 0.6, 0.8])\n return group_share, vote_share"
] | [
"0.55901647",
"0.5460391",
"0.5390095",
"0.5390095",
"0.5248864",
"0.5171182",
"0.50614727",
"0.50094837",
"0.496163",
"0.4935842",
"0.4923516",
"0.49227005",
"0.49226493",
"0.49063525",
"0.49047896",
"0.49033874",
"0.48544985",
"0.48540577",
"0.4852908",
"0.48239028",
"0.48168728",
"0.48072618",
"0.47967175",
"0.4794197",
"0.47749045",
"0.47749045",
"0.47463828",
"0.47444057",
"0.47428447",
"0.47352174"
] | 0.6425455 | 0 |
Updates all structures according to the success or failure of a given command. The given request must also contain a response (not None). | def apply_response(self, request):
assert request.response is not None
response = request.response
other_addr = self.get_other_address()
self.processor.process_command(
other_addr=other_addr,
command=request.command,
cid=request.cid,
status_success=request.is_success(),
error=response.error if response.error else None
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def UpdateFromResponse(self, response):\n for key in self.status:\n self.status[key] = response[key]",
"def process_request(self, request):\n self.req = request\n command = self.get_command()\n file_handler = filehandler.FileHandler(command)\n file_handler.handle_command()\n return command.result",
"def handle_response(self, response):\n assert isinstance(response, CommandResponseObject)\n\n # Is there was a protocol error return the error.\n if response.is_protocol_failure():\n raise OffChainProtocolError.make(response.error)\n\n request_cid = response.cid\n\n # If we have already processed the response.\n request = self.committed_commands.try_get(request_cid)\n if request:\n # Check the reponse is the same and log warning otherwise.\n if request.response != response:\n excp = OffChainException(\n 'Got different responses with cid {request_cid}.'\n )\n excp.response1 = request.response\n excp.response2 = response\n raise excp\n # This request may have concurrent modification\n # read db to get latest status\n return self.committed_commands[request_cid].is_success()\n\n request = self.my_pending_requests.try_get(request_cid)\n if not request:\n raise OffChainException(\n f'Response for unknown cid {request_cid} received.'\n )\n\n # Read and write back response into request.\n request.response = response\n\n # Add the next command to the common sequence.\n self.committed_commands[request.cid] = request\n del self.my_pending_requests[request_cid]\n self.register_dependencies(request)\n self.apply_response(request)\n return request.is_success()",
"def _handle( self, state, msg ):\n\t\tstate.requests[ msg.id ] = msg\n\t\tstatistics.requests.new()\n\t\tCORE.info( 'Incoming request of type %s' % msg.command )\n\t\tif not state.authenticated and msg.command != 'AUTH':\n\t\t\tres = Response( msg )\n\t\t\tres.status = BAD_REQUEST_UNAUTH\n\t\t\tself._response( res, state )\n\t\telif msg.command == 'AUTH':\n\t\t\tstate.authResponse = Response( msg )\n\t\t\ttry:\n\t\t\t\tstate.authenticate( msg.body[ 'username' ], msg.body[ 'password' ] )\n\t\t\texcept ( TypeError, KeyError ), e:\n\t\t\t\tstate.authResponse.status = BAD_REQUEST_INVALID_OPTS\n\t\t\t\tstate.authResponse.message = 'insufficient authentification information'\n\t\telif msg.command == 'GET' and ( 'ucr' in msg.arguments or 'info' in msg.arguments ):\n\t\t\tresponse = Response( msg )\n\t\t\tresponse.result = {}\n\t\t\tresponse.status = SUCCESS\n\t\t\tif 'ucr' in msg.arguments:\n\t\t\t\tif not isinstance(msg.options, (list, tuple)):\n\t\t\t\t\traise InvalidOptionsError\n\t\t\t\tfor value in msg.options:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif not value:\n\t\t\t\t\t\t\t# make sure that 'value' is non-empty\n\t\t\t\t\t\t\tCORE.warn('Empty UCR variable requested. Ignoring value...')\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif value.endswith('*'):\n\t\t\t\t\t\t\tvalue = value[ : -1 ]\n\t\t\t\t\t\t\tfor var in filter( lambda x: x.startswith( value ), ucr.keys() ):\n\t\t\t\t\t\t\t\tresponse.result[ var ] = ucr.get( var )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresponse.result[ value ] = ucr.get( value )\n\t\t\t\t\texcept ( TypeError, IndexError, AttributeError ), e:\n\t\t\t\t\t\tCORE.warn('Invalid UCR variable requested: %s' % (value,))\n\t\t\t\t\t\tresponse.status = BAD_REQUEST_INVALID_OPTS\n\t\t\t\t\t\tresponse.message = _('Invalid UCR variable requested: %s') % (value,)\n\n\t\t\telif 'info' in msg.arguments:\n\t\t\t\ttry:\n\t\t\t\t\tfd = gzip.open( '/usr/share/doc/univention-management-console-server/changelog.Debian.gz' )\n\t\t\t\t\tline = fd.readline()\n\t\t\t\t\tfd.close()\n\t\t\t\t\tmatch = MagicBucket.CHANGELOG_VERSION.match( line )\n\t\t\t\t\tif not match:\n\t\t\t\t\t\traise IOError\n\t\t\t\t\tresponse.result[ 'umc_version' ] = match.groups()[ 0 ]\n\t\t\t\t\tresponse.result[ 'ucs_version' ] = '{0}-{1} errata{2} ({3})'.format( ucr.get( 'version/version', '' ), ucr.get( 'version/patchlevel', '' ), ucr.get( 'version/erratalevel', '0' ), ucr.get( 'version/releasename', '' ) )\n\t\t\t\t\tresponse.result[ 'server' ] = '{0}.{1}'.format( ucr.get( 'hostname', '' ), ucr.get( 'domainname', '' ) )\n\t\t\t\t\tresponse.result[ 'ssl_validity_host' ] = int( ucr.get( 'ssl/validity/host', '0' ) ) * 24 * 60 * 60 * 1000\n\t\t\t\t\tresponse.result[ 'ssl_validity_root' ] = int( ucr.get( 'ssl/validity/root', '0' ) ) * 24 * 60 * 60 * 1000\n\t\t\t\texcept IOError:\n\t\t\t\t\tresponse.status = BAD_REQUEST_FORBIDDEN\n\t\t\t\t\tpass\n\n\t\t\tself._response( response, state )\n\t\telif msg.command == 'STATISTICS':\n\t\t\tresponse = Response( msg )\n\t\t\ttry:\n\t\t\t\tpwent = pwd.getpwnam( state.username )\n\t\t\t\tif not pwent.pw_uid in ( 0, ):\n\t\t\t\t\traise KeyError\n\t\t\t\tCORE.info( 'Sending statistic data to client' )\n\t\t\t\tresponse.status = SUCCESS\n\t\t\t\tresponse.result = statistics.json()\n\t\t\texcept KeyError:\n\t\t\t\tCORE.info( 'User not allowed to retrieve statistics' )\n\t\t\t\tresponse.status = BAD_REQUEST_FORBIDDEN\n\t\t\tself._response( response, state )\n\t\telse:\n\t\t\t# inform processor\n\t\t\tif not state.processor:\n\t\t\t\tstate.processor = Processor( *state.credentials() )\n\t\t\t\tcb = 
notifier.Callback( self._response, state )\n\t\t\t\tstate.processor.signal_connect( 'response', cb )\n\t\t\tstate.processor.request( msg )",
"def handle_request(self, request):\n request.command.set_origin(self.other)\n\n # Keep track of object locks here.\n create_versions = request.command.get_new_object_versions()\n depends_on_version = request.command.get_dependencies()\n\n # Always answer old requests.\n previous_request = self.committed_commands.try_get(request.cid)\n if previous_request:\n if previous_request.is_same_command(request):\n\n # Invariant\n assert all(str(cv) in self.object_locks\n for cv in create_versions)\n\n # Re-send the response.\n logger.debug(\n f'(other:{self.other_address_str}) '\n f'Handle request that alerady has a response: '\n f'cid #{request.cid}.',\n )\n return previous_request.response\n else:\n # There is a conflict, and it will have to be resolved\n # TODO[issue 8]: How are conflicts meant to be resolved?\n # With only two participants we cannot tolerate errors.\n response = make_protocol_error(\n request, code=OffChainErrorCode.conflict)\n\n response.previous_command = previous_request.command\n logger.error(\n f'(other:{self.other_address_str}) '\n f'Conflicting requests for cid {request.cid}'\n )\n return response\n\n missing_deps, used_deps, locked_deps = self.get_dep_locks(request)\n # Check potential protocol errors and exit\n if missing_deps:\n # Some dependencies are missing but may become available later?\n response = make_protocol_error(\n request,\n code=OffChainErrorCode.wait,\n message=f'dependencies {\", \".join(missing_deps)} are missing',\n )\n return response\n\n # Note: if locked depedency exists and self is client, yield locks to server\n # (i.e. let this command take over conflict objects)\n if locked_deps and self.is_server():\n # The server requests take precedence, so make this wait.\n response = make_protocol_error(\n request,\n code=OffChainErrorCode.wait,\n message=f'dependencies {\", \".join(locked_deps)} are locked',\n )\n return response\n\n # Check potential command errors and apply to request\n if used_deps:\n response = make_command_error(\n request,\n code=OffChainErrorCode.used_dependencies,\n message=f'dependencies {\", \".join(used_deps)} were used',\n )\n\n else: # Everything looks good, try to check command's integrity\n try:\n command = request.command\n my_address = self.get_my_address()\n other_address = self.get_other_address()\n\n self.processor.check_command(\n my_address, other_address, command)\n\n response = make_success_response(request)\n except CommandValidationError as e:\n response = make_command_error(\n request,\n code=e.error_code,\n message=e.error_message)\n\n # Write back to storage\n request.response = response\n\n self.committed_commands[request.cid] = request\n self.register_dependencies(request)\n self.apply_response(request)\n\n return request.response",
"def update_request(request_entity, username):\n result = ResponseEntity()\n try:\n if check_request_authentication(id, username):\n raise Exception, 'This request not belong to this user'\n request_result = convert_RequestEntity_to_Request(request_entity)\n if request_result.success == True:\n request_result.data.save()\n result = request_result\n except Exception as e:\n print str(e)\n result.success = False\n finally:\n return result",
"def update_response(self, response):\r\n self.stri.update_response(response)",
"def update_response(self, response):\r\n self.stri.update_response(response)",
"def handle_request(self, query, request):\r\n request_pdu = None\r\n response_pdu = \"\"\r\n slave_id = None\r\n function_code = None\r\n func_code = None\r\n slave = None\r\n response = None\r\n\r\n try:\r\n # extract the pdu and the slave id\r\n slave_id, request_pdu = query.parse_request(request)\r\n if len(request_pdu) > 0:\r\n (func_code, ) = struct.unpack(\">B\", request_pdu[0])\r\n # 43 is Device Information\r\n if func_code == 43:\r\n # except will throw MissingKeyError\r\n slave = self.get_slave(slave_id)\r\n response_pdu = slave.handle_request(request_pdu)\r\n # make the full response\r\n response = query.build_response(response_pdu)\r\n # get the slave and let him execute the action\r\n elif slave_id == 0:\r\n # broadcast\r\n for key in self._slaves:\r\n response_pdu = self._slaves[key].handle_request(request_pdu, broadcast=True)\r\n response = query.build_response(response_pdu)\r\n elif slave_id == 255:\r\n r = struct.pack(\">BB\", func_code + 0x80, 0x0B)\r\n response = query.build_response(r)\r\n else:\r\n slave = self.get_slave(slave_id)\r\n response_pdu = slave.handle_request(request_pdu)\r\n # make the full response\r\n response = query.build_response(response_pdu)\r\n except (IOError, MissingKeyError) as e:\r\n # If the request was not handled correctly, return a server error response\r\n r = struct.pack(\">BB\", func_code + 0x80, defines.SLAVE_DEVICE_FAILURE)\r\n response = query.build_response(r)\r\n\r\n if slave:\r\n function_code = slave.function_code\r\n\r\n return (response, {'request': request_pdu.encode('hex'),\r\n 'slave_id': slave_id,\r\n 'function_code': function_code,\r\n 'response': response_pdu.encode('hex')})",
"def update_response(self, response):\n\n if self.resource['operation'] in PyMongoEvent.INSERT_OPERATIONS:\n self.handle_insert_operations_response(response)\n\n elif self.resource['operation'] in PyMongoEvent.FILTER_OPERATIONS:\n self.handle_filter_operations_response(response)",
"def update_response(self, response):\r\n self.stri_ext.update_response(response)\r\n self.stri_int.update_response(response)",
"def _cache_response(self, packet):\n self.operator.update_message(packet.message_id, packet.from_node, packet.ret_parameters)",
"def handle_patch(self, api, command):\n return self._make_request_from_command('PATCH', command)",
"async def test_update(aresponses):\n aresponses.add(\n MATCH_HOST,\n \"/api/system/status\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"system-status.json\"),\n ),\n )\n\n aresponses.add(\n MATCH_HOST,\n \"/api/diskspace\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"diskspace.json\"),\n ),\n )\n\n aresponses.add(\n MATCH_HOST,\n \"/api/diskspace\",\n \"GET\",\n aresponses.Response(\n status=200,\n headers={\"Content-Type\": \"application/json\"},\n text=load_fixture(\"diskspace.json\"),\n ),\n )\n\n async with ClientSession() as session:\n client = Sonarr(HOST, API_KEY, session=session)\n response = await client.update()\n\n assert response\n assert isinstance(response.info, models.Info)\n assert isinstance(response.disks, List)\n\n response = await client.update()\n\n assert response\n assert isinstance(response.info, models.Info)\n assert isinstance(response.disks, List)",
"async def parse_handle_request(self, json_command):\n try:\n # Check signature\n vasp = self.vasp\n other_key = vasp.info_context.get_peer_compliance_verification_key(\n self.other_address_str\n )\n\n message = await other_key.verify_message(json_command)\n request = json.loads(message)\n\n # Parse the request whoever necessary.\n request = CommandRequestObject.from_json_data_dict(\n request, JSONFlag.NET\n )\n\n # Going ahead to process the request.\n logger.debug(\n f'(other:{self.other_address_str}) '\n f'Processing request seq #{request.cid}',\n )\n response = self.handle_request(request)\n\n except OffChainInvalidSignature as e:\n logger.warning(\n f'(other:{self.other_address_str}) '\n f'Signature verification failed. OffChainInvalidSignature: {e}'\n )\n response = make_parsing_error(f'{e}', code=OffChainErrorCode.invalid_signature)\n\n except JSONParsingError as e:\n logger.error(\n f'(other:{self.other_address_str}) JSONParsingError: {e}',\n exc_info=True,\n )\n response = make_parsing_error()\n except Exception as e:\n logger.error(\n f'(other:{self.other_address_str}) exception: {e}',\n exc_info=True,\n )\n raise e\n\n # Prepare the response.\n full_response = await self.package_response(response)\n return full_response",
"def post(self, request):\n result = None\n print(\"RESULT API: \", request.data)\n task_exec_update = TaskExecutionResult.objects.get(\n id=request.data['context']['taskExecutionID']\n )\n try:\n if request.data['result'].lower() == \"pass\":\n result = apisettings.PASS\n if request.data['result'].lower() == \"fail\":\n result = apisettings.FAIL\n if request.data['result'].lower() == \"abort\":\n result = apisettings.ABORT\n\n task_exec_update.result = result\n task_exec_update.save(update_fields=['result'])\n Log.summary_task_result(context=request.data.get(\"context\"), result=request.data['result'])\n return Response(status=HTTP_200_OK)\n except Exception as e:\n logger = Log.get_logger(__name__)\n logger.exception(e)\n return Response(status=HTTP_400_BAD_REQUEST)",
"def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:\n response[\"choices\"][0][\"text\"] += stream_response[\"choices\"][0][\"text\"]\n response[\"choices\"][0][\"finish_reason\"] = stream_response[\"choices\"][0][\n \"finish_reason\"\n ]\n response[\"choices\"][0][\"logprobs\"] = stream_response[\"choices\"][0][\"logprobs\"]",
"def _process_api_response(self, response, commands, raw_text=False):\n\n response_list = json.loads(response)\n if isinstance(response_list, dict):\n response_list = [response_list]\n\n # Add the 'command' that was executed to the response dictionary\n for i, response_dict in enumerate(response_list):\n response_dict[\"command\"] = commands[i]\n\n new_response = []\n for response in response_list:\n\n # Dectect errors\n self._error_check(response)\n\n # Some commands like \"show run\" can have a None result\n cmd_response = response.get(\"result\")\n if cmd_response is None:\n cmd_response = {}\n\n # Normalize the response data structure\n response_dict = {\"command\": response[\"command\"]}\n if response and raw_text:\n response_dict[\"result\"] = cmd_response.get(\"msg\")\n elif response and not raw_text:\n response_dict[\"result\"] = cmd_response.get(\"body\")\n else:\n raise NXAPIError(\"Unexpected value encountered processing response.\")\n new_response.append(response_dict)\n\n return new_response",
"def xnat_workflow_info_update(args):\n\trequest_url = \"http://\" + args.server + \"/data/services/workflows/workflowid/\" + args.workflow_id + \"?format=json\"\n\tprint(\"xnat_workflow_info update: request_url: \" + request_url)\n\tresponse = requests.get(request_url, auth=(args.username, args.password))\n\n\tjson_response = json.loads(response.text)\n\tjson_items = json_response['items']\n\tjson_item = json_items[0]\n\tjson_data_fields = json_item['data_fields']\n\n\tput_url = \"http://\" + args.server + \"/REST/workflows\"\n\n\t# workflow identifying information\n\tput_url += \"?wrk:workflowData/id=\" + json_data_fields['ID']\n \tput_url += \"&wrk:workflowData/pipeline_name=\" + json_data_fields['pipeline_name']\n\tput_url += \"&wrk:workflowData/launch_time=\" + json_data_fields['launch_time']\n\tput_url += \"&wrk:workflowData/data_type=\" + json_data_fields['data_type']\n\t# workflow information to be updated\n \tput_url += \"&wrk:workflowData/status=\" + \"In Progress\"\n \tput_url += \"&wrk:workflowData/current_step_id=\" + args.step_id\n\tput_url += \"&wrk:workflowData/step_description=\" + args.step_description\n\tput_url += \"&wrk:workflowData/percentageComplete=\" + args.percent_complete\n\tput_url += \"&wrk:workflowData/current_step_launch_time=\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n\tput_url = put_url.replace(\" \", \"%20\");\n\n\tprint(\"xnat_workflow_info update: put_url: \" + put_url)\n\n\tresponse = requests.put(put_url, auth=(args.username, args.password))\n\tif (response.status_code != 200):\n\t\tprint(\"Cannot update workflow\")\n\t\tprint(\"response.status_code: \" + str(response.status_code))\n\n\txnat_workflow_info_show(args)",
"def _handle_command(self, command: Command) -> None:\n if isinstance(command.result, LoadLabwareResult):\n # If the labware load refers to an offset, that offset must actually exist.\n if command.result.offsetId is not None:\n assert command.result.offsetId in self._state.labware_offsets_by_id\n\n definition_uri = uri_from_details(\n namespace=command.result.definition.namespace,\n load_name=command.result.definition.parameters.loadName,\n version=command.result.definition.version,\n )\n\n self._state.definitions_by_uri[definition_uri] = command.result.definition\n\n self._state.labware_by_id[\n command.result.labwareId\n ] = LoadedLabware.construct(\n id=command.result.labwareId,\n location=command.params.location,\n loadName=command.result.definition.parameters.loadName,\n definitionUri=definition_uri,\n offsetId=command.result.offsetId,\n displayName=command.params.displayName,\n )\n\n elif isinstance(command.result, MoveLabwareResult):\n labware_id = command.params.labwareId\n new_location = command.params.newLocation\n new_offset_id = command.result.offsetId\n\n self._state.labware_by_id[labware_id].offsetId = new_offset_id\n self._state.labware_by_id[labware_id].location = new_location",
"def xnat_workflow_info_complete(args):\n\trequest_url = \"http://\" + args.server + \"/data/services/workflows/workflowid/\" + args.workflow_id + \"?format=json\"\n\tprint(\"xnat_workflow_info complete: request_url: \" + request_url)\n\tresponse = requests.get(request_url, auth=(args.username, args.password))\n\n\tjson_response = json.loads(response.text)\n\tjson_items = json_response['items']\n\tjson_item = json_items[0]\n\tjson_data_fields = json_item['data_fields']\n\n\tput_url = \"http://\" + args.server + \"/REST/workflows\"\n\t# workflow identifying information\n\tput_url += \"?wrk:workflowData/id=\" + json_data_fields['ID']\n \tput_url += \"&wrk:workflowData/pipeline_name=\" + json_data_fields['pipeline_name']\n\tput_url += \"&wrk:workflowData/launch_time=\" + json_data_fields['launch_time']\n\tput_url += \"&wrk:workflowData/data_type=\" + json_data_fields['data_type']\n\t# workflow information to be updated\n \tput_url += \"&wrk:workflowData/status=\" + \"Complete\"\n \tput_url += \"&wrk:workflowData/current_step_id=\" + \"-1\"\n\tput_url += \"&wrk:workflowData/step_description=\" + \"End\"\n\tput_url += \"&wrk:workflowData/percentageComplete=\" + \"100.0\"\n\tput_url += \"&wrk:workflowData/current_step_launch_time=\" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n\tput_url = put_url.replace(\" \", \"%20\");\n\n\tprint(\"xnat_workflow_info complete: put_url: \" + put_url)\n\n\tresponse = requests.put(put_url, auth=(args.username, args.password))\n\tif (response.status_code != 200):\n\t\tprint(\"Cannot update workflow\")\n\t\tprint(\"response.status_code: \" + str(response.status_code))\n\n\txnat_workflow_info_show(args)",
"def request(self, update, context):\n\n user = self.User(update)\n user = self.__find_matching_user(user)\n message = update.message.text.lower().split(\" \")\n if len(message) != 3:\n output = \"looks like you have a little mistake in the command\\n\" \\\n \"try /bus {bus number} {station number}\" \\\n \"for example /bus 14 3\"\n else:\n try:\n line = int(message[1])\n station = int(message[2])\n if len(user.stations) >= 3 and not self.data_base.check_admin(user):\n output = \"Sorry you cannot have more than 3 requests at a time.\"\n elif line in map(lambda x: x.line_number, user.stations) and not self.data_base.check_admin(user):\n station_to_cancel = \"Error\"\n for station in user.stations:\n if station.line_number == line:\n station_to_cancel = station.station_number\n output = \"looks like you already have a request for that line so you cannot place another one\\n\" \\\n f\"if that was a mistake you can cancel your request with /cancel {line} {station_to_cancel}\"\n elif line <= 0 or line > 999:\n output = f\"line {line}, doesn't exist. try a line withing the range of 1-999\"\n elif station <= 0 or station > BusController.MAX_STATION:\n output = f\"station {station}, doesn't exist. try a station withing the range of 1-{BusController.MAX_STATION}\"\n elif self.bus_controller.check_line(line):\n self.bus_controller.add_person_to_the_station(line, station)\n output = f\"request accepted, the bus is notified\"\n self.__message_sender.send_line(line, update_passengers=True)\n self.__add_to_users_dict(update)\n else:\n self.bus_controller.add_person_to_the_station(line, station)\n output = f\"request accepted, but there are no buses available for that line yet\"\n self.__add_to_users_dict(update)\n except Exception as e:\n print(e)\n output = \"both of the values you give must be number in order to work\" \\\n \"for example, /request 14 3\"\n\n self.data_base.log(user, update.message.text, output)\n update.message.reply_text(output)",
"def process(self, command):\n self.responses = []\n if(len(command) == 0):\n self.greet()\n return self.say_all()\n\n # prepare the command\n command = command.strip()\n\n # Some hard coded patterns: If first word is help, activate help\n # moudule\n help_regex = re.compile(\"help\\s+([^\\s]+)\")\n help_match = help_regex.match(command)\n if help_match:\n keyword = help_match.group(1).lower()\n # Try to find libraries with the keyword and print their help\n\n for lib in self.libraries:\n if keyword in lib.keywords:\n # Print the help text\n help_content = lib.help()\n self.display_help(help_content)\n return\n\n matched = False\n\n for lib in self.libraries:\n\n lib_obj = lib(command)\n\n # try to match the command with the library\n lib_obj.process_input()\n\n if lib_obj.status == HalLibrary.SUCCESS or lib_obj.status == HalLibrary.INCOMPLETE:\n\n matched = True\n\n lib_obj.process()\n\n resp = lib_obj.get_response()\n\n for r in resp:\n self.add_response(r)\n\n elif lib_obj.status == HalLibrary.ERROR:\n matched = True\n self.add_response(\"ERROR: \" + lib_obj.get_error())\n else:\n # Failure to match\n pass\n\n if not matched:\n self.add_response(\"I don't understand what you're saying.\")\n\n return self.say_all()",
"def _executeOperation(self, request:CSERequest, reqRi:str) -> Result:\n\t\t# Execute the actual operation\n\t\trequest.args.operation == Operation.RETRIEVE and (operationResult := CSE.dispatcher.processRetrieveRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.CREATE and (operationResult := CSE.dispatcher.processCreateRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.UPDATE and (operationResult := CSE.dispatcher.processUpdateRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.DELETE and (operationResult := CSE.dispatcher.processDeleteRequest(request, request.headers.originator)) is not None\n\n\t\t# Retrieve the <request> resource\n\t\tif (res := CSE.dispatcher.retrieveResource(reqRi)).resource is None:\t\n\t\t\treturn Result(status=False) \t\t\t\t\t\t\t\t\t\t\t\t\t\t# No idea what we should do if this fails\n\t\treqres = res.resource\n\n\t\t# Fill the <request>\n\t\treqres['ors'] = {\t# operationResult\n\t\t\t'rsc'\t: operationResult.rsc,\n\t\t\t'rqi'\t: reqres.rid,\n\t\t\t'to'\t: request.id,\n\t\t\t'fr'\t: reqres.org,\n\t\t\t'ot'\t: reqres['mi/ot'],\n\t\t\t'rset'\t: reqres.et\n\t\t}\n\t\tif operationResult.rsc in [ RC.OK, RC.created, RC.updated, RC.deleted ] :\t\t\t# OK, created, updated, deleted -> resource\n\t\t\treqres['rs'] = RequestStatus.COMPLETED\n\t\t\tif operationResult.resource is not None:\n\t\t\t\treqres['ors/pc'] = operationResult.resource.asDict()\n\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Error\n\t\t\treqres['rs'] = RequestStatus.FAILED\n\t\t\tif operationResult.dbg is not None:\n\t\t\t\treqres['ors/pc'] = { 'm2m:dbg' : operationResult.dbg }\n\n\t\t# Update in DB\n\t\treqres.dbUpdate()\n\n\t\treturn Result(resource=reqres, status=True)",
"def process_response(response):\n # Print it and exit with 1 if operation wasn't successful\n print(response['message'])\n if response['status'] != 'success':\n sys.exit(1)",
"async def _async_status_request(self) -> None:\n try:\n # status_response = await self._hass.async_add_executor_job(\n # self._mc_status.status, self._MAX_RETRIES_STATUS\n # )\n if self.access_token:\n if (time.time() - self.last_request) > 1800:\n phantom = await self._hass.async_add_executor_job(\n self._phantom_load\n )\n if phantom.status_code == HTTP_OK:\n self.phantom_load = round(phantom.json().get(\"power\") / 1000, 3)\n else:\n _LOGGER.warning(phantom.content)\n\n # Got answer to request, update properties.\n live = await self._hass.async_add_executor_job(self._live_data)\n\n if live.status_code == HTTP_OK:\n self.power_usage = round(abs(live.json().get(\"power\")) / 1000, 3)\n else:\n _LOGGER.warning(live.content)\n\n self.last_request = time.time()\n self._last_status_request_failed = False\n except OSError as error:\n # No answer to request, set all properties to unknown.\n self.power_usage = None\n self.phantom_load = None\n\n # Inform user once about failed update if necessary.\n if not self._last_status_request_failed:\n _LOGGER.warning(\n \"Updating the properties of '%s' failed - OSError: %s\",\n self.unique_id,\n error,\n )\n self._last_status_request_failed = True",
"def test_zmq_execution_update_success(self, pool):\n s, c = self._create_zmq_execution_mocks(pool)\n\n command = stellr.UpdateCommand(TEST_ZMQ)\n command.add_documents({'id': 69, 'value': 'sixty-nine'})\n data = command.execute()\n\n # check the mocks\n s.send.assert_called_once_with(\n ('/update/json?wt=json '\n '{\"add\": {\"doc\": {\"id\": 69, \"value\": \"sixty-nine\"}}}'))\n\n self.assertEqual(len(data), 2)\n self.assertEqual(data['responseHeader']['status'], 0)\n\n # verify name is returned\n data, name = command.execute(return_name=True)\n self.assertEqual(name, 'update')",
"async def _update_transfer_request(self, lta_rc: RestClient, bundle: BundleType) -> None:\n # look up the TransferRequest associated with the bundle\n request_uuid = bundle[\"request\"]\n self.logger.info(f\"Querying status of all bundles for TransferRequest {request_uuid}\")\n response = await lta_rc.request('GET', f'/Bundles?request={request_uuid}')\n results = response[\"results\"]\n deleted_count = len(results)\n self.logger.info(f\"Found {deleted_count} bundles for TransferRequest {request_uuid}\")\n # check each constituent bundle for \"deleted\" or \"finished\" status\n for bundle_uuid in results:\n result = await lta_rc.request('GET', f'/Bundles/{bundle_uuid}')\n self.logger.info(f\"Bundle {result['uuid']} has status {result['status']}\")\n if (result[\"status\"] == \"deleted\") or (result[\"status\"] == \"finished\"):\n deleted_count = deleted_count - 1\n else:\n self.logger.info(f'{result[\"status\"]} is not \"deleted\" or \"finished\"; TransferRequest {request_uuid} will not be updated.')\n # if there are some bundles that have not reached \"deleted\" or \"finished\" status\n if deleted_count > 0:\n self.logger.info(f'TransferRequest {request_uuid} has {deleted_count} Bundles still waiting for status \"deleted\" or \"finished\"')\n # put the bundle at the back of the line to be checked later\n bundle_id = bundle[\"uuid\"]\n right_now = now()\n patch_body: Dict[str, Union[bool, str]] = {\n \"claimed\": False,\n \"update_timestamp\": right_now,\n \"work_priority_timestamp\": right_now,\n }\n self.logger.info(f\"PATCH /Bundles/{bundle_id} - '{patch_body}'\")\n await lta_rc.request('PATCH', f'/Bundles/{bundle_id}', patch_body)\n return\n # otherwise, we're ready to complete the TransferRequest\n self.logger.info(f\"Updating TransferRequest {request_uuid} to mark as completed.\")\n right_now = now()\n patch_body = {\n \"claimant\": f\"{self.name}-{self.instance_uuid}\",\n \"claimed\": False,\n \"claim_timestamp\": right_now,\n \"status\": \"completed\",\n \"reason\": \"\",\n \"update_timestamp\": right_now,\n }\n self.logger.info(f\"PATCH /TransferRequests/{request_uuid} - '{patch_body}'\")\n await lta_rc.request('PATCH', f'/TransferRequests/{request_uuid}', patch_body)\n # update each of the constituent bundles to status \"finished\"\n for bundle_id in results:\n patch_body = {\n \"claimant\": f\"{self.name}-{self.instance_uuid}\",\n \"claimed\": False,\n \"claim_timestamp\": right_now,\n \"status\": self.output_status,\n \"reason\": \"\",\n \"update_timestamp\": right_now,\n }\n self.logger.info(f\"PATCH /Bundles/{bundle_id} - '{patch_body}'\")\n await lta_rc.request('PATCH', f'/Bundles/{bundle_id}', patch_body)",
"def job_update(request):\n try:\n\n if request.method == 'GET':\n query_dict = request.GET\n else:\n query_dict = json.loads(request.body)\n\n update = {}\n p_update = {}\n\n for key in ['t_id', 'file_link']:\n if key in query_dict:\n update['job.' + key] = query_dict[key]\n if 'status' in query_dict:\n p_update['job.status'] = {\n 'status': query_dict['status'],\n 'time': datetime.now()\n }\n\n for key in ['customer_count', 'sms_sent', 'sms_failed', 'errors']:\n if key in query_dict:\n update['job.report.' + key] = query_dict[key]\n\n if 'id' not in query_dict or not (update or p_update):\n return jsonResponse({\"success\": False, \"query\": query_dict, \"update\": update, \"p_update\": p_update})\n else:\n oid = query_dict['id']\n if oid.endswith('_segment'):\n oid = oid.replace('_segment', '')\n collection = db.segment_jobs\n else:\n collection = db.jobs\n\n final_update = {}\n if update:\n final_update[\"$set\"] = update\n if p_update:\n final_update[\"$push\"] = p_update\n\n collection.update_one({\"_id\": ObjectId(oid)}, final_update)\n return jsonResponse({\"success\": True})\n except Exception, e:\n return basic_error(e)",
"def process_cmd(self, cmd):\n\n resp = self.COMMANDS[cmd.cmd](cmd)\n\n logger.debug(\"Resp: %s\" % resp)\n # send to resp_queue\n # if type == G.CTRL_TYPE:\n #\n # response = json.dumps((corr_id, routing_key, resp))\n # logger.debug(\"Sending response: %s\" % response)\n # self.out_queue.put(response)\n\n response = cmd.make_response(resp)\n logger.debug(\"Sending response: %s\" % response)\n self.out_queue.put(str(response))"
] | [
"0.5620076",
"0.54718906",
"0.54160595",
"0.5376729",
"0.53587043",
"0.5354233",
"0.52760416",
"0.52760416",
"0.52670175",
"0.52434963",
"0.5225124",
"0.5163943",
"0.51545334",
"0.5146088",
"0.51351076",
"0.51208454",
"0.5117091",
"0.51013863",
"0.5090993",
"0.5047707",
"0.5043555",
"0.5043161",
"0.50405616",
"0.502956",
"0.50122404",
"0.50079316",
"0.50068015",
"0.49994874",
"0.49720088",
"0.4971398"
] | 0.57980293 | 0 |
Get a request object's dependencies' lock status, i.e. its reads | def get_dep_locks(self, request):
depends_on_version = request.command.get_dependencies()
dep_locks = {dv: self.object_locks.try_get(str(dv)) for dv in depends_on_version}
missing_deps = []
used_deps = []
locked_deps = []
for dep, lock in dep_locks.items():
if lock is None:
missing_deps.append(str(dep))
elif lock == LOCK_EXPIRED:
used_deps.append(str(dep))
elif lock != LOCK_AVAILABLE:
locked_deps.append(str(dep))
if missing_deps:
logger.error(
f'Reject request {request.cid} -- missing dependencies: '
f'{", ".join(missing_deps)}'
)
if used_deps:
logger.error(
f'Reject request {request.cid} -- used dependencies: '
f'{", ".join(used_deps)}'
)
if locked_deps:
logger.warning(
f'Reject request {request.cid} -- locked dependencies: '
f'{", ".join(locked_deps)}'
)
return DepLocks(missing_deps, used_deps, locked_deps) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_dependencies(self, resource):\n\n rel_path = resource.relative_path\n deps = self.deps[rel_path] if rel_path in self.deps \\\n else self.update_deps(resource)\n return deps",
"def lock_status(self) -> Dict[str, str]:\n self.__logger.debug('Eva.lock_status called')\n return self.__http_client.lock_status()",
"def get_lock(self):\n \n svc = \"urn:micasaverde-com:serviceId:DoorLock1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n \n status = self.get_variable(svc, \"Status\")\n return status == 1",
"def locks(self):\r\n params = {'f' : 'json'}\r\n url = \"%s/lockInfos\" % self._url\r\n return self._con.post(url, params)['lockInfos']",
"def _get_locks():\n locks = getattr(_local, 'entry_transaction', None)\n if locks is None:\n locks = []\n _set_locks(locks)\n return locks",
"def deps_status(self):\n if not self.deps:\n return [self.S_OK]\n\n return [d.status for d in self.deps]",
"def locked(self):\n return self.__lock.locked()",
"def locked(self):\n\t\treturn self.__locked",
"def v_locked(self):\n return self._locked",
"def svn_fs_get_lock(*args):\r\n return _fs.svn_fs_get_lock(*args)",
"def locked(self):\n return self._filelock.locked",
"def locked(self):\n return self._locked",
"def locked(self):\n return self._locked",
"def locked(self):\n return self._locked",
"def lock(self):\r\n return self._lock",
"def locks(self):\n return self._locks_by_id.values()",
"def load_balancer_operation_locks(self) -> Sequence['outputs.GetLoadBalancersBalancerLoadBalancerOperationLockResult']:\n return pulumi.get(self, \"load_balancer_operation_locks\")",
"def required_cache(self):\n return self._required_cache",
"def getAccessLock(self):\n return self._dataLock",
"def dependencies(self) -> typing.Optional[typing.List[aws_cdk.core.IDependable]]:\n return self._values.get('dependencies')",
"def spinlocks(self):\n return self._spinlocks",
"def rest_api_status(self):\n with self.resource_lock:\n pass",
"def statistics(self) -> LockStatistics:\n return LockStatistics(self.locked(), self._owner_task, len(self._waiters))",
"def lock(self):\n return self._lock",
"def get_lockable_objects(self):\n if not self.__lockable_objects:\n return set([\"lock\"])\n return self.__lockable_objects",
"def oplocks(self):\n return self._oplocks",
"def locked(self):\n return self.partner_state.locked.outstanding",
"def lock(self):\n print(\"DEPRECATED lock\")\n return self._operations.lock()",
"def svn_info_t_lock_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass",
"def dependOnJob(self):\n return self.data.depend_on_job"
] | [
"0.5808544",
"0.5736591",
"0.5684779",
"0.5626385",
"0.56198746",
"0.560007",
"0.5549846",
"0.55438083",
"0.5450805",
"0.54364485",
"0.543053",
"0.5421912",
"0.5421912",
"0.5421912",
"0.5407252",
"0.5367982",
"0.534507",
"0.5331659",
"0.5323035",
"0.5317566",
"0.5312923",
"0.53040373",
"0.52918375",
"0.52816236",
"0.5259555",
"0.5256933",
"0.5244777",
"0.5223037",
"0.5222202",
"0.52081996"
] | 0.69462824 | 0 |
The local VASP attempts to sequence a new offchain command. | def sequence_command_local(self, off_chain_command):
off_chain_command.set_origin(self.get_my_address())
request = CommandRequestObject(off_chain_command)
# Before adding locally, check the dependencies
missing_deps, used_deps, locked_deps = self.get_dep_locks(request)
if missing_deps:
raise DependencyException(f'Dependencies not present: {", ".join(missing_deps)}')
if used_deps:
raise DependencyException(f'Dependencies used: {", ".join(used_deps)}')
if locked_deps:
raise DependencyException(f'Dependencies locked: {", ".join(locked_deps)}')
create_versions = request.command.get_new_object_versions()
existing_writes = []
for cv in create_versions:
if str(cv) in self.object_locks:
existing_writes.append(cv)
if existing_writes:
raise DependencyException(f'Object version already exists: {", ".join(existing_writes)}')
self.processor.check_command(
self.get_my_address(),
self.get_other_address(),
off_chain_command)
# Add the request to those requiring a response.
self.my_pending_requests[request.cid] = request
for dv in off_chain_command.get_dependencies():
self.object_locks[str(dv)] = request.cid
# Send the requests outside the locks to allow
        # for an asynchronous implementation.
return request | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def command(self, cmd):\n self.lmp.command(cmd)",
"def veto(self):\n self.console.write('veto')\n self.callvote = None",
"def cmd(self):",
"def chain_cmd(ctx):\n pass",
"def _doBack(self):\n argin = [\"2\", \"0\"] # to send string array with two arg...\n self._executeServerTask(self._cmdBack, argin)",
"def arun(ctx, user_cmd):\n connecter = ScalingoInterface(ctx.obj)\n connecter.detached = True\n connecter.run(user_cmd)",
"def normal(self):\n self.run_command('normal')",
"def chain_test():\n print(f\"Running {__file__}::{chain_test.__name__}()\")\n con = Connection(State())\n arm_ctrl = BasicController(con)\n force_calib_ctrl = EMAForceCalibrator(arm_ctrl)\n touch_ctrl = TouchController(force_calib_ctrl)\n\n cmd = Command().make(kind =UR_CMD_KIND_MOVE_TOOL_POSE, target=Tool(0.1,0.1,0.1,0,0,0), force_low_bound=Tool(-1,-1,-1,-1,-1,-1), force_high_bound=Tool(1,1,1,1,1,1))\n state = State()\n touch_ctrl.execute(cmd, state)\n print(\"Passed.\")",
"def command():\n pass",
"def reset_ospl_command(self):\r\n if self.ospl_home_bin != \"\":\r\n self.process.set_command(self.ospl_home_bin + OSPL.command)\r\n else:\r\n self.process.set_command(OSPL.command)",
"def makecmd(self, options):",
"def __init__(self):\n Cmd.__init__(self)\n self.calc = ReversePolishCalc()",
"def off(self):\n self.rs485.write_command('#{}bf'.format(self.address))\n time.sleep(1)",
"def onCommandGenerationStartOldVersion(self, event):\n self.turnOffTimerTrans()\n self.turnOffTimerXML()\n logging.info(\"Start command generation\")\n self.turnOnTimerItem()\n self.turnOnTimerCmd()\n\n xcor_text = self.XMLCorr.GetValue().strip()\n self.formerCorrCommand = self.CorrCommand.GetValue().strip()\n if len(xcor_text) > 0:\n commands = self.conceptGenerator.recognizeString(xcor_text, isStrict=False)\n if len(commands) > 0:\n cmdText = '\\n'.join(commands)\n else:\n cmdText = 'NO_COMMAND'\n\n if cmdText.strip() == self.formerCorrCommand:\n logging.info(\"Command generation failed\")\n self.Btn_CMD.SetLabel('Get Command')\n self.cmdButtonMode = BUTTON_GENERATE\n self.formerCorrCommand = None\n else:\n self.CorrCommand.SetValue(cmdText)\n logging.info(\"Command generation completed\")\n self.Btn_CMD.SetLabel('Undo Command')\n self.cmdButtonMode = BUTTON_REVERT\n\n self.OnUpdatePlantCtrl(event)",
"def off(self) -> None:\n ...",
"def do(self, line): \n self.interface.onecmd(line)",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def controlUp(*args):",
"def takeoff(self, request, context):\n print \"Arming motors\"\n # Copter should arm in GUIDED mode\n self.vehicle.mode = VehicleMode(\"GUIDED\")\n self.vehicle.armed = True \n\n # Confirm vehicle armed before attempting to take off\n timeout = 0\n while not self.vehicle.armed: \n print \" Waiting for arming...\"\n time.sleep(1)\n timeout = timeout + 1\n if timeout == 20:\n return self.getPosition(request, context)\n\n print \"Taking off!\"\n self.vehicle.simple_takeoff(request.altitude) # Take off to target altitude\n\n # Wait until the vehicle reaches a safe height before processing the goto (otherwise the command \n # after Vehicle.simple_takeoff will execute immediately).\n while True:\n print \" Altitude: \", self.vehicle.location.global_relative_frame.alt \n #Break and return from function just below target altitude. \n if self.vehicle.location.global_relative_frame.alt >= request.altitude * 0.95: \n print \"Reached target altitude\"\n break\n time.sleep(1)\n \n return self.getPosition(request, context)",
"def goto_second():\n\tglobal c2\n\tglobal a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c2.recv(BUF_SIZE) # wait for the taken off message\n\tprint a2, ' >> ', msg\n\tif msg != 'Taken Off':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'GOTO'\n\t\tnew_msg['arg1'] = init2\n\t\tc2.send(json.dumps(new_msg))\n\t\tstate += 1",
"def takeoff_first():\n\tglobal c1\n\tglobal a1\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c1.recv(BUF_SIZE) # wait for the armed message\n\tprint a1, ' >> ', msg\n\tif msg != 'Armed':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'TAKEOFF'\n\t\tnew_msg['arg1'] = init1[2]\n\t\tc1.send(json.dumps(new_msg))\n\t\tstate += 1"
] | [
"0.5558248",
"0.5348713",
"0.53144515",
"0.5292959",
"0.52843213",
"0.5257564",
"0.5176616",
"0.5172852",
"0.51587677",
"0.5153389",
"0.51395804",
"0.5100976",
"0.5100821",
"0.5076194",
"0.50715107",
"0.5045304",
"0.5016509",
"0.5016509",
"0.5016509",
"0.5016509",
"0.5016509",
"0.5016509",
"0.5016509",
"0.5016509",
"0.5016509",
"0.5016509",
"0.5016509",
"0.501635",
"0.50117433",
"0.4993102"
] | 0.6367771 | 0 |
Packages up to a `number` (int) of earlier requests without a reply to send to the other party. Returns a list of `NetMessage` instances. | async def package_retransmit(self, number=1):
return await asyncio.gather(
*[
self.package_request(m) for m in self.get_retransmit(number)
]
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_requests_with_n_recipients(num, url):\n recipients = [str(x) for x in xrange(num)]\n\n data = create_request('TEST MESSAGE!', recipients)\n return call_api_endpoint(url, data)",
"def get_first_n_pending_links(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT link FROM link WHERE chunk_id IS NULL AND state = 'pending' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)",
"def recv(self, n=1, timeout=0):\n msgs = []\n for i in range(n):\n try:\n m = self.recvbuffer.pop(0)\n except IndexError:\n if timeout:\n time.sleep(timeout)\n try:\n m = self.recvbuffer.pop(0)\n except IndexError:\n msgs.append(None)\n break\n else:\n msgs.append(None)\n break\n msgs.append(m)\n return msgs if len(msgs) > 1 else msgs[0]",
"def get_messages(self, number: int):\n self.sql_lock.acquire()\n query: str = \"SELECT * FROM messages order by id desc LIMIT ?\" \n messages = []\n for item in self.cursor.execute(query,(number, )):\n text, user, id = item\n\n messages.append({\"user\": user, \"text\": f\"message- {text}\", \"id\": id})\n \n self.sql_lock.release()\n return messages",
"def requests(self, node_count, responses, *pairs):\n community = self._community\n nodes = self._nodes[:node_count]\n meta = self._messages[0].meta\n\n # flush incoming socket buffer\n for node in nodes:\n node.drop_packets()\n\n # request missing\n sequence_numbers = set()\n for low, high in pairs:\n sequence_numbers.update(xrange(low, high + 1))\n for node in nodes:\n node.give_message(node.create_dispersy_missing_sequence(community.my_member, meta, low, high, community.global_time, community.my_candidate), cache=True)\n # one additional yield. Dispersy should batch these requests together\n yield 0.001\n\n for node in nodes:\n self.assertEqual(node.receive_messages(message_names=[meta.name]), [], \"should not yet have any responses\")\n\n yield 0.11\n\n # receive response\n for node in nodes:\n for i in responses:\n _, response = node.receive_message(message_names=[meta.name])\n self.assertEqual(response.distribution.sequence_number, i)\n\n # there should not be any no further responses\n for node in nodes:\n self.assertEqual(node.receive_messages(message_names=[meta.name]), [], \"should not yet have any responses\")",
"def get_pieces_to_request(self):\n missing_pieces = list(self.partners_sequences - self.own_sequences)\n missing_pieces.sort(key=lambda p: len(self.partners_by_piece[p]))\n\n return missing_pieces[:self.MAX_REQUESTS]",
"def stream_n_messages(n):\n response = get_dict(\"url\", \"args\", \"headers\", \"origin\")\n n = min(n, 100)\n\n def generate_stream():\n for i in range(n):\n response[\"id\"] = i\n yield json.dumps(response) + \"\\n\"\n\n return Response(generate_stream(), headers={\"Content-Type\": \"application/json\"})",
"def send_spam_msg(driver, name, message, n):\r\n\r\n for i in range(n):\r\n send_message(driver, name, message)",
"def _send_messages(number_range, partition=0, topic=topic, producer=kafka_producer, request=request):\n messages_and_futures = [] # [(message, produce_future),]\n for i in number_range:\n # request.node.name provides the test name (including parametrized values)\n encoded_msg = '{}-{}-{}'.format(i, request.node.name, uuid.uuid4()).encode('utf-8')\n future = kafka_producer.send(topic, value=encoded_msg, partition=partition)\n messages_and_futures.append((encoded_msg, future))\n kafka_producer.flush()\n for (msg, f) in messages_and_futures:\n assert f.succeeded()\n return [msg for (msg, f) in messages_and_futures]",
"def delete_packets(self, num):\n for i in range(num):\n del self._packets[0]",
"def _GetNewRequests(self):\n new_requests = self._GetRequestsByState(self._REQUESTED)\n if new_requests:\n while self._MakeRequestId() == new_requests[-1]:\n pass\n for request_id in new_requests:\n self._TransitionRequest(request_id, self._REQUESTED, self._PENDING)\n return new_requests",
"def block_numbers(self, numbers):\n request = self.create_recipients_request_body('BlockNumbersBodyType', numbers)\n return self.client.service.blockNumbers(self.authentication, request)",
"def generate_packets():\n num_packets = randrange(10)\n temp_packets = []\n for i in range(num_packets):\n temp_packets.append(randrange(1000))\n return temp_packets",
"def wait_until_got_number_of_callbacks(self, number):\n slept = 0.0\n time_step = 0.1\n while not len(self.futures) == number and slept < TIMEOUT:\n time.sleep(time_step)\n slept += time_step",
"def read_requests(self, clients_for_reading, all_clients):\n messages = []\n for sock in clients_for_reading:\n try:\n message = self.get_message(sock)\n print(message)\n messages.append(message)\n except:\n print('Client {} {} has disconnected'.format(sock.fileno(), sock.getpeername()))\n all_clients.remove(sock)\n\n return messages",
"def get_blocked_numbers(self, maximum_numbers=None):\n request_body = self.create('GetBlockedNumbersBodyType')\n\n if maximum_numbers is int and maximum_numbers >= 0:\n request_body.maximumRecipients = maximum_numbers\n\n response = self.client.service.getBlockedNumbers(self.authentication, request_body)\n return response.recipients.recipient if 'recipient' in response.recipients else []",
"def get_messages(self,honeypotids,expect_dict):\n if type(honeypotids) == str:\n honeypotids = [honeypotids]\n if \"ALL\" in honeypotids:\n msg_list = self.network.wait_for_messages()\n if msg_list:\n msg_list = self.extract_messages(msg_list)\n msg_list = self.check_answer(msg_list,honeypotids,expect_dict)\n else:\n msg_count = len(honeypotids)\n msg_list = []\n while(msg_count > 0):\n msgs = self.network.get_message()\n if msgs:\n msgs = self.extract_messages(msgs)\n msgs = self.check_answer(msgs,honeypotids,expect_dict)\n if msgs:\n msg_list = msg_list + msgs\n msg_count -= len(msgs)\n else:\n msg_count = 0\n return msg_list",
"def get_first_n_crawled_chunks(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM crawler WHERE c_task = 'crawled' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)",
"def recvall(self, n):\n data = bytearray()\n while len(data) < n:\n packet = self.recv(n - len(data))\n if not packet:\n break\n if not packet == None:\n data.extend(packet)\n return data",
"def package_lists_request(self, apiUrl, timestamp, packages_directory):\n META = \"meta\"\n NEXT = \"next\"\n LIMIT = \"limit\"\n COUNT = \"total_count\"\n IN_PROGRESS = \"IN PROGRESS\"\n (\n base_url,\n request_url_without_api_key,\n request_url,\n ) = format_api_url_with_limit_offset(apiUrl)\n # First packages request.\n packages = make_request(request_url, request_url_without_api_key)\n packages_count = 1\n # Calculate how many package list files will be downloaded based on\n # total number of packages and the download limit\n total_packages = int(packages.get(META, {}).get(COUNT, 0))\n total_package_lists = int(total_packages / int(apiUrl.get(LIMIT))) + (\n total_packages % int(apiUrl.get(LIMIT)) > 0\n )\n # There may be more packages to download to let's access those here.\n # TODO: `request_url_without_api_key` at this point will not be as\n # accurate. If we have more time, modify `format_api_url_with_limit_offset(...)`\n # to work with raw offset and limit data to make up for the fact\n # that an API key is plain-encoded in next_url.\n next_url = packages.get(META, {}).get(NEXT, None)\n write_packages_json(packages_count, packages, packages_directory)\n while next_url is not None:\n next_request = \"{}{}\".format(base_url, next_url)\n next_packages = make_request(next_request, request_url_without_api_key)\n packages_count += 1\n write_packages_json(packages_count, next_packages, packages_directory)\n next_url = next_packages.get(META, {}).get(NEXT, None)\n self.update_state(\n state=IN_PROGRESS,\n meta={\n \"message\": \"Total packages: {} Total package lists: {}\".format(\n total_packages, total_package_lists\n )\n },\n )\n return {\n \"totalPackageLists\": total_package_lists,\n \"totalPackages\": total_packages,\n \"timestampStr\": timestamp,\n }",
"def get_phone_numbers_to_send_to(self):\n # Get the phone numbers we want to send to, excluding those that have\n # already done the thing we want to remind them of\n phone_numbers = self.PhoneModel.objects.exclude(phone_number__in=self.to_exclude())\\\n .values_list('phone_number', flat=True)\n\n message_text = self.get_message_text()\n # Set from_number to REPORTS_SHORT_CODE so that recipient can\n # simply just respond to this message with their report.\n from_shortcode = settings.REPORTS_SHORT_CODE\n for phone_number in phone_numbers:\n yield phone_number, message_text, from_shortcode",
"def unblock_numbers(self, numbers):\n request = self.create_recipients_request_body('UnblockNumbersBodyType', numbers)\n return self.client.service.unblockNumbers(self.authentication, request)",
"def dial_numbers():\n for number in DIAL_NUMBERS:\n print(\"Dialing \" + number)\n # set the method to \"GET\" from default POST because Amazon S3 only\n # serves GET requests on files. Typically POST would be used for apps\n client.calls.create(to=number, from_=TWILIO_PHONE_NUMBER,\n url=TWIML_INSTRUCTIONS_URL, method=\"GET\")",
"def _retire_packets_with_seqnum_up_to(self, acknum):\n if not self._sending_window:\n return\n lowest_seqnum = iter(self._sending_window).__next__()\n if acknum >= lowest_seqnum:\n for seqnum in range(lowest_seqnum, acknum):\n self._retire_scheduled_packet_with_seqnum(seqnum)\n self._attempt_enabling_looping_send()",
"def get_msgs(self):\n msgs = []\n while True:\n try:\n msgs.append(self.get_msg(block=False))\n except Empty:\n break\n return msgs",
"def get_msgs(self):\n msgs = []\n while True:\n try:\n msgs.append(self.get_msg(block=False))\n except Empty:\n break\n return msgs",
"def get_new_messages(self):\n inbox = list(self.reddit.inbox.unread(limit=10))\n inbox.reverse()\n return inbox",
"def _workout_messages(self, msgs_bunch):\n if msgs_bunch != []:\n while True:\n r = requests.post(self.url, headers = self.headers, data = json.dumps(msgs_bunch))\n # request success condition below - to end the handler\n if r.status_code == 200:\n break\n print('http_handler: failed to retranslate messages, try again in ' + str(self.timeout) + ' sec')\n time.sleep(self.timeout)\n # next bunch of messages will not be read until this function ends\n # current bunch of messags will be deleted in next request if delete_flag = True is set",
"def downloadMessages(iinmap4, uids, process_message):\n\ttotal_amount = str(len(uids))\n\tfor i in uids:\n\t\tprint('Fetching message No.' + str(i)+'/' + total_amount + '...')\n\t\tmail = M.fetch(str(i),'(RFC822)')[1][0][1]\n\t\tprocess_message(mail)",
"def _sendJunk(self, n):\n junk = ''.join(['x' for i in range(n)])\n written = 0\n while written < n:\n retval = self.s.send(junk[written:])\n self.sent += retval\n written += retval\n self.assertEquals(n, written)"
] | [
"0.595882",
"0.5633911",
"0.5543614",
"0.5421572",
"0.53338075",
"0.5250904",
"0.5175569",
"0.5017793",
"0.49893928",
"0.4988267",
"0.49087372",
"0.4898715",
"0.4894271",
"0.48901066",
"0.48861828",
"0.48834553",
"0.488342",
"0.48803717",
"0.48681366",
"0.48187768",
"0.4800141",
"0.47906274",
"0.47822037",
"0.47623086",
"0.47534397",
"0.47534397",
"0.47528428",
"0.4751525",
"0.47489047",
"0.47339097"
] | 0.64028597 | 0 |
Returns true if there are any pending retransmits, namely requests for which the response has not yet been received. | def would_retransmit(self):
return not self.my_pending_requests.is_empty() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def has_pending_packets_to_be_sent(self):\n return self.num_packets != 0",
"def no_more_acks() -> bool:\n return not any(not op.is_set() for op in self._pending_operations.values())",
"def is_polling_done(self):\n if self.message_request_more:\n return False\n \n if self.message_cache:\n return False\n \n return True",
"def messages_pending(self):\r\n return bool(self._log_buffer)",
"def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0",
"def check_sync(self):\r\n if not self.awaiting_sync:\r\n return True\r\n self.check_ack_queue()\r\n return not self.awaiting_sync",
"def pending_runs(self) -> bool:\n # If there are futures available, it translates\n # to runs still not finished/processed\n return len(self.futures) > 0",
"def expects_reply(self) -> bool:\n return self.opcode in [\n OPCODE_WRITE_8,\n OPCODE_READ_8,\n OPCODE_FENCE,\n OPCODE_FINISH,\n ]",
"def tcp_ack(self):\n return self.tcp_flags & dpkt.tcp.TH_ACK != 0",
"def no_excessive_retrans():\n test_str = DEBUG_IGNORE + \"r3tr4n5m15510ns~~~~~~~\\n\"\n server = start_server(reference=True)\n client = start_client()\n\n # Send a segment to reference server, which should ignore it. See how many\n # times it was sent.\n write_to(client, test_str)\n segments = read_segments_from(server)\n if not segments or len(segments) != 6:\n return False\n\n # All segments should have the same content.\n orig_segment = segments[0]\n for segment in segments:\n if (\n segment.source != orig_segment.source or\n segment.source_port != orig_segment.source_port or\n segment.dest != orig_segment.dest or\n segment.dest_port != orig_segment.dest_port or\n segment.seqno != orig_segment.seqno or\n segment.ackno != orig_segment.ackno or\n segment.length != orig_segment.length or\n not segment.has_same_flags(orig_segment) or\n segment.window != orig_segment.window or\n segment.checksum != orig_segment.checksum\n ):\n return False\n\n return True",
"def any(self) -> bool:\n return len(self.queue) > 0",
"def can_fit_more(self):\n\n return len(self._requeue_jobs) < MAX_NUM",
"def check_saved_acks():\n log('Looking through saved ACKS')\n if (BUFFER):\n for decoded in RECEIVED_ACKS:\n lowest_seq = min(BUFFER.keys())\n send_from_buffer(decoded['ack'], lowest_seq)\n # if we removed the last item from the buffer break out of loop\n if not BUFFER:\n break",
"def no_ack(self):\n\n return self._block.tx_policies[self._lvap.addr].no_ack",
"def _do_request(self):\n\n if time.time() < self._next_request:\n return False\n else:\n return True",
"def is_request_sent(self, request, relations):\n states = self.get_request_states(request, relations)\n for rid in states.keys():\n if not states[rid]['sent']:\n return False\n\n return True",
"def ready(self):\n return not self._wanted",
"def is_ready_to_reap(self):\n self.calc_progress()\n return self._num_results > 0 and (\n self._num_results == self.num_sown_batches\n )",
"def pending_work(self) -> bool:\n return len(self.ongoing) > 0",
"def active(self):\n return len(self.queue) > 0",
"def complete(self):\n\n return any(\n [\n self.is_complete(),\n self.is_failed(),\n self.is_out_of_memory(),\n self.is_timeout(),\n ]\n )",
"def more(self):\n # return True if there are still frames in the queue. If stream is not stopped, try to wait a moment\n tries = 0\n while self.Q.qsize() == 0 and not self.stopped and tries < 5:\n time.sleep(0.1)\n tries += 1\n\n return self.Q.qsize() > 0",
"def is_pending(self):\n return self.is_disarming() or self.is_arming()",
"def ack(self):\n return (self.status == self.STATUS_ACK)",
"def reply_received():\n return call_id in self._reply_inbox",
"def noqueue(self) -> bool:\n return not self.orders",
"def _is_acknowledged(self):\n response = self._port_handle.read(1)\n if len(response) == 0:\n raise DfuException('DFU did not send the answer.')\n else:\n if response != self.__RESPONSE['ack']:\n print('dfu answered nack (0x{})'.format(response.hex()))\n return response == self.__RESPONSE['ack']",
"def _check_for_life_signs(self):\n self._lock.acquire()\n if not self._running.is_set():\n return False\n try:\n if self._writes_since_check == 0:\n self.send_heartbeat()\n if self._reads_since_check == 0:\n self._threshold += 1\n if self._threshold >= 2:\n self._running.set()\n message = (\n 'Connection dead, no heartbeat or data received in >= '\n '%ds' % (\n self._interval * 2\n )\n )\n why = AMQPConnectionError(message)\n if self._exceptions is None:\n raise why\n self._exceptions.append(why)\n return False\n else:\n self._threshold = 0\n finally:\n self._reads_since_check = 0\n self._writes_since_check = 0\n self._lock.release()\n if self._timer:\n self._start_new_timer()\n return True",
"def canSend(self):\n return self._lte.isconnected()",
"def has_msg(self):\n return self.bufsize >= 4 and self.bufsize - 4 >= struct.unpack('!I', str(self.buf.peek(0, 4)))[0]"
] | [
"0.7243682",
"0.69872236",
"0.6487534",
"0.6383383",
"0.6312614",
"0.627255",
"0.610029",
"0.60920763",
"0.6070019",
"0.6066764",
"0.60588837",
"0.6021426",
"0.5998724",
"0.59974205",
"0.59756446",
"0.5919119",
"0.58142316",
"0.58030236",
"0.5796859",
"0.5787215",
"0.57860917",
"0.57835823",
"0.5766212",
"0.57448244",
"0.57184",
"0.57086635",
"0.57083875",
"0.5699899",
"0.56815255",
"0.56623995"
] | 0.8584883 | 0 |
Infinite loop to listen for client connections and route each to request_handler | def serve_requests(self):
while True:
self.server_socket.listen(self.request_queue_size)
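# accept() blocks until a client connects; each connection is handled synchronously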
client_connection, client_address = self.server_socket.accept()
self.request_handler(client_connection) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _listen_to_requests(self):\n while True:\n try:\n request = self._client.recv(1024)\n except socket.error as err:\n if DEBUG_LEVEL >= 1:\n print \"Got socket error: {}\".format(err.message)\n self._client.close()\n return True\n\n if not request:\n if DEBUG_LEVEL >= 0:\n print \"Closing connection\"\n self._client.close()\n return True\n\n if DEBUG_LEVEL >= 2:\n print request\n\n if not HTTPValidation.validate_request(request):\n if DEBUG_LEVEL >= 0:\n print \"Invalid request, closing...\"\n self._client.send(public_response_functions.get_error_response())\n self._client.close()\n return True\n\n if not self._send_response(request):\n if DEBUG_LEVEL >= 0:\n print \"Closing connection...\"\n self._client.close()\n return",
"def run(self):\n while self.running:\n self.handle_request()",
"def server(conn, address):\n print(\"Client Connection Open\")\n while True:\n request = server_read(conn)\n if request:\n print(request)\n manage_client(request, conn)",
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))",
"def serve_forever(self):\n try:\n while True:\n client, from_addr = self._socket.accept()\n LOGGER.debug(client)\n secure_sock = self._ssl_ctx.wrap_socket(client, server_side=True)\n new_client_conn = ClientHandlerThread(from_addr, secure_sock)\n new_client_conn.start()\n Server.CLIENT_CONNS.append(new_client_conn)\n except ssl.SSLError:\n LOGGER.exception('SSLError')\n except KeyboardInterrupt:\n self.cleanup()\n sys.exit(0)\n except socket.error as sock_err:\n LOGGER.warning(str(sock_err))\n self.cleanup()\n sys.exit(0)\n except Exception:\n LOGGER.exception('Unknown exception encountered!')",
"def run(self):\n\n\t\t#Begin running the clientHandler\n\t\tself.running = True\n\t\tself.rxThread.start()\n\n\t\twhile self.running:\n\t\t\ttime.sleep(0.1)\n\t\n\t\t\t#Keep a count of the number of missing Hello requests, over 5 kill client\n\t\t\tif self.missingCount >= 5:\n\t\t\t\tself.running = False",
"def serve(self):\n\t\tself.keep_running=1\n\t\tif self.debug:\n\t\t\tprint \"server started\"\n\t\ttry:\n\t\t\twhile self.keep_running:\n\t\t\t\tself.handle_request()\n\t\tfinally:\n\t\t\tif self.debug:\n\t\t\t\tprint \"server finished\"\n\t\t\tself.keep_running=0\n\t\t\tself.close()",
"def run(self):\n try:\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.bind(self.address)\n self.server.listen(5) # Allows up to 5 waiting clients\n\n while True:\n self.myView.updateStatus('Waiting for connection ...')\n client, address = self.server.accept()\n self.myView.updateStatus('... connected from ' + str(address))\n handler = ClientHandler(client, self.bank, self.myView)\n handler.start()\n\n except Exception as message:\n self.myView.updateStatus(message)\n self.server.close()\n self.myView.updateStatus(\"Server shutting down.\")",
"def serve_forever(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_socket.bind((self.host, self.port))\n server_socket.listen(self.backlog)\n logging.info('Start listening on %s', server_socket.getsockname())\n # https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor\n with ThreadPoolExecutor(max_workers=self.workers) as executor:\n while True:\n try:\n client_socket, client_address = server_socket.accept()\n logging.info('Request from %s is accepted', client_socket.getpeername()) # client_socket.getpeername() == client_address\n executor.submit(self.handler, client_socket, client_address, **self.handler_params)\n except KeyboardInterrupt:\n executor.shutdown(wait=True)\n logging.info('Server %s stopped', server_socket.getsockname())\n break",
"def serve_forever(self, unused_parameter=0.5):\r\n self.stop = False\r\n while not self.stop:\r\n self.handle_request()",
"def _handle_connection(self, conn):\n conn.serve_all()",
"def serveThread(self):\r\n while True:\r\n try:\r\n client = self.clients.get()\r\n self.serveClient(client)\r\n except Exception, x:\r\n logging.exception(x)",
"def _dispatch_to_client_request(self):\n # Listen for client connection\n self._to_client_request.listen()\n\n while not self._exit_request:\n readable, _, _ = select([self._to_client_request], [], [self._to_client_request], 0.1)\n if readable:\n client_conn, client_addr = readable[0].accept()\n client_conn.setblocking(False)\n self._to_client_connections.append(client_conn)\n print(\"Sending replies to [\" + client_addr[0] + \", \" + str(client_addr[1]) + ']')",
"def on_open(self):\n def event_loop():\n logger.debug(pformat(self.query.request))\n self.send(json.dumps(self.query.request))\n while not self.event.is_set():\n #print('Waiting around on the socket: %s' % self.gettimeout())\n self.event.wait(self.gettimeout())\n \n logger.debug('Event loop terminating.')\n \n self.thread = threading.Thread(\n target=event_loop)\n self.thread.setDaemon(True)\n self.thread.start()",
"def request_handler(self):\n\n size = 1024\n while True:\n # accept message from client\n clientSock, addr = self.socket.accept()\n self.printLine()\n print('connect to {}'.format(addr))\n\n # print client message content\n msg = clientSock.recv(size).decode('utf-8')\n self.printLine()\n print(\"sent message :\")\n print(msg)\n\n self.printLine()\n self._set_fileName(msg)\n\n # check for existance of file in the server (with name of file.txt)\n data, isFileExist = self._send_file_handler()\n\n self.printLine()\n print('data of file :')\n print(data)\n\n # create header for response message\n if isFileExist:\n header = self._generate_headers(200)\n else:\n header = self._generate_headers(404)\n\n response = header.encode() + data.encode()\n\n # send response in http protocol\n clientSock.send(response)\n\n # close the signal\n clientSock.close()",
"def server_activate(self):\n\t\tself.socket.listen(self.request_queue_size)",
"def _accept_loop(self):\r\n while self.running:\r\n # This line will suspend the server tasklet until there is a connection\r\n s, addr = self.sock_server.accept()\r\n \r\n # See if we have already been asked to stop\r\n if not self.running:\r\n return\r\n \r\n # Initialize the WSGI environment\r\n environ = self.environ.copy()\r\n environ[\"SERVER_SOFTWARE\"] = \"%s WSGI Server\" % self.version\r\n environ[\"ACTUAL_SERVER_PROTOCOL\"] = self.protocol\r\n environ[\"SERVER_NAME\"] = self.server_name\r\n environ[\"SERVER_PORT\"] = str(self.bind_addr[1])\r\n environ[\"REMOTE_ADDR\"] = addr[0]\r\n environ[\"REMOTE_PORT\"] = str(addr[1])\r\n \r\n # self.connection_class is a reference to a class that will\r\n # take care of reading and parsing requests out of the connection\r\n conn = self.connection_class(s, self.wsgi_app, environ)\r\n \r\n # We create a new tasklet for each connection. This is similar\r\n # to how threaded web servers work, except they usually keep a thread\r\n # pool with an upper limit on number of threads. We just create new tasklets\r\n # blindly without regard for how many requests the server is serving\r\n # already. This is possible because of the light-weight nature of\r\n # tasklets compared to threads.\r\n def comm(connection):\r\n try:\r\n connection.communicate()\r\n finally:\r\n connection.close()\r\n self.tasklet_class(comm)(conn)",
"def run(self):\n try:\n try:\n while self.socket.fileno() > 0:\n read, _, _ = select.select([self.socket], [], [], 1)\n if read:\n connection, address = self.socket.accept()\n\n self.handler(self, (connection, address))\n except (select.error, socket.error) as err:\n if hasattr(err, 'errno'):\n if err.errno != errno.EBADF: # pylint: disable=no-member\n raise\n else:\n code = next(iter(err.args))\n if code != errno.EBADF:\n raise\n except Exception:\n self.exceptions.put_nowait(sys.exc_info())\n raise",
"def threading_handler(client_connection, client_address):\r\n\r\n while True:\r\n request_method, request_path, request_proto, request_data = parse_request(client_connection)\r\n\r\n if not request_data:\r\n break\r\n\r\n collect_request_data(request_path, client_address)\r\n write_data(access_entries)\r\n\r\n if request_method == 'GET':\r\n if request_path in allowed_path:\r\n requested_file = open(os.getcwd() + request_path, 'rb')\r\n data = requested_file.read(os.path.getsize(os.getcwd() + request_path))\r\n requested_file.close()\r\n request_handler(client_connection, request_path, request_proto, '200', 'OK', data, True)\r\n break\r\n elif '/www' == request_path or '/' == request_path:\r\n request_handler(client_connection, request_path, request_proto, '403', 'Forbidden', '403 Unauthorized',\r\n False)\r\n break\r\n elif '/www/access_entries.html' == request_path:\r\n final_tag = construct_access_entries_data()\r\n request_handler(client_connection, '/www/access_list.txt', request_proto, '200', 'OK', final_tag, True)\r\n break\r\n else:\r\n request_handler(client_connection, request_path, request_proto, '404', 'Not found', '404 Not Found',\r\n False)\r\n break\r\n else:\r\n request_handler(client_connection, request_path, request_proto, '405', 'Method Not Allowed',\r\n '405 Method Not Allowed', False)\r\n\r\n client_connection.close()\r\n print (\" --- Connection Closed --- \", client_address)",
"def start_server(self):\n while True:\n if DEBUG_LEVEL >= 0:\n print \"Awaiting connection...\"\n\n (client_socket, client_address) = self._socket.accept()\n if DEBUG_LEVEL >= 0:\n print \"Got connection from: {}\".format(client_address)\n\n self._client = client_socket\n\n self._listen_to_requests()\n\n self._socket.close()",
"def run_loop(self):\r\n server_log.info('Server now accepting client connections.')\r\n while not self.clients_done():\r\n asyncore.loop(timeout=config[\"server_timeout\"], count=config[\"server_loop_count\"])",
"def serve(self):\r\n self.channel.wait()\r\n handler, seq, obj = self._recv()\r\n if handler == \"result\":\r\n self.dispatch_result(seq, obj)\r\n elif handler == \"exception\":\r\n self.dispatch_exception(seq, obj)\r\n else:\r\n self.dispatch_request(handler, seq, obj)",
"def run(self):\n self.poller = select.epoll()\n self.pollmask = select.EPOLLIN | select.EPOLLHUP | select.EPOLLERR\n self.poller.register(self.server,self.pollmask)\n self.timeout = float(self.webconfig.parameters[\"timeout\"])\n lastSweep = time.time()\n\n while True:\n # poll sockets\n\n if (time.time() - lastSweep) > .5: #sweet through every half second\n self.socketCheck()\n lastSweep = time.time()\n try:\n fds = self.poller.poll(timeout=1.0)\n except:\n return\n fd = 0\n for (fd,event) in fds:\n # handle errors\n if event & (select.POLLHUP | select.POLLERR):\n self.handleError(fd)\n continue\n # handle the server socket\n if fd == self.server.fileno():\n self.handleServer()\n continue\n # handle client socket\n result = self.handleClient(fd)",
"def handle_request(self):\n\t\ttry:\n\t\t\tr,w,e=select.select([self.socket],[],[], 1.0)\n\t\t\tif not r:\n\t\t\t\treturn\n\t\t\trequest, client_address=self.socket.accept()\n\t\texcept:\n\t\t\treturn\t\t\n\t\t\n\t\ttry:\n\t\t\tif self.debug:\n\t\t\t\tprint \"got request\"\n\t\t\tself.process_request(request, client_address)\n\t\texcept:\n\t\t\tself.handle_error(request, client_address)",
"def handle(self):\n try:\n peers = Peers([\n gevent.spawn(self.route.proxy_input, self.client.sock,\n self.sock, self.buf, self.extra),\n gevent.spawn(self.route.proxy_connected, self.sock, \n self.client.sock, self.extra)])\n gevent.joinall(peers.greenlets)\n finally:\n self.sock.close()",
"def _handle_requests(self):\n for request in self._requests[:]:\n self.logger.debug(\"Handling request: %r\", request)\n\n # an orphan request, client is not alive.\n if not request.server_request and not request.worker.is_alive:\n self.logger.warning(\"Client %r disconnected, request dropped\",\n request.worker.name)\n self._requests.remove(request)\n continue\n\n try:\n request_handler = self._get_request_handler(request)\n reply = request_handler(request)\n\n except _WaitingForResourceException as ex:\n self.logger.exception(str(ex))\n continue\n\n except Exception as ex:\n if isinstance(ex, ServerError):\n code = ex.ERROR_CODE\n content = ex.get_error_content()\n\n else:\n code = ServerError.ERROR_CODE\n content = str(ex)\n\n self.logger.exception(str(ex))\n reply = ErrorReply(code=code, content=content)\n\n reply.request_id = request.message.msg_id\n self._reactor.callFromThread(request.respond, reply)\n\n self._requests.remove(request)",
"def run(self):\n try:\n while True:\n self.__listen()\n except (ConnectionResetError, ConnectionAbortedError):\n self.session.close()\n return",
"def run(self):\n while self._num_workers > 0:\n self.server.handle_request()\n self._graph = None",
"def listen_for_client(self):\n #PART 2:LISTEN FOR CLIENT We wait for the clients connection request and once a\n #successful connection is made we dispatch the request in a separate thread,\n #making ourselves available for the next request.\n #This allows us to handle multiple requests simultaneously which boosts the performance of the \n #server multifold times. -> we need a function for threading and to get client name!!!\n\n\n while True:\n (clientSocket, client_address) = self.serverSocket.accept() # Establish the connection\n d = threading.Thread(name=self._getClientName(client_address), target=self.proxy_thread, args=(clientSocket, client_address))\n d.setDaemon(True)\n d.start()\n self.shutdown(0,0)",
"def run(self):\n while True:\n req = self._requests.get()[1]\n req.start()\n logging.info('Running request %s', req)"
] | [
"0.7345777",
"0.7169281",
"0.7169157",
"0.70783734",
"0.7046629",
"0.7006674",
"0.69998455",
"0.69914407",
"0.69864285",
"0.6963859",
"0.6918372",
"0.6907004",
"0.6852823",
"0.6837102",
"0.6827365",
"0.6768687",
"0.6760217",
"0.6739094",
"0.67388463",
"0.67078495",
"0.6694091",
"0.66785866",
"0.6677861",
"0.6667433",
"0.6666642",
"0.66399884",
"0.6612469",
"0.6598286",
"0.65964115",
"0.65825796"
] | 0.8256458 | 0 |
Method to parse requests by path, route them to the correct getter or setter and return a response and HTTP status code | def parse_request(self, request):
response=''
http_code = 200
request_line = request.splitlines()[0]
request_method, path, request_version = request_line.split()
#Try to split path into its components: the operation requested and the keyvalue
try:
request_op, request_keyvalue = path.split('?')
request_op = request_op[1:]
#If request is a get we split in a different order than if it's a set
if request_op == 'get':
request_value, request_key = request_keyvalue.split('=')
response, http_code = self.get_value(request_key)
elif request_op == 'set':
request_key, request_value = request_keyvalue.split('=')
response, http_code = self.set_value(request_key, request_value)
else:
response = 'Unknown operation in URL. Must be either GET or SET.'
http_code = 400
except ValueError: #Catch any paths that don't match the form we're interested in
response = dedent("""Incorrect path (%s)
Requested URL must take the form http://%s:%s/[operation]?[value]""" % (path, self.server_address, self.server_port))
http_code = 400
return response, http_code
return response, http_code | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_GET(self):\n\n try:\n self.parsed_url = urllib.parse.urlsplit(self.path)\n self.request_params = dict(urllib.parse.parse_qsl(self.parsed_url.query))\n\n handler = self._dispatch_on_path()\n\n if isinstance(handler, Handler):\n\n self._check_required_params(handler)\n params = self._convert_parameters(handler)\n\n handler.handler(**params)\n\n elif isinstance(handler, RawHandler):\n handler.handler()\n\n except (ValidationError, HTTPValidationError) as exc:\n logger.error(\"HTTP validation error: %s\", exc)\n self.send_json_error_response(\n exc, status_code=HTTPStatus.UNPROCESSABLE_ENTITY\n )\n\n except Exception as exc:\n logger.exception(\"Unhandled exception: %s\", exc)\n self.send_json_error_response(exc)",
"def _calculate_response(self, base_path, url_args, body_args=None):\n\n if base_path == '/v1/reflect/me':\n # A test URI that is used by tests. In some cases it is impossible\n # to reuse SRV record path.\n return self._reflect_request(base_path, url_args, body_args)\n\n match = self.SRV_QUERY_REGEXP.search(base_path)\n if match:\n return self.__srv_permissions_request_handler(match.group(1))\n\n raise EndpointException(\n code=500,\n content=\"Path `{}` is not supported yet\".format(base_path))",
"def process_GET_request(self, path, http_s_obj):\n try:\n self.check_and_print_debug_message(\"GET directory path: \" + path)\n if path[-1] == '/':\n http_s_obj.setData(json.dumps(\n os.listdir(path)).encode(\"utf-8\"))\n http_s_obj.setHeader(\"Content-Type\", \"application/json\")\n http_s_obj.setHeader('Content-Disposition', 'inline')\n http_s_obj.setStatusCode(200)\n else:\n if os.path.exists(path):\n http_s_obj.setStatusCode(200)\n mime_type = magic.from_file(path, mime=True)\n http_s_obj.setHeader(\"Content-Type\", mime_type)\n data = self.fileread(path)\n http_s_obj.setData(data)\n http_s_obj.setHeader(\"Content-Length\", str(len(data)))\n else:\n http_s_obj.setStatusCode(404)\n http_s_obj.setData(MAPPING_DICT.get(404))\n except Exception as e:\n self.check_and_print_debug_message(str(e))\n http_s_obj.setStatusCode(400)\n http_s_obj.setData(MAPPING_DICT.get(400))\n\n return http_s_obj",
"def handle(self, path, method='GET'):\r\n depr(\"This method will change semantics in 0.10. Try to avoid it.\")\r\n if isinstance(path, dict):\r\n return self._handle(path)\r\n return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})",
"def do_GET(self):\n parsed_path = urlparse.urlparse(self.path)\n if parsed_path.path == '/books':\n return self.books()\n elif parsed_path.path == '/my_loans':\n return self.my_loans()\n return self.send_response(404)",
"def do_GET(self):\n print \"GET REQUEST: \", self.path\n if self.path == \"/hello\":\n try:\n self.sendResponse(self, 200, \"my message\")\n except:\n self.sendResponse(self, 400, \"\")\n traceback.print_exc(file=sys.stdout)\n return\n\n if self.path == \"/getChartsData\":\n try:\n self.sendResponse(self, 200, structure.toString())\n structure.clear()\n except:\n self.sendResponse(self, 400, \"\")\n traceback.print_exc(file=sys.stdout)\n return\n\n url = self.path\n parsed = urlparse.urlparse(url)\n if string.find(self.path, \"/putHeartRate\") != -1:\n try:\n value = float(urlparse.parse_qs(parsed.query)['value'][0])\n time = float(urlparse.parse_qs(parsed.query)['time'][0])\n structure.addHartRate(time, value)\n self.sendResponse(self, 200, \"\")\n except:\n self.sendResponse(self, 400, \"\")\n traceback.print_exc(file=sys.stdout)\n return\n\n if string.find(self.path, \"/putNumSteps\") != -1:\n try:\n value = float(urlparse.parse_qs(parsed.query)['value'][0])\n time = float(urlparse.parse_qs(parsed.query)['time'][0])\n structure.addNumSteps(time, value)\n self.sendResponse(self, 200, \"\")\n except:\n self.sendResponse(self, 400, \"\")\n traceback.print_exc(file=sys.stdout)\n return\n\n if string.find(self.path, \"/putTemperature\") != -1:\n try:\n value = float(urlparse.parse_qs(parsed.query)['value'][0])\n time = float(urlparse.parse_qs(parsed.query)['time'][0])\n structure.addTemperature(time, value)\n self.sendResponse(self, 200, \"\")\n except:\n self.sendResponse(self, 400, \"\")\n traceback.print_exc(file=sys.stdout)\n return\n\n if string.find(self.path, \"/putHumidity\") != -1:\n try:\n value = float(urlparse.parse_qs(parsed.query)['value'][0])\n time = float(urlparse.parse_qs(parsed.query)['time'][0])\n structure.addHumidity(time, value)\n self.sendResponse(self, 200, \"\")\n except:\n self.sendResponse(self, 400, \"\")\n traceback.print_exc(file=sys.stdout)\n return",
"def __call__(self, environ, start_response):\n\t\tpath_info = environ.get('PATH_INFO', '')\n\t\turl = request.construct_url(environ, with_query_string=False)\n\n\t\t# Redirect /introspector to /introspector/ to ensure consistent URLs\n\t\tif path_info == '':\n\t\t\tstart_response('302 Found', [('Location', url + '/')])\n\t\t\treturn [ '' ]\n\n\t\t# Index page\n\t\tif path_info == '/':\n\t\t\tstart_response('200 OK', [('Content-Type', 'text/html')])\n\t\t\treturn [ self.introspect(url, \"chiral.web.introspector\", \"index\") ]\n\n\t\t# Parse the URL: [/introspector/]module/namespace/item\n\t\tpath = path_info.split('/')[1:]\n\t\tif len(path) < 3:\n\t\t\tstart_response('404 Not Found', [('Content-Type', 'text/html')])\n\t\t\treturn [ \"404 Not Found\" ]\n\n\t\tmodule, namespace, item = path\n\t\tscript_name = environ.get('SCRIPT_NAME', '') + \"/\"\n\n\t\tif module not in sys.modules or not hasattr(sys.modules[module], '_chiral_introspection'):\n\t\t\tstart_response('404 Not Found', [('Content-Type', 'text/html')])\n\t\t\treturn [ \"404 Not Found\" ]\n\n\t\t# Commands are slightly different: they must be POST, and the namespace has \"cmd_\" at the beginning\n\t\tif environ[\"REQUEST_METHOD\"] == \"POST\":\n\t\t\ttry:\n\t\t\t\tifunc = getattr(sys.modules[module]._chiral_introspection(), \"cmd_\" + namespace)\n\t\t\texcept AttributeError:\n\t\t\t\tstart_response('404 Not Found', [('Content-Type', 'text/html')])\n\t\t\t\treturn [ \"404 Not Found\" ]\n\n\t\t\tnext_url = ifunc(item)\n\t\t\tstart_response('302 Found', [('Location', script_name + next_url)])\n\t\t\treturn [ \"\" ]\n\n\t\t# Prevent shenanigans involving commands sent as GET\n\t\tif namespace.startswith(\"cmd_\"):\n\t\t\tstart_response('404 Not Found', [('Content-Type', 'text/html')])\n\t\t\treturn [ \"404 Not Found\" ]\n\t\t\t\t\t\n\t\tout_string = self.introspect(environ.get('SCRIPT_NAME', '') + '/', module, namespace, item)\n\n\t\tif out_string is None:\n\t\t\tstart_response('404 Not Found', [('Content-Type', 'text/html')])\n\t\t\treturn [ \"404 Not Found\" ]\n\n\t\tstart_response('200 OK', [('Content-Type', 'text/html')])\n\t\treturn [ out_string ]",
"def handle_request(self, path=None):\n req = get_request()\n resp = super().handle_request(req)\n return to_response(resp)",
"def matchResult(self, method, path):\n pass",
"def get(self, path):\n return self.request(path, method='GET')",
"def get(self, path):\n response = self._request(\"GET\", path)\n return self._handle_response(response)",
"def do_GET(self):\n try:\n \n # parse the requested page and see if it's valid\n parse_status, explanation_str = self.parse_header(self.path)\n \n # parse_status:\n # -1: error\n # 0: /log/* request\n # 1: /detailed/node/timestamp request\n print str(self.parse_header(self.path))\n \n explanation_str = str(explanation_str)\n \n # error\n if parse_status == -1:\n # invalid header, close the connection and die but notify user\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.wfile.write('Invalid request ('+explanation_str+')')\n print '-1'\n return\n \n # 1: /detailed/node/timestamp request\n elif parse_status == 1:\n print '1'\n # just need to respond with the file that's contained in explanation_str\n # and once we verify that it exists, we're golden\n \n # path to the \"detailed\" file\n file_path = explanation_str\n \n if os.path.isfile(file_path):\n try:\n # TODO: make HTML here to nav around previous node things\n detailed_file_handle = open(file_path, 'r')\n self.send_response(200)\n self.send_header('Content-type',\t'text/plain')\n self.end_headers() \n self.wfile.write(detailed_file_handle.read())\n detailed_file_handle.close()\n return\n except Exception, e:\n print 'Error while sending detailed log file'\n print e\n return\n else:\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.wfile.write('Invalid file request')\n return\n \n # 0: /log/* request\n elif parse_status == 0:\n print '0'\n # request was successfull, we just want the filename from index\n log_index = explanation_str\n \n success_status, log_filename = self.get_filename_from_index(log_index)\n \n if success_status == -1:\n # some kind of error of which the description is stored in log_filename\n #sockobj.send('The server encountered an error opening the file, please'+\\\n # ' try your request again')\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers() \n self.wfile.write('The server encountered an error opening the file, please'+\\\n ' try your request again')\n return\n \n # the file exists!\n # just dump the file at this point, and then...\n \n # send the HTML file\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.send_html_file(log_filename, log_index)\n return\n\n # invalid type\n else:\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.wfile.write('Invalid request type 2')\n return\n \n except IOError:\n self.send_error(404,'File Not Found: %s' % self.path)\n \n return",
"def handle(conn, method, addr, data):\n\tif (addr == \"/api/project\") or addr.startswith(\"/api/project/\"):\n\t\treturn handleApiProject(conn, method, addr, data)\n\tif addr == \"/api/projectsCount\":\n\t\treturn handleApiProjectsCount(conn, method, addr, data)\n\tif (addr == \"/api/getProject\") or addr.startswith(\"/api/getProject?\"):\n\t\treturn handleApiGetProject(conn, method, addr, data)\n\tif (addr == \"/api/getProjects\") or addr.startswith(\"/api/getProjects?\"):\n\t\treturn handleApiGetProjects(conn, method, addr, data)\n\tif addr == \"/api/getProjectsCount\":\n\t\treturn handleApiGetProjectsCount(conn, method, addr, data)\n\tif (addr == \"/api/getProjectCard\") or addr.startswith(\"/api/getProjectCard?\"):\n\t\treturn handleApiGetProjectCard(conn, method, addr, data)\n\tif addr == \"/api/getProject\":\n\t\tcore.sendAnswer(conn, \"400 Bad Request\")\n\t\treturn True\n\treturn False",
"def _make_http_request_read(self, path):\n url = self.url_base + path\n if url not in self._requests_cache:\n self._requests_cache[url] = self._perform_http_request(url)[2]\n return self._requests_cache[url]",
"def _GetPaths(self) -> Dict[str, Dict[Any, Any]]:\n\n # The `Paths Object` `paths` field of the root `OpenAPI Object`.\n paths_obj: DefaultDict[str, Dict[Any, Any]] = collections.defaultdict(dict)\n\n router_methods = self.router.__class__.GetAnnotatedMethods()\n for router_method in router_methods.values():\n # To extract optional path parameters, all the routes associated with this\n # router method must be analysed and grouped.\n ungrouped_routes = []\n for http_method, path, _ in router_method.http_methods:\n path_components = path.split(\"/\")\n # Remove any empty strings from the list of path components.\n path_components = [comp for comp in path_components if comp]\n\n ungrouped_routes.append([http_method] + path_components)\n\n grouped_routes = _GetGroupedRoutes(ungrouped_routes)\n for route_info in grouped_routes:\n # Components (comps) are URL components, including Werkzeug path\n # arguments such as `<client_id>` or `<path:file_path>`.\n route_comps, req_path_param_comps, opt_path_param_comps = route_info\n http_method = route_comps[0]\n path = \"/\".join(route_comps[1:])\n\n # Separate the route parameters into path params, query params and\n # request body params.\n path_params, query_params, body_params = self._SeparateFieldsIntoParams(\n http_method, path, router_method.args_type)\n\n # Separate the path params into required and optional path params.\n # First, extract path param names by normalizing the Werkzeug path arg\n # components to OpenAPI path args and remove the surrounding brackets.\n req_path_param_names = [\n _NormalizePathComponent(comp)[1:-1] for comp in req_path_param_comps\n ]\n opt_path_param_names = [\n _NormalizePathComponent(comp)[1:-1] for comp in opt_path_param_comps\n ]\n req_path_params = []\n opt_path_params = []\n for path_param in path_params:\n path_param_name = casing.SnakeToCamel(path_param.name)\n if path_param_name in req_path_param_names:\n req_path_params.append(path_param)\n elif path_param_name in opt_path_param_names:\n opt_path_params.append(path_param)\n else:\n raise AssertionError(\n f\"Path parameter {path_param_name} was not classified as \"\n f\"required/optional.\")\n\n normalized_path = _NormalizePath(path)\n path_obj = paths_obj[normalized_path]\n path_obj[http_method.lower()] = (\n self._GetOperationDescription(router_method, req_path_params,\n opt_path_params, query_params,\n body_params))\n\n return paths_obj",
"async def rest_handler(request):\n # verify the request\n valid, reason = await verify_rest_request(request)\n if not valid:\n return generate_error(reason, 400)\n json = await request.json()\n # get the parameters\n cmd = json['cmd']\n params = json['params']\n # pass off to the correct target handler\n if cmd == 'find':\n response = await _find_handler(request, params)\n elif cmd == 'stats':\n response = await _stats_handler(request, params)\n elif cmd == 'download':\n response = await _download_handler(request, params)\n elif cmd == 'upload':\n response = await _upload_handler(request, params)\n elif cmd == 'provision':\n response = await _provision_handler(request, params)\n # return the response we get back fgrom the handler\n return response",
"def parse_request(request):\n\n method, path, version = request.split(\"\\r\\n\")[0].split(\" \")\n if method != \"GET\":\n raise NotImplementedError\n return path",
"def _route_get(self):\n if self.path == '/status':\n self._create_status()\n else:\n self._create_method_not_allowed()",
"def Get(self, path):\n\n # try to request\n try:\n request = requests.get(self.config[\"url\"] + \"/weaviate/v1\" + path)\n except urllib.error.HTTPError as error:\n return None, json.loads(error.read().decode('utf-8'))\n\n return request.status_code, request.json()",
"def traverse(object, path, default=None, request=None):",
"def simulate_get(self, path='/', **kwargs) -> _ResultBase:\n return self.simulate_request('GET', path, **kwargs)",
"def _http_get(self, path):\n # Prepare the request path\n if path[0] == '/':\n path = path[1:]\n path = urljoin(self.servlet_path, path)\n\n # Request the end points\n conn = httplib.HTTPConnection(\"localhost\", self.port)\n conn.request(\"GET\", path)\n result = conn.getresponse()\n data = result.read()\n conn.close()\n\n # Convert the response to a string\n return result.status, to_str(data)",
"def _handle_request(self, method, url, handler):\n if not(method in self.handlers):\n handler.set_status(405) # Method Not Allowed\n handler.write({})\n return\n for (path, fn) in self.handlers[method].items():\n if re.match(path, url):\n fn(url, handler)\n return\n handler.set_status(404) # Not Found\n handler.write({})",
"def route_request(self,):\n # Let's parse and prepare url, path, query etc..\n up = urlparse(self.path, 'http')\n self.log_data['url'] = path = up.path\n splitpath = path.split('/')\n if len(splitpath) == 1:\n raise HTTPError(404, 'Not found.')\n self.query = parse_qs(up.query)\n\n # Get the route\n route = self.get_route(self.http_method, path.encode('utf-8'))\n # Parse URL path\n urlvars = self.parse_path(path.encode('utf-8'), route)\n post_raw = None\n # Load POST content if any\n if self.http_method == 'POST':\n # TODO: raise an HTTP error if the content-length is\n # too large.\n try:\n post_raw = self.rfile.read(int(self.headers['Content-Length']))\n except Exception as e:\n logger.exception(str(e))\n logger.debug(self.headers)\n logger.error('Unable to read post data')\n raise HTTPError(400, 'Unable to read post data')\n\n username = None\n checked = False\n\n # Authentication checking out\n\n # 1. Try the auth' by key: if this method is available for this API\n # and 'key' arg exists.\n key = self.headers.get('X-TemBoard-Agent-Key')\n if key:\n logger.debug(\"Authentication by key from header.\")\n elif 'key' in self.query:\n # TODO: Remove auth from query in 8.0\n key = self.query['key'][0]\n logger.debug(\"Authentication by key from argument.\")\n\n if route['check_key'] and key:\n if self.app.config.temboard.key is None:\n raise HTTPError(401, \"Authentication key not configured\")\n if key != self.app.config.temboard.key:\n raise HTTPError(401, \"Invalid key\")\n checked = True\n\n # 2. Check session ID if available and not previously auth'd by key\n if not checked and route['check_session']:\n username = check_sessionid(self.headers, self.sessions)\n checked = True\n\n # 3. At this point, if not yet checked out and auth' by key is\n # available then we need to raise an error because 'key' arg hasn't\n # been passed and auth' by key is the only available method.\n if not checked and route['check_key']:\n raise HTTPError(401, \"Missing key\")\n\n try:\n # Load POST content expecting it is in JSON format.\n if self.http_method == 'POST':\n self.post_json = json.loads(post_raw.decode('utf-8'))\n except Exception as e:\n logger.exception(str(e))\n logger.error('Invalid json format')\n raise HTTPError(400, 'Invalid json format')\n\n http_context = dict(\n headers=self.headers,\n query=self.query,\n post=self.post_json,\n urlvars=urlvars,\n username=username,\n )\n\n # Handle the request\n func = getattr(sys.modules[route['module']], route['function'])\n self.log_data['handler'] = route['module'] + '.' + route['function']\n if route['module'] == 'temboardagent.api':\n # some core APIs need to deal with sessions\n return (200, func(http_context, self.app, self.sessions))\n else:\n # plugin\n return (200, func(http_context, self.app))",
"def traverse(self, path, response=None, validated_hook=None):\n request = self\n request_get = request.get\n if response is None:\n response = self.response\n\n # remember path for later use\n browser_path = path\n\n # Cleanup the path list\n if path[:1] == '/':\n path = path[1:]\n if path[-1:] == '/':\n path = path[:-1]\n clean = []\n for item in path.split('/'):\n # Make sure that certain things that dont make sense\n # cannot be traversed.\n if item in ('REQUEST', 'aq_self', 'aq_base'):\n return response.notFoundError(path)\n if not item or item == '.':\n continue\n elif item == '..':\n del clean[-1]\n else:\n clean.append(item)\n path = clean\n\n # How did this request come in? (HTTP GET, PUT, POST, etc.)\n method = request_get('REQUEST_METHOD', 'GET').upper()\n\n # Probably a browser\n no_acquire_flag = 0\n if method in ('GET', 'POST', 'PURGE') and \\\n not is_xmlrpc_response(response):\n # index_html is still the default method, only any object can\n # override it by implementing its own __browser_default__ method\n method = 'index_html'\n elif method != 'HEAD' and self.maybe_webdav_client:\n # Probably a WebDAV client.\n no_acquire_flag = 1\n\n URL = request['URL']\n parents = request['PARENTS']\n object = parents[-1]\n del parents[:]\n\n self.roles = getRoles(None, None, object, UNSPECIFIED_ROLES)\n\n # if the top object has a __bobo_traverse__ method, then use it\n # to possibly traverse to an alternate top-level object.\n if hasattr(object, '__bobo_traverse__'):\n try:\n new_object = object.__bobo_traverse__(request)\n if new_object is not None:\n object = new_object\n self.roles = getRoles(None, None, object,\n UNSPECIFIED_ROLES)\n except Exception:\n pass\n\n if not path and not method:\n return response.forbiddenError(self['URL'])\n\n # Traverse the URL to find the object:\n if hasattr(object, '__of__'):\n # Try to bind the top-level object to the request\n # This is how you get 'self.REQUEST'\n object = object.__of__(RequestContainer(REQUEST=request))\n parents.append(object)\n\n steps = self.steps\n self._steps = _steps = list(map(quote, steps))\n path.reverse()\n\n request['TraversalRequestNameStack'] = request.path = path\n request['ACTUAL_URL'] = request['URL'] + quote(browser_path)\n\n # Set the posttraverse for duration of the traversal here\n self._post_traverse = post_traverse = []\n\n entry_name = ''\n try:\n # We build parents in the wrong order, so we\n # need to make sure we reverse it when we're done.\n while 1:\n bpth = getattr(object, '__before_publishing_traverse__', None)\n if bpth is not None:\n bpth(object, self)\n\n path = request.path = request['TraversalRequestNameStack']\n # Check for method:\n if path:\n entry_name = path.pop()\n else:\n # If we have reached the end of the path, we look to see\n # if we can find IBrowserPublisher.browserDefault. If so,\n # we call it to let the object tell us how to publish it.\n # BrowserDefault returns the object to be published\n # (usually self) and a sequence of names to traverse to\n # find the method to be published.\n\n # This is webdav support. The last object in the path\n # should not be acquired. 
Instead, a NullResource should\n # be given if it doesn't exist:\n if no_acquire_flag and \\\n hasattr(object, 'aq_base') and \\\n not hasattr(object, '__bobo_traverse__'):\n\n if (object.__parent__ is not\n aq_inner(object).__parent__):\n from webdav.NullResource import NullResource\n object = NullResource(parents[-2], object.getId(),\n self).__of__(parents[-2])\n\n if IBrowserPublisher.providedBy(object):\n adapter = object\n else:\n adapter = queryMultiAdapter((object, self),\n IBrowserPublisher)\n if adapter is None:\n # Zope2 doesn't set up its own adapters in a lot\n # of cases so we will just use a default adapter.\n adapter = DefaultPublishTraverse(object, self)\n\n object, default_path = adapter.browserDefault(self)\n if default_path:\n request._hacked_path = 1\n if len(default_path) > 1:\n path = list(default_path)\n method = path.pop()\n request['TraversalRequestNameStack'] = path\n continue\n else:\n entry_name = default_path[0]\n elif (method\n and hasattr(object, method)\n and entry_name != method\n and getattr(object, method) is not None):\n request._hacked_path = 1\n entry_name = method\n method = 'index_html'\n else:\n if hasattr(object, '__call__'):\n self.roles = getRoles(\n object, '__call__',\n object.__call__, self.roles)\n if request._hacked_path:\n i = URL.rfind('/')\n if i > 0:\n response.setBase(URL[:i])\n break\n step = quote(entry_name)\n _steps.append(step)\n request['URL'] = URL = f'{request[\"URL\"]}/{step}'\n\n try:\n subobject = self.traverseName(object, entry_name)\n if hasattr(object, '__bobo_traverse__') or \\\n hasattr(object, entry_name):\n check_name = entry_name\n else:\n check_name = None\n\n self.roles = getRoles(\n object, check_name, subobject,\n self.roles)\n object = subobject\n # traverseName() might raise ZTK's NotFound\n except (KeyError, AttributeError, ztkNotFound):\n if response.debug_mode:\n return response.debugError(\n \"Cannot locate object at: %s\" % URL)\n else:\n return response.notFoundError(URL)\n except Forbidden as e:\n if self.response.debug_mode:\n return response.debugError(e.args)\n else:\n return response.forbiddenError(entry_name)\n\n parents.append(object)\n\n steps.append(entry_name)\n finally:\n parents.reverse()\n\n # Note - no_acquire_flag is necessary to support\n # things like DAV. We have to make sure\n # that the target object is not acquired\n # if the request_method is other than GET\n # or POST. 
Otherwise, you could never use\n # PUT to add a new object named 'test' if\n # an object 'test' existed above it in the\n # hierarchy -- you'd always get the\n # existing object :(\n if no_acquire_flag and \\\n hasattr(parents[1], 'aq_base') and \\\n not hasattr(parents[1], '__bobo_traverse__'):\n base = aq_base(parents[1])\n if not hasattr(base, entry_name):\n try:\n if entry_name not in base:\n raise AttributeError(entry_name)\n except TypeError:\n raise AttributeError(entry_name)\n\n # After traversal post traversal hooks aren't available anymore\n del self._post_traverse\n\n request['PUBLISHED'] = parents.pop(0)\n\n # Do authorization checks\n user = groups = None\n i = 0\n\n if 1: # Always perform authentication.\n\n last_parent_index = len(parents)\n if hasattr(object, '__allow_groups__'):\n groups = object.__allow_groups__\n inext = 0\n else:\n inext = None\n for i in range(last_parent_index):\n if hasattr(parents[i], '__allow_groups__'):\n groups = parents[i].__allow_groups__\n inext = i + 1\n break\n\n if inext is not None:\n i = inext\n v = getattr(groups, 'validate', old_validation)\n\n auth = request._auth\n\n if v is old_validation and self.roles is UNSPECIFIED_ROLES:\n # No roles, so if we have a named group, get roles from\n # group keys\n if hasattr(groups, 'keys'):\n self.roles = list(groups.keys())\n else:\n try:\n groups = groups()\n except Exception:\n pass\n try:\n self.roles = list(groups.keys())\n except Exception:\n pass\n\n if groups is None:\n # Public group, hack structures to get it to validate\n self.roles = None\n auth = ''\n\n if v is old_validation:\n user = old_validation(groups, request, auth, self.roles)\n elif self.roles is UNSPECIFIED_ROLES:\n user = v(request, auth)\n else:\n user = v(request, auth, self.roles)\n\n while user is None and i < last_parent_index:\n parent = parents[i]\n i = i + 1\n if hasattr(parent, '__allow_groups__'):\n groups = parent.__allow_groups__\n else:\n continue\n if hasattr(groups, 'validate'):\n v = groups.validate\n else:\n v = old_validation\n if v is old_validation:\n user = old_validation(\n groups, request, auth, self.roles)\n elif self.roles is UNSPECIFIED_ROLES:\n user = v(request, auth)\n else:\n user = v(request, auth, self.roles)\n\n if user is None and self.roles != UNSPECIFIED_ROLES:\n response.unauthorized()\n\n if user is not None:\n if validated_hook is not None:\n validated_hook(self, user)\n request['AUTHENTICATED_USER'] = user\n request['AUTHENTICATION_PATH'] = '/'.join(steps[:-i])\n\n # Remove http request method from the URL.\n request['URL'] = URL\n\n # Run post traversal hooks\n if post_traverse:\n result = exec_callables(post_traverse)\n if result is not None:\n object = result\n\n return object",
"def __call__(self, request, *args):\n http_method = request.method.lower()\n if http_method == 'head':\n http_method = 'get'\n try:\n handler_method = getattr(self, http_method)\n except:\n raise HTTPMethodNotAllowed()\n response = handler_method(request, *args)\n if isinstance(response, str):\n response = HTTPResponse(response)\n if request.method.lower() == 'head':\n response = HTTPResponse('', response.status, response.content_type, response.charset, response.headerlist)\n return response",
"def simulate_get(self, path='/', **kwargs):\n return self.simulate_request('GET', path, **kwargs)",
"def handle_request(self, method_name, app_prefix, path, payload=None):\n path = self.__get_path(app_prefix, path)\n method = getattr(self._session, method_name)\n return ReselloResponse(method(path, json=payload))",
"def _get(self, path):\n r = requests.get(self._url(path))\n assert r.status_code == 200\n return r.json",
"def dispatch(environ, start_response):\n url_path = environ['PATH_INFO']\n print environ['PATH_INFO']\n if(url_path == '/alarms'):\n content = app.alarms(environ, start_response)\n\treturn content\n if(url_path == '/enodes'):\n content = app.enodeb(environ, start_response)\n return content\n if(url_path == '/perf'):\n content = app.perf(environ, start_response)\n return content\n if(url_path == '/hoa_son'):\n content = app.hoa_son(environ, start_response)\n return content\n if(url_path == '/hoa_w_son'):\n content = app.hoa_w_son(environ, start_response)\n return content\n if(url_path == '/anrs'):\n content = app.ANR(environ, start_response)\n return content\n if(url_path == '/post'):\n content = app.post(environ, start_response)\n return content\n else:\n\tcontent = app.application2(environ,start_response)\n\treturn content"
] | [
"0.6210815",
"0.61685705",
"0.61090153",
"0.60661197",
"0.5938361",
"0.5854154",
"0.5819774",
"0.5761749",
"0.5718614",
"0.5701811",
"0.56692934",
"0.56031966",
"0.5580705",
"0.55577755",
"0.5556755",
"0.5551802",
"0.55436087",
"0.55363166",
"0.5508227",
"0.5494905",
"0.54924154",
"0.54882497",
"0.5469903",
"0.5438001",
"0.54305565",
"0.54271555",
"0.54239976",
"0.5420127",
"0.5418208",
"0.54018426"
] | 0.62466556 | 0 |
Generate HTTP headers from the passed HTTP response code | def gen_headers(self, http_code):
if http_code == 200:
http_headers = "HTTP/1.1 200 OK\n"
elif http_code == 400:
http_headers = "HTTP/1.1 400 Bad Request\n"
elif http_code == 404:
http_headers = "HTTP/1.1 404 Not Found\n"
utc_datetime = datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S")
http_headers += dedent("""\
Date: %s GMT
Content-type: text/html; charset=UTF-8
Server: pydb.py
Connection: close\n\n""" % utc_datetime)
return http_headers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_response(message, mimetype, code=\"OK 200\"):\n\n if not isinstance(message, bytes):\n message = message.encode('utf-8')\n bytelength = len(message)\n header_list = []\n header_list.append('HTTP/1.1 %s \\r\\n' % code)\n header_list.append('Date: %s \\r\\n' % str(formatdate(usegmt=True)))\n header_list.append('Server: Team Python\\r\\n')\n header_list.append('Content-Type: %s; char=UTF-8\\r\\n' % mimetype)\n header_list.append('Content-Length: %s \\r\\n' % bytelength)\n header_list.append('\\r\\n%s' % message)\n header = ''.join(header_list)\n return header",
"def responseheaders(self, flow: mitmproxy.http.HTTPFlow):",
"def http_response(status_code: int) -> Tuple[dict, int]:\n return ({'message': HTTP_STATUS_CODES.get(status_code, '')}, status_code)",
"def build_http_response(status_code: int,\n protocol_version: bytes = b'HTTP/1.1',\n reason: Optional[bytes] = None,\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n line = [protocol_version, bytes_(status_code)]\n if reason:\n line.append(reason)\n if headers is None:\n headers = {}\n if body is not None and not any(\n k.lower() == b'content-length' for k in headers):\n headers[b'Content-Length'] = bytes_(len(body))\n return build_http_pkt(line, headers, body)",
"def build_http_response(status_code: int,\n protocol_version: bytes = HTTP_1_1,\n reason: Optional[bytes] = None,\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n line = [protocol_version, bytes_(status_code)]\n if reason:\n line.append(reason)\n if headers is None:\n headers = {}\n has_content_length = False\n has_transfer_encoding = False\n for k in headers:\n if k.lower() == b'content-length':\n has_content_length = True\n if k.lower() == b'transfer-encoding':\n has_transfer_encoding = True\n if body is not None and \\\n not has_transfer_encoding and \\\n not has_content_length:\n headers[b'Content-Length'] = bytes_(len(body))\n return build_http_pkt(line, headers, body)",
"def responseheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def responseheaders(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def _construct_headers(error_type):\n return {\"x-amzn-errortype\": error_type, \"Content-Type\": \"application/json\"}",
"def get_response_headers(self, *args, **kwargs):\n if self.response_headers:\n return self._unpack_headers(self.response_headers)",
"def prepare_response(code, result=None, resp_type=None):\n body = result if result else std_welcome\n msg = 'OK' if str(code).startswith('2') else 'NOT OK'\n resp_type = resp_type if resp_type else 'application/json'\n\n return {\n 'statusCode': code,\n 'statusDescription': '{0} {1}'.format(code, msg),\n 'isBase64Encoded': False,\n 'body': '{}\\n'.format(body),\n 'headers': {\n 'Content-Type': '{}; charset=utf-8'.format(resp_type)\n }\n }",
"def writeHeaders(code, headers):\n pass",
"def response_headers():\n # Pending swaggerUI update\n # https://github.com/swagger-api/swagger-ui/issues/3850\n headers = MultiDict(request.args.items(multi=True))\n response = jsonify(list(headers.lists()))\n\n while True:\n original_data = response.data\n d = {}\n for key in response.headers.keys():\n value = response.headers.get_all(key)\n if len(value) == 1:\n value = value[0]\n d[key] = value\n response = jsonify(d)\n for key, value in headers.items(multi=True):\n response.headers.add(key, value)\n response_has_changed = response.data != original_data\n if not response_has_changed:\n break\n return response",
"def writeIntermediateResponse(code, headers=None):\n pass",
"def response_headers(self, extra_headers=None):\n headers_copy = self.headers.copy()\n\n if extra_headers:\n headers_copy.update(extra_headers)\n\n headers = \"\"\n\n for h in headers_copy:\n headers += \"%s: %s\\r\\n\" % (h, headers_copy[h])\n\n return headers.encode()",
"def _build_http_header(self) -> Dict[str, str]:\n return {}",
"def get_header_start(response_status):\n return '{protocol}{space}{status[0]}{space}{status[1]}'.format(\n protocol=http_protocol_version,\n space=l_s,\n status=response_status\n )",
"def _http_response(response, http_status_code):\n return make_response(jsonify(response), http_status_code)",
"async def with_code_header():\n return jsonify(language=request.headers.get(\"Lang\")), 203, {\"X\": 233}",
"def _ParseHTTPHeaders(self, http_headers_data, offset, display_name):\n header_string = http_headers_data.decode('ascii', errors='replace')\n\n try:\n http_header_start = header_string.index('request-method')\n except ValueError:\n logger.debug('No request method in header: \"{0:s}\"'.format(header_string))\n return None, None\n\n # HTTP request and response headers.\n http_headers = header_string[http_header_start::]\n\n header_parts = http_headers.split('\\x00')\n\n # TODO: check len(header_parts).\n request_method = header_parts[1]\n\n if request_method not in self._REQUEST_METHODS:\n logger.debug((\n '[{0:s}] {1:s}:{2:d}: Unknown HTTP method \\'{3:s}\\'. Response '\n 'headers: \\'{4:s}\\'').format(\n self.NAME, display_name, offset, request_method, header_string))\n\n try:\n response_head_start = http_headers.index('response-head')\n except ValueError:\n logger.debug('No response head in header: \"{0:s}\"'.format(header_string))\n return request_method, None\n\n # HTTP response headers.\n response_head = http_headers[response_head_start::]\n\n response_head_parts = response_head.split('\\x00')\n\n # Response code, followed by other response header key-value pairs,\n # separated by newline.\n # TODO: check len(response_head_parts).\n response_head_text = response_head_parts[1]\n response_head_text_parts = response_head_text.split('\\r\\n')\n\n # The first line contains response code.\n # TODO: check len(response_head_text_parts).\n response_code = response_head_text_parts[0]\n\n if not response_code.startswith('HTTP'):\n logger.debug((\n '[{0:s}] {1:s}:{2:d}: Could not determine HTTP response code. '\n 'Response headers: \\'{3:s}\\'.').format(\n self.NAME, display_name, offset, header_string))\n\n return request_method, response_code",
"def create_mock_response(self, status_code, headers=None, body_type='JSON', body=None):\n rsp = {}\n rsp['statusCode'] = int(status_code)\n\n if headers:\n rsp['headers'] = []\n\n for key, value in headers.items():\n header = {'name': key, 'values': value.split(\",\")}\n rsp['headers'].append(header)\n logger.debug(\"Add header - header: {}\".format(header))\n\n if body_type == 'JSON' and body:\n rsp['body'] = json.dumps(body)\n\n return rsp",
"def response_headers(self) -> dict:\n headers = {}\n if self.version_header:\n headers[self.version_header] = self.api_version\n return headers",
"def get_headers():\n if not headers:\n headers[\"Content-Type\"] = \"application/json\"\n headers[\"Accept\"] = \"application/json\"\n headers[\"User-Agent\"] = constants.USER_AGENT\n headers[\"Authorization\"] = get_token(constants.AUTH_URL, cfg[\"key\"])\n\n return headers\n\n return headers",
"def _get_response_message(code=200, reason=None):\n return {'reason': reason}, code",
"def get_headers(self):\n # Creating headers.\n headers = {'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'accept-encoding': 'gzip, deflate, sdch, br',\n 'accept-language': 'en-GB,en;q=0.8,en-US;q=0.6,ml;q=0.4',\n 'cache-control': 'max-age=0',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'}\n return headers",
"def _response(status_line):\n return b\"HTTP/1.1 \" + status_line + b\"\\nContent-length: 0\\n\\n\"",
"def create_501_response() -> bytes:\n date = datetime.datetime.now(datetime.timezone.utc).strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n\n header = \"HTTP/1.1 501 Not Implemented\" + \"\\r\\nDate: \" + date + \"\\r\\n\\r\\n\"\n print(header)\n return header.encode(HttpServer.FORMAT)",
"def get_response_status_header(response: requests.Response) -> str:\n if hasattr(response, 'headers'):\n return response.headers.get(RESPONSE_STATUS_HEADER, '')\n return ''",
"def _get_headers(content_type):\n headers = {'x-amz-acl': 'public-read',\n 'Content-Type': content_type,\n 'Cache-Control': 'public,max-age=31536000'}\n return headers",
"def __updater_headers(self, path, req_from_updater):\n try:\n self.logger.info(\"Request from account-updater\")\n info = self.get_cont_stat(path, req_from_updater)\n if not isinstance(info, types.DictType):\n raise info()\n headers = HeaderKeyDict({\n 'X-Container-Object-Count': info['object_count'],\n 'X-Container-Bytes-Used': info['bytes_used'],\n 'X-DELETE-Timestamp': info['delete_timestamp'],\n 'X-PUT-Timestamp': info['put_timestamp'],\n 'X-Container' : info['container']\n })\n return headers\n except HTTPException as error:\n self.logger.exception(error)\n return error.status_int\n except Exception as err:\n self.logger.exception(err)\n return HTTP_INTERNAL_SERVER_ERROR",
"def clean_headers(status):\r\n import cherrypy\r\n \r\n response = cherrypy.response\r\n \r\n # Remove headers which applied to the original content,\r\n # but do not apply to the error page.\r\n respheaders = response.headers\r\n for key in [\"Accept-Ranges\", \"Age\", \"ETag\", \"Location\", \"Retry-After\",\r\n \"Vary\", \"Content-Encoding\", \"Content-Length\", \"Expires\",\r\n \"Content-Location\", \"Content-MD5\", \"Last-Modified\"]:\r\n if respheaders.has_key(key):\r\n del respheaders[key]\r\n \r\n if status != 416:\r\n # A server sending a response with status code 416 (Requested\r\n # range not satisfiable) SHOULD include a Content-Range field\r\n # with a byte-range-resp-spec of \"*\". The instance-length\r\n # specifies the current length of the selected resource.\r\n # A response with status code 206 (Partial Content) MUST NOT\r\n # include a Content-Range field with a byte-range- resp-spec of \"*\".\r\n if respheaders.has_key(\"Content-Range\"):\r\n del respheaders[\"Content-Range\"]"
] | [
"0.7076088",
"0.6702638",
"0.65351945",
"0.6456887",
"0.64418685",
"0.63103044",
"0.63103044",
"0.6244145",
"0.62058735",
"0.61695415",
"0.61634386",
"0.61449164",
"0.6127177",
"0.6060533",
"0.60463715",
"0.6001048",
"0.5998467",
"0.5989219",
"0.59759045",
"0.5963978",
"0.595876",
"0.59570295",
"0.5948188",
"0.594328",
"0.59357363",
"0.5935283",
"0.5893474",
"0.58889914",
"0.58851045",
"0.5879226"
] | 0.83281934 | 0 |
Make an instance of DBServer on the passed IP and port | def make_dbserver(server_address, server_port):
server = DBServer(server_address, server_port)
return server | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self,dbname,server_ip,server_port):\n self.dbname = dbname\n self.con = sqlite3.connect(dbname)\n self.cur = self.con.cursor()\n self.server_ip=server_ip\n self.server_port=server_port\n\n # The access method from server initialization\n self.server = SimpleXMLRPCServer((server_ip, int(server_port)))\n logging.info('Server started on %s %s', server_ip, server_port)",
"def run_server(host='0.0.0.0', port=8080, database_url='mongodb://0.0.0.0:27017/'):\n f = furl(database_url)\n db, db_host, db_port = f.scheme, f.host, f.port \n app._db_driver = db_drivers[db].Driver(db_host, db_port)\n app.run(host=host, port=port)",
"def setup_server(port=0, verbose=False):\r\n\r\n host = gethostname()\r\n sock = socket(AF_INET, SOCK_STREAM)\r\n try:\r\n sock.bind((host, port))\r\n except error as msg:\r\n raise error(\"Could not open Socket on server: \" + str(msg))\r\n sock.listen(5) # max num of queued connections usually [1..5]\r\n if verbose:\r\n print \"Server listening on %s\" % str(sock.getsockname())\r\n return sock",
"def __init__(self, server_addr, server_port):",
"def create_server(\n handle_event: EventCallback,\n host: str = \"0.0.0.0\",\n port: int = 0,\n ssl_context: Optional[SSLContext] = None,\n) -> Server:\n return Server(handle_event, host, port, ssl_context)",
"def _create_server_socket(server_ip: str, server_port: int) -> socket.socket:\r\n\ttry:\r\n\t\tserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\tserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\t\tserver_socket.bind((server_ip, server_port))\r\n\t\tserver_socket.listen(MAX_CONNECTION)\r\n\texcept Exception as e:\r\n\t\t_logger.error(\"Exception occured while creating server socket: \" \\\r\n\t\t\t+ str(e))\r\n\t\treturn None\r\n\telse:\r\n\t\treturn server_socket",
"def __init__(self, dbname: str, dbuser: str, dbpassword: str,\n dbhost: str = \"127.0.0.1\", dbport: int = 5432):\n self.dbname = dbname\n self.dbuser = dbuser\n self.dbpassword = dbpassword\n self.dbhost = dbhost\n self.dbport = dbport\n self._connection = None",
"def __init__(self, user, instance, database, password=None,\n port=3306):\n if password:\n self._engine = create_engine(\n \"mysql+mysqldb://%s:%s@%s:%s/%s\" %\n (user, password, instance, port, database), poolclass=NullPool)\n else:\n self._engine = create_engine(\n \"mysql+mysqldb://%s@%s:%s/%s\" %\n (user, instance, port, database), poolclass=NullPool)",
"def createServer():\n cd('/')\n srv = cmo.createServer(managedServername) \n srv.setCluster(getMBean('/Clusters/%s' % cluster_name))\n srv.setListenPort(managedServerPort)\n return srv",
"def __init__(self, database, host='127.0.0.1', user='root', password='', port='3306', tenant_id=None):\n\n self.HOST = host\n self.USER = user\n self.PASSWORD = password\n self.PORT = port\n self.DATABASE = f'{tenant_id}_{database}' if tenant_id else database\n \n logging.info(f'Host: {self.HOST}')\n logging.info(f'User: {self.USER}')\n logging.info(f'Password: {self.PASSWORD}')\n logging.info(f'Port: {self.PORT}')\n logging.info(f'Database: {self.DATABASE}')\n\n self.connect()",
"def __init__(self, db_name: str, db_user: str, db_password: str, db_host: str, *args, **kwargs):\n self.db_name = db_name\n self.db_user = db_user\n self.db_password = db_password\n self.db_host = db_host\n self.dsn = f\"dbname={self.db_name} user={self.db_user} password={self.db_password} host={self.db_host}\"",
"def __init__(self, db_name: str, db_user: str, db_password: str, db_host: str, *args, **kwargs):\n self.db_name = db_name\n self.db_user = db_user\n self.db_password = db_password\n self.db_host = db_host\n self.dsn = f\"dbname={self.db_name} user={self.db_user} password={self.db_password} host={self.db_host}\"",
"def create(cls, application, **kwargs):\n host, port = get_free_port()\n if 'port' not in kwargs:\n kwargs['port'] = port\n if 'host' not in kwargs:\n kwargs['host'] = host\n if 'expose_tracebacks' not in kwargs:\n kwargs['expose_tracebacks'] = True\n server = cls(application, **kwargs)\n server.runner = threading.Thread(target=server.run)\n server.runner.daemon = True\n server.runner.start()\n return server",
"def createNewFileServer(ip, port):\n session = Queries.createSession()\n try:\n new_fs = FileServer(ip, port, 'ONLINE')\n session.add(new_fs)\n session.commit()\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()",
"def __init__(__self__, *,\n database: pulumi.Input[str],\n host: pulumi.Input[str],\n port: pulumi.Input[float]):\n pulumi.set(__self__, \"database\", database)\n pulumi.set(__self__, \"host\", host)\n pulumi.set(__self__, \"port\", port)",
"def server():\n\n server = client.Server(host=host, auth=auth)\n try:\n server.delete_db(test_db_name)\n except excepts.DBNotExists:\n pass\n return server",
"def __init__(self, host, port, user, passwd, db):\n self.host = host\n self.port = port\n self.user = user\n self.passwd = passwd\n self.db = db\n self.cursor = None\n self.database = None",
"def make_server(host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler):\n server = server_class((host, port), handler_class)\n server.set_app(app)\n return server",
"def init_server(host, listen_port, backlog):\r\n tcp_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n tcp_server.bind((host, listen_port))\r\n tcp_server.listen(backlog)\r\n return tcp_server",
"def bind_server(self):\n self.MAIN_CONNECTION.bind((self.HOST, self.PORT))",
"def create_test_port(**kw):\n port = get_test_port(**kw)\n # Let DB generate ID if it isn't specified explicitly\n if 'id' not in kw:\n del port['id']\n dbapi = db_api.get_instance()\n return dbapi.create_port(port)",
"def __init__(self, host='localhost', port=5432, dbname=None, user='www',\n password=None):\n # Set the DSN\n dsn = \"host='%s' port=%i dbname='%s' user='%s'\" % \\\n (host, port, dbname, user)\n\n # Add the password if specified\n if password:\n dsn += \" password='%s'\" % password\n\n # Generate a connection hash to keep a global instance of this\n # connection with\n self._connection_hash = _generate_connection_hash(dsn)\n\n # Attempt to get a cached connection from our module level pool\n self._pgsql = _get_cached_connection(self._connection_hash)\n\n # If we got a result, just log our success in doing so\n if self._pgsql:\n logger.debug(\"Re-using cached connection: %s\",\n self._connection_hash)\n\n # Create a new PostgreSQL connection and cache it\n else:\n # Connect to a PostgreSQL daemon\n logger.info(\"Connecting to %s:%i:%s as %s\",\n host, port, dbname, user)\n self._pgsql = pg_connect(dsn)\n\n # Commit after every command\n self._pgsql.set_isolation_level(\n extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n logger.info('Connected to PostgreSQL')\n\n # Add the connection to our module level pool\n _add_cached_connection(self._connection_hash, self._pgsql)\n\n # Always get a new cursor\n self._cursor = self._pgsql.cursor(cursor_factory=extras.DictCursor)",
"def start(self):\n\n self.server = SSHTunnelForwarder(\n ssh_address_or_host=self.ssh_host,\n ssh_username=self.dbname,\n ssh_pkey='~/.ssh/id_rsa',\n remote_bind_address=('127.0.0.1', 3306),\n logger=create_logger(loglevel=0))\n\n self.server.start()\n\n local_port=str(self.server.local_bind_port)\n\n self.engine = create_engine('{}://{}:{}@127.0.0.1:{}/{}?charset=utf8mb4'.format(self.db_engine, self.db_user, self.db_pass, local_port, self.db_name))\n self.conn = self.engine.connect()",
"def make_postgres(host, port, database, user, password):\n if host is None:\n return NoDb()\n return psycopg2.connect(host=host, port=port, database=database, user=user, password=password)",
"def setup_server(self):\n\n\t\tparser = argparse.ArgumentParser()\n\t\tip = socket.gethostbyname(socket.gethostname())\n\t\tparser.add_argument(\"--ip\",\n\t\t\t\t\t\t\tdefault=\"127.0.0.1\",\n\t\t\t\t\t\t\thelp=\"The ip to listen on\")\n\t\tparser.add_argument(\"--port\",\n\t\t\t\t\t\t\ttype=int,\n\t\t\t\t\t\t\tdefault=5000,\n\t\t\t\t\t\t\thelp=\"The port to listen on\")\n\t\targs = parser.parse_args()\n\n\t\tdispatcher = dp.Dispatcher()\n\t\tdispatcher.map(\"/debug\", print)\n\t\tdispatcher.map(\"/muse/eeg\", lambda addr, args, ch1, ch2, ch3, ch4, ch5,\n\t\t\t\t\t ch6: self.eeg_handler(addr, args, ch1, ch2, ch3, ch4, ch5, ch6), \"EEG\")\n\n\t\tserver = osc_server.ThreadingOSCUDPServer(\n\t\t\t(args.ip, args.port), dispatcher)\n\t\tserver.socket.setblocking(0)\n\t\tprint(\"Serving on {}\".format(server.server_address))\n\t\treturn server",
"def connect(self, host, port, db):\r\n params = self.make_connection_params(host, port, db)\r\n return self.get_connection(params)",
"def create(addr='127.0.0.1', port=0, options=None):\n if options is None:\n options = {}\n\n backend = MitmProxy(addr, port, options)\n\n t = threading.Thread(name='Selenium Wire Proxy Server', target=backend.serve_forever)\n t.daemon = not options.get('standalone')\n t.start()\n\n addr, port, *_ = backend.address()\n log.info('Created proxy listening on %s:%s', addr, port)\n\n return backend",
"def create_server(self, bind_options: tuple, ssl_options: tuple) -> Net:\n\n # Load SSL.\n self._load_ssl(ssl_options)\n\n # Create the server.\n host, port = bind_options\n self._server = yield from self._event_loop.create_server(self.butterfly_factory, host=host, port=port,\n ssl=self._ssl)\n # Create the Net.\n # Use the default net.\n self.net = self.default_net(ip=host, port=port, loop=self._event_loop, server=self._server)\n self.net._set_bf_handler(self)\n # Create a signal handler.\n if sys.platform != \"win32\":\n self._event_loop.add_signal_handler(15, self.stop)\n return self.net",
"def server(host, port, debug):\n run_server(host, port, debug)",
"def _new_server(self):\n server_address = mock.MagicMock()\n return epdb_server.InvertedTelnetServer(server_address)"
] | [
"0.6800187",
"0.66241175",
"0.6283851",
"0.62752354",
"0.6243701",
"0.61155516",
"0.60950834",
"0.6092231",
"0.60324806",
"0.6025268",
"0.59985566",
"0.59985566",
"0.5993353",
"0.5992748",
"0.59832615",
"0.5982069",
"0.5977922",
"0.5953338",
"0.59488934",
"0.592483",
"0.5912511",
"0.59042615",
"0.5885402",
"0.58792496",
"0.58770794",
"0.58606935",
"0.58483565",
"0.58318377",
"0.5822241",
"0.58108157"
] | 0.83336544 | 0 |
Resets the position of the turtle to its starting position | def reset_position(self):
self.goto(STARTING_POSITION) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n self._turtle.shape('turtle')\n self.color = 'red'\n self.heading = 180\n self.speed = 0",
"def reset_position(self):\n self.rect.left, self.rect.top = self.start_pos",
"def repositionTurtle(t, x, y):\n t.up()\n t.goto(x, y)\n t.down()",
"def reset(self):\n self._position = TwoDV(0.0, 0.0)\n self._orient = TNavigator.START_ORIENTATION[self._mode]",
"def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())",
"def reset_position(self, x, y):\n\t\tself.grid[x][y] = self.terminal",
"def goto(x, y):\n turtleTmp.setposition(x, y)",
"def reset(self):\n self.x_pos = 10\n self.y_pos = 10\n self.line_height = 15",
"def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n try:\n self._turtle.shape('pen.gif')\n except:\n self._turtle.shape('classic')\n self._turtle.color('red')\n self.speed = 0\n \n #pair = self._turtle.color()\n self._pencolor = self._turtle.color()[0]\n self._fillcolor = self._turtle.color()[0]",
"def reset(self):\n TNavigator.reset(self)\n TPen._reset(self)\n self._clear()\n self._drawturtle()\n self._update()",
"def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8",
"def reset_position(self):\n self.set_position(copy.deepcopy(self.ab_pos))",
"def init_turtle():\n turtle.up()\n turtle.home()",
"def reset(self):\r\n self.x = self.initX\r\n self.y = self.initY\r\n self.dir= self.initDir",
"def reset(self):\n self._x = 0\n self._y = 0",
"def reset(self):\n self.position = self.initial_position\n self.velocity = [0, 0, 0]",
"def reset(self):\n p.resetBasePositionAndOrientation(self.pybullet_id, self.initial_position, (0., 0., 0., 1.))",
"def reset(self):\n p.resetBasePositionAndOrientation(self.pybullet_id, self.initial_position, (0., 0., 0., 1.))",
"def reset(self):\n p.resetBasePositionAndOrientation(self.pybullet_id, self.initial_position, (0., 0., 0., 1.))",
"def resetPos(self):\n self.angle = self.startangle\n self.pos = []\n self.pos.extend(self.startpos)",
"def reset_pos(self):\n\n return self.pos(1, 1)",
"def reset_movement(self):\n self.direction = [0, 0]",
"def set_position(self, x, y):\n self.tx = -x\n self.ty = -y",
"def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)",
"def reset(self):\n self.x_pos1 = 0\n self.x_pos2 = self.x_pos1 + self.width\n self.y_pos = self.offset_y\n self.velocity = self.origin_velocity",
"def reset_pos(self):\n self.rect.y = random.randrange(-1000, -10)\n self.rect.x = random.randrange(0, WIDTH)",
"def reset(self):\r\n self.body = [[int(self.x_pos/2), int(self.y_pos/2)]] # initial snake starts at center of screen\r\n self.direction = \"UP\"\r\n self.length = 1\r\n self.alive = True\r\n self.speed = 10",
"def reset_pos(self):\n self.rect.y = random.randrange(-300, -20)\n self.rect.x = random.randrange(0, SCREEN_WIDTH)",
"def set_starting_pos(self):\n if self.start and self.is_unoccupied(*self.start):\n self.current_pos = self.start[:]\n else:\n self.set_random_pos('starting')",
"def move_turtle(self):\n self.forward(self.move_speed)"
] | [
"0.77305067",
"0.73408115",
"0.72342724",
"0.7171275",
"0.7132374",
"0.7116628",
"0.6990136",
"0.6955724",
"0.6947348",
"0.68602973",
"0.68442464",
"0.6803416",
"0.6747056",
"0.6746576",
"0.6714489",
"0.66705287",
"0.66629267",
"0.66629267",
"0.66629267",
"0.6659177",
"0.6634175",
"0.65830076",
"0.65258086",
"0.6511597",
"0.64975655",
"0.6470824",
"0.64638424",
"0.6420939",
"0.6386762",
"0.637784"
] | 0.79360133 | 0 |
This is the initialization parser. Converts a line from a PDB file and initializes the PDB_atom's variables to appropriate values. Uses implicit typecasting as a failcheck for parsing the file correctly. Well formatted PDB files are 80 characters across and are well defined/easy to parse. Sadly not all PDB files are like this, so we use a heuristic to parse PDB files which are not 80 characters across. INPUT | def parse(self, line):
# remove trailing newline a-la Perl CHOMP
line = line.rstrip("\n")
# correctly formatted PDB files
# TODO - assuming 80 chars means well formatted is
        # perhaps risky. Need a more robust way to assess
# formatting validity
if len(line) == 80:
self.record_name = line[0:6].strip()
self.atom_id = int(line[6:11].strip())
self.atom_name = line[12:16].strip()
self.alt_location = line[16]
self.res_name = line[17:20].strip()
self.chain = line[21]
self.res_id = line[22:26].strip()
self.res_ins_code = line[26]
self.coord_X = float(line[30:38].strip())
self.coord_Y = float(line[38:46].strip())
self.coord_Z = float(line[46:54].strip())
self.occupancy = float(line[54:60].strip())
self.beta = float(line[60:66].strip())
self.seg_ID = line[72:76].strip()
self.element = line[76:78].strip()
if line[78:80].strip() == "":
                self.charge = 0.0
else:
self.charge = float(line[78:80].strip())
self.chain_local_id = -1
self.formatted_ok = True
# Heuristic section - split by space and then use
# errors in casting as flags for things being issues
# Note this may need to be expanded as malformed edge-cases
# are identified...
else:
rawsplitline = filter(None, line.split(" "))
splitline = []
for i in rawsplitline:
if i == "\n" or i == "\t":
pass
else:
splitline.append(i)
num_cols = len(splitline)
print num_cols
try:
if num_cols == 10:
self.record_name = splitline[0]
self.atom_id = int(splitline[1])
self.atom_name = splitline[2]
self.alt_location = ""
self.res_name = splitline[3]
self.chain = ""
self.res_id = int(splitline[4])
self.res_ins_code = ""
self.coord_X = float(splitline[5])
self.coord_Y = float(splitline[6])
self.coord_Z = float(splitline[7])
self.occupancy = float(splitline[8])
self.beta = float(splitline[9])
self.seg_ID = " "
self.element = " "
self.charge = " "
self.chain_local_id = -1
self.formatted_ok = False
elif num_cols == 11:
self.record_name = splitline[0]
self.atom_id = int(splitline[1])
self.atom_name = splitline[2]
self.alt_location = " "
self.res_name = splitline[3]
self.chain = splitline[4]
self.res_id = int(splitline[5])
self.res_ins_code = " "
self.coord_X = float(splitline[6])
self.coord_Y = float(splitline[7])
self.coord_Z = float(splitline[8])
self.occupancy = float(splitline[9])
self.beta = float(splitline[10])
self.seg_ID = " "
self.element = " "
self.charge = " "
self.chain_local_id = -1
self.formatted_ok = False
elif num_cols == 12:
self.record_name = splitline[0]
self.atom_id = int(splitline[1])
self.atom_name = splitline[2]
self.alt_location = " "
self.res_name = splitline[3]
self.chain = splitline[4]
self.res_id = int(splitline[5])
self.res_ins_code = " "
self.coord_X = float(splitline[6])
self.coord_Y = float(splitline[7])
self.coord_Z = float(splitline[8])
self.occupancy = float(splitline[9])
self.beta = float(splitline[10])
self.seg_ID = " "
self.element = splitline[11]
self.charge = " "
self.chain_local_id = -1
self.formatted_ok = False
else:
raise PDB_atomException("Did not match number of columns")
except ValueError,e:
print "Error with columns (using " + str(num_cols) + ") columns"
print "Tried to cast string to int/float"
raise e | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, pdb_file):\n self.pdb_file = pdb_file\n self.content = None\n self.lines = None\n self.atom_section = None\n self.conect_section = None\n self.read_all()",
"def parse_pdb(self, line):\n super().parse_pdb(line)\n self.num_remark = int(line[10:15].strip())\n self.num_het = int(line[20:25].strip())\n self.num_helix = int(line[25:30].strip())\n self.num_sheet = int(line[30:35].strip())\n self.num_turn = int(line[35:40].strip())\n self.num_site = int(line[40:45].strip())\n self.num_xform = int(line[45:50].strip())\n self.num_coord = int(line[50:55].strip())\n self.num_ter = int(line[55:60].strip())\n self.num_conect = int(line[60:65].strip())\n self.num_seq = int(line[65:70].strip())",
"def __init__(self, input_pdb, autodock, autodock_2, psiblast_path, nr_path):\n self.home = os.getcwd()\n self.autodock = \"MGL=\" + autodock + \"\\n\"\n self.autodock_2 = \"ADT=\" + autodock_2 + \"\\n\"\n self.psiblast_path = psiblast_path\n self.nr_path = nr_path\n self.pdb_path = input_pdb\n self.pdb_name = input_pdb[0:-4]\n self.complex_name = input_pdb[0:-4] + \"_complex\"\n self.chains = self.pdb_name.split(\"_\")[1]\n self.simple_name_A = self.pdb_name + \"_\" + self.chains[0] + \".pdb\"\n self.simple_name_B = self.pdb_name + \"_\" + self.chains[1] + \".pdb\"\n self.complex_name_A = self.complex_name + \"_\" + self.chains[0] + \".pdb\"\n self.complex_name_B = self.complex_name + \"_\" + self.chains[1] + \".pdb\"",
"def parse_pdb_atom(line):\n\tattrs = dict()\n\n\tfor begin, end, type_, fieldname in PDB_ATOM_LINE_FIELDS:\n\t\tstrval = line[begin:end].strip()\n\n\t\tif strval:\n\t\t\tval = type_(strval)\n\t\telse:\n\t\t\tval = None\n\n\t\tattrs[fieldname] = val\n\n\t# Combine x, y, z, field values into a single attribute\n\tattrs['coord'] = Vector3(attrs['x'], attrs['y'], attrs['z'])\n\tdel attrs['x'], attrs['y'], attrs['z']\n\n\treturn PDBAtom(**attrs)",
"def __init__(self, inFilename):\n\n self._prmtopVersion=None\n self._flags=[]\n self._raw_format={}\n self._raw_data={}\n self._has_nbfix_terms = False\n\n with open(inFilename, 'r') as fIn:\n for line in fIn:\n if line[0] == '%':\n if line.startswith('%VERSION'):\n tag, self._prmtopVersion = line.rstrip().split(None, 1)\n elif line.startswith('%FLAG'):\n tag, flag = line.rstrip().split(None, 1)\n self._flags.append(flag)\n self._raw_data[flag] = []\n elif line.startswith('%FORMAT'):\n format = line.rstrip()\n index0=format.index('(')\n index1=format.index(')')\n format = format[index0+1:index1]\n try:\n m = FORMAT_RE_PATTERN.search(format)\n self._raw_format[self._flags[-1]] = (format, m.group(1), m.group(2), int(m.group(3)), m.group(4))\n except:\n # We couldn't parse the format, so just treat the whole line as a single string.\n self._raw_format[self._flags[-1]] = (format, 1, 'a', 80, '')\n elif line.startswith('%COMMENT'):\n continue\n elif self._flags \\\n and 'TITLE'==self._flags[-1] \\\n and not self._raw_data['TITLE']:\n self._raw_data['TITLE'] = line.rstrip()\n else:\n flag=self._flags[-1]\n (format, numItems, itemType,\n iLength, itemPrecision) = self._getFormat(flag)\n line = line.rstrip()\n for index in range(0, len(line), iLength):\n item = line[index:index+iLength]\n if item:\n self._raw_data[flag].append(item.strip())\n # See if this is a CHAMBER-style topology file, which is not supported\n # for creating Systems\n self.chamber = 'CTITLE' in self._flags",
"def __init__(self, pdb_line, pdbstructure=None, extraParticleIdentifier='EP'):\n # We might modify first/final status during _finalize() methods\n self.is_first_atom_in_chain = False\n self.is_final_atom_in_chain = False\n self.is_first_residue_in_chain = False\n self.is_final_residue_in_chain = False\n # Start parsing fields from pdb line\n self.record_name = pdb_line[0:6].strip()\n try:\n self.serial_number = _parse_atom_index(pdb_line[6:11])\n except:\n # Just give it the next number in sequence.\n self.serial_number = pdbstructure._next_atom_number\n self.name_with_spaces = pdb_line[12:16]\n alternate_location_indicator = pdb_line[16]\n\n self.residue_name_with_spaces = pdb_line[17:20]\n # In some MD codes, notably ffamber in gromacs, residue name has a fourth character in\n # column 21\n possible_fourth_character = pdb_line[20:21]\n if possible_fourth_character != \" \":\n # Fourth character should only be there if official 3 are already full\n if len(self.residue_name_with_spaces.strip()) != 3:\n raise ValueError('Misaligned residue name: %s' % pdb_line)\n self.residue_name_with_spaces += possible_fourth_character\n self.residue_name = self.residue_name_with_spaces.strip()\n\n self.chain_id = pdb_line[21]\n try:\n self.residue_number = int(pdb_line[22:26])\n except:\n try:\n self.residue_number = int(pdb_line[22:26], 16) - 0xA000 + 10000\n except:\n # When VMD runs out of hex values it starts filling the residue ID field with ****.\n # Look at the most recent atoms to figure out whether this is a new residue or not.\n if pdbstructure._current_model is None or pdbstructure._current_model._current_chain is None or pdbstructure._current_model._current_chain._current_residue is None:\n # This is the first residue in the model.\n self.residue_number = pdbstructure._next_residue_number\n else:\n currentRes = pdbstructure._current_model._current_chain._current_residue\n if currentRes.name_with_spaces != self.residue_name_with_spaces:\n # The residue name has changed.\n self.residue_number = pdbstructure._next_residue_number\n elif self.name_with_spaces in currentRes.atoms_by_name:\n # There is already an atom with this name.\n self.residue_number = pdbstructure._next_residue_number\n else:\n self.residue_number = currentRes.number\n self.insertion_code = pdb_line[26]\n # coordinates, occupancy, and temperature factor belong in Atom.Location object\n x = float(pdb_line[30:38])\n y = float(pdb_line[38:46])\n z = float(pdb_line[46:54])\n try:\n occupancy = float(pdb_line[54:60])\n except:\n occupancy = 1.0\n try:\n temperature_factor = unit.Quantity(float(pdb_line[60:66]), unit.angstroms**2)\n except:\n temperature_factor = unit.Quantity(0.0, unit.angstroms**2)\n self.locations = {}\n loc = Atom.Location(alternate_location_indicator, unit.Quantity(Vec3(x,y,z), unit.angstroms), occupancy, temperature_factor, self.residue_name_with_spaces)\n self.locations[alternate_location_indicator] = loc\n self.default_location_id = alternate_location_indicator\n # segment id, element_symbol, and formal_charge are not always present\n self.segment_id = pdb_line[72:76].strip()\n self.element_symbol = pdb_line[76:78].strip()\n try: self.formal_charge = int(pdb_line[78:80])\n except ValueError: self.formal_charge = None\n # figure out atom element\n if self.element_symbol == extraParticleIdentifier:\n self.element = 'EP'\n else:\n try:\n # Try to find a sensible element symbol from columns 76-77\n self.element = element.get_by_symbol(self.element_symbol)\n except KeyError:\n self.element = None\n if 
pdbstructure is not None:\n pdbstructure._next_atom_number = self.serial_number+1\n pdbstructure._next_residue_number = self.residue_number+1",
"def __init__(self, filename):\n self.from_file(filename)\n self.parse_cell()\n self.parse_atom()\n self.apply_symops()",
"def initFromFile(self,file):\n self.source = file\n file_reader = open(file,\"r\")\n self.isInit = True\n lineCounter = 0\n firstLine = None\n SecondLine = None\n ThirdLine = None\n for line in file_reader:\n if(lineCounter == 0):\n firstLine = line.split()\n self.rowsNumber = int(firstLine[0])\n self.columnsNumber = int(firstLine[1])\n self.routerRangeRadius = int(firstLine[2])\n if(lineCounter == 1):\n SecondLine = line.split()\n self.backBoneCosts = int(SecondLine[0])\n Path.backBoneCost = self.backBoneCosts\n self.routerCosts = int(SecondLine[1])\n self.budget = int(SecondLine[2])\n if(lineCounter == 2):\n ThirdLine = line.split()\n self.firstCell = Cell(int(ThirdLine[0]),int(ThirdLine[1]))\n if(lineCounter>2):\n self.map.append([])\n LINE = line\n columnCounter = 0\n for char in LINE:\n temp = Cell(len(self.map)-1,columnCounter,Cell.getCellType(char))\n self.map[len(self.map)-1].append(temp)\n if(temp.cellType == \"FLOOR\"):\n self.notComputeRouter.append(temp)\n columnCounter += 1\n lineCounter +=1\n self.isInit = True",
"def from_pdb(self, **kwargs):\n return self.__from_file(kwargs, _pdb)",
"def __init__(self, line):\n # Throw an exception if we don't see the parenthesis that mark a date entry\n if not line[108] == '(':\n raise ParsingException\n if not line[164:165] == ')':\n raise ParsingException\n\n # Parsing definitions\n self.start_time = datetime.strptime(line[109:125], '%d-%b-%Y %H%M')\n self.end_time = datetime.strptime(line[128:144], '%d-%b-%Y %H%M')\n self.time_stamp = datetime.strptime(line[148:164], '%m/%d/%Y %H:%M')",
"def __init__(self, pdbfile, pdbcode=None, includeH=True):\n self.initialized = False\n self.load (pdbfile, pdbcode, includeH=includeH)",
"def __init__(self, file_name=None, file_object=None, pdb_code=None):\n self.line_number = 0\n if file_name is not None:\n assert file_object is None\n assert pdb_code is None\n self.file_object = open(file_name)\n elif file_object is not None:\n assert pdb_code is None\n self.file_object = file_object\n elif pdb_code is not None:\n self.file_object = mmcif_files.getFile(pdb_code)\n else:\n raise ValueError(\"No input file given\")",
"def parsePDB(filename):\n\t# Create a new molecule object.\n\tnew_molecule = []\n\tnew_molecule = molecule.Molecule()\n\n\t# Read the file into memory.\n\ttry:\n\t\topened_file = open(filename, 'r').readlines()\n\texcept FileNotFoundError:\n\t\tprint(\"Could not find file: {0}\".format(filename))\n\t\texit()\n\n\t# Find first coordinate entry.\n\tfor line in range(0, len(opened_file)):\n\t\t# Find first HETATM keyword\n\t\tif opened_file[line].split()[0] == 'HETATM' or opened_file[line].split()[0] == 'ATOM':\n\t\t\tfirstatom = line\n\t\t\tbreak\n\t\n\t# Extract the coordinates\n\tfor line in opened_file[firstatom:]:\n\t\t# If you reach the end of a PDB file via an END statement\n\t\tif line.strip() == \"END\":\n\t\t\treturn new_molecule\n\t\t# Split the line into its various data\n\t\tthis_line = line.split()\n\t\t# Set the index, x, y, and z for atom; set the residue type for the molecule\n\t\tindex = opened_file[firstatom:].index(line)+1\n\t\tatomtype = this_line[2]\n\t\tnew_molecule.setResidue(this_line[3])\n\t\tx = float(this_line[5])\n\t\ty = float(this_line[6])\n\t\tz = float(this_line[7])\n\t\tparticle = atom.Atom(index, atomtype, x, y, z)\n\t\tnew_molecule.addAtom(particle)\n\treturn new_molecule",
"def parse_pdb(self, line):\n if line is not None:\n self.original_text.append(line.rstrip(\"\\r\\n\"))",
"def test_8():\n answer_pdb_str = \"\"\"\nATOM 1 N ARG A 1 26.061 12.824 1.988 1.00 0.00 N\nATOM 2 CA ARG A 1 27.253 12.525 2.773 1.00 0.00 C\nATOM 3 C ARG A 1 28.520 12.882 2.003 1.00 0.00 C\nATOM 4 O ARG A 1 28.853 12.243 1.005 1.00 0.00 O\nATOM 5 CB ARG A 1 27.280 11.041 3.156 1.00 10.00 C\nATOM 6 CG ARG A 1 26.107 10.591 4.022 1.00 10.00 C\nATOM 7 CD ARG A 1 26.118 11.230 5.409 1.00 10.00 C\nATOM 8 NE ARG A 1 27.283 10.828 6.201 1.00 10.00 N\nATOM 9 CZ ARG A 1 27.735 11.441 7.298 1.00 10.00 C\nATOM 10 NH1 ARG A 1 27.146 12.525 7.803 1.00 10.00 N\nATOM 11 NH2 ARG A 1 28.808 10.956 7.908 1.00 10.00 N\nATOM 12 N ALA A 2 29.223 13.907 2.474 1.00 0.00 N\nATOM 13 CA ALA A 2 30.455 14.351 1.832 1.00 0.00 C\nATOM 14 C ALA A 2 31.652 14.171 2.758 1.00 0.00 C\nATOM 15 O ALA A 2 31.775 14.859 3.772 1.00 0.00 O\nATOM 16 CB ALA A 2 30.331 15.807 1.408 1.00 0.00 C\nATOM 17 N HIS A 3 32.534 13.242 2.403 1.00 0.00 N\nATOM 18 CA HIS A 3 33.724 12.970 3.202 1.00 0.00 C\nATOM 19 C HIS A 3 34.993 13.295 2.422 1.00 0.00 C\nATOM 20 O HIS A 3 35.327 12.618 1.450 1.00 0.00 O\nATOM 21 CB HIS A 3 33.744 11.503 3.640 1.00 0.00 C\nATOM 22 CG HIS A 3 32.618 11.130 4.554 1.00 0.00 C\nATOM 23 ND1 HIS A 3 32.586 11.494 5.882 1.00 0.00 N\nATOM 24 CD2 HIS A 3 31.485 10.424 4.330 1.00 0.00 C\nATOM 25 CE1 HIS A 3 31.481 11.029 6.437 1.00 0.00 C\nATOM 26 NE2 HIS A 3 30.795 10.375 5.517 1.00 0.00 N\nATOM 27 N ALA A 4 35.698 14.335 2.856 1.00 0.00 N\nATOM 28 CA ALA A 4 36.932 14.752 2.201 1.00 0.00 C\nATOM 29 C ALA A 4 38.127 14.604 3.136 1.00 0.00 C\nATOM 30 O ALA A 4 38.248 15.329 4.124 1.00 0.00 O\nATOM 31 CB ALA A 4 36.812 16.192 1.723 1.00 0.00 C\nATOM 32 N ASP A 5 39.007 13.660 2.818 1.00 0.00 N\nATOM 33 CA ASP A 5 40.194 13.415 3.630 1.00 0.00 C\nATOM 34 C ASP A 5 41.467 13.708 2.841 1.00 0.00 C\nATOM 35 O ASP A 5 41.801 12.995 1.896 1.00 0.00 O\nATOM 36 CB ASP A 5 40.211 11.966 4.122 1.00 0.00 C\nATOM 37 CG ASP A 5 41.346 11.691 5.089 1.00 0.00 C\nATOM 38 OD1 ASP A 5 41.256 12.134 6.254 1.00 0.00 O\nATOM 39 OD2 ASP A 5 42.327 11.032 4.685 1.00 0.00 O\nATOM 40 N ALA A 6 42.172 14.763 3.238 1.00 0.00 N\nATOM 41 CA ALA A 6 43.409 15.152 2.570 1.00 0.00 C\nATOM 42 C ALA A 6 44.601 15.036 3.514 1.00 0.00 C\nATOM 43 O ALA A 6 44.722 15.797 4.474 1.00 0.00 O\nATOM 44 CB ALA A 6 43.294 16.573 2.039 1.00 0.00 C\nATOM 45 N GLU A 7 45.480 14.079 3.234 1.00 0.00 N\nATOM 46 CA GLU A 7 46.665 13.862 4.057 1.00 0.00 C\nATOM 47 C GLU A 7 47.940 14.122 3.261 1.00 0.00 C\nATOM 48 O GLU A 7 48.275 13.373 2.344 1.00 0.00 O\nATOM 49 CB GLU A 7 46.677 12.432 4.604 1.00 0.00 C\nATOM 50 CG GLU A 7 45.565 12.140 5.599 1.00 0.00 C\nATOM 51 CD GLU A 7 45.595 10.711 6.103 1.00 0.00 C\nATOM 52 OE1 GLU A 7 46.403 9.912 5.585 1.00 0.00 O\nATOM 53 OE2 GLU A 7 44.809 10.384 7.019 1.00 0.00 O\nATOM 54 N ALA A 8 48.647 15.189 3.620 1.00 0.00 N\nATOM 55 CA ALA A 8 49.886 15.550 2.941 1.00 0.00 C\nATOM 56 C ALA A 8 51.076 15.468 3.892 1.00 0.00 C\nATOM 57 O ALA A 8 51.196 16.264 4.823 1.00 0.00 O\nATOM 58 CB ALA A 8 49.776 16.951 2.356 1.00 0.00 C\nATOM 59 N ALA A 10 55.122 15.615 4.002 1.00 0.00 N\nATOM 60 CA ALA A 10 56.363 15.948 3.313 1.00 0.00 C\nATOM 61 C ALA A 10 57.551 15.898 4.269 1.00 0.00 C\nATOM 62 O ALA A 10 57.671 16.728 5.170 1.00 0.00 O\nATOM 63 CB ALA A 10 56.258 17.326 2.676 1.00 0.00 C\nATOM 64 N ASN A 11 58.427 14.919 4.065 1.00 0.00 N\nATOM 65 CA ASN A 11 59.606 14.759 4.908 1.00 0.00 C\nATOM 66 C ASN A 11 60.886 14.953 4.102 1.00 0.00 C\nATOM 67 O ASN A 11 61.222 14.136 3.244 1.00 0.00 O\nATOM 68 CB ASN A 11 59.609 13.379 5.562 
1.00 0.00 C\nATOM 69 CG ASN A 11 58.532 13.236 6.620 1.00 0.00 C\nATOM 70 OD1 ASN A 11 58.296 14.149 7.410 1.00 0.00 O\nATOM 71 ND2 ASN A 11 57.872 12.083 6.640 1.00 0.00 N\nATOM 72 N ALA A 12 61.597 16.041 4.383 1.00 0.00 N\nATOM 73 CA ALA A 12 62.841 16.345 3.686 1.00 0.00 C\nATOM 74 C ALA A 12 64.025 16.328 4.646 1.00 0.00 C\nATOM 75 O ALA A 12 64.145 17.191 5.515 1.00 0.00 O\nATOM 76 CB ALA A 12 62.740 17.698 2.997 1.00 0.00 C\nATOM 77 N GLN A 13 64.899 15.340 4.481 1.00 0.00 N\nATOM 78 CA GLN A 13 66.076 15.209 5.332 1.00 0.00 C\nATOM 79 C GLN A 13 67.359 15.370 4.522 1.00 0.00 C\nATOM 80 O GLN A 13 67.695 14.521 3.697 1.00 0.00 O\nATOM 81 CB GLN A 13 66.071 13.849 6.037 1.00 0.00 C\nATOM 82 CG GLN A 13 67.212 13.651 7.023 1.00 0.00 C\nATOM 83 CD GLN A 13 67.140 12.317 7.739 1.00 0.00 C\nATOM 84 OE1 GLN A 13 66.251 11.506 7.477 1.00 0.00 O\nATOM 85 NE2 GLN A 13 68.078 12.082 8.650 1.00 0.00 N\nATOM 86 N ALA A 14 68.071 16.466 4.765 1.00 0.00 N\nATOM 87 CA ALA A 14 69.318 16.740 4.059 1.00 0.00 C\nATOM 88 C ALA A 14 70.500 16.757 5.022 1.00 0.00 C\nATOM 89 O ALA A 14 70.620 17.652 5.859 1.00 0.00 O\nATOM 90 CB ALA A 14 69.222 18.067 3.320 1.00 0.00 C\nATOM 91 N LEU A 15 71.372 15.761 4.897 1.00 0.00 N\nATOM 92 CA LEU A 15 72.547 15.660 5.755 1.00 0.00 C\nATOM 93 C LEU A 15 73.832 15.788 4.943 1.00 0.00 C\nATOM 94 O LEU A 15 74.168 14.907 4.151 1.00 0.00 O\nATOM 95 CB LEU A 15 72.541 14.325 6.508 1.00 0.00 C\nATOM 96 CG LEU A 15 71.415 14.114 7.526 1.00 0.00 C\nATOM 97 CD1 LEU A 15 71.462 12.699 8.081 1.00 0.00 C\nATOM 98 CD2 LEU A 15 71.487 15.136 8.654 1.00 0.00 C\nATOM 99 N ALA A 16 74.546 16.890 5.146 1.00 0.00 N\nATOM 100 CA ALA A 16 75.795 17.135 4.434 1.00 0.00 C\nATOM 101 C ALA A 16 76.975 17.185 5.399 1.00 0.00 C\nATOM 102 O ALA A 16 77.095 18.110 6.202 1.00 0.00 O\nATOM 103 CB ALA A 16 75.704 18.434 3.646 1.00 0.00 C\nATOM 104 N PHE A 17 77.845 16.184 5.313 1.00 0.00 N\nATOM 105 CA PHE A 17 79.018 16.111 6.177 1.00 0.00 C\nATOM 106 C PHE A 17 80.304 16.206 5.364 1.00 0.00 C\nATOM 107 O PHE A 17 80.640 15.296 4.606 1.00 0.00 O\nATOM 108 CB PHE A 17 79.005 14.807 6.980 1.00 0.00 C\nATOM 109 CG PHE A 17 77.889 14.721 7.981 1.00 0.00 C\nATOM 110 CD1 PHE A 17 77.992 15.351 9.210 1.00 0.00 C\nATOM 111 CD2 PHE A 17 76.735 14.009 7.693 1.00 0.00 C\nATOM 112 CE1 PHE A 17 76.966 15.272 10.133 1.00 0.00 C\nATOM 113 CE2 PHE A 17 75.706 13.927 8.612 1.00 0.00 C\nATOM 114 CZ PHE A 17 75.822 14.560 9.833 1.00 0.00 C\nATOM 115 N ALA A 18 81.021 17.314 5.528 1.00 0.00 N\nATOM 116 CA ALA A 18 82.272 17.529 4.810 1.00 0.00 C\nATOM 117 C ALA A 18 83.450 17.613 5.775 1.00 0.00 C\nATOM 118 O ALA A 18 83.570 18.567 6.543 1.00 0.00 O\nATOM 119 CB ALA A 18 82.186 18.797 3.973 1.00 0.00 C\nATOM 120 N TYR A 19 84.318 16.606 5.729 1.00 0.00 N\nATOM 121 CA TYR A 19 85.488 16.564 6.598 1.00 0.00 C\nATOM 122 C TYR A 19 86.777 16.625 5.785 1.00 0.00 C\nATOM 123 O TYR A 19 87.113 15.687 5.063 1.00 0.00 O\nATOM 124 CB TYR A 19 85.471 15.292 7.450 1.00 0.00 C\nATOM 125 CG TYR A 19 84.363 15.258 8.479 1.00 0.00 C\nATOM 126 CD1 TYR A 19 83.662 16.411 8.812 1.00 0.00 C\nATOM 127 CD2 TYR A 19 84.016 14.074 9.116 1.00 0.00 C\nATOM 128 CE1 TYR A 19 82.648 16.386 9.751 1.00 0.00 C\nATOM 129 CE2 TYR A 19 83.002 14.039 10.057 1.00 0.00 C\nATOM 130 CZ TYR A 19 82.323 15.197 10.369 1.00 0.00 C\nATOM 131 OH TYR A 19 81.313 15.166 11.305 1.00 0.00 O\nATOM 132 N ALA A 20 87.496 17.737 5.909 1.00 0.00 N\nATOM 133 CA ALA A 20 88.749 17.922 5.187 1.00 0.00 C\nATOM 134 C ALA A 20 89.925 18.039 6.151 1.00 0.00 C\nATOM 135 O ALA A 
20 90.046 19.021 6.883 1.00 0.00 O\nATOM 136 CB ALA A 20 88.668 19.159 4.303 1.00 0.00 C\nATOM 137 N VAL A 21 90.791 17.030 6.145 1.00 0.00 N\nATOM 138 CA VAL A 21 91.959 17.017 7.018 1.00 0.00 C\nATOM 139 C VAL A 21 93.250 17.045 6.207 1.00 0.00 C\nATOM 140 O VAL A 21 93.585 16.079 5.521 1.00 0.00 O\nATOM 141 CB VAL A 21 91.967 15.769 7.925 1.00 0.00 C\nATOM 142 CG1 VAL A 21 93.241 15.722 8.760 1.00 0.00 C\nATOM 143 CG2 VAL A 21 90.735 15.749 8.820 1.00 0.00 C\nATOM 144 N ALA A 22 93.971 18.159 6.291 1.00 0.00 N\nATOM 145 CA ALA A 22 95.226 18.315 5.565 1.00 0.00 C\nATOM 146 C ALA A 22 96.400 18.465 6.527 1.00 0.00 C\nATOM 147 O ALA A 22 96.521 19.473 7.222 1.00 0.00 O\nATOM 148 CB ALA A 22 95.150 19.517 4.636 1.00 0.00 C\nTER\nATOM 149 N ARG B 1 27.961 0.504 1.988 1.00 0.00 N\nATOM 150 CA ARG B 1 29.153 0.205 2.773 1.00 0.00 C\nATOM 151 C ARG B 1 30.420 0.562 2.003 1.00 0.00 C\nATOM 152 O ARG B 1 30.753 -0.077 1.005 1.00 0.00 O\nATOM 153 CB ARG B 1 29.180 -1.279 3.156 1.00 10.00 C\nATOM 154 CG ARG B 1 28.007 -1.729 4.022 1.00 10.00 C\nATOM 155 CD ARG B 1 28.018 -1.090 5.409 1.00 10.00 C\nATOM 156 NE ARG B 1 29.183 -1.492 6.201 1.00 10.00 N\nATOM 157 CZ ARG B 1 29.635 -0.879 7.298 1.00 10.00 C\nATOM 158 NH1 ARG B 1 30.708 -1.364 7.908 1.00 10.00 N\nATOM 159 NH2 ARG B 1 29.046 0.205 7.803 1.00 10.00 N\nATOM 160 N ALA B 2 31.123 1.587 2.474 1.00 0.00 N\nATOM 161 CA ALA B 2 32.355 2.031 1.832 1.00 0.00 C\nATOM 162 C ALA B 2 33.552 1.851 2.758 1.00 0.00 C\nATOM 163 O ALA B 2 33.675 2.539 3.772 1.00 0.00 O\nATOM 164 CB ALA B 2 32.231 3.487 1.408 1.00 0.00 C\nATOM 165 N HIS B 3 34.434 0.922 2.403 1.00 0.00 N\nATOM 166 CA HIS B 3 35.624 0.650 3.202 1.00 0.00 C\nATOM 167 C HIS B 3 36.893 0.975 2.422 1.00 0.00 C\nATOM 168 O HIS B 3 37.227 0.298 1.450 1.00 0.00 O\nATOM 169 CB HIS B 3 35.644 -0.817 3.640 1.00 0.00 C\nATOM 170 CG HIS B 3 34.518 -1.190 4.554 1.00 0.00 C\nATOM 171 ND1 HIS B 3 34.311 -0.928 5.866 1.00 0.00 C\nATOM 172 CD2 HIS B 3 33.431 -1.925 4.134 1.00 0.00 N\nATOM 173 CE1 HIS B 3 33.113 -1.504 6.211 1.00 0.00 N\nATOM 174 NE2 HIS B 3 32.603 -2.100 5.148 1.00 0.00 C\nATOM 175 N ALA B 4 37.598 2.015 2.856 1.00 0.00 N\nATOM 176 CA ALA B 4 38.832 2.432 2.201 1.00 0.00 C\nATOM 177 C ALA B 4 40.027 2.284 3.136 1.00 0.00 C\nATOM 178 O ALA B 4 40.148 3.009 4.124 1.00 0.00 O\nATOM 179 CB ALA B 4 38.712 3.872 1.723 1.00 0.00 C\nATOM 180 N ASP B 5 40.907 1.340 2.818 1.00 0.00 N\nATOM 181 CA ASP B 5 42.094 1.095 3.630 1.00 0.00 C\nATOM 182 C ASP B 5 43.367 1.388 2.841 1.00 0.00 C\nATOM 183 O ASP B 5 43.701 0.675 1.896 1.00 0.00 O\nATOM 184 CB ASP B 5 42.111 -0.354 4.122 1.00 0.00 C\nATOM 185 CG ASP B 5 43.246 -0.629 5.089 1.00 0.00 C\nATOM 186 OD1 ASP B 5 43.158 -0.186 6.253 1.00 0.00 O\nATOM 187 OD2 ASP B 5 44.227 -1.288 4.683 1.00 0.00 O\nATOM 188 N ALA B 6 44.072 2.443 3.238 1.00 0.00 N\nATOM 189 CA ALA B 6 45.309 2.832 2.570 1.00 0.00 C\nATOM 190 C ALA B 6 46.501 2.716 3.514 1.00 0.00 C\nATOM 191 O ALA B 6 46.622 3.477 4.474 1.00 0.00 O\nATOM 192 CB ALA B 6 45.194 4.253 2.039 1.00 0.00 C\nATOM 193 N GLU B 7 47.380 1.759 3.234 1.00 0.00 N\nATOM 194 CA GLU B 7 48.565 1.542 4.057 1.00 0.00 C\nATOM 195 C GLU B 7 49.840 1.802 3.261 1.00 0.00 C\nATOM 196 O GLU B 7 50.175 1.053 2.344 1.00 0.00 O\nATOM 197 CB GLU B 7 48.577 0.112 4.604 1.00 0.00 C\nATOM 198 CG GLU B 7 47.465 -0.180 5.599 1.00 0.00 C\nATOM 199 CD GLU B 7 47.495 -1.609 6.103 1.00 0.00 C\nATOM 200 OE1 GLU B 7 48.305 -2.409 5.584 1.00 0.00 O\nATOM 201 OE2 GLU B 7 46.711 -1.936 7.018 1.00 0.00 O\nATOM 202 N ALA B 8 50.547 2.869 3.620 1.00 
0.00 N\nATOM 203 CA ALA B 8 51.786 3.230 2.941 1.00 0.00 C\nATOM 204 C ALA B 8 52.976 3.148 3.892 1.00 0.00 C\nATOM 205 O ALA B 8 53.096 3.944 4.823 1.00 0.00 O\nATOM 206 CB ALA B 8 51.676 4.631 2.356 1.00 0.00 C\nATOM 207 N ALA B 10 57.022 3.295 4.002 1.00 0.00 N\nATOM 208 CA ALA B 10 58.263 3.628 3.313 1.00 0.00 C\nATOM 209 C ALA B 10 59.451 3.578 4.269 1.00 0.00 C\nATOM 210 O ALA B 10 59.571 4.408 5.170 1.00 0.00 O\nATOM 211 CB ALA B 10 58.158 5.006 2.676 1.00 0.00 C\nATOM 212 N ASN B 11 60.327 2.599 4.065 1.00 0.00 N\nATOM 213 CA ASN B 11 61.506 2.439 4.908 1.00 0.00 C\nATOM 214 C ASN B 11 62.786 2.633 4.102 1.00 0.00 C\nATOM 215 O ASN B 11 63.122 1.816 3.244 1.00 0.00 O\nATOM 216 CB ASN B 11 61.509 1.059 5.562 1.00 0.00 C\nATOM 217 CG ASN B 11 60.432 0.916 6.620 1.00 0.00 C\nATOM 218 OD1 ASN B 11 60.252 1.957 7.425 1.00 0.00 N\nATOM 219 ND2 ASN B 11 59.769 -0.116 6.713 1.00 0.00 O\nATOM 220 N ALA B 12 63.497 3.721 4.383 1.00 0.00 N\nATOM 221 CA ALA B 12 64.741 4.025 3.686 1.00 0.00 C\nATOM 222 C ALA B 12 65.925 4.008 4.646 1.00 0.00 C\nATOM 223 O ALA B 12 66.045 4.871 5.515 1.00 0.00 O\nATOM 224 CB ALA B 12 64.640 5.378 2.997 1.00 0.00 C\nATOM 225 N GLN B 13 66.799 3.020 4.481 1.00 0.00 N\nATOM 226 CA GLN B 13 67.976 2.889 5.332 1.00 0.00 C\nATOM 227 C GLN B 13 69.259 3.050 4.522 1.00 0.00 C\nATOM 228 O GLN B 13 69.595 2.201 3.697 1.00 0.00 O\nATOM 229 CB GLN B 13 67.971 1.529 6.037 1.00 0.00 C\nATOM 230 CG GLN B 13 69.112 1.331 7.023 1.00 0.00 C\nATOM 231 CD GLN B 13 69.040 -0.003 7.739 1.00 0.00 C\nATOM 232 OE1 GLN B 13 68.046 -0.811 7.388 1.00 0.00 N\nATOM 233 NE2 GLN B 13 69.869 -0.305 8.598 1.00 0.00 O\nATOM 234 N ALA B 14 69.971 4.146 4.765 1.00 0.00 N\nATOM 235 CA ALA B 14 71.218 4.420 4.059 1.00 0.00 C\nATOM 236 C ALA B 14 72.400 4.437 5.022 1.00 0.00 C\nATOM 237 O ALA B 14 72.520 5.332 5.859 1.00 0.00 O\nATOM 238 CB ALA B 14 71.122 5.747 3.320 1.00 0.00 C\nATOM 239 N LEU B 15 73.272 3.441 4.897 1.00 0.00 N\nATOM 240 CA LEU B 15 74.447 3.340 5.755 1.00 0.00 C\nATOM 241 C LEU B 15 75.732 3.468 4.943 1.00 0.00 C\nATOM 242 O LEU B 15 76.068 2.587 4.151 1.00 0.00 O\nATOM 243 CB LEU B 15 74.441 2.005 6.508 1.00 0.00 C\nATOM 244 CG LEU B 15 73.315 1.794 7.526 1.00 0.00 C\nATOM 245 CD1 LEU B 15 72.426 0.619 7.136 1.00 0.00 C\nATOM 246 CD2 LEU B 15 72.491 3.063 7.674 1.00 0.00 C\nATOM 247 N ALA B 16 76.446 4.570 5.146 1.00 0.00 N\nATOM 248 CA ALA B 16 77.695 4.815 4.434 1.00 0.00 C\nATOM 249 C ALA B 16 78.875 4.865 5.399 1.00 0.00 C\nATOM 250 O ALA B 16 78.995 5.790 6.202 1.00 0.00 O\nATOM 251 CB ALA B 16 77.604 6.114 3.646 1.00 0.00 C\nATOM 252 N PHE B 17 79.745 3.864 5.313 1.00 0.00 N\nATOM 253 CA PHE B 17 80.918 3.791 6.177 1.00 0.00 C\nATOM 254 C PHE B 17 82.204 3.886 5.364 1.00 0.00 C\nATOM 255 O PHE B 17 82.540 2.976 4.606 1.00 0.00 O\nATOM 256 CB PHE B 17 80.905 2.487 6.980 1.00 0.00 C\nATOM 257 CG PHE B 17 79.789 2.401 7.981 1.00 0.00 C\nATOM 258 CD1 PHE B 17 79.893 3.032 9.211 1.00 0.00 C\nATOM 259 CD2 PHE B 17 78.636 1.690 7.694 1.00 0.00 C\nATOM 260 CE1 PHE B 17 78.868 2.956 10.134 1.00 0.00 C\nATOM 261 CE2 PHE B 17 77.607 1.611 8.614 1.00 0.00 C\nATOM 262 CZ PHE B 17 77.724 2.244 9.835 1.00 0.00 C\nATOM 263 N ALA B 18 82.921 4.994 5.528 1.00 0.00 N\nATOM 264 CA ALA B 18 84.172 5.209 4.810 1.00 0.00 C\nATOM 265 C ALA B 18 85.350 5.293 5.775 1.00 0.00 C\nATOM 266 O ALA B 18 85.470 6.247 6.543 1.00 0.00 O\nATOM 267 CB ALA B 18 84.086 6.477 3.973 1.00 0.00 C\nATOM 268 N TYR B 19 86.218 4.286 5.729 1.00 0.00 N\nATOM 269 CA TYR B 19 87.388 4.244 6.598 1.00 0.00 C\nATOM 270 C TYR 
B 19 88.677 4.305 5.785 1.00 0.00 C\nATOM 271 O TYR B 19 89.013 3.367 5.063 1.00 0.00 O\nATOM 272 CB TYR B 19 87.371 2.972 7.450 1.00 0.00 C\nATOM 273 CG TYR B 19 86.263 2.938 8.479 1.00 0.00 C\nATOM 274 CD1 TYR B 19 85.564 4.090 8.814 1.00 0.00 C\nATOM 275 CD2 TYR B 19 85.918 1.753 9.118 1.00 0.00 C\nATOM 276 CE1 TYR B 19 84.550 4.063 9.756 1.00 0.00 C\nATOM 277 CE2 TYR B 19 84.907 1.716 10.059 1.00 0.00 C\nATOM 278 CZ TYR B 19 84.228 2.874 10.374 1.00 0.00 C\nATOM 279 OH TYR B 19 83.220 2.843 11.312 1.00 0.00 O\nATOM 280 N ALA B 20 89.396 5.417 5.909 1.00 0.00 N\nATOM 281 CA ALA B 20 90.649 5.602 5.187 1.00 0.00 C\nATOM 282 C ALA B 20 91.825 5.719 6.151 1.00 0.00 C\nATOM 283 O ALA B 20 91.946 6.701 6.883 1.00 0.00 O\nATOM 284 CB ALA B 20 90.568 6.839 4.303 1.00 0.00 C\nATOM 285 N VAL B 21 92.691 4.710 6.145 1.00 0.00 N\nATOM 286 CA VAL B 21 93.859 4.697 7.018 1.00 0.00 C\nATOM 287 C VAL B 21 95.150 4.725 6.207 1.00 0.00 C\nATOM 288 O VAL B 21 95.485 3.759 5.521 1.00 0.00 O\nATOM 289 CB VAL B 21 93.867 3.449 7.925 1.00 0.00 C\nATOM 290 CG1 VAL B 21 95.105 2.602 7.660 1.00 0.00 C\nATOM 291 CG2 VAL B 21 92.599 2.630 7.720 1.00 0.00 C\nATOM 292 N ALA B 22 95.871 5.839 6.291 1.00 0.00 N\nATOM 293 CA ALA B 22 97.126 5.995 5.565 1.00 0.00 C\nATOM 294 C ALA B 22 98.300 6.145 6.527 1.00 0.00 C\nATOM 295 O ALA B 22 98.421 7.153 7.222 1.00 0.00 O\nATOM 296 CB ALA B 22 97.050 7.197 4.636 1.00 0.00 C\nTER\n \"\"\"\n h = iotbx.pdb.input(source_info=None, lines=test_pdb_5).construct_hierarchy()\n anwer_h = iotbx.pdb.input(source_info=None,\n lines=answer_pdb_str).construct_hierarchy()\n h.write_pdb_file(\"test_8_before.pdb\")\n\n ncs_inp = iotbx.ncs.input(\n hierarchy=h,\n params=ncs_pars.ncs_search)\n ncs_groups = ncs_inp.get_ncs_restraints_group_list()\n\n nu.flip_atoms_in_ncs_groups(h, ncs_groups)\n h.write_pdb_file(\"test_8_result.pdb\")\n rmsd_smart = calculate_rmsd_smart(anwer_h, h)\n print(rmsd_smart)\n assert rmsd_smart < 0.01",
"def load_pdb(self):\n # 'atoms': list with the residue id for every atom.\n pdb = self.pdb\n for l_i in range(len(pdb)): \n dat = bio_lib.get_labels(pdb[l_i])\n res_atm = dat[0]\n res_nam = dat[1]\n res_ind = dat[2]\n res_chn = dat[3]\n self.identifiers.append([res_nam, res_ind, res_chn]) \n #x_i = dat[4]\n #y_i = dat[5]\n #z_i = dat[6]\n # Adjusted coordinates returned from PDB are not strictly formatted.\n if len(pdb[l_i]) > 10:\n x_i = pdb[l_i][31:].split()[0]\n y_i = pdb[l_i][31:].split()[1]\n z_i = pdb[l_i][31:].split()[2]\n c_i = \" \".join([res_atm, x_i, y_i, z_i])\n self.res_atm_xyz.append(c_i)",
"def read_pdb(self, pdb):\n pdb_a = {}\n for line in pdb:\n at = re.compile(\"(ATOM|HETATM)\")\n if at.match(line):\n nm = re.sub(r'\\s', '', line[6:12])\n aname = re.sub(r'\\s', '', line[12:17])\n ri_c = re.sub(r'\\s', '', line[20:27])\n x = re.sub(r'\\s', '', line[30:38])\n y = re.sub(r'\\s', '', line[38:46])\n z = re.sub(r'\\s', '', line[46:55])\n if ri_c and aname and x and y and z:\n pdb_a[int(nm)] = [aname, Vector(float(x), float(y), float(z)), ri_c]\n return [pdb_a, nm]",
"def __init__(self, line):\n # Throw an exception if we don't see the parenthesis that mark a cause\n if not line[108] == '(':\n raise ParsingException\n if not line[159:160] == ')':\n raise ParsingException\n\n # Parsing definitions\n self.cause = line[109:159].strip()",
"def load(self, pdbfile, pdbcode=None, includeH=True):\n\n self.pdbfile = pdbfile\n if pdbcode is None:\n self.pdbcode = pdbfile\n else:\n self.pdbcode = pdbcode\n\n # Does not handle file-not-found exceptions: this is done up-front\n f = open (pdbfile, \"r\")\n lines = f.readlines ()\n f.close ()\n\n self.atomcoords = []\n self.atmnames = []\n self.atmsymbols = []\n self.resnames = []\n self.resnum = []\n self.atmkeys = []\n self._residueInfo = dict ()\n self._residuenames = dict ()\n self._atomInfo = []\n count = -1\n reH = re.compile ('H')\n for line in lines:\n line = line.strip ()\n\n # Filter for ATOM\n if not ((line[0:4] == 'ATOM')): continue\n coords = [float (line[30:38]), float (line[38:46]), float (line[46:54])]\n name = line[12:16]\n symbol = line[13:14]\n resname = line[17:20]\n resID = int (line[22:26])\n\n # Account for PDB files in which the element symbol is shifted from column 14\n # VMD writes such PDB files, ptraj does not\n # Fully compliant PDB files should also have the element in columns 77-78\n # Option \"nowrap\" to ptraj's \"trajout\" command may well control this behaviour\n if ((symbol != 'H') and reH.match (name)): symbol = 'H'\n if not includeH and (symbol == 'H'): continue\n count = count + 1\n\n self.atomcoords.append (coords)\n self.atmnames.append (name)\n self.atmsymbols.append (symbol)\n self.atmkeys.append (0)\n\n self.resnames.append (resname)\n self.resnum.append (resID)\n\n self._atomInfo.append (dict (id=count, coords=coords, name=name, symbol=symbol, residue=resID, residue_name=resname,key=0))\n if not self._residueInfo.has_key (resID):\n self._residueInfo[resID] = dict (id=resID, atomID=[], name=resname)\n self._residuenames[resID] = dict (id=resID, name=resname)\n self._residueInfo[resID]['atomID'].append (count)\n\n self.nAtoms = len (self.atmnames)\n self.nCoords = 3 * self.nAtoms\n self.framebytes = (self.nCoords) * 8 + (\n self.nCoords / 10 + 1) # Numeric fields + EOL characters (in crd format)\n if self.nCoords % 10 == 0: self.framebytes -= 1 # Special case if ncoords exactly divisible by 10\n self.moltype = None\n self.initialized = True",
"def _parse(self):\n with open(self.input) as f:\n for line in f:\n if not line.lstrip().startswith(\"#\"): # comment\n stripped_line=line.split(\"#\")[0].strip()\n \n # Initialise an empty option dictionary with some good defaults\n if \"[\" in stripped_line:\n molname=stripped_line.split()[1]\n self.options[molname]=self.empty_option_dict.copy() # dict1=dict2 does not copy!\n self.options[molname][\"MolName\"]=molname\n if \":\" in stripped_line: \n # now process line by line\n if \"{\" not in stripped_line:\n key,value=[i.strip() for i in stripped_line.split(\":\")]\n\n if key not in self.options[molname].keys():\n raise BaseException(\"Option \\\"{}\\\" not known, please check your input file\".format(key))\n self.options[molname][key]=value \n else:\n # This is to define special lines that are given by a dictionary\n key,value=stripped_line.split(\":\",1) # split on first occurence\n if key==\"Addon\": # additional atoms to be added per molecule\n addondict=self.empty_addon_dict.copy()\n addondict_string = value.split(\"}\",-1)[0].split(\"{\",1)[1]\n for pair in addondict_string.split(\",\"):\n addonkey,addonvalue=[i.strip() for i in pair.split(\":\")]\n if addonkey not in addondict.keys():\n raise BaseException(\"Option \\\"{}\\\" in Addon section of molecule {} not known, please check your input file\".format(addonkey,molname))\n addondict[addonkey]=addonvalue\n value=addondict\n # Since addon keyword can be used many times, this is a list\n self.options[molname][key].append(value) \n self._check()",
"def __init__(self, path, names):\n try:\n f = open(path, \"r\")\n self.file = f\n except BaseException:\n print(\"Path does not exist\")\n self.names = names\n self.symbol_type_list = [\n self.NAME,\n self.KEYWORD,\n self.NUMBER,\n self.COMMA,\n self.SEMICOLON,\n self.ARROW,\n self.EQUALS,\n self.PERIOD,\n self.EOF,\n self.INVALID] = range(10)\n self.keywords_list = [\n \"START\",\n \"END\",\n \"DEVICES\",\n \"CONNECTIONS\",\n \"MONITORS\",\n \"ip\",\n \"init\",\n \"cycles\",\n \"CLOCK\",\n \"SWITCH\",\n \"AND\",\n \"NAND\",\n \"OR\",\n \"NOR\",\n \"DTYPE\",\n \"XOR\",\n \"Q\",\n \"QBAR\",\n \"DATA\",\n \"CLK\",\n \"SET\",\n \"CLEAR\",\n \"SIGGEN\",\n \"sig\"]\n [self.START_ID,\n self.END_ID,\n self.DEVICES_ID,\n self.CONNECTIONS_ID,\n self.MONITORS_ID,\n self.ip_ID,\n self.init_ID,\n self.cycles_ID,\n self.CLOCK_ID,\n self.SWITCH_ID,\n self.AND_ID,\n self.NAND_ID,\n self.OR_ID,\n self.NOR_ID,\n self.DTYPE_ID,\n self.XOR_ID,\n self.Q_ID,\n self.QBAR_ID,\n self.DATA_ID,\n self.CLK_ID,\n self.SET_ID,\n self.CLEAR_ID,\n self.SIGGEN_ID,\n self.sig_ID] = self.names.lookup(self.keywords_list)\n self.current_character = \"\"\n # Position indicators of each symbol are w.r.t to the definition file\n # and so are initialised when scanner is called\n self.line = 0\n self.prev_pos = 1\n self.position = 0",
"def readPDB(self, file):\n\n\t\ttry:\n\t\t\tpdb = open(file, 'r')\n\t\texcept:\n\t\t\tprint \"cannot open pdbfile\",file\n\t\t\treturn 0\n\n\t\tself.file = file\n\t\tpresi = \"\"\n\t\tpresn = \"\"\n\t\tprevc = \"\"\n\t\tnlines = 0\n\t\tmychain = None\n\t\tmyres = Residue()\n\t\tterm = 1\n\t\trescore = re.compile(\"res aa Eatr\")\n\t\tre_bk_tot = re.compile(\"bk_tot\")\n\t\tre_fa_rep = re.compile(\"fa_rep\")\n\t\tre_fa_atr = re.compile(\"fa_atr\")\n\t\tbReadScore = False\n\t\tfor line in pdb.readlines():\n\t\t\tline = string.strip(line)\n\n\t\t\tif line[0:3] == \"REM\":\n\t\t\t\tself.addRemark(line)\n\n\t\t\tif line[0:3] == \"TER\":\n\t\t\t\tterm = 1\n\n\t\t\tif rescore.search(line):\n\t\t\t\tbReadScore = True\n\t\t\t\tcontinue\n\n\t\t\t# read rosetta residue-based scoring information\n\t\t\tif bReadScore:\n\t\t\t\tcols = line.split()\n\t\t\t\tif cols[0] == \"totals\":\n\t\t\t\t\tbReadScore = False\n\t\t\t\t\tcontinue\n\n\t\t\t\tmyres = self.getResidue(int(cols[0]))\n\t\t\t\tif myres == None:\n\t\t\t\t\tprint \"warning reading score!!! cannot find residue:\",cols[0]\n\n\t\t\t\tmyres.Eatr = float(cols[2])\n\t\t\t\tmyres.Erep = float(cols[3])\n\t\t\t\tmyres.Esol = float(cols[4])\n\t\t\t\tmyres.Edun = float(cols[7])\n\t\t\t\tmyres.EhbBB = float(cols[9])\n\t\t\t\tmyres.EhbSC = float(cols[10])\n\t\t\t\tmyres.Egb = float(cols[13])\n\t\t\t\tmyres.Ecst = float(cols[16])\n\t\t\t\tmyres.Eres = float(cols[17])\n\t\t\t\t\n\t\t\t# read atomic information\n\t\t\tif line[0:4] == \"ATOM\" or line[0:6] == \"HETATM\":\n\t\t\t\tchain = line[21:22]\n\t\t\t\tif chain != prevc or nlines == 0 or term:\n\t\t\t\t\tmychain = Chain()\n\t\t\t\t\tmychain.name = chain\n\t\t\t\t\tprevc = chain\n\t\t\t\t\tself.addChain(mychain)\n\t\t\t\t\n\t\t\t\tresi = line[22:26]\n\t\t\t\tresn = line[17:20]\n\n\t\t\t\tif nlines == 0 or presi != resi or presn != resn:\n\t\t\t\t\tif term:\n\t\t\t\t\t\tif myres:\n\t\t\t\t\t\t\tmyres.terminal = \"CTER\"\n\t\t\t\t\t\t\n\t\t\t\t\tpresi = resi\n\t\t\t\t\tpresn = resn\n\t\t\t\t\tmyres = Residue()\n\t\t\t\t\tmyres.name = line[17:20]\n\t\t\t\t\tmyres.file_id = resi\n\t\t\t\t\tmychain.addResidue(myres)\n\n\t\t\t\t\tif term:\n\t\t\t\t\t\tmyres.terminal = \"NTER\"\n\t\t\t\t\t\tterm = 0\n\n\t\t\t\tmyatom = Atom()\n\t\t\t\t\n\t\t\t\tif line[0:4] == \"HETA\":\n\t\t\t\t\tmyatom.kind = \"HETATM\"\n\t\t\t\telse:\n\t\t\t\t\tmyatom.kind = \"ATOM \"\n\n\t\t\t\tmyatom.name = line[12:16]\n\t\t\t\tmyatom.file_id = line[6:11]\n\t\t\t\tmyatom.local = line[29:30]\n\t\t\t\tmyatom.coord[0] = float(line[30:38])\n\t\t\t\tmyatom.coord[1] = float(line[38:46])\n\t\t\t\tmyatom.coord[2] = float(line[46:54])\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif len(line) >= 66:\n\t\t\t\t\ttmpstr = line[54:68]\n\t\t\t\t\ttmplst = tmpstr.split()\n\t\t\t\t\tmyatom.occupancy = float(tmplst[0])\n\t\t\t\t\tmyatom.bfactor = float(tmplst[1])\n\t\t\t\t\t#myatom.occupancy = float(line[54:60])\n\t\t\t\t\t#myatom.bfactor = float(line[60:66])\n\t\t\t\t\tmyatom.rest = line[66:len(line)]\n\n\t\t\t\tself.__determineElement(myatom)\n\t\t\t\tmyres.addAtom(myatom)\n\t\t\t\t\n\t\t\t\tnlines += 1\n\n\t########\tif re_bk_tot.search(line):\n\t########\t\tcols = line.split()\n\t########\t\tself.bk_tot = float(cols[1])\n\n\t########\tif re_fa_rep.search(line):\n\t########\t\tcols = line.split()\n\t########\t\tself.fa_rep = float(cols[1])\n\n\t########\tif re_fa_atr.search(line):\n\t########\t\tcols = line.split()\n\t########\t\tself.fa_atr = float(cols[1])\n\n\t\tpdb.close()",
"def __init__(self,ic,ia,informat):\n self.ped = {}\n self.pedlist = []\n self.mark = {}\n self.marklist = []\n self.sep = '\\t'\n if informat in ['Plink','plink']:\n self.ic = 6\n self.ia = 3\n self.nc = 1\n elif informat in ['DMU','dmu']:\n self.ic = 1\n self.ia = 1\n self.nc = 0\n elif not informat:\n self.ic = ic\n self.ia = ia\n self.nc = 0\n else:\n sys.stderr.write('Unknown input format: \"%s\"\\n' % informat)\n sys.exit(1)",
"def test_parse_pdb(self):\n\n pdbfile = open(self.pdbfile, 'r').read()\n\n parser = PDBParser()\n pdbdf = parser.parse_to_pandas(pdbfile)\n\n self.assertItemsEqual(pdbdf['chain'].unique(), [None])\n self.assertItemsEqual(pdbdf['segid'].unique(), ['A'])\n self.assertItemsEqual(pdbdf['resnum'].unique(), range(89, 137))\n self.assertItemsEqual(pdbdf['resname'].unique(), ['ARG', 'ALA', 'GLN', 'PRO', 'LYS', 'TYR', 'SER', 'VAL',\n 'ASP', 'GLU', 'ASN', 'GLY', 'THR', 'TRP', 'ILE', 'MET',\n 'LEU', 'PHE'])",
"def __init__(self,filePath,headerSymbols=['@','+']):\n if filePath.endswith('.gz'):\n self._file = gzip.open(filePath)\n else:\n self._file = open(filePath, 'rU')\n self._currentLineNumber = 0\n self._hdSyms = headerSymbols",
"def __init__(self,filePath,headerSymbols=['@','+']):\n if filePath.endswith('.gz'):\n self._file = gzip.open(filePath)\n else:\n self._file = open(filePath, 'rU')\n self._currentLineNumber = 0\n self._hdSyms = headerSymbols",
"def __init__(self, infile):\n txt = infile.read()\n for block in self.splitter.split(txt):\n block = block.strip()\n if block:\n term = block.splitlines()[0].strip().decode('utf8')\n defn = \"\\n\".join(line.strip() for line in block.splitlines()[1:])\n self[term] = defn.decode('utf8')",
"def ParserPDB(a):\n\tcontenu=list()\n\tmon_fichier=open(a,\"r\")\n\tfor line in mon_fichier.readlines():\n\t\tcontenu.append(line.strip()) #met le contenu du fichier pdb dans la liste \"contenu\"\n\n\tacidea=dict()\n\t\n\n\n\tfor chain in range(len(contenu)): #On parcourt cette liste contenant tout le fichier pdb\n\t\tif contenu[chain][0:5]==\"MODEL\":\n\t\t\tnewProt = contenu[chain][7:14]\n\t\t\t\n\t\t\tif newProt not in acidea.keys():\n\t\t\t\tacidea[newProt]={}\n\t\t\t\t\n\t\tif contenu[chain][0:4]==\"ATOM\": #Si la ligne commence par \"ATOM\" \n\t\t\tChaine = contenu[chain][21]\n\t\t\t\n\t\t\tif Chaine not in acidea[newProt].keys(): #Si la chaine ( A, B ... ) existe pas deja \n\t\t\t\tacidea[newProt][Chaine] = {} #creation du dictionnaire qui a pour nom les caractères a la ligne 21 ( Chaine)\n\t\t\t\n\t\t\tPosi = contenu[chain][24:26]\n\t\t\tif Posi not in acidea[newProt][Chaine].keys(): #Si la position pour une chaine n'existe pas deja (ex : -3 dans la chaine A)\n\t\t\t\tacidea[newProt][Chaine][Posi]={} # creation du dictionnaire poisition dans le dictionnaire chaine \n\t\t\t\n\t\t\tresidu = contenu[chain][12:16]\n\t\t\tif residu not in acidea[newProt][Chaine][Posi].keys(): #si le residu n'existe pas deja pour une chaine et une position donnée (ex : un CO de la chaine A a la position -3)\n\t\t\t\tacidea[newProt][Chaine][Posi][residu]= {} #Creation du dictionnaire nom de l'atome, contenu dans le dictionnaire position lui meme contenu dans le dictionnaire chaine\t\n\t\t\t\n\t\t\t#repartition de l'information dans le dictionnaire.\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"x\"] = float(contenu[chain][32:38]) #Mise des information de X dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"y\"] = float(contenu[chain][40:46]) #Mise des information de Y dans le dictionnaire atome\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"z\"] = float(contenu[chain][48:54]) #Meme chose pour Z\n\t\t\tacidea[newProt][Chaine][Posi][residu][\"Id\"] = contenu[chain][9:11] #Meme chose pour Identifiant\n\n\treturn( acidea)",
"def parse(self, input_file):\n\n # trim and filter empty lines\n input_file_trimmed = [str.strip(line)\n for line in input_file if str.strip(line)]\n\n # remove als comments (line starting with ')\n input_file_without_comments = self.__remove_comments(\n input_file_trimmed\n )\n\n # store labels + address in symboltable-dictionary\n input_file_without_labels = self.__generate_symboltable(\n input_file_without_comments\n )\n\n # generate storage dump with zeros from first to last address\n zeros = self.__generate_zeros(input_file_trimmed)\n\n # parse assembler directives, insert constants\n # and search codes startaddress\n code_address, zeros_constants = self.__parse_assembler_directives(\n input_file_without_labels, zeros)\n\n # filter assembler code\n code = self.__filter_code(input_file_without_labels)\n\n return code_address, code, zeros_constants, self.__symboltable",
"def __init__(self, line):\n # Throw an exception if we don't see the parenthesis that mark a history entry\n if not line[108] == '(':\n raise ParsingException\n if not line[138:139] == ')':\n raise ParsingException\n\n self.status = line[109:122].strip()\n self.time_stamp = datetime.strptime(line[122:138], '%m/%d/%Y %H:%M')"
] | [
"0.6480801",
"0.63837385",
"0.5987677",
"0.59130067",
"0.5897999",
"0.5887236",
"0.57760376",
"0.5707651",
"0.570124",
"0.5690845",
"0.5651602",
"0.56459063",
"0.56334114",
"0.5630817",
"0.56185424",
"0.56093514",
"0.558209",
"0.5569036",
"0.5566678",
"0.54970634",
"0.5462942",
"0.5408486",
"0.53785235",
"0.53714657",
"0.53334486",
"0.53334486",
"0.5321988",
"0.5317302",
"0.5309878",
"0.53027415"
] | 0.71299464 | 0 |
Initialization function which takes a list of atoms, all from one chain, and constructs a chain object. Importantly, the chain_local_id atom- and residue-level attribute is only initialized in the "context" of a chain, i.e. by this initialization function, so this function actually completes atom and residue initialization. A crucial assumption is that all the atoms in atomlist come from the same chain. In fact, this list is how a chain object is defined, so if the list contains atoms/residues with different .chain values, things are going to go very wrong... A usage sketch follows this entry. | def __init__(self, atomlist):
self.residues = []
current_res = atomlist[0].res_id
temp_res = []
for i in atomlist:
if i.res_id == current_res:
temp_res.append(i)
else:
self.residues.append(PDB_residue(temp_res))
temp_res = []
current_res = i.res_id
temp_res.append(i)
self.residues.append(PDB_residue(temp_res)) # get final residue
if len(self.residues) > 0:
self.chain_name = self.residues[0].chain
else:
self.chain_name = None
chain_local_id = 1
## CHAIN LOCAL ID variables in atom and residue
## objects are set here!
##
for res in self.residues:
for atom in res.atoms:
atom.chain_local_id = chain_local_id
res.chain_local_id = chain_local_id
chain_local_id=chain_local_id+1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n\n # names of atoms that make up relevant segements of each chain\n self.chains = {'a': {'C': 'C1', 'C1': 'C2', 'C2': 'C3', 'C3': 'C4', 'C4': 'C5', 'H': 'H1', 'H1': 'H2',\n 'H2': 'H3', 'H3': 'H4', 'H4': 'H5'},\n 'b': {'C45': 'C1', 'C44': 'C2', 'C43': 'C3', 'C42': 'C4', 'C41': 'C5', 'H81': 'H1', 'H80': 'H2',\n 'H79': 'H3', 'H78': 'H4', 'H77': 'H5'}\n }\n\n self.nchains = len(list(self.chains.keys()))\n\n self.chain_numbers = {'a': 0, 'b': 1} # used to number chains\n\n # self.initial_types = {'C1': 'c2', 'C2': 'ce', 'C3': 'ce', 'C4': 'c2', 'H1': 'ha', 'H2': 'ha', 'H3': 'ha',\n # 'H4': 'ha', 'H5': 'ha'}\n\n # all indices numbered from 0. D1, D2, ... correspond to dummies attached to C1, C2, ... respectively\n self.indices = {'a': {'C1': 0, 'C2': 1, 'C3': 2, 'C4': 3, 'C5': 4, 'H1': 52, 'H2': 53, 'H3': 54, 'H4': 55,\n 'H5': 56, 'D1': 136, 'D2': 137, 'D3': 138, 'D4': 139},\n 'b': {'C1': 49, 'C2': 48, 'C3': 47, 'C4': 46, 'C5': 45, 'H1': 133, 'H2': 132, 'H3': 131,\n 'H4': 130, 'H5': 129, 'D1': 140, 'D2': 141, 'D3': 142, 'D4': 143}\n }\n\n self.dummy_connectivity = {'a': {'C': 'D1', 'C1': 'D2', 'C2': 'D3', 'C3': 'D4'},\n 'b': {'C45': 'D1', 'C44': 'D2', 'C43': 'D3', 'C42': 'D4'}}\n\n self.hydrogen_connectivity = {'C': ['H1', 'H2'], 'C1': ['H3'], 'C2': ['H4'], 'C3': ['H5'],\n 'C45': ['H1', 'H2'], 'C44': ['H3'], 'C43': ['H4'], 'C42': ['H5']}\n\n self.dummy_mass = 1.008 # mass of hydrogen\n\n # write these in order of priority\n # for efficiency, don't repeat things. For example self.carbons['C1']: self.carbons['C2'] is the same as\n # self.carbons['C2']: self.carbons['C1']. Otherwise, computational expense goes up and a new reaction has\n # to be defined below.\n self.carbons = {'C1': ['C', 'C45'], 'C2': ['C1', 'C44'], 'C3': ['C2', 'C43'], 'C4': ['C3', 'C42']}\n self.bonds_with = [[self.carbons['C1'], self.carbons['C2']]]\n\n # define which improper dihedrals to remove -- written in same order as .itp file!!!\n # note that the order of the atoms may be different for each chain\n # NOTE: C3 not tested\n self.impropers = {'a': {'C1': ['H2', 'C1', 'H1', 'C2'], 'C2': ['C1', 'C3', 'C2', 'H3'],\n 'C3': ['C4', 'C2', 'C3', 'H4'], 'C4': ['C5', 'C3', 'C4', 'H5']},\n 'b': {'C1': ['C2', 'H2', 'C1', 'H1'], 'C2': ['C1', 'C3', 'C2', 'H3'],\n 'C3': ['C4', 'C2', 'C3', 'H4'], 'C4': ['C5', 'C3', 'C4', 'H5']}}",
"def __init__(self,\n atoms: Union[List[Atom], Atoms, None] = None):\n self._atoms = Atoms(atoms) if atoms is not None else None",
"def __init__(self, fab=None, heavy_chains=None, light_chains=None, names=None):\n # check if it's a Chain object\n if heavy_chains is None and light_chains is None and fab is None:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n # check if fab object is a list and if all object are abpytools.Fab objects\n if isinstance(fab, list) and all(isinstance(fab_i, Fab) for fab_i in fab):\n self._fab = fab\n self._light_chains = ChainCollection([x[0] for x in self._fab])\n self._heavy_chains = ChainCollection([x[1] for x in self._fab])\n\n if fab is None and (heavy_chains is not None and light_chains is not None):\n\n if isinstance(heavy_chains, list):\n self._heavy_chains = ChainCollection(antibody_objects=heavy_chains)\n\n elif isinstance(heavy_chains, ChainCollection):\n self._heavy_chains = heavy_chains\n\n else:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n if isinstance(light_chains, list):\n self._light_chains = ChainCollection(antibody_objects=light_chains)\n\n elif isinstance(light_chains, ChainCollection):\n self._light_chains = light_chains\n\n else:\n raise ValueError('Provide a list of Chain objects or an ChainCollection object')\n\n if len(self._light_chains.loading_status()) == 0:\n self._light_chains.load()\n\n if len(self._heavy_chains.loading_status()) == 0:\n self._heavy_chains.load()\n\n if self._light_chains.n_ab != self._heavy_chains.n_ab:\n raise ValueError('Number of heavy chains must be the same of light chains')\n\n if isinstance(names, list) and all(isinstance(name, str) for name in names):\n if len(names) == self._heavy_chains.n_ab:\n self._names = names\n else:\n raise ValueError(\n 'Length of name list must be the same as length of heavy_chains/light chains lists')\n\n elif names is None:\n self._names = ['{} - {}'.format(heavy, light) for heavy, light in zip(self._heavy_chains.names,\n self._light_chains.names)]\n\n else:\n raise ValueError(\"Names expected a list of strings, instead got {}\".format(type(names)))\n\n self._n_ab = self._light_chains.n_ab\n self._pair_sequences = [heavy + light for light, heavy in zip(self._heavy_chains.sequences,\n self._light_chains.sequences)]\n\n # keep the name of the heavy and light chains internally to keep everything in the right order\n self._internal_heavy_name = self._heavy_chains.names\n self._internal_light_name = self._light_chains.names",
"def parse_pdb_chain(fobj):\n\n\tcurrentres = None\n\n\tfor line in fobj:\n\t\t# Ignore non-ATOM lines\n\t\tif not line.startswith('ATOM'):\n\t\t\tcontinue\n\n\t\t# Parse line\n\t\tatom = parse_pdb_atom(line.strip())\n\n\t\t# Create a new residue if needed\n\t\tif currentres is None or atom.resSeq != currentres.seq:\n\n\t\t\tif currentres is not None:\n\t\t\t\t# Check same chain\n\t\t\t\tif atom.chainID != currentres.chainID:\n\t\t\t\t\traise ValueError('PDB files with multiple chains not supported.')\n\n\t\t\t\tyield currentres\n\n\t\t\tcurrentres = PDBResidue(atom.resName, atom.chainID, atom.resSeq, [])\n\n\t\t# Add atom to current residue\n\t\tcurrentres.atoms.append(atom)\n\n\tif currentres is not None:\n\t\tyield currentres",
"def adapt_chain(chain):\n type_chain = check_type(chain)\n name = chain.id\n if type_chain == \"nucleic_acid\":\n new_chain = Bio.PDB.Chain.Chain(name)\n chain = copy.copy(chain)\n for residue in chain:\n new_chain.add(residue.copy())\n\n for residue in new_chain:\n for atom in residue:\n if atom.id == \"C1'\":\n atom.id = \"CA\"\n residue.add(atom.copy())\n return new_chain\n else:\n return chain",
"def add_atom(atom_list, atom_name, residue_name, residue_number, x, y, z, atom_number):\n atom_list.append(Atom(atom_number, atom_name, residue_name, residue_number, x, y, z))\n return atom_list",
"def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n self.chain_id = chain_id\n\n for atm in self.iter_atoms():\n atm.set_chain_id(chain_id)",
"def set_chain_id(self, chain_id):\n assert isinstance(chain_id, str)\n for atm in self.iter_alt_loc():\n atm.chain_id = chain_id",
"def __init__(self, pair_list=None):\n\n self.plugleads = []\n if pair_list is not None:\n self.add_many(pair_list)",
"def initLChain(self):\n if self.lChain is None:\n self.lChain = {} # maps edge -> (r, L) where 0 <= edge <= edgeLen+1\n self.lChain[0] = (0, 0)\n self.lChain[(self.edgeLen+1)] = (self.rMax, self.maxL)\n for edge in range(1, self.edgeLen+1):\n r = 0.5 * edge\n l = int(math.ceil((edge * self.maxL)/ float(self.edgeLen + 1)))\n self.lChain[edge] = (r, l)",
"def set_molecules(atom_list):\n from sys import setrecursionlimit, getrecursionlimit\n # Since we use a recursive function here, we make sure that the recursion\n # limit is large enough to handle the maximum possible recursion depth we'll\n # need (NATOM). We don't want to shrink it, though, since we use list\n # comprehensions in list constructors in some places that have an implicit\n # (shallow) recursion, therefore, reducing the recursion limit too much here\n # could raise a recursion depth exceeded exception during a _Type/Atom/XList\n # creation. Therefore, set the recursion limit to the greater of the current\n # limit or the number of atoms\n setrecursionlimit(max(len(atom_list), getrecursionlimit()))\n\n # Unmark all atoms so we can track which molecule each goes into\n atom_list.unmark()\n\n # The molecule \"ownership\" list\n owner = []\n # The way I do this is via a recursive algorithm, in which\n # the \"set_owner\" method is called for each bonded partner an atom\n # has, which in turn calls set_owner for each of its partners and\n # so on until everything has been assigned.\n molecule_number = 1 # which molecule number we are on\n for i in range(len(atom_list)):\n # If this atom has not yet been \"owned\", make it the next molecule\n # However, we only increment which molecule number we're on if\n # we actually assigned a new molecule (obviously)\n if not atom_list[i].marked:\n tmp = [i]\n _set_owner(atom_list, tmp, i, molecule_number)\n # Make sure the atom indexes are sorted\n tmp.sort()\n owner.append(tmp)\n molecule_number += 1\n return owner",
"def add_chain_to_model(chain, model, atoms):\n\n if chain[\"type\"] == \"polymer\" or chain[\"type\"] == \"branched\":\n polymer = {\n \"internal_id\": chain[\"internal_id\"], \"sequence\": chain[\"sequence\"],\n \"helices\": [], \"strands\": [], \"residues\": {}\n }\n for i, group in enumerate(chain[\"groups\"], start=1):\n add_het_to_dict(group, chain, atoms, polymer[\"residues\"], number=i)\n add_ss_to_chain(polymer)\n model[\"polymer\"][chain[\"id\"]] = polymer\n else:\n for group in chain[\"groups\"]:\n add_het_to_dict(group, chain, atoms, model[chain[\"type\"]])",
"def assembleMol(self):\n\n\t\tnewMol = Molecule()\n\n\t\tfor atom in self.atomlist:\n\t\t\tres = atom.parentResidue\n\t\t\tchain = res.parentChain\n\n\t\t\tcurrChain = newMol.getChain(chain.name)\n\t\t\tif not currChain:\n\t\t\t\tcurrChain = newMol.newChain()\n\t\t\t\tcurrChain.copy(chain)\n\n\n\t\t\tcurrRes = currChain.getResidue(res.file_id)\n\t\t\tif not currRes:\n\t\t\t\tcurrRes = currChain.newResidue()\n\t\t\t\tcurrRes.copy(res)\n\n\t\t\tcurrRes.addAtom(atom)\n\n\t\treturn newMol",
"def isChainAssigned(chain):\n\n for residue in chain.residues:\n for atom in residue.atoms:\n if atom.atomSet:\n if atom.atomSet.resonanceSets:\n return True\n \n return False",
"def residues(ls):\n # List residue atom index to be restrained\n res_atom_set = set()\n\n # Dictionary of lists with the chain residues selected to be restrained\n # e.g. {chainA:[res1, res15], chainB:[res19, res17]}\n chain_dic = {'': []}\n\n # Fill out the chain dictionary\n i = 0\n while i < len(ls):\n if ls[i].isdigit():\n chain_dic[''].append(int(ls[i]))\n i += 1\n else:\n try:\n chain_dic[ls[i]].append(int(ls[i + 2]))\n except:\n chain_dic[ls[i]] = []\n chain_dic[ls[i]].append(int(ls[i + 2]))\n i += 3\n\n # Loop over the molecular system to select the atom indexes to be selected\n hv = oechem.OEHierView(system, oechem.OEAssumption_BondedResidue + oechem.OEAssumption_ResPerceived)\n for chain in hv.GetChains():\n chain_id = chain.GetChainID()\n if chain_id not in chain_dic:\n continue\n for frag in chain.GetFragments():\n for hres in frag.GetResidues():\n res_num = hres.GetOEResidue().GetResidueNumber()\n if res_num not in chain_dic[chain_id]:\n continue\n for oe_at in hres.GetAtoms():\n res_atom_set.add(oe_at.GetIdx())\n\n return res_atom_set",
"def _initialize_chain(self, test_dir, num_nodes, cachedir, extra_args=None, stderr=None):\n\n assert num_nodes <= MAX_NODES\n create_cache = False\n for i in range(MAX_NODES):\n if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))):\n create_cache = True\n break\n\n if create_cache:\n self.log.debug(\"Creating data directories from cached datadir\")\n\n # find and delete old cache directories if any exist\n for i in range(MAX_NODES):\n if os.path.isdir(os.path.join(cachedir, \"node\" + str(i))):\n shutil.rmtree(os.path.join(cachedir, \"node\" + str(i)))\n\n # Create cache directories, run dashds:\n set_genesis_mocktime()\n for i in range(MAX_NODES):\n datadir = initialize_datadir(cachedir, i)\n args = [os.getenv(\"DASHD\", \"dashd\"), \"-server\", \"-keypool=1\", \"-datadir=\" + datadir, \"-discover=0\", \"-mocktime=\"+str(GENESISTIME)]\n if i > 0:\n args.append(\"-connect=127.0.0.1:\" + str(p2p_port(0)))\n if extra_args is not None:\n args.extend(extra_args)\n bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr)\n self.log.debug(\"initialize_chain: dashd started, waiting for RPC to come up\")\n wait_for_bitcoind_start(bitcoind_processes[i], datadir, i)\n self.log.debug(\"initialize_chain: RPC successfully started\")\n\n self.nodes = []\n for i in range(MAX_NODES):\n try:\n self.nodes.append(get_rpc_proxy(rpc_url(get_datadir_path(cachedir, i), i), i))\n except:\n self.log.exception(\"Error connecting to node %d\" % i)\n sys.exit(1)\n\n # Create a 200-block-long chain; each of the 4 first nodes\n # gets 25 mature blocks and 25 immature.\n # Note: To preserve compatibility with older versions of\n # initialize_chain, only 4 nodes will generate coins.\n #\n # blocks are created with timestamps 10 minutes apart\n # starting from 2010 minutes in the past\n block_time = GENESISTIME\n for i in range(2):\n for peer in range(4):\n for j in range(25):\n set_node_times(self.nodes, block_time)\n self.nodes[peer].generate(1)\n block_time += 156\n # Must sync before next peer starts generating blocks\n sync_blocks(self.nodes)\n\n # Shut them down, and clean up cache directories:\n self.stop_nodes()\n self.nodes = []\n disable_mocktime()\n for i in range(MAX_NODES):\n os.remove(log_filename(cachedir, i, \"debug.log\"))\n os.remove(log_filename(cachedir, i, \"db.log\"))\n os.remove(log_filename(cachedir, i, \"peers.dat\"))\n os.remove(log_filename(cachedir, i, \"fee_estimates.dat\"))\n\n for i in range(num_nodes):\n from_dir = os.path.join(cachedir, \"node\" + str(i))\n to_dir = os.path.join(test_dir, \"node\" + str(i))\n shutil.copytree(from_dir, to_dir)\n initialize_datadir(test_dir, i) # Overwrite port/rpcport in dsah.conf",
"def __init__(self):\n self.chain = [Block.genesis()]",
"def residues(self, chain_id, model_num = 0, include_alt = False):\n resi = []\n for res in self.chain(chain_id, model_num):\n res.chain_id = chain_id\n if res.id[0] ==' ':\n resi.append(res)\n elif include_alt:\n resi.append(res)\n else:\n continue\n return resi",
"def atomList(self):\n\n\t\tal = []\t\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atom in res.atom:\n\t\t\t\t\tal.append(atom)\n\n\t\treturn al",
"def init(self):\n\n logger.info(mm_chain.ackn_str)\n self.acknowledgements = mm_chain.ackn_str\n self.references = mm_chain.refs['chain']\n\n return",
"def addChain(self, chain):\n\n\t\tself.chain.append(chain)\n\t\tchain.parentMolecule = self",
"def build_chain(\n self,\n use_visualize=False,\n create_lmpdata_file=False,\n create_lmpinput_file=False,\n bondscale=1.1,\n ffield=\"Dreiding\",\n charge=\"Gasteiger\",\n cell=True,\n lammps_min=False,\n lammps_min_levels=1,\n ): # **kwargs):\n # Build the path to the sample files.\n # in_path = os.path.join(current_location, '..', 'data', 'polymod_input', 'sample_chain.txt')\n # monomer_path = os.path.join(current_location, '..', 'data', 'pdb', 'monomer', 'PAN.pdb')\n\n # Build helix\n if self.custom:\n holder, unit_vector, unit_distance = self.build_helix()\n\n else:\n holder = self.build_helix()\n\n # Calculate unit distance and unit vector\n unit, unit_vector, unit_distance = chain_periodicity(\n holder, self.num_monomers, self.unit_num_monomers\n )\n\n # Correct chain orientation\n holder = correct_chain_orientation(holder, unit_vector)\n\n # Create infinite periodic polymer helix chain\n if self.infinite:\n if not self.custom:\n holder = create_infinite_chain(holder, self.num_monomers)\n\n # Correct chain position\n holder = correct_chain_position(holder, cell)\n\n if self.custom:\n helix_name = (\n self.polymer_type.name + \"_custom\" + (\"_inf\" if self.infinite else \"\")\n )\n\n else:\n # Further correct chain position for infinite chain\n if self.infinite:\n holder = correct_infinite_chain_position(holder)\n\n helix_name = self.polymer_type.name + \"_helix_%s%s%s%s%s\" % (\n self.helice,\n \"_\" + self.tacticity[:3] if self.tacticity else \"\",\n \"+\"\n if self.chiriality == \"right\"\n else \"-\"\n if self.chiriality == \"left\"\n else \"\",\n \"_custom\" if self.head_tail_defect_ratio else \"\",\n \"_inf\" if self.infinite else \"\",\n )\n\n self.helix_name = helix_name\n write_pdb(helix_name + \".pdb\", holder.el_names, holder.pos) # noqa: F405\n\n if cell:\n self.built = 1\n else:\n self.built = 0\n self.holder = holder\n\n maxi_array = get_maximum_position(holder.pos)\n self.x = maxi_array[0] if cell else maxi_array[0] + 15\n self.y = maxi_array[1] if cell else maxi_array[1] + 15\n if self.infinite:\n self.z = unit_distance * (self.num_monomers - 2) / self.unit_num_monomers\n else:\n self.z = maxi_array[2] if cell else maxi_array[2] + 15\n self.alpha = 90\n self.beta = 90\n self.gamma = 90\n\n # use_visualize = False\n # create_lmpdata_file = False\n # bondscale = 1.1\n # ffield = \"Dreiding\"\n # charge = \"Gasteiger\"\n # create_lmpinput_file = False\n\n # for key in kwargs:\n # if key == \"use_visualize\":\n # use_visualize = kwargs[\"use_visualize\"]\n # elif key == \"create_lmpdata_file\":\n # create_lmpdata_file = kwargs[\"create_lmpdata_file\"]\n # elif key == \"bondscale\":\n # bondscale = kwargs[\"bondscale\"]\n # elif key == \"ffield\":\n # ffield = kwargs[\"ffield\"]\n # elif key == \"charge\":\n # charge = kwargs[\"charge\"]\n # elif key == \"create_lmpinput_file\":\n # create_lmpinput_file = kwargs[\"create_lmpinput_file\"]\n # else:\n # raise KeyError(\n # \"Unknown input %s for build_chain function\\n Please see help for more information\"\n # % key\n # )\n\n # Create LAMMPS data file\n if create_lmpdata_file:\n Create_Data_File(\n helix_name + \".pdb\",\n bondscale=bondscale,\n ffield=ffield,\n charge=charge,\n xhi=self.x,\n yhi=self.y,\n zhi=self.z,\n alpha=self.alpha,\n beta=self.beta,\n gamma=self.gamma,\n outputName=helix_name,\n )\n\n # Create LAMMPS input file\n if create_lmpinput_file:\n write_lmp_ifile( # noqa: F405\n ffield=ffield,\n datafile=helix_name + \".data\",\n potentialfile=\"X6paircoeffs.txt\",\n lammps_min=lammps_min,\n 
lammps_min_levels=lammps_min_levels,\n )\n\n # View chain structure\n if use_visualize:\n write_pdb( # noqa: F405\n helix_name + \"_view.pdb\", holder.el_names, holder.pos, connect=False\n )\n # if create_lmpdata_file:\n # ovito_view(\n # helix_name + \".data\", helix_name + \"_Front.png\", view=\"Front\"\n # )\n # ovito_view(helix_name + \".data\", helix_name + \"_Top.png\", view=\"Top\")\n # else:\n # write_pdb(f\"{helix_name}_ovito.pdb\", h.el_names, h.pos, connect=False)\n ovito_view(\n helix_name + \"_view.pdb\", helix_name + \"_Front.png\", view=\"Front\"\n )\n ovito_view(helix_name + \"_view.pdb\", helix_name + \"_Top.png\", view=\"Top\")\n\n return helix_name",
"def chain_new(ctx, chain_name):\n project = ctx.obj['PROJECT']\n new_local_chain(project.project_dir, chain_name)",
"def get_atoms_list(chain):\n type_chain = check_type(chain)\n if type_chain == \"protein\":\n atom_id = \"CA\"\n elif type_chain == \"nucleic_acid\":\n atom_id = \"P\"\n atoms = chain.get_atoms()\n atoms_list = []\n for atom in atoms:\n if atom.id == atom_id:\n atoms_list.append(atom)\n return atoms_list",
"def __init__(self, call_chain=None):\n if call_chain is None:\n call_chain = []\n self._call_chain = call_chain",
"def genChains(self):\n self.numMonomer = 0\n self.numBonds = 0\n self.numMols = 0\n self.numCations = 0\n self.numAnions = 0\n\n self.atomsCoords = []\n self.atomsType = []\n self.atomsCharge = []\n self.molId = []\n self.bondList = []\n \n for i in range(self.numPa + self.numPc):\n\n if i < self.numPc:\n # polycation chains, charge in LJ units of LAMMPS\n # electron charge would be 10.54 using bare LAMMPS LJ units\n # the dielectric constans of solvent is effectively taken as 111 when assign 1 to +e\n # just need to set dielectric as 0.72 in LAMMPS ot mimic water with dielectric constant 80\n self.beadCharge = 1\n self.beadType = 1 # atomic type for neutral beads in polycation chains\n self.chain = self.lenPc\n else:\n self.beadCharge = -1 # polyanion chains\n self.beadType = 3 # atomic type for neutral beads in polyanion chains\n self.chain = self.lenPa\n\n self.numMols += 1\n\n # generate the first bead of each chain randomly\n self.numMonomer += 1\n self.cxyz = np.random.rand(3) * self.box + self.lxyz\n\n self.atomsCoords.append(self.cxyz)\n #self.atomsType.append(self.beadType)\n\n # decide if the first bead is charged or not\n if self.chargeRepeat == 1:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsType.append(self.beadType)\n self.atomsCharge.append(0)\n\n self.molId.append(self.numMols)\n\n self.currpxyz = self.cxyz\n\n # follow random walk to generate the chain\n # generate the seconb bead of the chain\n self.theta, self.phi = np.random.rand(2) * np.array([np.pi, 2 * np.pi])\n self.ds = np.array([np.cos(self.theta), np.sin(self.theta) * np.cos(self.phi),\\\n np.sin(self.theta) * np.sin(self.phi)]) * self.segment\n\n self.cxyz = self.currpxyz + self.ds\n\n self.numMonomer += 1\n self.atomsCoords.append(self.cxyz)\n\n # decide if the second bead is charged or not\n if 2%self.chargeRepeat == 0:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsCharge.append(0)\n self.atomsType.append(self.beadType)\n\n self.molId.append(self.numMols)\n \n self.numBonds += 1\n self.bondList.append([self.numMonomer - 1, self.numMonomer])\n\n self.currpxyz = self.cxyz\n\n self.currtheta = self.theta\n self.currphi = self.phi\n\n self.dstot += self.ds\n\n # generating the rest beads of the chain\n\n for k in range(3, self.chain+1):\n # only accept atoms that are beyong certain distance\n # from the atom precding the current atom in the chain\n self.theta, self.phi = np.random.rand() * np.array([np.pi - self.stiffangle, \\\n 2 * np.pi])\n self.ds1 = np.array([np.cos(self.theta), np.sin(self.theta) * np.cos(self.phi),\\\n np.sin(self.theta) * np.sin(self.phi)]) * self.segment\n\n self.reverseXZrotation()\n self.cxyz = self.currpxyz + self.ds\n\n self.numMonomer += 1\n self.atomsCoords.append(self.cxyz)\n\n if k % self.chargeRepeat == 0:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsCharge.append(0)\n self.atomsType.append(self.beadType)\n\n self.molId.append(self.numMols)\n self.numBonds += 1\n self.bondList.append([self.numMonomer - 1, self.numMonomer])\n\n self.currpxyz = self.cxyz\n\n self.currtheta = np.arccos(self.ds[0]/self.segment)\n if self.ds[2] > 0:\n self.currphi = 
np.arccos(self.ds[1]/self.segment/np.sin(self.currtheta))\n else:\n self.currphi = 2*np.pi - np.arccos(self.ds[1]/self.segment/np.sin(self.currtheta))\n\n self.dstot += self.ds\n\n print \"%d beads are generated.\\n\" % self.numMonomer \n assert self.numMonomer == self.numPc * self.lenPc + self.numPa * self.lenPa, \\\n \"The number of monomers in chains is wrong!\\n\"\n assert self.numCations == int(np.floor(self.lenPc * self.chargeFraction)*self.numPc), \\\n \"The number of positively charged beads is wrong!\\n\"\n assert self.numAnions == int(np.floor(self.lenPa * self.chargeFraction)*self.numPa), \\\n \"The number of negatively charged beads is wrong!\\n\"",
"def __init__(self, pdb_line, pdbstructure=None, extraParticleIdentifier='EP'):\n # We might modify first/final status during _finalize() methods\n self.is_first_atom_in_chain = False\n self.is_final_atom_in_chain = False\n self.is_first_residue_in_chain = False\n self.is_final_residue_in_chain = False\n # Start parsing fields from pdb line\n self.record_name = pdb_line[0:6].strip()\n try:\n self.serial_number = _parse_atom_index(pdb_line[6:11])\n except:\n # Just give it the next number in sequence.\n self.serial_number = pdbstructure._next_atom_number\n self.name_with_spaces = pdb_line[12:16]\n alternate_location_indicator = pdb_line[16]\n\n self.residue_name_with_spaces = pdb_line[17:20]\n # In some MD codes, notably ffamber in gromacs, residue name has a fourth character in\n # column 21\n possible_fourth_character = pdb_line[20:21]\n if possible_fourth_character != \" \":\n # Fourth character should only be there if official 3 are already full\n if len(self.residue_name_with_spaces.strip()) != 3:\n raise ValueError('Misaligned residue name: %s' % pdb_line)\n self.residue_name_with_spaces += possible_fourth_character\n self.residue_name = self.residue_name_with_spaces.strip()\n\n self.chain_id = pdb_line[21]\n try:\n self.residue_number = int(pdb_line[22:26])\n except:\n try:\n self.residue_number = int(pdb_line[22:26], 16) - 0xA000 + 10000\n except:\n # When VMD runs out of hex values it starts filling the residue ID field with ****.\n # Look at the most recent atoms to figure out whether this is a new residue or not.\n if pdbstructure._current_model is None or pdbstructure._current_model._current_chain is None or pdbstructure._current_model._current_chain._current_residue is None:\n # This is the first residue in the model.\n self.residue_number = pdbstructure._next_residue_number\n else:\n currentRes = pdbstructure._current_model._current_chain._current_residue\n if currentRes.name_with_spaces != self.residue_name_with_spaces:\n # The residue name has changed.\n self.residue_number = pdbstructure._next_residue_number\n elif self.name_with_spaces in currentRes.atoms_by_name:\n # There is already an atom with this name.\n self.residue_number = pdbstructure._next_residue_number\n else:\n self.residue_number = currentRes.number\n self.insertion_code = pdb_line[26]\n # coordinates, occupancy, and temperature factor belong in Atom.Location object\n x = float(pdb_line[30:38])\n y = float(pdb_line[38:46])\n z = float(pdb_line[46:54])\n try:\n occupancy = float(pdb_line[54:60])\n except:\n occupancy = 1.0\n try:\n temperature_factor = unit.Quantity(float(pdb_line[60:66]), unit.angstroms**2)\n except:\n temperature_factor = unit.Quantity(0.0, unit.angstroms**2)\n self.locations = {}\n loc = Atom.Location(alternate_location_indicator, unit.Quantity(Vec3(x,y,z), unit.angstroms), occupancy, temperature_factor, self.residue_name_with_spaces)\n self.locations[alternate_location_indicator] = loc\n self.default_location_id = alternate_location_indicator\n # segment id, element_symbol, and formal_charge are not always present\n self.segment_id = pdb_line[72:76].strip()\n self.element_symbol = pdb_line[76:78].strip()\n try: self.formal_charge = int(pdb_line[78:80])\n except ValueError: self.formal_charge = None\n # figure out atom element\n if self.element_symbol == extraParticleIdentifier:\n self.element = 'EP'\n else:\n try:\n # Try to find a sensible element symbol from columns 76-77\n self.element = element.get_by_symbol(self.element_symbol)\n except KeyError:\n self.element = None\n if 
pdbstructure is not None:\n pdbstructure._next_atom_number = self.serial_number+1\n pdbstructure._next_residue_number = self.residue_number+1",
"def get_seq_from_pdbchain(chain):\n type_chain = check_type(chain)\n if type_chain == \"protein\":\n three_res_list = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'CA':\n residue = atom.get_parent()\n three_res_list.append(residue.get_resname())\n return three_to_one(three_res_list) # three_to_one function\n else:\n nucleic_acid_res = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'P':\n residue = atom.get_parent()\n nucleic_acid_res.append(residue.get_resname())\n nucleic_acid_seq = [x[2] for x in nucleic_acid_res]\n return \"\".join(nucleic_acid_seq)",
"def test_multi_arg_init(self):\n sch = scheme.Scheme(\n 'item-1',\n 'item-2',\n 'item-3'\n )\n\n assert len(sch.args) == 3\n assert sch._flat is None",
"def chainsetup(filename, cation, facets, operation, end_radii, nradii,\n adensity):\n\n # Load the Cage from the file\n try:\n # If that fails, try other file formats supported by pymatgen\n anion = Cage.from_file(filename)\n except ValueError:\n # If that fails, try the VASP POSCAR format\n anion = Cage.from_poscar(filename)\n\n # Center the anion around the origin\n anion.center()\n\n # Find the chain edges, i.e. the paths between the edge sharing facets of\n # the chain of non-equivalent facets.\n anion.find_surface_facets(ignore=IGNORE)\n\n if not facets == tuple:\n chosen_facets = [anion.facets[index] for index in facets]\n edges = anion.find_noneq_chain_links(chosen_facets)\n else:\n edges = anion.find_noneq_chain_links()\n\n total_mol = anion.copy()\n\n chain_dir = 'chain_' + operation\n try:\n os.mkdir(chain_dir)\n except FileExistsError:\n pass\n\n # For each edge, set up the calculation input files\n edge_number = 1\n\n for edge in edges:\n\n # Set up the edge directory\n edge_dir = os.path.join(chain_dir, \"edge\" + str(edge_number))\n\n while os.path.exists(edge_dir):\n edge_number += 1\n edge_dir = os.path.join(chain_dir, \"edge\" + str(edge_number))\n\n os.mkdir(edge_dir)\n\n # Write out the molecule and path facets to the edge directory\n anion.to(fmt=\"json\", filename=os.path.join(edge_dir, \"molecule.json\"))\n edge[0].to(fmt=\"json\", filename=os.path.join(edge_dir,\n \"init_facet.json\"))\n edge[1].to(fmt=\"json\", filename=os.path.join(edge_dir,\n \"final_facet.json\"))\n\n # Get copies so the originals aren't mutated\n edge_mol = anion.copy()\n facet1 = edge[0].copy()\n facet2 = edge[1].copy()\n\n if edge == edges[-1]:\n remove_endline = False\n else:\n remove_endline = True\n\n # Set up the landscape\n landscape = set_up_edge_landscape(facet1, facet2,\n endpoint_radii=end_radii,\n number_of_radii=nradii,\n angle_density=adensity,\n remove_endline=remove_endline)\n\n # Get the molecule for each landscape point\n molecules = set_up_molecules(edge_mol, landscape, cation)\n\n # Set up an xyz file to visualize the edge and total landscape\n for point in landscape.points:\n try:\n total_mol.append(pmg.Specie(cation, 1), point,\n validate_proximity=False)\n edge_mol.append(pmg.Specie(cation, 1), point,\n validate_proximity=False)\n except ValueError:\n pass\n\n edge_mol.to(fmt=\"xyz\", filename=os.path.join(edge_dir, \"edge.xyz\"))\n\n # In case the molecules must be optimized, add the constraints and\n # optimization setup (DRIVER)\n if operation == \"optimize\":\n far_facet = anion.find_farthest_facet(landscape.center)\n constraints = find_constraints(anion, far_facet.sites)\n constraints['fix atom'] += ' ' + str(len(anion.sites) + 1)\n ALT_SETUP['constraints'] = constraints\n ALT_SETUP[\"driver\"] = DRIVER_SETUP\n\n # Set up the task for the calculations\n tasks = [nwchem.NwTask(molecules[0].charge, None, BASIS,\n theory=\"dft\",\n operation=operation,\n theory_directives=THEORY_SETUP,\n alternate_directives=ALT_SETUP)]\n\n # Set up the input files\n study = Study(molecules, tasks)\n study.set_up_input(edge_dir, sort_comp=False,\n geometry_options=GEO_SETUP)\n\n edge_number += 1\n\n # Set up an xyz file with all the paths\n total_mol.to(fmt=\"xyz\", filename=os.path.join(chain_dir, \"total_mol.xyz\"))"
] | [
"0.55998135",
"0.5518003",
"0.5513605",
"0.54702634",
"0.54258364",
"0.5373895",
"0.53632593",
"0.53512615",
"0.53424484",
"0.5340515",
"0.5302385",
"0.5292807",
"0.525962",
"0.52405804",
"0.5200973",
"0.5157682",
"0.51401025",
"0.5090289",
"0.5086073",
"0.50783587",
"0.5050562",
"0.49951208",
"0.4989453",
"0.49398938",
"0.49385545",
"0.49232262",
"0.49117008",
"0.48678666",
"0.48676574",
"0.48617864"
] | 0.7775325 | 0 |
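A minimal usage sketch for the chain-construction entry above. The PDB_chain/PDB_residue classes themselves are not reproduced here; instead a hypothetical FakeAtom stand-in (invented for illustration, not part of the source) is grouped by res_id and given 1-based chain-local IDs, which is the same idea the __init__ above implements.

from dataclasses import dataclass
from itertools import groupby

# Hypothetical stand-in atom record; the field names (res_id, chain) mirror
# the attributes the document code reads, but this class is illustrative only.
@dataclass
class FakeAtom:
    res_id: int
    chain: str
    name: str

# Atoms as a parser might hand them over: one chain, consecutive residues.
atoms = [
    FakeAtom(1, "A", "N"), FakeAtom(1, "A", "CA"),
    FakeAtom(2, "A", "N"), FakeAtom(2, "A", "CA"),
]

# Consecutive atoms sharing a res_id form one residue; residues then get
# 1-based chain-local IDs, as in the __init__ loop above.
residues = [list(group) for _, group in groupby(atoms, key=lambda a: a.res_id)]
for chain_local_id, res_atoms in enumerate(residues, start=1):
    print(chain_local_id, [a.name for a in res_atoms])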
Function which returns a residue from a chain. If chainLocal = False (the default), the global resID is used to search for a residue. If, on the other hand, chainLocal is set to True, then the resid local to that chain is used. Chain-local ID values ALWAYS start from 1 for the first residue in a chain and increment by one through the chain. They provide an easy internal way to get (for example) the first and last residue in the chain explicitly, or corresponding residues when dealing with many identical species. A usage sketch follows this entry. | def get_residue(self, resid, chainLocal=False):
try:
resid = int(resid)
except ValueError as e:
print("ERROR: get_residue() requires a numeric value")
raise e
if type(resid) == int:
if chainLocal:
for res in self.residues:
if res.chain_local_id == resid:
return res
else:
for res in self.residues:
if res.res_id == resid:
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getResidue(self, resi):\n\n\t\tif self.numChains == 0:\n\t\t\tprint \"WARNING: Molecule has no chains\"\n\t\t\treturn None\n\n\t\tresi = int(resi)\n\t\tfor chn in self.chain:\n\t\t\tfor res in chn.residue:\n\t\t\t\tif int(res.file_id) == resi:\n\t\t\t\t\treturn res\n\n\t\treturn None",
"def get_residue(self, resnum, chain):\n res_chain = self.get_chain(chain)\n residue = []\n for line in res_chain.split(\"\\n\"):\n if str(resnum) == str(line[22:26].strip()):\n residue.append(line)\n return \"\\n\".join(residue)",
"def residue(self, resnum, chain_id, icode=' ', alt=' ', model_num = 0):\n res = self.struct[model_num][chain_id][(alt, resnum, icode)]\n res.chain_id = chain_id\n return res",
"def get_residue(self, chainid, resid) :\n if not self._res_dict :\n d = {}\n for r in self.residues :\n d[ (r.chainid, r.resid)] = r\n self._res_dict =d\n \n return self._res_dict[(chainid, resid)]",
"def chain_serial(self):\n return self.structure.chain_serial[self.mask]",
"def check_residue(self, chain_id, resid, resname):\n if resname in PTM_lookup.keys():\n return\n else:\n try:\n ref_resname = PTM_reverse_lookup[resname]\n except KeyError:\n # print \"skipping unrecognized residue\", resname\n return\n ptm_dict = PTM_lookup[ref_resname][resname]\n return (chain_id, resid, ref_resname, ptm_dict[\"goto_atom\"], ptm_dict[\"name\"])",
"def getResonanceResidue(resonance):\n\n residue = None\n if resonance.resonanceSet:\n residue = resonance.resonanceSet.findFirstAtomSet().findFirstAtom().residue\n \n return residue",
"def getChain(self, chain):\n\n\t\tfor i in self.chain:\n\t\t\tif i.name == chain:\n\t\t\t\treturn i\n\n\t\treturn None",
"def residues(self, chain_id, model_num = 0, include_alt = False):\n resi = []\n for res in self.chain(chain_id, model_num):\n res.chain_id = chain_id\n if res.id[0] ==' ':\n resi.append(res)\n elif include_alt:\n resi.append(res)\n else:\n continue\n return resi",
"def chain(self, chain_id, model_num = 0):\n return self.struct[model_num][chain_id]",
"def define_residue_order(self, chainID, resID, atomOrder):\n\n chain = self.chains[chainID]\n residue = chain.get_residue(resID, chainLocal=True)\n residue.set_residue_order(atomOrder)",
"def getResidue(self, resname):\n if self.hasResidue(resname):\n return self.map[resname]\n else:\n return None",
"def _side_chain_representative(self, residue_id):\n\n # Get biopython residue object\n residue = self._residue_from_residue_id(residue_id)\n if residue is None:\n return None\n\n # Get residue name\n residue_name = residue.get_resname()\n\n # Convert non-standard amino acids if applicable\n if residue_name not in STANDARD_AMINO_ACIDS:\n try:\n residue_name = NON_STANDARD_AMINO_ACID_CONVERSION[residue_name]\n except KeyError:\n return None\n\n # Get side chain representative\n try:\n atom_name = SIDE_CHAIN_REPRESENTATIVE[residue_name]\n atom = residue[atom_name]\n return atom\n except KeyError:\n return None",
"def __init__(self, atomlist):\n\n self.residues = []\n\n current_res = atomlist[0].res_id\n\n temp_res =[]\n\n for i in atomlist:\n if i.res_id == current_res:\n temp_res.append(i)\n else:\n self.residues.append(PDB_residue(temp_res))\n temp_res = []\n current_res = i.res_id\n temp_res.append(i)\n \n \n\n self.residues.append(PDB_residue(temp_res)) # get final residue\n\n if len(self.residues) > 0: \n self.chain_name = self.residues[0].chain\n else:\n self.chain_name = None\n \n chain_local_id = 1 \n\n ## CHAIN LOCAL ID variables in atom and residue\n ## objects are set here!\n ##\n for res in self.residues:\n for atom in res.atoms:\n atom.chain_local_id = chain_local_id\n res.chain_local_id = chain_local_id\n chain_local_id=chain_local_id+1",
"def get_seq_from_pdbchain(chain):\n type_chain = check_type(chain)\n if type_chain == \"protein\":\n three_res_list = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'CA':\n residue = atom.get_parent()\n three_res_list.append(residue.get_resname())\n return three_to_one(three_res_list) # three_to_one function\n else:\n nucleic_acid_res = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'P':\n residue = atom.get_parent()\n nucleic_acid_res.append(residue.get_resname())\n nucleic_acid_seq = [x[2] for x in nucleic_acid_res]\n return \"\".join(nucleic_acid_seq)",
"def get_chain(self, chain_id):\n if self.chain_dict.has_key(chain_id):\n return self.chain_dict[chain_id]\n return None",
"def _residue_from_residue_id(self, residue_id):\n\n residues = list(self._data_complex.get_residues())\n # Select residue of interest\n # residue.get_id()[2] == \" \" makes sure that only the first residue is selected in case\n # there are residues with insertion codes for the residue of interest\n residue = [\n residue\n for residue in residues\n if (residue.get_id()[1] == residue_id) and (residue.get_id()[2] == \" \")\n ]\n\n if len(residue) == 1:\n return residue[0]\n elif len(residue) == 0:\n return None\n else:\n residue_ids = [residue.get_id()[1] for residue in residues]\n residue_ids_duplicated = [\n item for item, count in collections.Counter(residue_ids).items() if count > 1\n ]\n raise KeyError(\n f\"{len(residue)} residues were found, but must be 1 or 0. \"\n f\"Duplicated residue serial number(s): {residue_ids_duplicated}\"\n )",
"def getResidue(self, resnum, icode=None):\n\n return self._hv.getResidue(self.getChid(), resnum, icode,\n self.getSegname())",
"def get_chain_name (chain):\n if \"-\" in chain.id:\n id_chain=chain.id[-1]\n else:\n id_chain=chain.id\n return id_chain",
"def residues(ls):\n # List residue atom index to be restrained\n res_atom_set = set()\n\n # Dictionary of lists with the chain residues selected to be restrained\n # e.g. {chainA:[res1, res15], chainB:[res19, res17]}\n chain_dic = {'': []}\n\n # Fill out the chain dictionary\n i = 0\n while i < len(ls):\n if ls[i].isdigit():\n chain_dic[''].append(int(ls[i]))\n i += 1\n else:\n try:\n chain_dic[ls[i]].append(int(ls[i + 2]))\n except:\n chain_dic[ls[i]] = []\n chain_dic[ls[i]].append(int(ls[i + 2]))\n i += 3\n\n # Loop over the molecular system to select the atom indexes to be selected\n hv = oechem.OEHierView(system, oechem.OEAssumption_BondedResidue + oechem.OEAssumption_ResPerceived)\n for chain in hv.GetChains():\n chain_id = chain.GetChainID()\n if chain_id not in chain_dic:\n continue\n for frag in chain.GetFragments():\n for hres in frag.GetResidues():\n res_num = hres.GetOEResidue().GetResidueNumber()\n if res_num not in chain_dic[chain_id]:\n continue\n for oe_at in hres.GetAtoms():\n res_atom_set.add(oe_at.GetIdx())\n\n return res_atom_set",
"def get_chain(self, chain_id):\n if self.default_model is None:\n return None\n if self.default_model.chain_dict.has_key(chain_id):\n return self.default_model.chain_dict[chain_id]\n return None",
"def get_chain1(self):\n return self.atom1.fragment.chain",
"def getResiduesByName(self, resn):\n\n\t\treslist = []\n\t\tfor chn in self.chain:\n\t\t\tfor res in chn.residue:\n\t\t\t\tif res.name == resn:\n\t\t\t\t\treslist.append(res)\n\n\t\treturn reslist",
"def atom(self, atom_name, resnum, chain_id, icode=' ', alt=' ', model_num=0):\n return self.struct[model_num][chain_id][(alt, resnum, icode)][atom_name]",
"def isChainAssigned(chain):\n\n for residue in chain.residues:\n for atom in residue.atoms:\n if atom.atomSet:\n if atom.atomSet.resonanceSets:\n return True\n \n return False",
"def get_next_residue(self):\n rawline = self.f.readline()\n while rawline:\n rematch = self.reslinere.match(rawline)\n if rematch:\n return (rematch.groups()[0], int(rematch.groups()[1]),\n float(rematch.groups()[2]), float(rematch.groups()[3]),\n float(rematch.groups()[4]), int(rematch.groups()[5]))\n # If we make it to a blank line, keep skipping forward until we hit\n # another CUMULATIVE record\n elif not rawline.strip():\n rematch2 = self.my_re.match(rawline)\n while not rematch2:\n rawline = self.f.readline()\n # Check if we hit EOF\n if not rawline: return None\n rematch2 = self.my_re.match(rawline)\n # end while not rematch2\n rawline = self.f.readline()\n # If we hit here, we are out of lines, or something\n return None",
"def residue(res_name):\n _init_dataset()\n array_dict = _residues[res_name]\n\n array = AtomArray(len(array_dict[\"res_name\"]))\n\n array.res_name = array_dict[\"res_name\"]\n array.atom_name = array_dict[\"atom_name\"]\n array.element = array_dict[\"element\"]\n array.charge = array_dict[\"charge\"]\n array.hetero = array_dict[\"hetero\"]\n\n array.coord[:,0] = array_dict[\"coord_x\"]\n array.coord[:,1] = array_dict[\"coord_y\"]\n array.coord[:,2] = array_dict[\"coord_z\"]\n\n array.bonds = BondList(\n array.array_length(),\n bonds = np.stack([\n array_dict[\"bond_i\"],\n array_dict[\"bond_j\"],\n array_dict[\"bond_type\"]\n ]).T\n )\n\n return array",
"def get_chain_refined(chain_original, chain_pattern):\n new_chain = Bio.PDB.Chain.Chain('X')\n\n for residue, pattern in zip(chain_original.get_residues(), chain_pattern):\n if pattern == 1:\n new_chain.add(residue.copy())\n return new_chain",
"def get_chain(self, chain):\n if not self.atom_section:\n self.read_atoms_section()\n chain_lines = []\n for at_line in self.atom_section:\n if at_line[21:22] == chain:\n chain_lines.append(at_line)\n return \"\".join(chain_lines)",
"def selectResN(self):\n\n\t\tif len(self.resn) == 0:\n\t\t\treturn\n\n\t\ttmplist = []\n\t\tfor atom in self.atomlist:\n\t\t\tfound = False\n\t\t\tfor resn in self.resn:\n\t\t\t\tif atom.parentResidue.name == resn:\n\t\t\t\t\tfound = True\n\t\t\t\t\tbreak\n\n\t\t\tif found and not self.invresn:\n\t\t\t\ttmplist.append(atom)\n\t\t\tif not found and self.invresn:\n\t\t\t\ttmplist.append(atom)\n\n\t\tself.atomlist = tmplist"
] | [
"0.668893",
"0.6529663",
"0.61339056",
"0.59747255",
"0.5585247",
"0.5462381",
"0.54249173",
"0.53624696",
"0.53349656",
"0.5316846",
"0.5260693",
"0.5238058",
"0.51962155",
"0.51552856",
"0.5132912",
"0.51258445",
"0.5090824",
"0.5040737",
"0.5038784",
"0.50335884",
"0.4956886",
"0.4946355",
"0.4843393",
"0.48403767",
"0.48386288",
"0.48090112",
"0.48004666",
"0.47858927",
"0.4785203",
"0.47345132"
] | 0.82316744 | 0 |
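A minimal usage sketch for the get_residue entry above. The real PDB_chain/PDB_residue classes are not reproduced; a hypothetical namedtuple stand-in (invented for illustration) carries just the two ID fields the lookup uses, and the helper below mimics the two search modes (global res_id versus 1-based chain-local ID).

from collections import namedtuple

# Hypothetical residue stand-in with only the two IDs get_residue() consults.
Res = namedtuple("Res", ["res_id", "chain_local_id"])
residues = [Res(101, 1), Res(102, 2), Res(103, 3)]

def lookup_residue(residues, resid, chainLocal=False):
    # Same two modes as the document code: chain-local ID or global res_id.
    key = (lambda r: r.chain_local_id) if chainLocal else (lambda r: r.res_id)
    return next((r for r in residues if key(r) == int(resid)), None)

print(lookup_residue(residues, 102))                 # search by global res_id
print(lookup_residue(residues, 1, chainLocal=True))  # first residue in the chain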
Function where you can specify the atom order for a residue. Useful because different forcefields have annoying defaults for how residues are parsed, and these occasionally differ, so this provides an easy way to define the order explicitly and rewrite. | def define_residue_order(self, chainID, resID, atomOrder):
chain = self.chains[chainID]
residue = chain.get_residue(resID, chainLocal=True)
residue.set_residue_order(atomOrder) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reorder(self, new_order):\n #TODO doesn't work probably CRA 3/2019\n for field in [\"atoms\", \"xyz\"]:\n self.__dict__[field] = self.__dict__[field][list(new_order)]\n self.atoms = [self.atoms[i] for i in new_order]",
"def setOrder(self, verbose = 1):\n\n self.order = np.arange(self.atoms.shape[0])\n if verbose > 0:\n string = \"Updated the saved order\"\n ut.infoPrint(string)",
"def set_bond_order(molecule, bond_index, bond_order):\n return molecule.SetBondOrder(bond_index, bond_order)",
"def setOrder(self, *args):\n return _libsbml.CompartmentGlyph_setOrder(self, *args)",
"def test_order_atoms(self):\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz10['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order1'])\n mol3 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order2'])\n converter.order_atoms(ref_mol=mol1, mol=mol2)\n for atom1, atom2 in zip(mol1.atoms, mol2.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n converter.order_atoms(ref_mol=mol3, mol=mol1)\n for atom1, atom2 in zip(mol3.atoms, mol1.atoms):\n self.assertEqual(atom1.symbol, atom2.symbol)\n\n ref_mol = Molecule(smiles='[CH](CC[CH]c1ccccc1)c1ccccc1')\n mol_list = ref_mol.copy(deep=True).generate_resonance_structures(keep_isomorphic=False, filter_structures=True)\n for mol in mol_list:\n converter.order_atoms(ref_mol=ref_mol, mol=mol)\n bond_dict = dict()\n for index1, atom1 in enumerate(ref_mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = ref_mol.atoms.index(atom2)\n if index1 < index2:\n if index1 not in bond_dict:\n bond_dict[index1] = [index2]\n else:\n bond_dict[index1].append(index2)\n for index1, atom1 in enumerate(mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = mol.atoms.index(atom2)\n if index1 < index2:\n self.assertIn(index2, bond_dict[index1]) # check that these atoms are connected in all mols",
"def __resequence(self):\n if self.__reltype_ordering:\n def reltype_key(rel):\n reltype = rel._reltype\n if reltype in self.__reltype_ordering:\n return self.__reltype_ordering.index(reltype)\n return len(self.__reltype_ordering)\n\n def partname_idx_key(rel):\n partname = util.Partname(rel._target.partname)\n if partname.idx is None:\n return 0\n return partname.idx\n self._values.sort(key=lambda rel: partname_idx_key(rel))\n self._values.sort(key=lambda rel: reltype_key(rel))\n # renumber consistent with new sort order\n for idx, relationship in enumerate(self._values):\n relationship._rId = 'rId%d' % (idx+1)\n else:\n self._values.sort(key=lambda rel: rel._num)",
"def XmlFieldOrder(self) -> XmlFieldOrderOption:",
"def test_parameterize_ethanol_different_reference_ordering_rdkit(self, force_field):\n toolkit_registry = ToolkitRegistry(\n toolkit_precedence=[RDKitToolkitWrapper, AmberToolsToolkitWrapper]\n )\n pdbfile = app.PDBFile(get_data_file_path(\"systems/test_systems/1_ethanol.pdb\"))\n\n # Load the unique molecules with one atom ordering\n molecules1 = [Molecule.from_file(get_data_file_path(\"molecules/ethanol.sdf\"))]\n topology1 = Topology.from_openmm(\n pdbfile.topology,\n unique_molecules=molecules1,\n )\n\n omm_system1 = force_field.create_openmm_system(\n topology1,\n toolkit_registry=toolkit_registry,\n )\n\n # Load the unique molecules with a different atom ordering\n molecules2 = [\n Molecule.from_file(get_data_file_path(\"molecules/ethanol_reordered.sdf\"))\n ]\n topology2 = Topology.from_openmm(\n pdbfile.topology,\n unique_molecules=molecules2,\n )\n omm_system2 = force_field.create_openmm_system(\n topology2,\n toolkit_registry=toolkit_registry,\n )\n\n serialized_1 = XmlSerializer.serialize(omm_system1)\n serialized_2 = XmlSerializer.serialize(omm_system2)\n\n serialized_1 = round_charge(serialized_1)\n serialized_2 = round_charge(serialized_2)\n\n assert serialized_1 == serialized_2",
"def test_canonical_ordering_rdkit(self):\n from openforcefield.utils.toolkits import RDKitToolkitWrapper\n\n rdkit = RDKitToolkitWrapper()\n # get ethanol in canonical order\n ethanol = create_ethanol()\n # get reversed non canonical ethanol\n reversed_ethanol = create_reversed_ethanol()\n # get the canonical ordering\n canonical_ethanol = reversed_ethanol.canonical_order_atoms(rdkit)\n # make sure the mapping between the ethanol and the rdkit ref canonical form is the same\n assert (\n True,\n {0: 2, 1: 0, 2: 1, 3: 8, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7},\n ) == Molecule.are_isomorphic(canonical_ethanol, ethanol, True)",
"def canonical_order(match):\n\n # match[0][0:3] contains the ID numbers of the 4 atoms in the match\n atom0 = match[0][0]\n atom1 = match[0][1]\n atom2 = match[0][2]\n atom3 = match[0][3]\n # match[1][0:2] contains the ID numbers of the the 3 bonds\n bond0 = match[1][0]\n bond1 = match[1][1]\n bond2 = match[1][2]\n if atom0 < atom3:\n # return ((atom0, atom1, atom2, atom3), (bond0, bond1, bond2)) same\n # as:\n return match\n else:\n return ((atom3, atom2, atom1, atom0), (bond2, bond1, bond0))",
"def test_canonical_ordering_rdkit(self):\n from openff.toolkit.utils.toolkits import RDKitToolkitWrapper\n\n rdkit = RDKitToolkitWrapper()\n # get ethanol in canonical order\n ethanol = create_ethanol()\n # get reversed non canonical ethanol\n reversed_ethanol = create_reversed_ethanol()\n # get the canonical ordering\n canonical_ethanol = reversed_ethanol.canonical_order_atoms(rdkit)\n # make sure the mapping between the ethanol and the rdkit ref canonical form is the same\n assert (\n True,\n {0: 2, 1: 0, 2: 1, 3: 8, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7},\n ) == Molecule.are_isomorphic(canonical_ethanol, ethanol, True)",
"def get_symbol_name_order(gdbval):\n return (symtab_node_name (gdbval), int(gdbval[\"order\"]))",
"def order_ideal(self, gens):",
"def _reorder(self, order):\n if len(order) == 1 and isinstance(order[0], (tuple, list)):\n order = order[0]\n if 0 in order:\n assert order[0] == 0, \"zero cannot be reordered\"\n else:\n order = [0] + list(order)\n assert len(order) == self._.d + 1, \"wrong number of indices\"\n assert set(order) == set(range(self._.d + 1)), \\\n \"repeating or nonexisting indices\"\n return tuple(order)",
"def get_bond_order(bond_type):\n if bond_type == 'single':\n return 1\n elif bond_type == 'aromatic':\n return 1.5\n elif bond_type == 'double':\n return 2\n elif bond_type == 'triple':\n return 3\n else:\n raise ValueError(f'Unexpected bond type: {bond_type.upper()}')",
"def test_order_atoms_in_mol_list(self):\n ref_mol = Molecule(smiles='[CH](CC[CH]c1ccccc1)c1ccccc1')\n mol_list = ref_mol.copy(deep=True).generate_resonance_structures(keep_isomorphic=False, filter_structures=True)\n success = converter.order_atoms_in_mol_list(ref_mol=ref_mol, mol_list=mol_list)\n self.assertTrue(success)\n bond_dict = dict()\n for index1, atom1 in enumerate(ref_mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = ref_mol.atoms.index(atom2)\n if index1 < index2:\n if index1 not in bond_dict:\n bond_dict[index1] = [index2]\n else:\n bond_dict[index1].append(index2)\n for mol in mol_list:\n for index1, atom1 in enumerate(mol.atoms):\n for atom2 in atom1.edges.keys():\n index2 = mol.atoms.index(atom2)\n if index1 < index2:\n self.assertIn(index2, bond_dict[index1]) # check that these atoms are connected in all mols",
"def atomIndex(molecule):\n for i in range(len(molecule.index) - 1):\n start = molecule.index[i]\n end = molecule.index[i+1]\n if end - start > 1:\n for I in range(start, end):\n element = molecule.type_list[I] + str(I)\n molecule.setAtoms(I, element = element)\n molecule.sort()\n return molecule",
"def getAmberParams(residue, name):\n atomname = name\n type = residue.get(\"type\")\n if type == 4:\n resname = residue.get(\"naname\")\n else:\n resname = residue.get(\"name\")\n\n # Residue Substitutions\n\n if residue.get(\"name\") == \"CYS\" and \"HG\" not in residue.get(\"map\"):\n resname = \"CYX\"\n elif residue.get(\"name\") == \"HIS\":\n if \"HD1\" in residue.get(\"map\") and \"HE2\" in residue.get(\"map\"):\n resname = \"HIP\"\n elif \"HD1\" in residue.get(\"map\"):\n resname = \"HID\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"HIE\"\n else:\n resname = \"HID\" # Default for no hydrogens\n elif residue.get(\"name\") == \"HSP\":\n resname = \"HIP\"\n elif residue.get(\"name\") == \"HSE\":\n resname = \"HIE\"\n elif residue.get(\"name\") == \"HSD\":\n resname = \"HID\"\n elif residue.get(\"name\") == \"GLU\" or residue.get(\"name\") == \"GLH\":\n if \"HE1\" in residue.get(\"map\"):\n resname = \"GLH\"\n if atomname == \"HE1\":\n atomname = \"HE2\"\n elif atomname == \"OE1\":\n atomname = \"OE2\"\n elif atomname == \"OE2\":\n atomname = \"OE1\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"GLH\"\n elif residue.get(\"name\") == \"ASP\" or residue.get(\"name\") == \"ASH\":\n if \"HD1\" in residue.get(\"map\"):\n resname = \"ASH\"\n if atomname == \"HD1\":\n atomname = \"HD2\"\n elif atomname == \"OD1\":\n atomname = \"OD2\"\n elif atomname == \"OD2\":\n atomname = \"OD1\"\n elif \"HD2\" in residue.get(\"map\"):\n resname = \"ASH\"\n\n if residue.get(\"isCterm\"):\n resname = \"C\" + resname\n elif residue.get(\"isNterm\"):\n resname = \"N\" + resname\n\n # Atom Substitutions\n\n if resname == \"WAT\":\n if atomname == \"O\":\n atomname = \"OW\"\n elif atomname == \"H1\":\n atomname = \"HW\"\n elif atomname == \"H2\":\n atomname = \"HW\"\n elif resname == \"ILE\":\n if atomname == \"CD\": atomname = \"CD1\"\n if resname[0] == \"N\" and resname != \"NME\": # N-terminal\n if atomname == \"H\": atomname = \"H1\"\n if (resname == \"CCYS\" or resname == \"NCYS\") and atomname == \"HG\": atomname = \"HSG\"\n if resname == \"CYM\" and atomname == \"H\": atomname = \"HN\"\n if residue.get(\"isNterm\") and resname == \"NPRO\" and atomname == \"HN2\":\n atomname = \"H2\"\n if residue.get(\"isNterm\") and resname == \"NPRO\" and atomname == \"HN1\":\n atomname = \"H3\"\n return resname, atomname",
"def makeResidueAtomSets(residue, aromaticsEquivalent=True):\n \n getResidueMapping(residue)\n \n equivalent = {}\n elementSymbolDict = {}\n nonequivalent = {}\n multiSet = {}\n chemAtomSetDict = {}\n inMultiSet = {}\n molType = residue.molResidue.molType\n \n for atom in residue.atoms: \n chemAtom = atom.chemAtom\n chemAtomSetDict[atom] = chemAtom\n elementSymbol = chemAtom.elementSymbol\n chemAtomSet = chemAtom.chemAtomSet\n\n if chemAtomSet is None:\n name = chemAtom.name\n makeAtomSet(name,(atom,),None,'simple')\n \n else:\n name = chemAtomSet.name\n elementSymbolDict[name] = elementSymbol\n chemAtomSetDict[name] = chemAtomSet\n if chemAtomSet.isEquivalent:\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and atom.atomSet and (len(atom.atomSet.atoms) > 1):\n # aromatic rotation prev set\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n elif (chemAtomSet.isEquivalent is None) and (not atom.atomSet) and aromaticsEquivalent:\n # aromatic rotation to be set\n if equivalent.get(name) is None:\n equivalent[name] = []\n equivalent[name].append(atom)\n \n else:\n if nonequivalent.get(name) is None:\n nonequivalent[name] = []\n nonequivalent[name].append(atom)\n \n if chemAtomSet.chemAtomSet is not None:\n multiName = chemAtomSet.chemAtomSet.name\n chemAtomSetDict[multiName] = chemAtomSet.chemAtomSet\n elementSymbolDict[multiName] = elementSymbol\n if multiSet.get(multiName) is None:\n multiSet[multiName] = {}\n multiSet[multiName][name] = 1\n inMultiSet[name] = multiName\n\n for groupName in equivalent.keys():\n atoms = equivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if len(atoms)==2:\n # not enough atoms for multi sets!\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n else:\n if inMultiSet.get(groupName):\n # e.g. for Val Hg1*\n makeAtomSet(groupName,atoms,chemAtomSet,'stereo')\n \n else:\n makeAtomSet(groupName,atoms,chemAtomSet,'simple')\n\n for groupName in nonequivalent.keys():\n atoms = nonequivalent[groupName]\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n atomSetNames = []\n \n if len(atoms) == 1:\n atom = atoms[0]\n # not enough atoms for prochiral. Corrupt ChemComp\n makeAtomSet(atom.name, atoms, None, 'simple')\n continue\n \n for atom in atoms:\n name = chemAtomSetDict[atom].name\n makeAtomSet(name,(atom,),chemAtomSet,'stereo')\n atomSetNames.append(name)\n\n for n, atom in enumerate(atoms):\n \n #name = chemAtomSetDict[atom].name\n #name2 = makeNonStereoName(molType, name, n)\n # Shouldn't have to do this if non-equiv groups have paired names\n \n name2 = makeNonStereoName(molType, '%s%d' % (chemAtomSet.name[:-1], n), n)\n \n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n\n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)\n\n for groupName in multiSet.keys():\n atomSetNames = multiSet[groupName].keys()\n elementSymbol = elementSymbolDict[groupName]\n chemAtomSet = chemAtomSetDict[groupName]\n if \"|\" in groupName:\n # we don't do these pseudoatoms in Analysis\n continue\n\n # e.g. 
for Val Hga*\n for n, atomSetName in enumerate(atomSetNames):\n name2 = makeNonStereoName(molType, atomSetName, n)\n makeGuiMultiAtomSet(residue, name2, atomSetNames,\n elementSymbol,'nonstereo',chemAtomSet)\n \n makeGuiMultiAtomSet(residue, groupName, atomSetNames,\n elementSymbol,'ambiguous',chemAtomSet)",
"def reorder(self,order='nodes'):\n if order == 'nodes':\n a = sort(self,axis=-1) # first sort rows\n order = sortByColumns(a)\n elif order == 'reverse':\n order = arange(self.nelems()-1,-1,-1)\n elif order == 'random':\n order = random.permutation(self.nelems())\n else:\n order = asarray(order)\n if not (order.dtype.kind == 'i' and \\\n (sort(order) == arange(order.size)).all()):\n raise ValueError,\"order should be a permutation of range(%s)\" % self.nelems()\n return order",
"def set_order(self, order):\n self.order = order",
"def set_order(self, order):\n self.order = order",
"def residue(self, resnum, chain_id, icode=' ', alt=' ', model_num = 0):\n res = self.struct[model_num][chain_id][(alt, resnum, icode)]\n res.chain_id = chain_id\n return res",
"def get_bond_order(molecule, bond_index):\n return molecule.GetBondOrder(bond_index)",
"def sort(self, reverse=False, seq_position=False):\n if seq_position:\n self.edits = sorted(self.edits, key=lambda edit: edit.sequence_position, reverse=reverse)\n else:\n self.edits.sort(reverse=reverse)",
"def RefAtom(Residue):\n\n RES = ['HOH','ALA','ARG','ASN','ASP','CYS','GLN','GLU','GLY','HIS','ILE','LEU','LYS','MET','PHE','PRO','SER','THR','TRP','TYR','VAL']\n REF = [['O'],['O'], ['NH1','NH2'], ['OD1'], ['OD1', 'OD2'], ['SG'], ['OE1'], ['OE1', 'OE2'], ['O'], ['ND1', 'NE2'], ['O'], ['O'], ['NZ'], ['SD'], ['O'], ['O'], ['OG'], ['OG1'], ['O'], ['OH'], ['O']]\n return REF[RES.index(Residue)][:]",
"def add_atom(atom_list, atom_name, residue_name, residue_number, x, y, z, atom_number):\n atom_list.append(Atom(atom_number, atom_name, residue_name, residue_number, x, y, z))\n return atom_list",
"def changeOrder(self):\n order = self.orderSpinBox.value()\n nfilter = int(str(self.filterComboBox.currentText()))\n if order > nfilter - 2:\n order = nfilter - 2\n if order < 1:\n order = 1\n self.orderSpinBox.setValue(order)\n self.order = order",
"def update_normalization_order(self):\n self._cache[\"input\"][\"order\"] = int(self.order.currentText())\n self.reset_input_style_defaults()\n self.fit_continuum(True)\n self.draw_continuum(True)\n return None",
"def order(self, order=0):\n # type: (int) -> Entity\n self.type_def['order'] = order\n\n return self"
] | [
"0.6063239",
"0.5962212",
"0.57026076",
"0.5530429",
"0.5500488",
"0.5479389",
"0.5399059",
"0.5277751",
"0.52450716",
"0.519707",
"0.51750755",
"0.51736563",
"0.5059151",
"0.5023142",
"0.50008786",
"0.49948952",
"0.49739462",
"0.495744",
"0.49457055",
"0.4943124",
"0.49398884",
"0.49398884",
"0.49189338",
"0.49124122",
"0.488052",
"0.48759785",
"0.48742273",
"0.48700976",
"0.48388517",
"0.48374116"
] | 0.7369942 | 0 |
Returns a list of instances of the service's model with the specified ids. | def get_all(self, *ids):
return self.__model__.query.filter(self.__model__.id.in_(ids)).all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)",
"def get_objects(self, ids, **args):\n args[\"ids\"] = \",\".join(ids)\n return self.request(self.version + \"/\", args)",
"def get_objects(self,ids):\n return model.objects.filter(pk__in=ids).order_by(search_field)",
"def filter(cls, ids):\n entities = api.get([key.Key(cls.__name__, i) for i in ids])\n return [cls.from_entity(e) for e in entities if e]",
"def get_objects(self, ids, **args):\n args[\"ids\"] = \",\".join(ids)\n return self.request(\"\", args)",
"def get_all(self, datastore, *ids):\n return datastore.query(self.__model__).filter(self.__model__.id.in_(ids)).all()",
"def fetch_instances(self, ids):\n result = []\n self.log.info(f\"fetch '{len(ids)}' instances\")\n self.log.debug(f\"fetch instance data for ids '{ids}'\")\n try:\n response = self.client.describe_instances(\n InstanceIds=ids\n )\n if 'HTTPStatusCode' in response['ResponseMetadata'] and response['ResponseMetadata']['HTTPStatusCode'] == 200:\n pass\n else:\n raise Exception(f'not able to fetch instacnes with ids: {ids}')\n if len(response['Reservations'][0]['Instances']) == 0:\n raise Exception(f'should retrun at least single insatance data')\n result = []\n for reservation in response[\"Reservations\"]:\n for el in reservation[\"Instances\"]:\n ec2 = EC2Instance.factory(el)\n if ec2.state:\n result.append(ec2)\n else:\n self.log.warn(f'instance \"{ec2.id}\" excluded')\n except Exception as e:\n raise Exception(f'exception when trying to fetch instance data {ids}')\n return sorted(list(result), key=lambda instance: instance.launch_time)",
"def get_all(self, *ids):",
"def convert_to_model(self, *args):\n services_data, *_ = args\n return [Service(**service) for service in services_data]",
"def get_models_from_table_ids(table_ids, force=None):\n models = []\n errors = []\n for table_id in table_ids:\n try:\n models.append(\n get_model_from_table_id(table_id)\n )\n except (\n IncorrectTableNameException,\n TableIdDoesNotExistException,\n ModelDoesNotExistException\n ) as exc:\n if not force:\n raise exc\n errors.append(exc.args)\n return models, errors",
"async def get_instances(self, **kwargs) -> List[ApiResource]:\n raw_resources = await self.get_resources(**kwargs)\n _instances = [\n self._resource_factory(_raw)\n for _raw in self._loop_raw(raw_resources)\n ]\n return _instances",
"def get_objects(self,ids):\n return Order.objects.filter(pk__in=ids).order_by('number')",
"def all(self, *args, **kwargs):\n list_to_return = []\n if not self.object_type:\n return list_to_return\n class_name = eval(self.object_type)\n if self.objects_id:\n for id in self.objects_id.split(';'):\n if id:\n list_to_return.append(class_name.objects.get(id=id))\n return list_to_return",
"def _get_objects(self, cr, uid, name, args=[], ids=None): \n obj = self.pool.get(name)\n if not ids:\n ids = obj.search(cr, uid, args)\n return obj.browse(cr, uid, ids)",
"def model_ids(self, protocol=None, groups=None):\n return [model.id for model in self.models(protocol, groups)]",
"def hent_observationer(self, ids: List[str]) -> List[Observation]:\n return self.session.query(Observation).filter(Observation.id.in_(ids)).all()",
"def get_dict_by_ids(model, ids):\n\n objects_dict = {}\n\n query = model.objects.filter(pk__in=ids)\n\n for obj in query:\n key = str(obj.id)\n objects_dict[key] = obj\n\n return objects_dict",
"def get_instances(cls):\n raise NotImplementedError",
"def GetInstances(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n instances = self._SendRequest(HTTP_GET,\n \"/%s/instances\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return instances\n else:\n return [i[\"id\"] for i in instances]",
"def findLocationByIds(cls, ids):\r\n for id in ids:\r\n return cls.query.filter_by(id=id[id])",
"def get_many(self, *ids: int) -> Dict[int, ModelledTable]:\n\n return self.model.get_many(self.cursor, *ids)",
"def get(self, request):\n MODEL_NOT_FOUND = -1\n model_ids = self.request.query_params.get(\"ids\", False)\n if not model_ids:\n return HttpResponse(status=400)\n else:\n model_ids = model_ids.split(\",\")\n results = []\n for model_id in model_ids:\n try:\n model = models.ModelRun.objects.get(id=model_id)\n results.append(\n {\"name\": model.name, \"id\": int(model_id), \"status\": model.status}\n if model.is_base or model.public or model.user == self.request.user\n else {\n \"name\": model.name,\n \"id\": int(model_id),\n \"status\": MODEL_NOT_FOUND,\n }\n )\n except models.ModelRun.DoesNotExist:\n results.append({\"id\": int(model_id), \"status\": MODEL_NOT_FOUND})\n\n return Response({\"results\": results})",
"def apply_model(data, ids):\n similar_vectors = load_model(data, ids)\n return similar_vectors",
"def get_results(self, ids):\n self.join()\n return [self.results[id] for id in ids]",
"def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models",
"def all_model_instances(self) -> Iterator['panda_core_data.model.Model']:\n for current_type in self.all_models:\n for current_instance in current_type.all_instances:\n yield current_instance",
"def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models",
"def models(self, protocol=None, groups=None):\n return self.clients(protocol, groups)",
"def list(self, project_id):\n endpoint = \"/project/{}/model\".format(project_id)\n return self._get(endpoint, _ModelSchema(many=True))"
] | [
"0.67031586",
"0.6699068",
"0.6696586",
"0.6638207",
"0.65994287",
"0.65258837",
"0.63502115",
"0.63468754",
"0.62926453",
"0.6194497",
"0.6034407",
"0.59933615",
"0.59566957",
"0.5881391",
"0.5861612",
"0.58427215",
"0.581983",
"0.58133096",
"0.5810381",
"0.58030975",
"0.5763386",
"0.57188046",
"0.57005",
"0.5636238",
"0.5609078",
"0.55943584",
"0.5585191",
"0.5580476",
"0.55717266",
"0.5552413"
] | 0.6741081 | 0 |
Immediately deletes the specified model instance. | def delete(self, model):
self._isinstance(model)
db.session.delete(model)
db.session.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_model(self, request, instance):\n pass",
"def delete(self):\n self.stop()\n try:\n self.skil.api.delete_model(self.deployment.id, self.model.id)\n except api_exception as e:\n self.skil.printer.pprint(\n \">>> Exception when calling delete_model_instance: %s\\n\" % e)",
"def delete(self):\n self.dbm().model_delete(self)",
"def delete(self, instance: BaseModel):\n with rconnect() as conn:\n id = str(instance.id)\n try:\n query = self.q.get(id).delete()\n rv = query.run(conn)\n except Exception as e:\n console.warn(e)\n raise\n else:\n return True",
"def delete_model(ModelName=None):\n pass",
"def soft_delete(self, instance):\n self.destroy(instance)",
"def model_delete(self, db):\n db.session.delete(self)\n db.session.commit()",
"def delete(self):\n self.id = uuid4()\n DataStore.remove_instance(self)",
"def _delete(self, model_obj):\n conn = self._get_session()\n\n try:\n model_obj.delete(\n index=self.model_cls._index._name,\n using=conn,\n refresh=True,\n )\n except Exception as exc:\n logger.error(f\"Error while creating: {exc}\")\n raise\n\n return model_obj",
"def delete(self):\n self._instance.delete()\n self._instance = None\n self._data_defs = []",
"def delete_model(self):\n os.remove(self.filepath)\n self.cmodel = None",
"def _delete_from_db(self, instance: DBModelInstance) -> None:\n self.db.session.delete(instance)\n self.db.session.commit()",
"def delete(self, using=None):\n self.model.remove_field(self)",
"def delete_instance(self):\n return self.delete().filter(**{\n self.get_pk_name(): self.get_pk()}).execute()",
"def DeleteModel(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()",
"def DeleteModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"async def delete(self, connection: SQLConnectionInterface, model: Model,\n _global: Model = None):\n await self.validate(connection, model, ValidationTypes.DELETE)\n await connection.execute(await self._delete_stmt(),\n model[self.identifier_key].value)",
"def __delete__(self, instance):\n self.session.close()",
"def delete(self):\n return self.parent.delete_instance(self.name)",
"def perform_destroy(self, instance):\n self.object.comments -= 1\n self.object.save()\n instance.delete()",
"def delete(self):\n if Model.data_connector:\n with Model.data_connector.u_lock:\n Model.data_connector.remove_object(self)",
"def force_delete(self):\n self.manager.force_delete(self)",
"def force_delete(self):\n self.manager.force_delete(self)",
"def delete_instance(model, *instance_or_pk):\r\n\r\n cache.delete_many([instance_key(model, x) for x in instance_or_pk])",
"def perform_destroy(self, instance):\n pass",
"def delete(self):\n self.manager.delete(self)",
"def delete(self, obj=None):\n pass",
"def delete(self, obj=None):\n if obj is not None:\n self.__session.delete(obj)\n self.save()",
"def _delete(self, model_obj):\n conn = self._get_session()\n db_item = None\n\n # Fetch the record from database\n try:\n identifier = getattr(model_obj, id_field(self.entity_cls).attribute_name)\n db_item = conn.query(self.model_cls).get(\n identifier\n ) # This will raise exception if object was not found\n except DatabaseError as exc:\n logger.error(f\"Database Record not found: {exc}\")\n raise\n\n if db_item is None:\n conn.rollback()\n conn.close()\n raise ObjectNotFoundError(\n {\n \"_entity\": f\"`{self.entity_cls.__name__}` object with identifier {identifier} \"\n f\"does not exist.\"\n }\n )\n\n try:\n conn.delete(db_item)\n except DatabaseError as exc:\n logger.error(f\"Error while deleting: {exc}\")\n raise\n finally:\n if not current_uow:\n conn.commit()\n conn.close()\n\n return model_obj",
"def clear_side_effect_model(self, model):\n if model:\n model.delete()"
] | [
"0.81506425",
"0.7652153",
"0.76048505",
"0.744475",
"0.737455",
"0.7273976",
"0.7251842",
"0.71769357",
"0.7160143",
"0.70483905",
"0.7047641",
"0.70271033",
"0.7008685",
"0.70044905",
"0.69935286",
"0.6987541",
"0.69523567",
"0.69240004",
"0.6912577",
"0.6905001",
"0.6900934",
"0.6875366",
"0.6875366",
"0.6869633",
"0.68360835",
"0.6805401",
"0.6801757",
"0.67726505",
"0.6765607",
"0.67592484"
] | 0.77066785 | 1 |
Rules defined for 'who' type of questions | def find_who_rules_scores(question, story_sentence, morphed_sentence):
score = 0
# Rule 1
score += word_match(question, morphed_sentence)
# Rule 2
'''print(type(question))
print(type(morphed_sentence))
print(type(' '.join(morphed_sentence)))
print(' '.join(morphed_sentence))
print(type(story_sentence))'''
story_sentence_morphed = ' '.join(morphed_sentence)
if not is_name_in_sentence_frag(question):
if is_name_in_sentence_frag(story_sentence_morphed):
score += confident
# Rule #3
if not is_name_in_sentence_frag(question):
if "name" in story_sentence_morphed:
score += good_clue
# Rule #4
if is_name_in_sentence_frag(story_sentence_morphed) or is_human_in_sentence(story_sentence_morphed):
score += good_clue
return score | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hey(self, msg):\n if issilence(msg):\n return \"Fine. Be that way.\"\n elif isshouting(msg):\n return \"Woah, chill out!\"\n elif isquestion(msg):\n return \"Sure.\"\n else:\n return \"Whatever.\"",
"def is_question(message, unique_users, q):\n \n line = get_tagged_user(message['text'], unique_users)[1]\n\n if '?' in line:\n return line\n\n START_WORDS = ['can', 'do', 'will', 'how', 'when', 'what', 'where',\n 'why', 'is', 'does', \"doesn't\", 'if', 'for', 'did', 'is']\n\n for word in START_WORDS:\n if line.lower().startswith(word):\n return line\n\n if fuzz.ratio(line, q) > 20:\n return line\n\n return None",
"def who(self, who):\n allowed_values = [\"m\", \"mo\", \"o\"] # noqa: E501\n if who not in allowed_values:\n raise ValueError(\n \"Invalid value for `who` ({0}), must be one of {1}\" # noqa: E501\n .format(who, allowed_values)\n )\n\n self._who = who",
"def testWhois(self):\n timestamp = int(time.time() - 100)\n hostname = self.p.hostname\n req = \"requesting-nick\"\n targ = \"target-nick\"\n self.p.whois(\n req,\n targ,\n \"target\",\n \"host.com\",\n \"Target User\",\n \"irc.host.com\",\n \"A fake server\",\n False,\n 12,\n timestamp,\n [\"#fakeusers\", \"#fakemisc\"],\n )\n lines = [\n \":%(hostname)s 311 %(req)s %(targ)s target host.com * :Target User\",\n \":%(hostname)s 312 %(req)s %(targ)s irc.host.com :A fake server\",\n \":%(hostname)s 317 %(req)s %(targ)s 12 %(timestamp)s :seconds idle, signon time\",\n \":%(hostname)s 319 %(req)s %(targ)s :#fakeusers #fakemisc\",\n \":%(hostname)s 318 %(req)s %(targ)s :End of WHOIS list.\",\n \"\",\n ]\n expected = \"\\r\\n\".join(lines) % dict(\n hostname=hostname, timestamp=timestamp, req=req, targ=targ\n )\n self.check(expected)",
"def question_new_validate():",
"async def nwhy(self, ctx):\n question = nekos.why()\n await ctx.send(question)",
"def other(who):\r\n return 1 - who",
"def w_question(analysis):\n if analysis.sv:\n #Opinion is a what question so we have to make some changes\n if analysis.sv[0].vrb_main[0].endswith('like'):\n verb = analysis.sv[0].vrb_main[0]\n analysis.sv[0].vrb_main[0] = verb[:len(verb) - 4] + 'think+of'\n\n #processing as yes or no question\n phrase = y_o_question(analysis)\n\n #Specific processing for invitation\n if analysis.aim == 'invitation':\n return ['how', 'about'] + phrase[1:]\n\n #Specific processing for classification\n if analysis.aim.startswith('classification'):\n aim_question = other_functions.list_rebuilding(analysis.aim)\n return ['what', 'kind', 'of'] + aim_question[1:] + phrase\n\n #It is an how question\n if other_functions.is_an_adj(analysis.aim) == 1:\n return ['how'] + [analysis.aim] + phrase\n elif analysis.aim == 'manner':\n return ['how'] + phrase\n\n if analysis.aim == 'thing' or analysis.aim == 'situation' or analysis.aim == 'explication' or analysis.aim == 'opinion':\n return ['what'] + phrase\n return ['what'] + [analysis.aim] + phrase",
"def questionType(self):\n raise NotImplementedError",
"def question(update, context):\n bot = context.bot\n user = update.message.from_user\n inc_msg = str.lower(update.message.text)\n\n # answer why questions with a reasons from database\n if 'waarom' in inc_msg:\n\n # return a random reason from file\n with open(REASONS) as file:\n lines = file.readlines()\n msg = random.choice(lines)\n\n # answer other questions with\n else:\n # TODO: introduce random silence\n rng = random.random()\n\n if rng < 0.9 and not 'rob' not in inc_msg:\n return\n options = [\n f\"Vraag het maar niet aan mij, ik ben niet alwetend.\",\n (\"https://lmgtfy.com/?q=\" + inc_msg.replace(\" \", \"+\") + \"&pp=1&s=g&t=w\"),\n f\"Ja he dat weet ik toch ook niet, google dat maar ff {user.first_name}...\"\n ]\n\n msg = random.choice(options)\n time.sleep(HUMAN_DELAY * len(msg))\n\n bot.send_message(chat_id=update.message.chat_id, text=msg,\n reply_to_message_id=update.message.message_id,\n parse_mode=ParseMode.MARKDOWN)",
"def ask(self, question):\n\n\t\t# If you're just trying to test voice detection, you can uncomment\n\t\t# the following 5 lines. Bobby will guess \"yellow flashlight\" and will prompt\n\t\t# you to correct him by saying \"blue flashlight\"\n\n\t\t# fake_answers = [\"no\", \"yes\", \"yes\", \"yes\", \"no\", \"yes\", \"yes\"]\n\t\t# global count\n\t\t# count += 1\n\t\t# print question\n\t\t# return fake_answers[count - 1]\n\n\t\t# self.say(question)\n\t\t# #starts listening for an answer\n\t\t# self.asr.subscribe(\"TEST_ASR\")\n\t\t# data = (None, 0)\n\t\t# while not data[0]:\n\t\t# \tdata = self.mem.getData(\"WordRecognized\")\n\t\t# #stops listening after he hears yes or no\n\t\t# self.asr.unsubscribe(\"TEST_ASR\")\n\t\t#\n\t\t# print data\n\t\t#\n\t\t# for word in self.yes_no_vocab:\n\t\t# \tfor syn in self.yes_no_vocab[word]:\n\t\t# \t\tif data[0] == syn:\n\t\t# \t\t\treturn word",
"def test_model_mc_question(mc_question_one_true):\n assert mc_question_one_true.answer1 == \"a1\"\n assert mc_question_one_true.answer1_correct == True\n assert mc_question_one_true.answer2 == \"a2\"\n assert mc_question_one_true.answer2_correct == False\n assert mc_question_one_true.answer3 == \"a3\"\n assert mc_question_one_true.answer3_correct == False",
"def ShouldI(sc, event):\n options = ['Yes, you should!',\n 'I think that would be best.',\n 'Hrmm... yes!',\n 'Signs point to yes!',\n 'That\\'s the best idea I\\'ve ever heard!',\n 'D\\'uh! Of course!',\n 'Wow! What a great idea!',\n 'What an incredible idea! You\\'re a genius!',\n 'Yes, yes! A thousand times, yes!',\n 'Of course you should!',\n 'I\\'ve never heard of a better idea!',\n 'Why didn\\'t I think of that? You\\'re brilliant!']\n response = random.choice(options)\n sc.api_call('chat.postMessage', as_user='true',\n channel=event['channel'], text=response)",
"def hey(what):\n\tif len(what) == 0 or what.isspace():\n\t\treturn \"Fine. Be that way!\"\n\t\"\"\"Checks if string is in upper case(Yelling)\"\"\"\n\tif what.isupper():\n\t\treturn \"Whoa, chill out!\"\n\t\"\"\"Iterates through string backwards looking for a ?, stopping if a non-\n\twhitespace character is found(Question)\"\"\"\n\tfor character in reversed(what):\n\t\tif character == '?':\n\t\t\treturn \"Sure.\"\n\t\tif character != \" \":\n\t\t\tbreak\n\t\"\"\"Catch all response for any other input\"\"\"\n\treturn \"Whatever.\"",
"def score(self, who):\n if who == 'they':\n reward = -1\n self.their_score += 1\n elif who == 'we':\n reward = 1\n self.our_score += 1\n else:\n raise ValueError(f\"who must be 'we' or 'they', not {who}\")\n\n self._reset_ball()\n return reward",
"def func(self):\n\n caller = self.caller\n\n if not self.lhs or not self.rhs:\n caller.msg(\"Usage: whisper <player> = <message>\")\n return\n\n receiver = caller.search(self.lhs)\n\n if not receiver:\n return\n\n if caller == receiver:\n caller.msg(\"You can't whisper to yourself.\")\n return\n\n speech = self.rhs\n\n # Feedback for the object doing the talking.\n caller.msg( 'You whisper to %s, \"%s|n\"' % ( getNameAnsi( receiver ), speech ) )\n\n # Build the string to emit to receiver.\n emit_string = '%s whispers, \"%s|n\"' % ( getNameAnsi( caller ), speech )\n receiver.msg(text=(emit_string, {\"type\": \"whisper\"}), from_obj=caller)",
"def test_fluents(self):\n kb = logic.PropKB()\n kb.tell(logic.expr('Has(John, Cat)'))\n kb.tell(logic.expr('Has(John, Computer)'))\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr(\n 'Has(John, ?w)')), [{'?w': 'Computer'}, {'?w': 'Cat'}])\n kb.define_fluent(logic.expr('Age'))\n kb.tell(logic.expr('Age(John, 35)'))\n kb.tell(logic.expr('Age(John, 40)'))\n self.assertAllBindingsEqual(\n kb.ask_all(logic.expr('Age(John, ?a)')), [{'?a': '40'}])\n self.assertRaises(logic.Error, lambda: kb.define_fluent('Foo'))",
"def hey(what):\n if what == what.upper() and not only_numbers(what):\n return 'Whoa, chill out!'\n if what[-1:] == '?':\n return 'Sure.'\n if only_silence(what):\n return 'Fine. Be that way!'\n return 'Whatever.'",
"def hey(phrase):\n\n # Bob has spent years building up his knowledge base. He knows\n # four whole things.\n knowledge = (nostatement, anger, question, easteregg)\n\n # Loop through all of his thoughts. Bob isn't much of a multi-tasker\n for thought in knowledge:\n response = thought(phrase)\n\n # if the thought registered, spit it out! Don't think to hard!\n if response:\n return response\n\n # If he didn't understand what you said, he gives up and returns\n # to being dumb.\n return 'Whatever.'",
"async def wouldyourather(message: discord.Message, opt: options=None):\n # If there are no options, the bot will ask the questions (if there are any to choose from)\n if opt is None:\n assert message.channel.id not in sessions, \"**A would you rather session is already in progress.**\"\n sessions.add(message.channel.id)\n\n assert db.data[\"questions\"], \"**There are ZERO questions saved. Ask me one!**\"\n\n question = random.choice(db.data[\"questions\"])\n choices = question[\"choices\"]\n await client.say(message, \"Would you rather **{}** or **{}**?\".format(*choices))\n\n timeout = db.data[\"timeout\"]\n replied = []\n\n # Wait for replies from anyone in the channel\n while True:\n reply = await client.wait_for_message(timeout=timeout, channel=message.channel,\n check=lambda m: m.author not in replied)\n # Break on timeout\n if reply is None:\n break\n\n # Check if the choice is vlaid\n choice = get_choice(choices, reply.content)\n if choice is None:\n continue\n\n # Register that this author has replied\n replied.append(reply.author)\n\n # Update the answers in the DB\n # We don't care about multiples, just the amount (yes it will probably be biased)\n question[\"answers\"][choice] += 1\n\n name = reply.author.display_name\n response = random.choice(db.data[\"responses\"]).format(name=name, NAME=name.upper(), choice=choices[choice])\n await client.say(message, response)\n\n # Say the total tallies\n await client.say(message, \"A total of {0} would **{2}**, while {1} would **{3}**!\".format(\n *question[\"answers\"], *choices))\n db.save()\n sessions.remove(message.channel.id)\n\n # Otherwise, the member asked a question to the bot\n else:\n db.data[\"questions\"].append(dict(\n choices=list(opt),\n answers=[0, 0]\n ))\n db.save()\n\n answer = random.choice(opt)\n await client.say(message, \"**I would {}**!\".format(answer))",
"def speak(self, what):\n if isinstance(what, str):\n return self.whatever()\n\n what = self.clean(what)\n if not what or what == '':\n return self.silence()\n if what.isupper():\n return self.shouting()\n if what.endswith('?'):\n return self.asking()\n return self.whatever()",
"def test_one_trick_pony(self):\n self.validate_goal_for('game-20120625-114828-af02f875.html',\n u'WanderingWinder',\n 'OneTrickPony')",
"def question_reply(sentence,x):\n sentence = sentence.lower()\n \n global question_flag # records question asked by bot\n if len(sentence) < 1:\n return default_reply() # default reply if input cannot be comprehended\n if is_question(sentence):\n return \"I'm sorry, I can't answer that.\"\n if is_firstperson(sentence):\n sentence = \" \".join(sentence.split()[1:])\n sentence = punctuation_remover(sentence)\n\n if QUESTIONS[x] == NAME_QUESTION: \n return name_reply(sentence) # hello, <name>\n elif QUESTIONS[x] == AGE_QUESTION:\n return age_reply(sentence) # reply depending on how old the user is\n elif QUESTIONS[x] == FEELING_QUESTION:\n return feeling_reply(sentence)\n elif QUESTIONS[x] == WHY_QUESTION:\n return why_reply(sentence) # does the user know why they may feel this way? \n # bot may offer simple advice based on detected keywords\n elif QUESTIONS[x] == PHYSICAL_QUESTION:\n return physical_reply(sentence)\n elif QUESTIONS[x] == SOCIAL_QUESTION:\n return social_reply(sentence)\n elif QUESTIONS[x] == TIME_QUESTION:\n return time_reply(sentence) # how long has the user's symptoms persisted?\n elif QUESTIONS[x] == SUICIDE_QUESTION:\n return suicide_resp(sentence)",
"def personal_questions(user):\n questions = {\n 1: 'GBB: How long have you been a fan of the Packers?',\n 2: 'GBB: Why are you a fan of the Packers?',\n 3: \"GBB: What team do you hate the most?\",\n 4: \"GBB: Who's your favorite player on the Packers?\",\n 5: \"GBB: Who's your least favorite player on the Packers?\",\n 6: \"GBB: Do you come from a family of Packer fans, or are you a lone ranger?\"\n }\n\n while True:\n num = random.randint(1, 6)\n answered = user['personal questions asked']\n if num not in answered:\n user['personal questions asked'].append(num)\n return questions[num]\n if len(answered) == len(questions.keys()):\n return \"GBB: Look's like I know you so well that I don't even need to ask you a question!\"",
"def user_question():\n return input('What would you like? (espresso/latte/cappuccino): ')",
"def setup(self, *args):\n\n responses = [\n ('Yes.', 'eq'),\n ('No.', 'eq'),\n ('Nope.', 'eq'),\n ('Maybe.', 'eq'),\n ('Possibly.', 'eq'),\n ('It could be.', 'eq'),\n (\"No. No, I don't think so.\", 'eq/2'),\n ('Without a doubt.', 'eq/2'),\n ('I think... Yes.', 'eq/2'),\n ('Heck yes!', 'eq/2'),\n ('Maybe. Possibly. It could be.', 'eq/2'),\n ('Ask again later.', 'eq/3'),\n (\"I don't know.\", 'eq/3'),\n (\"I'm sorry, I was thinking of bananas\", 'eq/100'),\n ]\n\n responses += [(x, 'eq/10') for x in obliques]\n self.advices = [(x, 1) for x in obliques]\n total_prob = 0\n real_resp = []\n evens = []\n for resp, prob in responses:\n if isinstance(prob, str):\n if prob.startswith('eq'):\n sp = prob.split('/')\n if len(sp) == 1:\n evens.append((resp, 1))\n else:\n div = int(sp[1])\n evens.append((resp, 1.0 / div))\n\n else:\n real_resp.append((resp, prob))\n total_prob += prob\n\n # Share is the probability of a \"eq\" probability. Share/2 would be the\n # probability of a \"eq/2\" probability.\n share = (1 - total_prob) / sum(div for _, div in evens)\n for resp, divisor in evens:\n real_resp.append((resp, share * divisor))\n\n self.responses = real_resp\n self.is_question = re.compile('.*\\?(\\?|!)*$')",
"def command_who(self, bot, update):\n\n messages = [\n 'Myles Braithwaite lives in Toronto where he runs a small '\n 'consluting company called [Monkey in your Soul]'\n '(https://monkeyinyoursoul.com/) (you should hire him because '\n \"he's awesome).\",\n 'You should follow him on [Twitter](https://twitter.com/mylesb) '\n 'or [Instagram](https://instagram.com/myles).',\n 'You can find his programming stuff on [GitHub]'\n '(https://github.com/myles) or [CodePen]'\n '(http://codepen.io/mylesb/).'\n ]\n\n self.send_messages(bot, update, messages)",
"def notAFan_questions(user):\n questions = {\n 1: \"GBB: How old are you? \",\n 2: \"GBB: What do you like to do in your free time? \",\n 3: \"GBB: What is your ethnicity? \",\n 4: \"GBB: What did you eat for breakfast? \",\n 5: \"GBB: Are you an early bird or a night owl? \",\n 6: \"GBB: Do you like football? \"\n }\n\n while True:\n num = random.randint(1, 6)\n answered = user['personal questions asked']\n if num not in answered:\n user['personal questions asked'].append(num)\n return questions[num]\n if len(answered) == len(questions.keys()):\n return \"GBB: Looks like I know you so well that I don't even need to ask you a question! Type anything to proceed.\"",
"def test_quick_answer(self):\n pass",
"def check_hint_condition(self, hxml_set, student_answers):\r\n pass"
] | [
"0.57526445",
"0.56380576",
"0.5491493",
"0.53774744",
"0.53468835",
"0.53228533",
"0.5320208",
"0.53180206",
"0.52792186",
"0.5274273",
"0.5266316",
"0.52485275",
"0.5247412",
"0.5235203",
"0.52073026",
"0.51886165",
"0.5178019",
"0.51778334",
"0.51548207",
"0.5142385",
"0.5122224",
"0.5104897",
"0.5089786",
"0.50832105",
"0.5079195",
"0.507654",
"0.50746775",
"0.5068038",
"0.5057527",
"0.505524"
] | 0.59680885 | 0 |
Merge the stats_dict with the SQL DB entry, adding where able. Raises ValueError when stats_dict contains an invalid key. | async def log_stats(self, stats_dict):
cmd = "PRAGMA table_info(trainer_stats)"
cur = self.sql.cur
data = cur.execute(cmd).fetchall()
valid_keys = []
for entry in data:
valid_keys.append(entry['name'])
self.log.info(valid_keys)
for key in stats_dict:
if key not in valid_keys:
raise ValueError()
trainer_id = self.trainer_id
for key in stats_dict:
value = stats_dict[key]
cmd = f"""UPDATE trainer_stats
SET {key} = {key} + :value
WHERE trainer_id = :trainer_id"""
cur.execute(cmd, locals())
await self.sql.commit(now=True)
self.log.info("log completed") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_stats(episodal_stats, k, current_stats):\n # Check if the key exists in on of the dicts and retrieve the type of the corresponding value\n if k in current_stats:\n stat_type = type(current_stats[k])\n elif k in episodal_stats:\n stat_type = type(episodal_stats[k])\n else:\n raise KeyError(\"Key not found in supplied env_info dict which is used to update the current stats.\")\n\n # Define how data should be merged depending on the type into a single dict and which default value to use if data\n # does not yet exists in one of the merging dicts\n if stat_type is int or stat_type is float or stat_type is bool:\n return episodal_stats.get(k, 0) + current_stats.get(k, 0)\n elif stat_type is list:\n return np.array(episodal_stats.get(k, np.zeros_like(current_stats.get(k))), dtype=int) + np.array(\n current_stats.get(k, []), dtype=int)",
"def add_stats(self, game):\n with self.con:\n cursor = self.con.cursor()\n queryString = \"INSERT INTO stats(game_id, team_id\"\n homeValueString = \" VALUES(?, ?\"\n awayValueString = homeValueString\n \n homeValues = [game.game_id, self.teams[game.home_team]]\n awayValues = [game.game_id, self.teams[game.away_team]]\n \n for key in game.home_stats:\n\n queryString += ', '\n awayValueString += ', '\n homeValueString += ', '\n \n homeValueString += '?'\n awayValueString += '?'\n \n homeValues.append(game.home_stats[key])\n awayValues.append(game.away_stats[key])\n \n queryString += statsToSql[key]\n \n queryString += ')'\n homeValueString += ')' \n awayValueString += ')'\n \n \n cursor.execute(queryString + homeValueString, homeValues)\n cursor.execute(queryString + awayValueString, awayValues)",
"def add_metrics(_dict):\n for key, itr in _dict.items():\n if key not in self.metric_cols:\n self.metric_cols.append(key)",
"def insert_dict(self, dict_data):\n fields = ', '.join([f for f in dict_data])\n values = ', '.join([str(dict_data[f]) for f in dict_data])\n sql = (\"INSERT INTO snapshot_log (timestamp, \" + fields +\n \") VALUES (NOW(), \" + values + \" )\")\n\n cur = self.cursor()\n try:\n cur.execute(sql)\n #self.conn.commit()\n except sqlc.Error as e:\n print (\"Error #{0}: {1}\\nCouldn't insert\\nsql={2}\"\n .format(e.errno, e.msg, sql))\n except Exception as e:\n print (\"Error: {0}\\nCouldn't insert\\nsql={1}\"\n .format(e.message, sql))\n finally:\n self.close()",
"def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return\n\n for key, other in six.iteritems(snapshot.__stats_table):\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)",
"def _update_stats(stats, train_loss=None, train_accuracy=None, test_loss=None, test_accuracy=None,\n test_confusion_matrix=None):\n if train_loss:\n stats['train_loss'].append(train_loss)\n if train_accuracy:\n stats['train_accuracy'].append(train_accuracy)\n if test_loss:\n stats['test_loss'].append(test_loss)\n if test_accuracy:\n stats['test_accuracy'].append(test_accuracy)\n if test_confusion_matrix is not None:\n stats['test_confusion_matrix'].append(test_confusion_matrix)\n\n return stats",
"def fix_db_stats(invalid_books, invalid_translations, invalid_subreddit):\n\t\n\tupdate_book_stats(invalid_books, is_edit_or_delete = True)\n\tupdate_translation_stats(invalid_translations, is_edit_or_delete = True)\n\tupdate_subreddit_stats(invalid_subreddit, is_edit_or_delete = True)",
"def parse_stats(stats: dict, res: dict):\n for k, v in stats.items():\n if k not in res.keys():\n res.update({k: {}})\n if isinstance(v, list):\n for element in v:\n for metric, value in element.items():\n res[k].update({metric: [value]})",
"def update_stats_dict(stats_dict: dict) -> dict:\n dict_copy = {}\n\n for node in stats_dict[\"nodes\"]:\n if node[\"type\"] == \"osd\":\n dict_copy.update({node[\"id\"]: node})\n elif node[\"type\"] == \"host\":\n dict_copy.update({node[\"name\"]: node})\n\n dict_copy.update({\"stray\": stats_dict[\"stray\"]})\n dict_copy.update({\"summary\": stats_dict[\"summary\"]})\n\n return dict_copy",
"def _add_from_dict(self, row) :\n\n data = [row.get(col, None) for col in self.cols]\n self._insert_internal(self.cols, data)",
"def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)",
"def update_query_metadata_and_stats(\n query: Query,\n sql: str,\n timer: Timer,\n stats: MutableMapping[str, Any],\n query_metadata: SnubaQueryMetadata,\n query_settings: Mapping[str, Any],\n trace_id: Optional[str],\n status: QueryStatus,\n) -> MutableMapping[str, Any]:\n stats.update(query_settings)\n sql_anonymized = format_query_anonymized(query).get_sql()\n\n query_metadata.query_list.append(\n ClickhouseQueryMetadata(\n sql=sql,\n sql_anonymized=sql_anonymized,\n stats=stats,\n status=status,\n profile=generate_profile(query),\n trace_id=trace_id,\n )\n )\n\n return stats",
"def __add_missing_entries_to_dict(self, member_dict: dict) -> dict:\n try:\n self.dict = member_dict\n self.dict['Voorletters'] = member_dict['Voornaam'][0].upper() + '.'\n self.dict['Dag'] = member_dict['GeboorteDatum'].split('-')[0]\n self.dict['Maand'] = member_dict['GeboorteDatum'].split('-')[1]\n self.dict['Jaar'] = member_dict['GeboorteDatum'].split('-')[2]\n self.dict['Postcode'] = member_dict['Postcode'].replace(' ', '')\n r = member_dict['Adres'].split(' ')\n self.dict['Huisnr'] = r[len(r) - 1]\n return self.dict\n\n except Exception as e:\n raise Exception(\"Toevoegen lid in NAS mislukt: \" + str(e))",
"def _prepare_stats_table(self, pinfos):\n\n stats_tbl = OrderedDict()\n stats_tbl[\"Title\"] = OrderedDict()\n for res in self.rsts:\n stats_tbl[res.reportid] = OrderedDict()\n\n for pinfo in pinfos:\n for colname in (pinfo[\"colname\"], pinfo[\"xcolname\"]):\n if colname in stats_tbl[\"Title\"]:\n continue\n\n # Each column name is represented by a row in the statistics table. Fill the \"Title\"\n # column.\n title_dict = stats_tbl[\"Title\"][colname] = OrderedDict()\n defs = self._refdefs.info[colname]\n\n if defs.get(\"unit\") == \"nanosecond\":\n # Convert nanoseconds to microseconds.\n unit = \"us\"\n else:\n unit = defs.get(\"short_unit\", \"\")\n\n title_dict[\"colname\"] = colname\n if unit:\n title_dict[\"colname\"] += f\", {unit}\"\n title_dict[\"coldescr\"] = defs[\"descr\"]\n\n title_dict[\"funcs\"] = OrderedDict()\n for funcname in self._stats_funcs:\n if funcname in self.rsts[0].cstats[colname]:\n title_dict[\"funcs\"][funcname] = RORawResult.get_stat_func_descr(funcname)\n\n # Now fill the values for each result.\n for res in self.rsts:\n res_dict = stats_tbl[res.reportid][colname] = OrderedDict()\n res_dict[\"funcs\"] = OrderedDict()\n\n for funcname in title_dict[\"funcs\"]:\n val = res.cstats[colname][funcname]\n fmt = \"{}\"\n if defs.get(\"unit\") == \"nanosecond\" and \"index\" not in funcname:\n val /= 1000\n fmt = \"{:.2f}\"\n if defs[\"type\"] == \"float\":\n fmt = \"{:.2f}\"\n\n fdict = res_dict[\"funcs\"][funcname] = OrderedDict()\n fdict[\"val\"] = fmt.format(val)\n fdict[\"raw_val\"] = val\n\n if self._refres.reportid == res.reportid:\n fdict[\"hovertext\"] = \"This is the reference result, other results \" \\\n \"are compared to this one.\"\n continue\n\n ref_fdict = stats_tbl[self._refres.reportid][colname][\"funcs\"][funcname]\n change = val - ref_fdict[\"raw_val\"]\n if ref_fdict[\"raw_val\"]:\n percent = (change / ref_fdict[\"raw_val\"]) * 100\n else:\n percent = change\n change = fmt.format(change) + unit\n percent = \"{:.1f}%\".format(percent)\n fdict[\"hovertext\"] = f\"Change: {change} ({percent})\"\n\n return stats_tbl",
"def _upsert_ad_performance(ad_insights: [adsinsights.AdsInsights], con: sqlite3.Connection):\n con.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS ad_performance (\n date DATE NOT NULL,\n ad_id BIGINT NOT NULL,\n device TEXT NOT NULL,\n performance TEXT NOT NULL,\n PRIMARY KEY (ad_id, device)\n);\"\"\")\n con.executemany(\"INSERT OR REPLACE INTO ad_performance VALUES (?,?,?,?)\",\n _to_insight_row_tuples(ad_insights))",
"def consolidate_other(self):\n record = self.db[self.args['cstats_table']].find_one({'type': 'client'})\n if not record:\n self.log.critical('Could not get the \"client\" key in the \"cstats_table\"')\n return\n for k in record.keys():\n if k in ['_id', 'type', 'stats']:\n continue\n self.other[k] = record[k]\n self.stats.update(record.get('stats', {}))",
"def add_to_dict_fail( self, key_0, key_1, list_2 ):\n dict_1_new = { key_1: list_2 }\n\n\n dict_1_current = self._dd_dict.get( key_1 )\n if dict_1_current is None:\n #dict_1_new = dict_1_new\n pass\n\n else:\n dict_1_new = {**dict_1_current, **dict_1_new }\n\n\n dict_0_new = { key_0: dict_1_new } # maybe a merge ??\n\n dict_0_current = self._dd_dict.get( key_0 )\n\n if dict_0_current is None:\n dict_0_new = dict_0_new\n\n else:\n dict_0_new = {**dict_0_current, **dict_0_new }\n\n dict_0_new = {**dict_0_current, **dict_0_new }\n\n self._dd_dict = { **self._dd_dict, **dict_0_new }\n\n print( self._dd_dict )\n\n return self._dd_dict",
"def _add_record(days_dict, record, key):\n days_dict[key] = {\n \"Name\": record[\"title\"],\n \"Owner\": record[\"owner\"],\n \"Severity\": record[\"severity\"],\n \"Created\": (time.strftime(SIRPPipeline.TIME_FMT, time.gmtime(record[\"createdAt\"] / 1000.0))),\n }\n if \"endDate\" in record:\n days_dict[key].update(\n {\n \"Closed\": (time.strftime(SIRPPipeline.TIME_FMT, time.gmtime(record[\"endDate\"] / 1000.0),)),\n \"Resolution\": record[\"resolutionStatus\"],\n }\n )",
"def add_statistics(self, stat_col):\n # Those will be displayed.\n stat_col.add_statistics(self.key_precision, '{:05.4f}')\n stat_col.add_statistics(self.key_recall, '{:05.4f}')\n stat_col.add_statistics(self.key_f1score, '{:05.4f}')\n # That one will be collected and used by aggregator.\n stat_col.add_statistics(self.key_f1score+'_support', None)",
"def store(self, **stats):\n if self.first_row:\n self.log_headers = list(stats.keys())\n for key in stats:\n assert key in self.log_headers, f\"Can't introduce a new key that you didn't include before: {key}\"\n\n # Write to output file\n if self.first_row:\n self.file_writer.writerow(self.log_headers)\n self.file_writer.writerow(stats.values())\n self.output_file.flush()\n\n # Display in stdout\n if self.log_freq > 0 and self.counter % self.log_freq == 0:\n _print_table(stats)\n\n self.first_row = False\n self.counter += 1",
"def basic_stats(db):\n rps = len(list(db['rp'].keys()))\n users = len(list(db['users'].keys()))\n logins = db['logins']\n return {\"rps\": rps, \"users\": users, \"logins\": logins}",
"def _fill_mean_dict(self, running_metrics: Dict, mean_dict: Dict) -> None:\n for key, value in running_metrics.items():\n mean = np.mean(value)\n mean_dict[key].append(mean)",
"def add_dict(self, indep, dep):\n dfull = {IND: len(self), INDEP: indep.copy(), DEP: dep}\n validate_row(dfull)\n check_objects(dfull)\n if settings.CONVERT_SCALAR_ARRAYS:\n scalarise(dfull)\n if settings.PRINT_UPDATES:\n print(self.show([dfull]))\n self.append(dfull)\n self._combine(dfull)",
"def _merge(acc: Dict[str, str], cur: Any) -> Dict[str, str]:\n parsed = _parse_feature(cur)\n acc[\"timestamp\"] = parsed[\"timestamp\"]\n acc[\"lat\"] = parsed[\"lat\"]\n acc[\"lon\"] = parsed[\"lon\"]\n key = parsed[\"property\"]\n val = parsed[\"value\"]\n\n acc[key] = val\n\n return acc",
"def _process_stats_data(self, trans_stats_data):\n if not isinstance(trans_stats_data, dict):\n return {}\n for package, stats in trans_stats_data.items():\n stats[\"Remaining\"] = float(\"{0:.2f}\".format(stats.get(\"Remaining\")))\n trans_stats_data[\"Calculated on\"] = \"Messages\"\n return trans_stats_data",
"def add_team_derived_stats(stats, opp_stats):\n stats['FGP'] = gen_derived_var(stats['FG'], stats['FGA'])\n stats['FTP'] = gen_derived_var(stats['FT'], stats['FTA'])\n stats['THRP'] = gen_derived_var(stats['THR'], stats['THRA'])\n stats['EFGP'] = gen_derived_var(stats['FG'] + 0.5 *\n stats['THR'], stats['FGA'])\n stats['TSA'] = stats['FGA'] + 0.44 * stats['FTA']\n stats['TSP'] = gen_derived_var(stats['PTS'], 2 * stats['TSA'])\n stats['THRAr'] = gen_derived_var(stats['THRA'], stats['FGA'])\n stats['FTAr'] = gen_derived_var(stats['FTA'], stats['FGA'])\n stats['TWOAr'] = gen_derived_var(stats['TWOA'], stats['FGA'])\n stats['TWOP'] = gen_derived_var(stats['TWO'], stats['TWOA'])\n stats['ORBr'] = gen_derived_var(stats['ORB'], stats['TRB'])\n stats['DRBr'] = gen_derived_var(stats['DRB'], stats['TRB'])\n stats['AST_to_TOV'] = gen_derived_var(stats['AST'], stats['TOV'])\n stats['STL_to_TOV'] = gen_derived_var(stats['STL'], stats['TOV'])\n stats['FIC'] = (stats['PTS'] + stats['ORB'] + 0.75 * stats['DRB'] +\n stats['AST'] + stats['STL'] + stats['BLK'] - 0.75 *\n stats['FGA'] - 0.375 * stats['FTA'] -\n stats['TOV'] - 0.5 * stats['PF'])\n stats['FT_to_FGA'] = gen_derived_var(stats['FT'], stats['FGA'])\n\n stats['OPOS'] = gen_possessions(stats, opp_stats)\n stats['DPOS'] = gen_possessions(opp_stats, stats)\n stats['PACE'] = 48 * ((stats['OPOS'] + stats['DPOS']) / (2 * (float(stats['MP']) / 5)))\n\n stats['ORBP'] = stats['ORB'] / (stats['ORB'] + opp_stats['DRB'])\n stats['DRBP'] = stats['DRB'] / (stats['DRB'] + opp_stats['ORB'])\n stats['TRBP'] = stats['TRB'] / (stats['TRB'] + opp_stats['TRB'])\n stats['ASTP'] = stats['AST'] / stats['FG']\n stats['STLP'] = stats['STL'] / stats['DPOS']\n stats['BLKP'] = stats['BLK'] / opp_stats['TWOA']\n stats['TOVP'] = stats['TOV'] / stats['OPOS']\n # stats['+/-'] = stats['+/-'] / stats['N']",
"def Merge(self,\n other_map: 'BaseTypedMap',\n reference_map: Optional[dict] = None) -> None:\n assert isinstance(other_map, self.__class__)\n # We should only ever encounter a single updated BuildStats for an\n # expectation/builder/step combination. Use the reference map to determine\n # if a particular BuildStats has already been updated or not.\n reference_map = reference_map or copy.deepcopy(self)\n for key, value in other_map.items():\n if key not in self:\n self[key] = value\n else:\n if isinstance(value, dict):\n self[key].Merge(value, reference_map.get(key, {}))\n else:\n assert isinstance(value, BuildStats)\n # Ensure we haven't updated this BuildStats already. If the reference\n # map doesn't have a corresponding BuildStats, then base_map shouldn't\n # have initially either, and thus it would have been added before\n # reaching this point. Otherwise, the two values must match, meaning\n # that base_map's BuildStats hasn't been updated yet.\n reference_stats = reference_map.get(key, None)\n assert reference_stats is not None\n assert reference_stats == self[key]\n self[key] = value",
"def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)",
"def write_results_to_db(data):\n try:\n vul_name = str(data.get('vulnerability',{}).get('name',''))\n\n severity_dict = {4:3,3:3,2:2,1:1,0:0} \n cvss_dict = {3:7.5,2:4.5,1:2.5,0:0} \n\n if vul_name:\n app_name = data.get('host',{}).get('name')\n app_obj = Application.objects.get(name=app_name)\n tool = data.get('vulnerability',{}).get('tool') \n\n severity = data.get('vulnerability',{}).get('severity')\n if severity:\n severity = severity_dict.get(int(severity))\n\n cwe_id = data.get('vulnerability',{}).get('cwe',{}).get('cwe_id',0) \n\n confidence = int(data.get('vulnerability',{}).get('confidence',2)) \n\n\n burp_confidence_dict = {\n \"Certain\":3,\n \"Firm\":2,\n \"Tentative\":1,\n } \n zap_confidence_dict = {\n \"Medium\":2,\n \"Low\":1 \n } \n data['vulnerability']['vul_type'] = data['vulnerability'].get('vul_type','Configuration')\n data['vulnerability']['owasp'] = data['vulnerability'].get('owasp','Uncategorized')\n data['vulnerability']['cvss'] = data['vulnerability'].get('cvss',cvss_dict.get(severity))\n data['vulnerability']['cwe'] = {\n 'cwe_id': cwe_id \n }\n if tool == 'Burp':\n if confidence == 3:\n data['vulnerability']['is_false_positive'] = False\n elif confidence == 2:\n data['vulnerability']['is_false_positive'] = False\n elif confidence == 1:\n data['vulnerability']['is_false_positive'] = True\n elif tool == 'ZAP':\n if confidence == 1:\n data['vulnerability']['is_false_positive'] = True\n elif confidence == 2:\n data['vulnerability']['is_false_positive'] = False\n vuls = Vulnerability.objects.select_related('vul','scan__application').filter(name=vul_name,tool=tool,is_false_positive=True,scan__application__name=app_name) \n if vuls.exists():\n data['vulnerability']['is_false_positive'] = True\n \n cwe = str(data['vulnerability'].get('cwe',{}).get('cwe_id',0))\n es_reference = data.get('scan_reference',{}).get('es_reference','')\n if not cwe:\n scan_obj = Scan.objects.select_related('application__org').get(name=es_reference)\n org_obj = scan_obj.application.org \n if cwe: \n data['vulnerability']['cwe']['cwe_id'] = cwe\n data['vulnerability']['severity'] = severity \n else:\n if severity == 0 or severity == 1:\n if severity == 0:\n cvss = 0\n else:\n cvss = 2\n data['vulnerability']['vul_type'] = 'Configuration'\n data['vulnerability']['owasp'] = 'Security Misconfiguration'\n data['vulnerability']['cvss'] = cvss\n data['vulnerability']['cwe']['cwe_id'] = 0\n data['vulnerability']['name'] = vul_name \n evidences = data.get('vulnerability',{}).get('evidences',[])\n create_vul(data,es_reference,confidence,severity,cwe,tool,evidences)\n info_debug_log(event='Write false positive data to ES',status='success')\n except BaseException as e:\n log_exception(e)\n critical_debug_log(event=e,status='failure')",
"def aggregate_log_dict(agg_dict, new_dict) -> dict:\n for k in new_dict:\n # init new if not present\n if k not in agg_dict:\n agg_dict[k] = {\n 'n': 0,\n 'sum': 0.0,\n 'max': new_dict[k],\n 'min': new_dict[k],\n }\n # aggregate\n agg_dict[k]['n'] += 1\n agg_dict[k]['sum'] += new_dict[k]\n agg_dict[k]['max'] = max(new_dict[k], agg_dict[k]['max'])\n agg_dict[k]['min'] = min(new_dict[k], agg_dict[k]['min'])\n # TODO: add more stats (e.g. stdev, max, minin the future)\n\n return agg_dict"
] | [
"0.5904677",
"0.56306887",
"0.56248367",
"0.54878306",
"0.54276437",
"0.5413086",
"0.5379965",
"0.5305739",
"0.51488465",
"0.51462114",
"0.5103581",
"0.5051371",
"0.50435966",
"0.5016924",
"0.5011857",
"0.5008543",
"0.49911267",
"0.49539304",
"0.49530947",
"0.49171865",
"0.49031708",
"0.4886292",
"0.48855197",
"0.48575053",
"0.4831707",
"0.48226002",
"0.48190245",
"0.48126215",
"0.480139",
"0.48004615"
] | 0.59542394 | 0 |
Return distance from the front sensors (left, center, right). | def get_front_distances(self):
return np.array([self.get_distance(name) for name in self.front_distance_sensors]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_distance(self, target): \r\n sensor_transform = self._sensors['rgb_front'].get_transform()\r\n\r\n distance = np.sqrt(\r\n (sensor_transform.location.x - target.x) ** 2 +\r\n (sensor_transform.location.y - target.y) ** 2 +\r\n (sensor_transform.location.z - target.z) ** 2)\r\n\r\n return distance",
"def getWidth(self):\n\t\tif (self.position==[]):\n\t\t\treturn 0\n\t\treturn abs(self.position[1][0]-self.position[0][0])",
"def left_distance(self):\n return self.x",
"def getDistance(self):\n taBox = (self.thor * self.tvert)/(720*960) #box area as percentage of whole\n if(taBox==None or taBox<=0): return -1\n const = 4 * math.tan(0.471)*math.tan(0.3576)\n return math.sqrt((self.abox)/(const*taBox))",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) **0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def calcDistance(self, left, right):\n\n return math.fabs(right-left)",
"def distance_to_origin(self):\n return np.sqrt(self.x ** 2 + self.y ** 2)",
"def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre",
"def calculate_distance_edge(self):\n if self.mu > 0:\n # right interface is intersected next\n dx = self.cell_xr - self.x\n self.next_cell_index = self.cell_index + 1\n else:\n # left interface is intersected next\n dx = self.cell_xl - self.x\n self.next_cell_index = self.cell_index - 1\n\n return dx / self.mu",
"def distanceFromOrigin(self):\n return ((self.x)**2+(self.y)**2)**0.5",
"def distance_between_wheels():",
"def get_distance(self) -> int:\n return self.get_measurement_data().distance",
"def getDistance(self):\n return sqrt(self.state[0] * self.state[0] + self.state[2] * self.state[2])",
"def _center_distance(self):\n # Split positions in segments of two points :\n cut = np.vsplit(self.a_position, int(self.a_position.shape[0]/2))\n # Get center position and starting line position :\n center = np.mean(cut, axis=1)\n\n # ============ EUCLIDIAN DISTANCE ============\n diff = np.sqrt(np.square(center[:, np.newaxis, :] - center).sum(2))\n diff[np.tril_indices_from(diff)] = np.inf\n\n return center, diff",
"def distance_from_origin(self) -> float:\n return self._distance_from_origin",
"def DistanceFromOrigin(self):\r\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5",
"def displacement(self):\n return self[0].distance(self[-1])",
"def top_distance(self):\n return self.y",
"def manhatam_distance(self) -> int:\n return abs(self.north) + abs(self.east)",
"def center(self):\n return (self.upper_right + self.lower_left) * 0.5",
"def calc_dist(self, points): \n dist_x = [self._current_pose.position.x - p.pose.position.x for p in points]\n dist_y = [self._current_pose.position.y - p.pose.position.y for p in points]\n dist = np.hypot(dist_x,dist_y) \n if len(dist) > 0:\n return min(dist) \n else: \n return 0",
"def get_distance(self, sensor):\n if sensor not in self.distance_sensors:\n raise ValueError('sensor should be one of {}!'.format(self.distance_sensors))\n\n return 255 - self._io.last_state['distance'][sensor]",
"def get_distance(self):\n\n # Activate trigger\n self.trigger()\n\n # Detect rising edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.RISING, timeout=2)\n if channel is None:\n # Timeout on wait of rising interrupt\n return None\n else:\n # Rising edge detected, save pulse start\n pulse_start = time.time()\n\n\n # Detect falling edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.FALLING, timeout=2)\n if channel is None:\n # Timeout on wait of falling interrupt\")\n return None\n else:\n # Falling edge detected, save pulse end\n pulse_end = time.time()\n\n # Calculated pulse width in microseconds (x1mln)\n pulse_width = (pulse_end - pulse_start)*1000000\n\n # Return distance in cm\n return pulse_width / 58"
] | [
"0.7017235",
"0.6549371",
"0.64581984",
"0.6379817",
"0.6338722",
"0.63304055",
"0.63304055",
"0.63304055",
"0.63304055",
"0.63304055",
"0.63304055",
"0.63304055",
"0.6323968",
"0.6294477",
"0.6253321",
"0.624666",
"0.6196004",
"0.60975796",
"0.6055105",
"0.6030134",
"0.60296077",
"0.6018451",
"0.59846336",
"0.59388006",
"0.59113973",
"0.5910715",
"0.59074706",
"0.5888614",
"0.58845943",
"0.58760875"
] | 0.69127107 | 1 |
Return distance from the given sensor. See Rosa.distance_sensors for a list of all available sensors. | def get_distance(self, sensor):
if sensor not in self.distance_sensors:
raise ValueError('sensor should be one of {}!'.format(self.distance_sensors))
return 255 - self._io.last_state['distance'][sensor] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance_sensor(unit):\n\n\tsensor_name = \"baseBoard\"\n\treg_addr = 24\n\tdata_len = 56\n\tregist_sensor(sensor_name, reg_addr, data_len)\n\n\tdata = rospy.wait_for_message(\"MediumSize/SensorHub/Range\", Range, 2)\n\tdistance = data.range\n\t# transfer sensor data to target unit\n\tif unit == \"cm\":\n\t\tresult = distance / 10.0\n\telse:\n\t\tresult = distance\n\n\tdelete_sensor(sensor_name)\n\treturn result",
"def get_distance_sensor(name):\n all_data = mc.get('sensor_values')\n name = _lookup(name)\n try:\n return all_data[name][0]\n except KeyError:\n raise KeyError(\"No Sensor with that name\")",
"async def get_distance() -> int:\n\n _initialize_sensor()\n pulse_start, pulse_end = await _get_echo_time(False), await _get_echo_time(True)\n signal_delay = pulse_end - pulse_start\n distance = _compute_distance(signal_delay)\n return int(distance)",
"def compute_distance_with_sensor_and_obj_loc(sensor_loc, obj_loc): \n\testimated_distance = scipy.spatial.distance.cdist(obj_loc, \n\t\t\t\t\t\t\t\t\t\t\tsensor_loc, \n\t\t\t\t\t\t\t\t\t\t\tmetric='euclidean')\n\treturn estimated_distance",
"def get_distance(self, location=None, angle=None):\n\n if location is None:\n location = self.vehicle.location\n if angle is None:\n # Offset for the yaw being increasing counterclockwise and starting \n # at 0 degrees when facing north rather than facing east.\n angle = -(self.vehicle.attitude.yaw - math.pi/2.0)\n\n # Ensure angle is always in the range [0, 2pi).\n angle = angle % (2*math.pi)\n\n distance = sys.float_info.max\n for obj in self.objects:\n distance = min(distance, self.get_obj_distance(obj, location, angle))\n\n # TODO: Replace with a parameter that has a limit on the measured \n # sensor distance, similar to what the ArduPilot simulator does?\n return distance",
"def read_sensor(self, maze):\n return maze.distance_to_nearest_beacon(*self.xy)",
"def get_distance(self) -> int:\n return self.get_measurement_data().distance",
"def distance_to(self, location):\r\n return gislib.getDistance((self.latitude, self.longitude), location)",
"def GetClosestSensorValue(active_sensors, value_col, distance_col, sensor_col):\n active_sensors = active_sensors.sort(distance_col)\n for sensor, value, dist in zip(active_sensors[sensor_col], active_sensors[value_col], active_sensors[distance_col]):\n if value > 0: return sensor, value, dist\n return 'None', -99, 99999",
"def get_distance(self):\n\n # Activate trigger\n self.trigger()\n\n # Detect rising edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.RISING, timeout=2)\n if channel is None:\n # Timeout on wait of rising interrupt\n return None\n else:\n # Rising edge detected, save pulse start\n pulse_start = time.time()\n\n\n # Detect falling edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.FALLING, timeout=2)\n if channel is None:\n # Timeout on wait of falling interrupt\")\n return None\n else:\n # Falling edge detected, save pulse end\n pulse_end = time.time()\n\n # Calculated pulse width in microseconds (x1mln)\n pulse_width = (pulse_end - pulse_start)*1000000\n\n # Return distance in cm\n return pulse_width / 58",
"def distance(self, location):\n return numpy.linalg.norm(self.vector_to(location))",
"def get_distances(self):\n return DistanceSensors(*self.bot_client.send_command(_Command.GetDistances))",
"def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre",
"async def distance(self):\n return round(await self._rpc.distance(), 2)",
"def measure_distance(self):\n # set Trigger to HIGH\n GPIO.output(self.GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n # save StartTime\n while GPIO.input(self.GPIO_ECHO) == 0:\n start_time = time.time()\n\n # save time of arrival\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop_time = time.time()\n\n # time difference between start and arrival\n time_elapsed = stop_time - start_time\n # multiply with the sonic speed (343.00 m/s)\n # and divide by 2, because there and back\n distance = (time_elapsed * 343.00) / 2\n\n return distance",
"def distance_to(self, x, y):\n\t\tdx = x - self.x\n\t\tdy = y - self.y\n\t\treturn math.sqrt((dx**2)+(dy**2))",
"def distance_to(self, x):\n return np.linalg.norm(np.array(x) - self.closest_point_to(x))",
"def distance(self, x: int, y: int) -> float:\n return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)",
"def distmeter(self):\n return self._distance.to(\"m\").value",
"def distance():\n return str(us.get_distance())",
"def distance(self, other_room):\n return self.p[0].distanceSquare(other_room.p[0])",
"def _get_distance(self, target): \r\n sensor_transform = self._sensors['rgb_front'].get_transform()\r\n\r\n distance = np.sqrt(\r\n (sensor_transform.location.x - target.x) ** 2 +\r\n (sensor_transform.location.y - target.y) ** 2 +\r\n (sensor_transform.location.z - target.z) ** 2)\r\n\r\n return distance",
"def get_distance(self, node):\n return np.sqrt(\n (self.x - node.x) ** 2 +\n (self.y - node.y) ** 2\n )",
"def get_geopy_distance(self, location):\n try:\n event = (float(location.split('latitude:')[1].split(' ')[0]), float(location.split('longitude:')[1]))\n base = (float(config.OFFICE_COORDINATES[0]), float(config.OFFICE_COORDINATES[1]))\n\n return geopy.distance.vincenty(event, base).km\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def calculate_distance(x: float, y: float) -> float:\n # return geopy.distance.vincenty(x, y).km\n R = 6370\n lat1 = radians(x[0]) #insert value\n lon1 = radians(x[1])\n lat2 = radians(y[0])\n lon2 = radians(y[1])\n\n dlon = lon2 - lon1\n dlat = lat2- lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n distance = R * c\n return distance",
"def get_temperature(self, sensor: int = 0) -> float:\n\n return self.send(self.cmd.GET_HEATING_ACT)",
"def get_distance(self):\n values = self.speakers.values()\n values.sort(reverse=True)\n try:\n return abs(values[1]) - abs(values[0])\n except (IndexError, ValueError):\n return -1",
"def distance(self, lat: float, lon: float) -> float:\n return distance((self.lat, self.lon), (lat, lon))",
"def distance(self):\n return self._distance",
"def distance_to_location(self, row, col):\n return float(sqrt(pow(self._row - row, 2) + pow(self._col - col, 2)))"
] | [
"0.71713",
"0.7136991",
"0.67143583",
"0.64265466",
"0.63979185",
"0.6347279",
"0.6342724",
"0.63268715",
"0.61834866",
"0.61157393",
"0.60445964",
"0.6027434",
"0.5997184",
"0.59771746",
"0.5814516",
"0.57811177",
"0.5756442",
"0.57413125",
"0.57289225",
"0.57217413",
"0.5669247",
"0.5652404",
"0.56477654",
"0.56152385",
"0.5593668",
"0.55914557",
"0.55830526",
"0.5568712",
"0.5563172",
"0.5547691"
] | 0.78509593 | 0 |
Return RGBAmbient detected from the front center sensor. | def get_color(self):
return self._io.last_state['color']['front-center'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ambient_light(self):\n return self._ambient_light",
"def getAmbientLightSensor(self):\n cmd = 'O'\n ambientLightSensor = [-1,-1,-1,-1,-1,-1,-1,-1]\n out = self.getData(cmd)\n out = str(out, 'utf-8')\n if self.debug:\n print(out)\n if out[0] == 'n':\n isStart = False\n j = 0\n for i in range(len(out)):\n if isStart:\n if out[i] == ',':\n ambientLightSensor[j] = int(data)\n j = j + 1\n isStart = False\n else:\n data=data+out[i]\n if out[i] == ',':\n isStart = True\n data = ''\n ambientLightSensor[j] = int(data)\n return ambientLightSensor",
"def getAmbient(self):\n return self.__ambient",
"def get_avg_color(image):\n\n perceptually_uniform = cspace_convert(image, \"sRGB1\", \"CAM02-UCS\")\n avg_center = np.mean(np.mean(perceptually_uniform, axis=0), axis=0)\n\n avg_center_JCh = cspace_convert(avg_center, \"CAM02-UCS\", \"JCh\")\n\n return avg_center_JCh",
"def ambient(self) -> float:\n return self.GetAmbient()",
"def lightness(self):\n min_component = min(self.red, self.green, self.blue)\n max_component = max(self.red, self.green, self.blue)\n avg = (max_component + min_component) / 2\n light = avg / 255\n return light",
"def ambient_dim(self):\n return self._ambient_dim",
"def meanColor(self):\n return self.image[self.x, self.y]",
"def recognize_color(self):\n x = (self.x + DIRECTIONS[(self.facing_direction - self.config) % 8][0]) % (self.image.shape[0] - 1)\n y = (self.y + DIRECTIONS[(self.facing_direction - self.config) % 8][1]) % (self.image.shape[1] - 1)\n color_left = self.image[x, y]\n if abs(self.luminance(color_left) - self.luminance_fcolor) <= self.lum_threshold:\n return self.turn_left\n x = (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1)\n y = (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)\n color_forward = self.image[x, y]\n if abs(self.luminance(color_forward) - self.luminance_fcolor) <= self.lum_threshold:\n return self.move_forward\n x = (self.x + DIRECTIONS[(self.facing_direction + self.config) % 8][0]) % (self.image.shape[0] - 1)\n y = (self.y + DIRECTIONS[(self.facing_direction + self.config) % 8][1]) % (self.image.shape[1] - 1)\n color_right = self.image[x, y]\n if abs(self.luminance(color_right) - self.luminance_fcolor) <= self.lum_threshold:\n return self.turn_right\n return None",
"def get_brightness(arr):\n\tR,G,B = arr[:,:,0], arr[:,:,1], arr[:,:,2]\n\tY = 0.299*R + 0.587*G + 0.144*B\n\treturn Y.mean()",
"def getAverageColor(img):\n avgColorPerRow = np.average(img, axis=0)\n avgColor = np.average(avgColorPerRow, axis=0)\n return avgColor",
"def get_primary_color(source: str) -> list:\r\n img = Image.fromarray(source.copy()).convert(\"RGB\")\r\n img.resize((1, 1), resample=0)\r\n primary_color = img.getpixel((0, 0))\r\n return primary_color",
"def SetupAmbientColor(inColorID, immediate=False):\n # If we're running the first time, resolve all of our datarefs just once.\n if not gColorRefs:\n for n in range(XP_Color.Count):\n gColorRefs.append(xp.findDataRef(kXPlaneColorNames[n]))\n\n target = []\n xp.getDatavf(gColorRefs[inColorID], target, 0, 3)\n\n # If the user passed NULL, set the color now using the alpha level.\n if immediate:\n target.append(gAlphaLevel)\n GL.glColor4fv(target)\n return target[0:3]",
"def get_brightness(img,mask):\r\n\r\n bright = cv2.meanStdDev(img, mask=mask)\r\n return {\"bright_avg\":bright[0][0,0],\"bright_sd\":bright[1][0,0]}",
"def get_center_color():\n center_color = input(\"What color should the center of the flower be?\")\n return center_color",
"def getAdaptiveAmbient(self, channel, unitCode=0):\n resp = self.XAPCommand(\"AAMB\", channel, unitCode=unitCode)\n return int(resp)",
"def luminance(self):\n \n return (self.r + self.g + self.b) // 3",
"def current_average_luma(camera):\n camera.capture('/home/pi/Desktop/image1.jpg')#camera take picture\n img = Image.open(\"/home/pi/Desktop/image1.jpg\") #opens image\n \n luma=0 #sum of the lumenance of each pixels\n pixels = img.width*img.height #number of pixels\n \n for x in range(img.width):\n for y in range(img.height):\n (r, g, b) = img.getpixel((x,y))#get colour touple \n luma += (0.2126*r + 0.7152*g + 0.0722*b) #calculate luma of RGB data, then add to total\n #END for\n #END for\n \n img.close()#ensure to properly close the image\n return luma/pixels #return average of all pixels",
"def get_ambient_light(self, ldr_voltage: Optional[int] = None) -> float:\n if ldr_voltage is None:\n ldr_voltage = self.raw_ldr_voltage\n\n # TODO: this conversion algorithm is straight from the manual but it seems odd.\n # It goes \"to infinity\" as ldr_voltage nears 1023 (hence the clamp, I guess)\n # Clarify.\n if ldr_voltage > 1022:\n ldr_voltage = 1022\n if ldr_voltage < 1:\n ldr_voltage = 1\n\n return self.ldr_pull_up_resistance / ((1023 / ldr_voltage) - 1)",
"def ambient_temperature(self) -> int:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature\"))\r\n # TODO: Force this to return an int.\r\n if self.temperature_scale == \"F\":\r\n return self.ambient_temperature_f\r\n elif self.temperature_scale == \"C\":\r\n return self.ambient_temperature_c\r\n else:\r\n return self._ambient_temperature",
"def get_colour(self):\n \n distorted = []\n if piCameraFound:\n # Use piCamera\n \n #frame = self.capture_generator.next()\n #distorted = frame.array\n self.cam.capture(self.rawCapture, format=\"bgr\", use_video_port=True)\n distorted = self.rawCapture.array\n \n # clear the stream in preparation for the next frame\n self.rawCapture.truncate(0)\n \n else: # Use OpenCV\n retval, distorted = self.cam.read() # Read frame\n\n if not retval: # Error\n print \"Camera.get_colour: Could not read next frame\";\n exit(-1);\n \n \n #colour = cv2.remap(distorted, self.mapx, self.mapy, cv2.CV_INTER_LINEAR)\n #colour = cv2.remap(distorted, self.mapx, self.mapy, cv2.INTER_LINEAR)\n \n # Skip this part because it is slow\n #colour = cv2.undistort(distorted, self.intrinsic_matrix, self.distortion_coeffs)\n colour = distorted\n return colour, distorted",
"def getSurfaceBrightness(self):\n return self._sbrightn",
"def getIntensity(self, pos):\n #Camera doesnt have position so im just using the position of the followed object (of 1st camera)\n camPos = glad.renderer.cameraList[0].objectFollowed.getPos()\n\n r=(pos-camPos)#separation vector\n if r.isNullVector(): #if the vector is null, sound will be max anyways\n sin = 1\n cos = 1\n else:\n #calculate angles to determine where sound is coming from\n cos = dotProduct(r.getNormalized(),Vector(-1,0))\n sin = dotProduct(r.getNormalized(), Vector(0,1))\n #Calculate intensity for left and right channels\n #when sound is directly to the side have 80 percent come from that side speaker\n #hopefully this will give some directional sounds\n k = 130000 #arbitrary constant to calculate sound intensity\n if r.isNullVector():\n intensity = k #removes division by zero error\n else:\n intensity = k/r.getMagnitude()**2\n #major is the percent of the sound intensity from the side with the greater intensity\n a=0.68 #max percent of the intensity coming from one side\n major = (a*0.5)/((0.5*cos)**2+(a*sin)**2)**0.5 #equation for an ellipse\n if r[0] <= 0:\n right = major\n left = 1-major\n else:\n left = major\n right = 1-major\n right *= intensity\n left *= intensity\n if right > 1: right = 1\n if left > 1: left = 1\n return left,right",
"def get_light_state(self):\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n #Get classification\n tl_image_rgb, color_index = self.light_classifier.get_classification(cv_image)\n tl_cv_image = cv2.cvtColor(tl_image_rgb, cv2.COLOR_RGB2BGR)\n try:\n self.tl_detected_image_pub.publish(self.bridge.cv2_to_imgmsg(tl_cv_image, \"bgr8\"))\n except CvBridgeError as e:\n print(e)",
"def light_color(self):\n return self._spots[constants.CROSSING_LOCATION - 1].light_color()",
"def get_bg(bg_reader):\n print('Type your current bg.')\n bg = bg_reader() # Type your current bg\n try:\n bg_as_integer = int(bg)\n return bg_as_integer\n except Exception as e:\n return math.nan",
"def get_foreground(self):\n\n h = ((self._bytes[12] & 0x0F) << 8) | self._bytes[13]\n s = self._bytes[14]\n l = self._bytes[15]\n\n h = utils.map(h, 0, 4095, 0, 360)\n s = 65 - utils.map(s, 0, 255, 0, 20)\n l = 75 - utils.map(l, 0, 255, 0, 20)\n\n return utils.hsl_to_rgb(h, s, l)",
"def return_average(rgb):\n value = sum(rgb[:3])//3 \n return (value, value, value)",
"def get_front_door_mask(self) -> np.array:\n front_door_mask = self.boundary == 255\n region = measure.regionprops(front_door_mask.astype(int))[0]\n return np.array(region.bbox, dtype=int)",
"def global_sky_background(self, LF):\n # Variables:\n s = 9 # Number of subframes (CHANGE IF NEEDED!) E.g. 4, 9, 16 etc. \n n = self.h*self.w/(self.h+self.w) # Number of pixels used in subframes scales with image dim \n nrows = self.h/(s/2) # Numbers of rows in each subframe\n ncols = self.w/(s/2) # Numbers of columns in each subframe\n\n # Reshape light frame into subframe:\n LF_sub = (LF.reshape(self.h//nrows, nrows, -1, ncols).swapaxes(1,2).reshape(-1, nrows, ncols))\n\n # Loop over all subframes:\n min_val = np.zeros((s,n))\n for i in range(s):\n # Loops over all pixels:\n for j in range(n):\n min_val[i,j] = np.min(LF_sub[i]) # Minimum value for array\n min_dex = np.where(LF_sub[i] == min_val[i,j]) # Find row, column for min value\n # Min pixel is set to max in order to find the next min:\n LF_sub[i, min_dex[0][0], min_dex[1][0]] = np.max(LF_sub[i]) \n\n # Flux:\n flux_sky = 3*median(min_val) - 2*mean(min_val) # Mean flux from pixels\n return flux_sky"
] | [
"0.6456869",
"0.6094395",
"0.60205966",
"0.59814113",
"0.58402157",
"0.5835906",
"0.5803118",
"0.56540084",
"0.5627646",
"0.55591786",
"0.5520857",
"0.541431",
"0.54012066",
"0.5390628",
"0.5378792",
"0.53740406",
"0.5353544",
"0.5350059",
"0.5344358",
"0.53411293",
"0.5339509",
"0.533622",
"0.53331876",
"0.5329445",
"0.53283453",
"0.53207934",
"0.53144735",
"0.5300815",
"0.5299178",
"0.5296808"
] | 0.6448859 | 1 |
Trigger a buzz for duration (in sec). | def buzz(self, duration):
self._io.buzz(duration) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vibrate(self, duration):\n self.wm.rumble = 1\n sleep(duration)\n self.wm.rumble = 0",
"def alarm(seconds): # real signature unknown; restored from __doc__\n pass",
"def buzz(self, timeout: float, rate: float) -> None:\n # FIXME - the close() method should stop any buzz in progress\n\n output_pin = settings.DOOR_LOCK[\"CONFIG\"][\"output_pin\"]\n\n time_end = time.time() + timeout\n while time.time() < time_end:\n GPIO.output(output_pin, True)\n time.sleep(rate)\n GPIO.output(output_pin, False)\n time.sleep(rate)",
"def delay(self, distance, seconds):\n delay = distance/seconds\n return delay",
"def delay(ms: int, /) -> None:",
"def alarm(self, interval, call):",
"def _delay(self, n=None):",
"async def alarm(ctx, on_time:float=1, off_time:float=0.6, n:int=5):\n buzzer.beep(on_time, off_time, n)\n await ctx.send(f\"Alarme acionado\")",
"def fake_delay(self, ha_delay):\n hass_now = dt_util.utcnow()\n shifted_time = hass_now + timedelta(seconds=ha_delay)\n self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: shifted_time})",
"def udelay(us: int, /) -> None:",
"def script_delay(now):\n self._listener = None\n self.turn_on()",
"def beat(self, **options):\n pass",
"def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)",
"def spawn(delay: timedelta, activity: Callable[[], None]) -> None:\n\n\teGGame.AddDelayedActivity(delay, activity)",
"def tic():\n then = datetime.datetime.now()\n return lambda: delay(datetime.datetime.now() - then)",
"def take_action(duration_length):\r\n print(\r\n f'Sleeping for {duration_length} second(s)...and now it is {strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime())}/')\r\n time.sleep(duration_length)\r\n return f'Wake up after sleeping for {duration_length} second(s)... and now it is {strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime())}/'",
"def beep(self, delay):\n self.on()\n Timer(delay, self.off).start()",
"def click_periodically(self, pos=(0, 0), interval=1, duration=1):\n time_passed = 0\n while time_passed < duration/interval:\n self.click(pos=pos)\n sleep(interval)\n time_passed += interval",
"async def sleep(cls, delay: float) -> None:",
"def set_delay(delay):\r\n inst.write(\"PULS:DEL %f\" %(delay))",
"def alarmoff() :\n s.alarm(False, \"\")",
"def take_buff(self, key, magnitude, duration):\n #EFFECTS: DoT, buffs: (attack, defense, move speed), knockback, stun\n pass",
"def shift(self, delay):\n self.go_to(self.time + delay)",
"def sleep_for(self, duration):\n raise NotImplementedError()",
"def explode(self):\n\t\tsound.play(\"explosion\")\n\t\tself.timeout = uniform(0.667, 2.333)",
"def delay(self, seconds):\n\n if self.call is None:\n return\n self.call.delay(seconds)",
"def cooldown():\n print camera.CoolerON()\n camera.status.update()",
"def bullet_up(screen, player):\n #If the player is not firing at a higher speed\n if player.maxcooldown > 6:\n player.maxcooldown -= 2",
"def moving_delay(self, duration):\n start_time = monotonic()\n while (monotonic() - start_time)*1e3 < duration:\n if self.check_movement() == False:\n if self.move_state != MOV_ROTATE: # rotate is only valid movement\n print(\"Stopping in moving_delay()\")\n self.move_brake()",
"def delay():\r\n time.sleep(2)"
] | [
"0.61852217",
"0.5827161",
"0.57949793",
"0.5773484",
"0.5751459",
"0.5750044",
"0.57450426",
"0.5731707",
"0.57204735",
"0.5700166",
"0.5686778",
"0.56843597",
"0.56702906",
"0.5667191",
"0.56382865",
"0.562899",
"0.5618123",
"0.5612783",
"0.56075585",
"0.55937237",
"0.55925167",
"0.55693865",
"0.556409",
"0.5552671",
"0.55158085",
"0.55150104",
"0.5514607",
"0.5509112",
"0.55006933",
"0.5499962"
] | 0.6760693 | 0 |
Initializes the history with the start-of-sentence symbol. | def initialize(self, src_sentence):
self.history = ['<s>'] if self.history_len > 0 else [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\r\n self.s_table = SymbolTable.preSymbols",
"def __init__(self, symbol, name=None, market=None, history=None):\n self._symbol = symbol\n self._name = name\n if name == None:\n self._name = symbol\n self._market = market\n if history != None:\n self._history = history\n else:\n self._history = []",
"def __init__(self, symbol):\n self._symbol = symbol\n self._hash = hash(symbol)",
"def __init__(self, symbols):\r\n self.symbols = set(symbols)",
"def reset(self):\n self.history = ['<s>'] if self.history_len > 0 else []",
"def __init__(self, history=None):\n\n self.__history = history if history else []",
"def __init__(self) -> None:\n self.symbols = {\n \"÷\": \"//\",\n \"+\": \"+\",\n \"-\": \"-\",\n \"×\": \"*\"\n }",
"def __init__(self):\n self._symbols = set()\n self._blank_symbol = None\n self._states = set()\n self._start_state = None\n self._end_states = set()\n self._transitions = {}\n\n self._current_state = None\n self._tape = None\n self._head = None",
"def set_symbol(self, symbol):\r\n self.symbol = symbol",
"def set_initial_symb(self, value):\n self.symb_val[0] = value",
"def __init__(self):\n self._graph = GenericGraph()\n self._atom_index = {}\n self._next_locant = 1\n self._brutto_formula = ''\n self.modified = True",
"def symbol(self, symbol):\n self._symbol = symbol",
"def set_symbols_from(self,sigma=_Set())-> None:\n \n m=len(self.chars)\n \n _s=[]\n symbols=set()\n for e in sigma.symbols:\n \"ignoring the empty word\"\n if not e == '':\n symbols.add(e)\n \n for s in symbols:\n _s=split(s)\n n=len(s)\n for i in range(0,m-n+1):\n if _s==self.chars[i:i+n]:\n self.symbols[i]=s\n i=+1\n\n \"\"\"Eliminating None values from the list object self.symbols\"\"\"\n temp=[]\n for elt in self.symbols:\n if elt==None:\n pass\n else:\n temp.append(elt)\n self.symbols=temp",
"def setSymbol(self, *args):\n return _libsbml.InitialAssignment_setSymbol(self, *args)",
"def reset(self):\n # This also resets the history.\n self.__init__(**self.init_kwargs)",
"def __init__(self, sym: ghidra.program.model.pcode.HighSymbol):\n ...",
"def __init__(self, symbol):\n Expression.__init__(self, None)\n self.symbol = symbol\n self.index = None\n self.basetype = symbol.basetype",
"def symbol(self, symbol):\n\n self._symbol = symbol",
"def history(self, history):\n self._history = history",
"def __init__(self):\n\n self.st = {} # symbol table\n self.st[\"itrs\"] = [{}, {}, {}]\n self.existVecs = False\n self.existMats = False\n self.applyOnce = {}",
"def FreshStart(self):\n # Create a vector holding historical data for the purpose of plotting.\n # The length may vary because the sampling speed of different are\n # sensors may vary.\n\n self.history = {'time': collections.deque( [], self.history_length ),\\\n 'data': collections.deque( [], self.history_length )\n }",
"def __init__(self):\n super(CamTacToe, self).__init__()\n self.state = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\n self.history = [self.state[:]]\n self.player_char = 'x'\n self.opponent_char = 'o'\n self.difficulty = 0\n self.player_start = 0",
"def _init_symbol_tracker(self):\n # Initialize with an empty set\n atoms_indx = {symb: set([]) for symb in self.symbols}\n\n # Populate the sets\n for atom in self.atoms:\n symb = atom.symbol\n atoms_indx[symb].add(atom.index)\n return atoms_indx",
"def test_hist_init():\n FNAME = 'xonsh-SESSIONID.json'\n FNAME += '.init'\n History(filename=FNAME, here='yup', **HIST_TEST_KWARGS)\n with LazyJSON(FNAME) as lj:\n obs = lj['here']\n assert_equal('yup', obs)\n os.remove(FNAME)",
"def history(self, history):\n\n self._history = history",
"def reset(self):\n self.fscore_history = []",
"def history():",
"def __init__(self, config, symbol_code):\n pass",
"def __init__(self, symbol, start_date, delta=0, days_to_expiration=20, percentages=[0.5, 0.5]):\n self.symbol = symbol\n self.start_date = start_date\n self.delta = delta\n self.days_to_expiration = days_to_expiration\n self.init_option_symbols()\n self.percentages = percentages",
"def terminal_init(self):\n pass"
] | [
"0.6629664",
"0.61422133",
"0.5893893",
"0.5795038",
"0.56736887",
"0.5658736",
"0.5606722",
"0.54772836",
"0.53585345",
"0.5345294",
"0.5338148",
"0.5327372",
"0.53225595",
"0.5298655",
"0.5272885",
"0.52654237",
"0.52591574",
"0.5245366",
"0.5237385",
"0.52224797",
"0.52105945",
"0.5202802",
"0.5156852",
"0.5139432",
"0.5138536",
"0.51314944",
"0.51287943",
"0.50747937",
"0.50729203",
"0.50716364"
] | 0.63857645 | 1 |
Extends the current history by ``word``. | def consume(self, word):
if len(self.history) >= self.history_len:
self.history = self.history[1:]
self.history.append(str(word)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def logP(self, history, word):",
"def new_match(self, new_word): \n self.rhyming_words.append(new_word)",
"def this_word(self):\n self.append = self.add_to_current_word",
"def addWord(self, word):\n if word[0] not in self.child:\n self.child[word[0]] = WordDictionary()\n if len(word) > 1:\n self.child[word[0]].addWord(word[1:])\n elif len(word) == 1:\n self.child[word[0]].isend = True",
"def insert(self, word):\n now = self.tree\n for i in word:\n now[i] = now.setdefault(i,{})\n now = now[i]\n now['end']=True",
"def addWord(self, word):\n if word:\n self.word_dict[len(word)].append(word)",
"def add_word(self):\n word = self.word # easier to call word now\n\n wordlist_path = self.get_wordlist_path()\n with open(wordlist_path) as f:\n data = json.load(f)\n\n if exists_already(data,word):\n exit()\n\n next_index = int(data[\"cur_index\"]) + 1 # new index\n data[\"words\"][next_index] = word # update wordlist\n data[\"words\"] = dict(sorted(data[\"words\"].items(), key=lambda item: item[1])) # alphabetisize\n data[\"cur_index\"] = next_index # update index\n\n with open(wordlist_path, 'w') as f:\n json.dump(data, f, indent = 4)\n\n print(f\"[{word}] added to [{self.pos}]. This is the [{next_index}] indexed word added.\")",
"def addWord(self, word: str) -> None:\n self.dict[len(word)].append(word)",
"def insert(self, word: str) -> None:\n curr_chars = self.chars\n for c in list(word):\n if c not in curr_chars:\n curr_chars[c] = {}\n curr_chars = curr_chars[c]\n\n curr_chars[self.end_of_word] = self.end_of_word",
"def add_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1",
"def add_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1",
"def add(self, word: str) -> None:\n self.words.add(word)\n self.added_words.add(word)",
"def add(self, word):\n\t\tif word not in self.link_words:\n\t\t\tself.link_words.append(word)",
"def _add_new_word(self, word):\n if word not in self.word_to_id:\n word_id = len(self.word_to_id)\n self.word_to_id[word] = word_id\n self.id_to_word[word_id] = word",
"def next_word(self):\n self.append = self.add_new_word",
"def _add_word(self, word):\n if not word in self._word2idx.keys():\n self._word2idx[word] = self.vocab_size\n self.freqs[word] = 0\n self._idx2word[self.vocab_size] = word\n self.vocab_size += 1\n self.freqs[word] += 1",
"def addWord(self, word):\n current_node = self\n for idx, letter in enumerate(word):\n if letter not in current_node.kids:\n current_node.kids[letter] = WordDictionary()\n current_node.kids[letter].val = letter\n current_node = current_node.kids[letter]\n if idx == len(word) - 1:\n current_node.isWord = True",
"def addWord(self, word: str) -> None:\n\n temp = self.start\n for i in range(len(word)):\n if temp.children[ord(word[i])-ord('a')] is None:\n temp.add_children()\n\n temp = temp.children[ord(word[i])-ord('a')]\n if i+1 == len(word):\n temp.end = True",
"def addWord(self, word: str) -> None:\n cur = self.root\n for c in word:\n cur = cur.children[c]\n cur.end = True",
"def add_word_to_trigram(new_word):\n # Trigrams require 2 previous words\n # If we don't have those yet, then set them\n if len(prev_words) < 2:\n prev_words.append(new_word)\n return\n\n # If it exists, add the word to the list\n # If it doesn't exist, create it\n word_tuple = (prev_words[0], prev_words[1])\n if word_tuple in trigrams:\n trigrams[word_tuple].append(new_word)\n else:\n trigrams[word_tuple] = [new_word]\n\n # Increment the prev words\n prev_words.pop(0)\n prev_words.append(new_word)",
"def insert(self, word):\n level = self.trie\n for c in word:\n if c in level:\n level = level[c]\n else:\n level[c] = {}\n level = level[c]\n level[self.end] = 1",
"def add(self, word: str) -> None:\n self.d.add(word)\n self.d.add(word.lower())\n self.save_user_dict()",
"def append(self, search):\n self._search_history.append(search)",
"def addWord(self, word: str) -> None:\n curr = self.trie\n for char in word:\n if char not in curr:\n curr[char] = {}\n curr = curr[char]\n curr['$'] = {}",
"def addWord(self, word):\n selected_node = self.root\n for i in word:\n if selected_node.next.get(i) is None:\n new_node = WordDictionary.Node()\n selected_node.next[i] = new_node\n selected_node = new_node\n else:\n selected_node = selected_node.next[i]\n if not selected_node.isFinish:\n selected_node.isFinish = True\n self.size += 1",
"def addWord(self, word: str) -> None:\n # Find split node\n word += '0'\n curr = self.trie\n i = 0\n while i < len(word) and word[i] in curr:\n curr = curr[word[i]]\n i += 1\n \n # Add the rest of the word\n while i < len(word):\n curr[word[i]] = {}\n curr = curr[word[i]]\n i += 1",
"def add_word(self, word):\n word = word.lower()\n if word in self.word_list:\n self.word_list[word] += 1\n else:\n self.word_list[word] = 1",
"def addWord(self, word: 'str') -> 'None':\n p=self.dictword\n for s in word:\n if s not in p:\n p[s]={}\n p=p[s]\n else:\n p=p[s]\n p['#']=None",
"def addWord(self, word: str) -> None:\n trie = self.trie\n for c in word:\n trie = trie[c]\n trie[None] = None",
"def addWord(self, word):\n lenw = len(word)\n if not lenw in self.bag:\n self.bag[lenw] = []\n self.bag[lenw].append(word)"
] | [
"0.6599941",
"0.6505923",
"0.64983183",
"0.6454854",
"0.64470184",
"0.64331174",
"0.6424763",
"0.63540274",
"0.6337719",
"0.62547296",
"0.62547296",
"0.62502587",
"0.6189653",
"0.6189369",
"0.6183528",
"0.6178551",
"0.61075634",
"0.6099807",
"0.60856986",
"0.6058385",
"0.60567147",
"0.6045778",
"0.6045517",
"0.6044017",
"0.6041283",
"0.6040682",
"0.60359865",
"0.60300964",
"0.6015193",
"0.6001225"
] | 0.762686 | 0 |
Retrieve all Sessions from OpenVidu Server. Only available if OpenVidu is enabled. | def get(self):
response = openvidu().list_sessions()
if response.status_code == 200:
return response.json()["content"]
abort(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sessions(self):\n logger.debug(\"Get sessions\")\n return self._raw_api.sessions.get()",
"def fusion_api_get_active_sessions(self):\n return self.loginsession.get_active_sessions()",
"def get_sessions(self):\n\n return self.all_sessions",
"def get_sessions_list():\n sessions = Session.query.all()\n result = sessions_schema.dump(sessions).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200",
"def GetSessions(firebase: firebase) -> None:\n\n global sessions\n obj_key_list = []\n \n result = firebase.get('/session', None)\n \n if result is None:\n print(\"no sessions found\")\n return\n \n for i in result.keys():\n obj_key_list.append(i)\n \n for i in obj_key_list:\n session = Session()\n session.setId(i)\n session.setCourseId(result[i]['courseid'])\n session.setDOW(result[i]['DOW'])\n session.setSessionNumber(result[i]['session_number'])\n session.setSessionDate(result[i]['session_date'])\n session.setSessionTimeStart(result[i]['session_time_start'])\n session.setSessionTimeEnd(result[i]['session_time_end'])\n sessions.append(session)",
"def sessions(self):\n return utils.listItems(self, '/status/sessions')",
"def sessions(self):\n return self.rpc.compatiblesessions(self.modulename)",
"def list(self):\n return {str(k): v for k, v in self.rpc.call(MsfRpcMethod.SessionList).items()} # Convert int id to str",
"def find_sessions(sfe):\n print(\"-\" * 20 + \" find_sessions started\")\n isessions = sfe.list_iscsisessions()\n json_isessions = isessions.to_json()\n return json_isessions",
"def sessions(self):\n return list(Session.get_sessions(self))",
"def get_all_sessions(self) -> list:\n sessions = list()\n for stream_id in self.streams.keys():\n tcpsession, session_position, network_tuple = self.streams[stream_id]\n sessions.append(tcpsession.get_session(session_position - 1))\n return sessions",
"def get_conference_sessions(self, request):\n return self.session_service.get_conference_sessions(\n request.websafeConferenceKey)",
"def get_sessions(self):\n return self.current_sessions",
"def _getSessionsBySpeaker(self, request):\n # Ensure that the speaker key is valid and that the speaker exists\n speaker = _getEntityByWebsafeKey(request.websafeSpeakerKey, 'Speaker')\n # Return all of the speaker's sessions\n return ndb.get_multi(speaker.sessions)",
"def session_list(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/sessions', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/sessions' % endpoint_name, 'GET')\n return body",
"def _getConferenceSessions(self, request):\n # Ensure that websafeConferenceKey is a valid conference key\n confKey = _raiseIfWebsafeKeyNotValid(request.websafeConferenceKey,\n 'Conference')\n # Retrieve all sessions that have a matching conference key\n sessions = Session.query(Session.conference == confKey).fetch()\n return sessions",
"def get_sessions(url: str, token: str) -> List[Session]:\n sessions_url = f'{url}api/sessions'\n response = requests.get(sessions_url, params={'token': token})\n assert(response.status_code == 200)\n sessions_raw = json.loads(response.text)\n sessions = []\n for session_raw in sessions_raw:\n session = Session(\n path = session_raw['path'],\n last_activity = dateutil.parser.isoparse(session_raw['kernel']['last_activity']),\n execution_state = session_raw['kernel']['execution_state']\n )\n assert(session['execution_state'] in valid_execution_states)\n sessions.append(session)\n\n sessions.sort(key=lambda session: session['last_activity'], reverse=True)\n return sessions",
"def fusion_api_get_active_user_sessions(self, param='', api=None, headers=None):\n return self.usersessions.get(api=api, headers=headers, param=param)",
"def list(self, request, *args, **kwargs):\n self.check_authentication(request)\n serializer = SessionSerializer(\n context={\"request\": request, \"view\": self},\n instance=[_Session(request)],\n many=True,\n )\n return Response(serializer.data)",
"def _sessions(self):\n return self.__sessions",
"def all (self):\n sparql_results = self.query (\"\"\"\n select distinct ?rs ?session ?name ?number ?pid ?sitename\n where {\n \n ?rs rdf:type austalk:RecordedSession .\n ?rs olac:speaker ?participant .\n \n ?participant austalk:id ?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label ?sitename .\n \n ?rs austalk:prototype ?session .\n ?session austalk:name ?name .\n ?session austalk:id ?number .\n }\n ORDER BY ?name\"\"\")\n\n results = []\n\n for result in sparql_results[\"results\"][\"bindings\"]:\n\n results.append (Session (\n client = self.client,\n identifier = result[\"rs\"][\"value\"],\n prototype = result[\"session\"][\"value\"],\n name = result[\"name\"][\"value\"],\n number = result[\"number\"][\"value\"],\n site = result[\"sitename\"][\"value\"],\n participantId = result[\"pid\"][\"value\"]))\n\n return results",
"def getSessionByUsername(self, username):\n match = []\n for session in self.sessions:\n if (session.identifier[1] == username):\n match.append(session)\n return match",
"def get_active_sessions():\n\n # The output changes based on locales, force it to be YY-MM-DD\n # for the benefit of split()\n os.environ['LANG'] = 'en_GB.utf8'\n try:\n output = subprocess.check_output(['who']).rstrip()\n except subprocess.CalledProcessError:\n print 'UNKNOWN: unable to invoke who'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Nothing to process\n if not output:\n return {}\n\n sessions = {}\n for line in output.split(\"\\n\"):\n fields = line.split()\n sessions[fields[1]] = {\n 'user': fields[0],\n 'date': fields[2],\n 'time': fields[3],\n 'source': fields[4][1:-1] if len(fields) >= 5 else None,\n }\n\n return sessions",
"def iter_sessions():\n return iter(_session_stack)",
"def sessions(self):\n return self._sessions",
"def get_or_create_sessions(self):\n\t\tpath = f'{self.BIKE_ENDPOINT}user/current/session?{self.secret_key}'\n\t\tresponse = requests.get(path).json()\n\t\tself.check_api_key(response)\n\n\t\treturn response",
"def sessions(self):\n for session_id in self.get_sessions(): \n session = Session(self.session_cache, self.sid, session_id)\n yield session",
"def active_sessions(self):\n skey = self.r_key('active_sessions')\n sessions_to_expire = []\n for user_id in self.r_server.smembers(skey):\n ukey = self.r_key('session', user_id)\n if self.r_server.exists(ukey):\n yield user_id, self.load_session(user_id)\n else:\n sessions_to_expire.append(user_id)\n\n # clear empty ones\n for user_ids in sessions_to_expire:\n self.r_server.srem(skey, user_id)",
"def getSessionsData(self):\n export_data = self.get_api_results(\n \"/api/session/export?api_key={0}&format=json\")\n export_data = self.purge_misc_sessions(export_data)\n return export_data",
"def get_iscsi_sessions(host_executor):\n rc, out, err = host_executor.run_cmd(config.ISCSIADM_SESSION)\n if rc:\n if \"No active sessions\" in err:\n return None\n else:\n logger.error(\n \"Unable to execute command %s\", config.ISCSIADM_SESSION\n )\n raise Exception(\n \"Error executing %s command: %s\"\n % (config.ISCSIADM_SESSION, err)\n )\n return out.rstrip().splitlines()"
] | [
"0.69744915",
"0.68106383",
"0.680421",
"0.6639985",
"0.6492907",
"0.6467396",
"0.6462376",
"0.6411373",
"0.63915443",
"0.6341369",
"0.6337059",
"0.6332384",
"0.63185257",
"0.62985736",
"0.6282246",
"0.6229181",
"0.6196593",
"0.6155813",
"0.61123633",
"0.6087496",
"0.6086961",
"0.60764813",
"0.6054554",
"0.6046503",
"0.6037189",
"0.59891266",
"0.5959943",
"0.5949349",
"0.59216374",
"0.5889147"
] | 0.6961742 | 1 |
Retrieve a Session from OpenVidu Server. Only available if OpenVidu is enabled. | def get(self, *, session_id):
response = openvidu().get_session(session_id)
if response.status_code == 200:
return response.json()
elif response.status_code == 404:
abort(NotFound, query=f"Session `{session_id}` does not exist")
abort(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self):\n\n response = openvidu().list_sessions()\n\n if response.status_code == 200:\n return response.json()[\"content\"]\n abort(response)",
"def getSession():\n return call(\"getSession\")",
"def test_client_get_session(self):\n server, client = loopback()\n session = client.get_session()\n assert isinstance(session, Session)",
"def getSession(self):\n if self.accessToken is None:\n self.authenticate()\n\n s = requests.Session()\n s.auth = self.getAuthObj()\n s.headers = {\"Accept\": \"application/json\"}\n return s",
"def get(database, session_id: SessionId):\n return database.sessions.find_one({\"session_id\": session_id})",
"def getSession(self, universe, username):\n for session in self.sessions:\n if (session.identifier == (universe, username)):\n return session\n return False",
"def test_server_get_session(self):\n server, client = loopback()\n session = server.get_session()\n assert isinstance(session, Session)",
"def get(self, *, session_id, connection_id):\n\n response = openvidu().get_connection(session_id, connection_id)\n\n if response.status_code == 200:\n return response.json()\n elif response.status_code == 400:\n abort(NotFound, query=f\"Session `{session_id}` does not exist\")\n elif response.status_code == 404:\n abort(NotFound, query=f\"Connection `{connection_id}` does not exist\")\n abort(response)",
"def _get_session():\n api_version = \"1.0\"\n originator = \"salt_cloud_{}_driver\".format(__virtualname__)\n url = config.get_cloud_config_value(\n \"url\", get_configured_provider(), __opts__, search_global=False\n )\n user = config.get_cloud_config_value(\n \"user\", get_configured_provider(), __opts__, search_global=False\n )\n password = config.get_cloud_config_value(\n \"password\", get_configured_provider(), __opts__, search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n \"ignore_ssl\",\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False,\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n \"url: %s user: %s password: %s, originator: %s\",\n url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n session.xenapi.login_with_password(user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = str(ex.__dict__[\"details\"][1])\n slash_parts = url.split(\"/\")\n new_url = \"/\".join(slash_parts[:2]) + \"/\" + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n \"session is -> url: %s user: %s password: %s, originator:%s\",\n new_url,\n user,\n \"XXX-pw-redacted-XXX\",\n originator,\n )\n session.xenapi.login_with_password(user, password, api_version, originator)\n return session",
"def open_session(self):\n return self.Session()",
"def session(self):\r\n # Use auto-auth to retrieve the session for a logged in user\r\n session = requests.Session()\r\n response = session.get(STUDIO_BASE_URL + \"/auto_auth?staff=true\")\r\n\r\n # Return the session from the request\r\n if response.ok:\r\n return session\r\n\r\n else:\r\n msg = \"Could not log in to use Studio restful API. Status code: {0}\".format(response.status_code)\r\n raise StudioApiLoginError(msg)",
"def _get_session(self):\n if current_uow and not self._outside_uow:\n return current_uow.get_session(self.provider.name)\n else:\n new_connection = self.provider.get_connection()\n if not new_connection.is_active:\n new_connection.begin()\n return new_connection",
"def get_session(_id):\n token = get_token()\n headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {token}'}\n endpoint = f\"https://api.signicat.io/identification/v2/sessions/{_id}\" \n \n response = requests.get(endpoint, headers=headers).json()\n identification = response['identity']\n print(identification)\n return response['identity']",
"def get_session(self):\n return ESSession(self)",
"def session(self):\n return self.session_store.get_session()",
"def session(get_session):\n return get_session()",
"def getPlaySession(roomId):\n\turi = \"https://togethertube.com/rooms/{}\".format(roomId)\n\tr = requests.get(uri)\n\tif args.verbose: print(\"probe room: status code:\", r.status_code)\n\treturn r.cookies[\"PLAY_SESSION\"]",
"def session(self):\n return self.ssession()",
"def get(self, *, session_id):\n\n response = openvidu().list_connections(session_id)\n\n if response.status_code == 200:\n return response.json()[\"content\"]\n elif response.status_code == 404:\n abort(NotFound, query=f\"Session `{session_id}` does not exist\")\n abort(response)",
"def get_session(self):\n return self.session",
"def get_session(id):\n session = Session.query.get(id)\n result = session_schema.dump(session).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200",
"def get_session(self, renew: Optional[bool] = False) -> neo4j.work.simple.Session:\n if self.session is None or renew:\n sess = self.driver.session()\n self.session = sess\n return self.session",
"def load_session(self, user_id):\n ukey = self.r_key('session', user_id)\n return self.r_server.hgetall(ukey)",
"def get_session(cls):\r\n if cls._session is not None:\r\n return cls._session\r\n else:\r\n raise RuntimeError('Session not set.')",
"def get_session(self):\n return self._session()",
"def get_session(context, key):\n session_manager = getToolByName(context, 'session_data_manager')\n\n if not session_manager.hasSessionData():\n return None\n\n session = session_manager.getSessionData()\n\n if not key in session.keys():\n return None\n\n return session[key]",
"def getCruSession(user: str, password: str, url: str) -> Session:\n \n payload = {\n 'Username': user,\n 'Password': password,\n 'ufprt': 'CAF8477BDE81160B023359032A131EBA8ED2EFCCA369B1D4406ABA090A186F53D289629E6B2901EADF92F7200C4504C4EC8DA6A8AE05FD4C6298C6E3A5B090CF7AFEEC6E47030ACB04698860352BD8EA3914830C3D6099400102E9B87AD1389C568A810357BDE2D5BF115952F6B1B1D8C4E8783B',\n 'RememberMe': 'false'\n }\n \n session = HTMLSession()\n \n res = session.post(\"https://cruonline.crugroup.com/login\", data=payload)\n assert res.status_code == 200\n \n # POST a different ufprt value so that if site detects that another user has logged on,\n # we force a log off\n logoff_payload = {\n 'ufprt': '7674E654A97C61E8335D82E3E72CB21E3B8BC67B82C4A2C28B5359C474B3E5AFAAD640D57AE7CC8A40515A37EF5740681F41C249B2F85397D86C2560F38BD27A43B4401FFB761DFEB88F3E34BAC675CA9AEADA14AD076EB02A85EAADD50D4678BE783B30BF67E11B6394BEE63C522175658A6BAD34B4EEA8982EC0C59790667556CC29D0'\n }\n res_final = session.post(\"https://cruonline.crugroup.com/login\", data=logoff_payload)\n assert res_final.status_code == 200\n \n return res_final.session",
"def get_session(base_url, group_id, token, session_id):\n url = base_url + route_session.format(session_id=session_id)\n response = requests.get(url, headers=headers(group_id, token))\n return response",
"def session(self):\n return session",
"def load_session(self, id, default=None):\n \n db = self.open()\n return db.get(id, default)"
] | [
"0.6957267",
"0.6738384",
"0.6525127",
"0.6468331",
"0.6465521",
"0.6465363",
"0.6464419",
"0.6318534",
"0.63104165",
"0.6275549",
"0.6267169",
"0.6256999",
"0.6253086",
"0.6247025",
"0.6239282",
"0.62275785",
"0.61824",
"0.6169235",
"0.6146193",
"0.6141159",
"0.610484",
"0.6096942",
"0.6075575",
"0.6074965",
"0.6045808",
"0.60340565",
"0.60249203",
"0.60168076",
"0.6003997",
"0.5998825"
] | 0.70654297 | 0 |
List all Connections from a Session. Only available if OpenVidu is enabled. | def get(self, *, session_id):
response = openvidu().list_connections(session_id)
if response.status_code == 200:
return response.json()["content"]
elif response.status_code == 404:
abort(NotFound, query=f"Session `{session_id}` does not exist")
abort(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list(self):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.list_conns()",
"def list_connections(self, show_passthrough=True):\n return self._exprmapper.list_connections(show_passthrough)",
"async def get_all(self) -> typing.List[Connection]:\n return [Connection.from_dict(conn) for conn in await self.query(CONNECTION_URL)]",
"def ssh_list_connections(cls):\n for name in cls._ssh_connections.keys():\n print (name)",
"def get_all(self) -> typing.List[Connection]:\n return [Connection.from_dict(conn) for conn in self.query(CONNECTION_URL)]",
"def list_connections(self):\n return self.network.list_connections()",
"def list_connections(self, status, **kwargs):\n logging.debug('ConnectionsClient/list_connections()')\n url = '/pod/v1/connection/list'\n params = {'status' : status}\n return self.bot_client.execute_rest_call('GET', url, params=params)",
"def getConnectionList(self):\n return []",
"async def connections_list(request: web.BaseRequest):\n context: AdminRequestContext = request[\"context\"]\n\n tag_filter = {}\n for param_name in (\n \"invitation_id\",\n \"my_did\",\n \"their_did\",\n \"request_id\",\n \"invitation_key\",\n \"their_public_did\",\n \"invitation_msg_id\",\n ):\n if param_name in request.query and request.query[param_name] != \"\":\n tag_filter[param_name] = request.query[param_name]\n\n post_filter = {}\n if request.query.get(\"alias\"):\n post_filter[\"alias\"] = request.query[\"alias\"]\n if request.query.get(\"state\"):\n post_filter[\"state\"] = list(ConnRecord.State.get(request.query[\"state\"]).value)\n if request.query.get(\"their_role\"):\n post_filter[\"their_role\"] = list(\n ConnRecord.Role.get(request.query[\"their_role\"]).value\n )\n if request.query.get(\"connection_protocol\"):\n post_filter[\"connection_protocol\"] = request.query[\"connection_protocol\"]\n\n profile = context.profile\n try:\n async with profile.session() as session:\n records = await ConnRecord.query(\n session, tag_filter, post_filter_positive=post_filter, alt=True\n )\n results = [record.serialize() for record in records]\n results.sort(key=connection_sort_key)\n except (StorageError, BaseModelError) as err:\n raise web.HTTPBadRequest(reason=err.roll_up) from err\n\n return web.json_response({\"results\": results})",
"def get_client_list(self):\r\n cursor = self.conn.cursor()\r\n cursor.execute(\"\"\"SELECT * FROM CLIENT\"\"\")\r\n return cursor.fetchall()",
"def list_conns(self):\n\t\tres = []\n\t\tself.AL.acquire()\n\t\tfor ls in self.ls.keys():\n\t\t\tinfo = self.ls[ls]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Relay\", \"LOCAL\", info[\"local\"], info[\"peer\"],\n\t\t\t\t\tinfo[\"port\"], info[\"got\"], None,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tfor s in self.s2i.keys():\n\t\t\tinfo = self.s2i[s]\n\t\t\tif info[\"creator\"] == self.cid:\n\t\t\t\tfai = \"LOCAL\"\n\t\t\t\ttai = info[\"peer\"]\n\t\t\telse:\n\t\t\t\tfai = info[\"creator\"]\n\t\t\t\ttai = info[\"peer\"]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Conn\", fai, info[\"local\"], tai, info[\"port\"],\n\t\t\t\t\tinfo[\"recv\"], info[\"send\"]\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tself.AL.release()\n\t\treturn res",
"def sessions(self):\n return self.rpc.compatiblesessions(self.modulename)",
"def snmpqosqos_sch_session_conns(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_sch_session_conns\n\t\texcept Exception as e:\n\t\t\traise e",
"def list(self):\n return {str(k): v for k, v in self.rpc.call(MsfRpcMethod.SessionList).items()} # Convert int id to str",
"def cmd_list(self):\n rc = self.socket_command('list', False)\n return rc",
"def get_conference_sessions(self, request):\n return self.session_service.get_conference_sessions(\n request.websafeConferenceKey)",
"def get_all_sessions(self) -> list:\n sessions = list()\n for stream_id in self.streams.keys():\n tcpsession, session_position, network_tuple = self.streams[stream_id]\n sessions.append(tcpsession.get_session(session_position - 1))\n return sessions",
"def get_sessions_list():\n sessions = Session.query.all()\n result = sessions_schema.dump(sessions).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200",
"def sessions(self):\n logger.debug(\"Get sessions\")\n return self._raw_api.sessions.get()",
"def get_connections(self, id, connection_name, **args):\n return self.request(id + \"/\" + connection_name, args)",
"def get_connections(self):\n return self.connections",
"def get_connections(self):\n return self.connections",
"def get_sessions(self):\n\n return self.all_sessions",
"def get(self, *, session_id, connection_id):\n\n response = openvidu().get_connection(session_id, connection_id)\n\n if response.status_code == 200:\n return response.json()\n elif response.status_code == 400:\n abort(NotFound, query=f\"Session `{session_id}` does not exist\")\n elif response.status_code == 404:\n abort(NotFound, query=f\"Connection `{connection_id}` does not exist\")\n abort(response)",
"def available_auto_connection():\n path = os.path.dirname(verticapy.__file__) + \"/connections.verticapy\"\n confparser = ConfigParser()\n confparser.optionxform = str\n try:\n confparser.read(path)\n confparser.remove_section(\"VERTICAPY_AUTO_CONNECTION\")\n except:\n pass\n all_connections = confparser.sections()\n return all_connections",
"def all (self):\n sparql_results = self.query (\"\"\"\n select distinct ?rs ?session ?name ?number ?pid ?sitename\n where {\n \n ?rs rdf:type austalk:RecordedSession .\n ?rs olac:speaker ?participant .\n \n ?participant austalk:id ?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label ?sitename .\n \n ?rs austalk:prototype ?session .\n ?session austalk:name ?name .\n ?session austalk:id ?number .\n }\n ORDER BY ?name\"\"\")\n\n results = []\n\n for result in sparql_results[\"results\"][\"bindings\"]:\n\n results.append (Session (\n client = self.client,\n identifier = result[\"rs\"][\"value\"],\n prototype = result[\"session\"][\"value\"],\n name = result[\"name\"][\"value\"],\n number = result[\"number\"][\"value\"],\n site = result[\"sitename\"][\"value\"],\n participantId = result[\"pid\"][\"value\"]))\n\n return results",
"def sitecurclntconnections(self) :\n\t\ttry :\n\t\t\treturn self._sitecurclntconnections\n\t\texcept Exception as e:\n\t\t\traise e",
"def collect_channels(session):\n channel_objects = []\n brew_channels = session.listChannels()\n\n for brew_channel in brew_channels:\n channel_objects.append(channel(brew_channel[\"name\"], brew_channel[\"id\"]))\n\n return channel_objects",
"def sessions(self):\n return utils.listItems(self, '/status/sessions')",
"def user_connections(self):\r\n return users.UserConnections(self)"
] | [
"0.6733905",
"0.6326909",
"0.62833834",
"0.62431115",
"0.6201212",
"0.6139315",
"0.61312217",
"0.6088229",
"0.6044139",
"0.59260124",
"0.58997154",
"0.5763768",
"0.5634482",
"0.5621717",
"0.5603118",
"0.5596979",
"0.5581345",
"0.55810916",
"0.55774164",
"0.55729145",
"0.5551006",
"0.5551006",
"0.5540285",
"0.5540141",
"0.553783",
"0.55224854",
"0.5502185",
"0.549007",
"0.54892117",
"0.54872763"
] | 0.63346416 | 1 |
Get a Connection from a Session. Only available if OpenVidu is enabled. | def get(self, *, session_id, connection_id):
response = openvidu().get_connection(session_id, connection_id)
if response.status_code == 200:
return response.json()
elif response.status_code == 400:
abort(NotFound, query=f"Session `{session_id}` does not exist")
elif response.status_code == 404:
abort(NotFound, query=f"Connection `{connection_id}` does not exist")
abort(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_conn(self):\n if self.game_request:\n # Prima cerca di ricavare la connessione ciclando sulle connessioni\n for connection in connections.values():\n if connection.player == self:\n return connection\n\n # Nell'eventualità remota che non ve la faccia prova nell'altro modo\n try:\n session = self.game_request.getSession()\n except error.AlreadyCalled:\n return None\n if session in connections:\n return connections[session]\n\n return None",
"def _get_connection(self, conf):\n return get_session()",
"def get(self, *, session_id):\n\n response = openvidu().list_connections(session_id)\n\n if response.status_code == 200:\n return response.json()[\"content\"]\n elif response.status_code == 404:\n abort(NotFound, query=f\"Session `{session_id}` does not exist\")\n abort(response)",
"def _get_session(self):\n if current_uow and not self._outside_uow:\n return current_uow.get_session(self.provider.name)\n else:\n new_connection = self.provider.get_connection()\n if not new_connection.is_active:\n new_connection.begin()\n return new_connection",
"def get_connection(self, simple_rows=False):\n return self.open(simple_rows)",
"def _get_connection(reconnect=False):\n global _connection\n identity = get_identity()\n # Connect to the database if not already connected\n if _connection.get(identity) is None or reconnect:\n try:\n _connection[identity] = Connection(**_connection_settings)\n except Exception, e:\n raise ConnectionError(\"Cannot connect to the database:\\n%s\" % e)\n return _connection[identity]",
"def get_connection(self, session_cls=None):\n # If this connection has to be created within an existing session,\n # ``session_cls`` will be provided as an argument.\n # Otherwise, fetch a new ``session_cls`` from ``get_session()``\n if session_cls is None:\n session_cls = self.get_session()\n\n conn = session_cls()\n conn = self._execute_database_specific_connection_statements(conn)\n\n return conn",
"def get_conn(self, *args, **kwargs):\n connections = self.__connections_for('get_conn', args=args, kwargs=kwargs)\n\n if len(connections) == 1:\n return connections[0]\n else:\n return connections",
"def get_conn(self):\n return self.get_connection(self.mssql_conn_id)",
"def retrieve(self, connectionId) :\n conn = None\n\n try :\n conn = self.remoteConnections[connectionId]\n except :\n print 'Error retrieving connection with id ' + connectionId\n\n return conn",
"def get(self, conn_id: str) -> Connection:\n return Connection.from_dict(self.query(f'{CONNECTION_URL}/{conn_id}'))",
"async def get(self, conn_id: str) -> Connection:\n return Connection.from_dict(await self.query(f'{CONNECTION_URL}/{conn_id}'))",
"def get_vc_connection(self):\n\n if self._vc_connection:\n # if connected return the connection.\n if self._vc_connection.is_connected(5) and \\\n self._vc_connection.is_peer_connected(5):\n _log.debug('Returning current connection')\n return self._vc_connection\n\n _log.debug(\"Resetting connection as the peer wasn't responding.\")\n # reset the connection so we can try it again below.\n self._vc_connection.kill()\n self._vc_connection = None\n\n def sync_status_to_vc(status, context):\n \"\"\"\n Sync the status of the current vcp object with that of the one that\n is connected to the vc instance.\n\n :param status:\n :param context:\n \"\"\"\n conn = self._vc_connection\n conn.vip.health.set_status(status, context)\n\n self.vip.health.add_status_callback(sync_status_to_vc)\n\n def enable_connection_heartbeat():\n \"\"\"\n Start publishing the heartbeat with the status messages.\n \"\"\"\n conn = self._vc_connection\n status = self.vip.health.get_status()\n conn.vip.health.set_status(\n status['status'], status['context']\n )\n conn.vip.heartbeat.start()\n\n # We are going to use an identity of platform.address_hash for\n # connections to vc. This should allow us unique connection as well\n # as allowing the vc to filter on the heartbeat status of the pubsub\n # message to determine context.\n vcp_identity_on_vc = 'platform.'\n\n # First check to see if there is a peer with a volttron.central\n # identity, if there is use it as the manager of the platform.\n peers = self.vip.peerlist().get(timeout=5)\n if VOLTTRON_CENTRAL in peers:\n _log.debug('VC is a local peer, using {} as instance_id'.format(\n self._instance_id))\n self._vc_connection = build_agent(\n self.core.address,\n # peer=VOLTTRON_CENTRAL,\n publickey=self.core.publickey,\n secretkey=self.core.secretkey,\n serverkey=self._vc_serverkey,\n identity=self._instance_id,\n agent_class=VCConnection\n )\n self._vc_connection.set_main_agent(self)\n if self._vc_connection.is_connected() and \\\n self._vc_connection.is_peer_connected():\n _log.debug(\"Connection has been established to local peer.\")\n else:\n _log.error('Unable to connect to local peer!')\n if self._vc_connection.is_connected():\n enable_connection_heartbeat()\n\n return self._vc_connection\n\n if self._vc_address is None or self._vc_serverkey is None:\n _log.warn('volttron_central_address is None in config store '\n 'and volttron.central is not a peer.')\n _log.warn('Recommend adding volttron.central address or adding a '\n '\"config\" file to the config store.')\n return None\n\n self._vc_connection = build_agent(\n identity=vcp_identity_on_vc,\n # peer=VOLTTRON_CENTRAL,\n address=self._vc_address,\n serverkey=self._vc_serverkey,\n publickey=self.core.publickey,\n secretkey=self.core.secretkey,\n agent_class=VCConnection\n )\n\n self._vc_connection.set_main_agent(self)\n if not self._vc_connection.is_peer_connected():\n _log.error('Peer: {} is not connected to the external platform'\n .format(self._vc_connection.peer))\n self._vc_connection.kill()\n self._vc_connection = None\n self._registration_state = RegistrationStates.NotRegistered\n return None\n\n if self._vc_connection.is_connected():\n enable_connection_heartbeat()\n\n return self._vc_connection",
"def get_session(*args, **kwargs):\n settings = _get_connection_settings(*args, **kwargs)\n return Session(settings)",
"def get_connection(self, host, login, passwd, conn):\n # force all string values to unicode\n host = unicode(host)\n login = unicode(login)\n passwd = unicode(passwd) if passwd is not None else None\n\n connection = self._search_connection(host, login, passwd, conn)\n\n if (connection is None):\n self._connections_lock.acquireWrite()\n try:\n # we have to search for the connection again after aquireing the write lock\n # as the thread previously holding the write lock may have already added our connection\n connection = self._search_connection(host, login, passwd, conn)\n if (connection is None):\n # create a new connection if a matching connection does not already exist\n connection = wvmConnection(host, login, passwd, conn)\n\n # add new connection to connection dict\n if host in self._connections:\n self._connections[host].append(connection)\n else:\n self._connections[host] = [connection]\n finally:\n self._connections_lock.release()\n\n elif not connection.connected:\n # try to (re-)connect if connection is closed\n connection.connect()\n\n if connection.connected:\n # return libvirt connection object\n return connection.connection\n else:\n # raise libvirt error\n raise libvirtError(connection.last_error)",
"def get_conn(args):\n\n # connect this thing\n from pyVmomi import vim\n from pyVim.connect import SmartConnect, Disconnect\n import atexit\n try:\n si = SmartConnect(host=args.host, port=args.port, user=args.user, pwd=args.password)\n except Exception as exc:\n if isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg:\n try:\n import ssl\n default_context = ssl._create_default_https_context\n ssl._create_default_https_context = ssl._create_unverified_context\n si = SmartConnect(\n host=args.host,\n port=args.port,\n user=args.user,\n pwd=args.password,\n )\n ssl._create_default_https_context = default_context\n except Exception as exc1:\n raise Exception(exc1)\n else:\n import ssl\n context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n context.verify_mode = ssl.CERT_NONE\n si = SmartConnect(\n host=args.host,\n port=args.port,\n user=args.user,\n pwd=args.password,\n sslContext=context)\n atexit.register(Disconnect, si)\n return si",
"def get_connection(self, socket):\r\n\r\n return self.connections_m[socket]",
"def session(self):\n session = None\n connected = False\n while not connected:\n try:\n session = self.session_pool.pop()\n except IndexError:\n connection = connect(self.address, self.ssl_context, **self.config)\n session = Session(self, connection)\n connected = True\n else:\n if session.healthy:\n #session.connection.reset()\n connected = session.healthy\n return session",
"def _open_session(self):\n return self.cluster.connect()",
"def connection(self):\n return self.session.connection",
"def get_connection(self):\n if self.conn is None or self.conn.closed != 0:\n self._connect()\n logger.debug(f'The connection object is: {self.conn}.')\n return self.conn",
"def open_session(self):\n return self.Session()",
"def connection(self, connection=None):\n if connection is None:\n return self.engine.acquire()\n return ConnectionProxy(connection=connection)",
"def _open(self):\n if self.channel is None:\n self.channel = self.transport.open_session()\n\n return self.channel",
"def exposed_getconn(self):\n return self._conn",
"def open_connection(self):\n return SelectConnection(\n parameters=self._connection_parameters,\n on_open_callback=self.on_connection_open,\n on_close_callback=self.on_connection_closed,\n on_open_error_callback=self.on_connection_closed,\n )",
"def get_connection(self):\n return self.connection",
"def get_connection(self):\n return self.connection",
"def getConexion(self):\n return self.conn",
"async def get_connection(self, username: str, password: str) -> asyncssh.connect:\n conn = await asyncssh.connect(self.ip, known_hosts=None, username=username, password=password,\n server_host_key_algs=['ssh-rsa'])\n # return created connection\n return conn"
] | [
"0.6831988",
"0.65772164",
"0.6539034",
"0.62769437",
"0.6270954",
"0.62527394",
"0.6241328",
"0.6051569",
"0.60344905",
"0.60256827",
"0.60075736",
"0.5991678",
"0.5909446",
"0.58730656",
"0.58506244",
"0.58354396",
"0.5833348",
"0.5819763",
"0.5816927",
"0.580332",
"0.58014196",
"0.5770022",
"0.57427907",
"0.56939346",
"0.567555",
"0.56713635",
"0.5666023",
"0.5666023",
"0.5658982",
"0.56544065"
] | 0.6966095 | 0 |
Retrieve all Recordings from OpenVidu Server. Only available if OpenVidu is enabled. | def get(self):
response = openvidu().list_recordings()
if response.status_code == 200:
return response.json()["items"]
elif response.status_code == 501:
abort(NotImplemented, query="OpenVidu Server recording module is disabled")
abort(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_records():\n with RECORD_LOCK: # since flask 1.0 multi-threaded is enabled by default\n return jsonify(RECORDS)",
"async def get_all_record():\n # X_new = item.to_df()\n # item_str = item.to_string()\n # project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_all_records()\n return return_json",
"def get_all_records(self, data: dict, execution_context: dict):",
"def get_records(self) -> List[DBModelInstance]:\n return self._get_all_records()",
"def get_records(self):\n url = f\"{self.baseurl}\" + \"?limit=\" + RECORDS_PER_PAGE\n\n while True:\n log.debug(\"Retrieving from OpenCity URL %s\", url)\n response = urlopen(url).read()\n json_content = json.loads(response)\n\n url = json_content[\"next\"]\n\n objects = json_content[\"items\"]\n for res in objects:\n lid = res[\"id\"]\n ltitle = res[\"title\"]\n log.info(f'Found id:{lid} \"{ltitle}\"')\n yield res\n\n if url is None:\n break",
"def _get_all_records(self) -> List[DBModelInstance]:\n return self.model.query.all()",
"def see_all():\n database = get_connection()\n patients_in_db = []\n patient: dict = database.patients.find()\n for p in patient:\n pat = p[\"patient_data\"]\n patients_in_db.append(pat)\n print(patients_in_db)\n return patients_in_db",
"def getAllrecordings():\n\tlistRecords = []\n\twith jsonlines.open('permanRecords.jsonl', mode='r') as reader:\n\t\tfor obj in reader:\n\t\t\t#print(obj['SuspiciousDocName'])\n\t\t\tif \"casesReuse\" in obj.keys():\n\t\t\t\tlistRecords.append([obj[\"suspDoc\"]['suspDocHTML'], obj[\"srcDoc\"]['srcDocHTML'],\n\t\t\t\tstr(obj[\"casesReuse\"][\"recordingType\"]), obj[\"casesReuse\"][\"obfuscation\"]])\n\t\t\telif \"casesPlag\" in obj.keys():\n\t\t\t\tlistRecords.append([obj[\"suspDoc\"]['suspDocHTML'], obj[\"srcDoc\"]['srcDocHTML'],\n\t\t\t\tstr(obj[\"casesPlag\"][\"recordingType\"]), obj[\"casesPlag\"][\"obfuscation\"]])\n\treturn listRecords",
"def show_all_records(request, patient_id):\n if (request.user.patient_username.id != patient_id):\n Logs.objects.create(type='READ', user_id=request.user.uid, interface='PATIENT', status=STATUS_ERROR, details='[Show All Records] Logged in user does not match ID in URL. URL ID: ' + str(patient_id))\n return redirect('/patient/login/')\n\n patient = patient_does_not_exists(patient_id)\n\n # Get all records from Readings, TimeSeries, Documents, Images, and Videos\n readings = Readings.objects.filter(patient_id=patient)\n timeseries = TimeSeries.objects.filter(patient_id=patient)\n documents = Documents.objects.filter(patient_id=patient).exclude(type='Healthcare Professional Note')\n images = Images.objects.filter(patient_id=patient)\n videos = Videos.objects.filter(patient_id=patient)\n\n results = list(chain(readings, timeseries, documents, images, videos))\n\n Logs.objects.create(type='READ', user_id=patient.username.uid, interface='PATIENT', status=STATUS_OK, details='Show All Records')\n\n context = {\n 'patient': patient,\n 'results': results\n }\n\n return render(request, 'show_all_records.html', context)",
"def records(self):\n return self.db_data['records']",
"def get(self):\n with open_session() as session:\n try:\n records = session.query(Patient).all()\n except NoResultFound:\n logger.info(\"No record found\") # TODO: remove debugging\n return gen_response(\"No result found\")\n except Exception as error:\n logger.exception(\"Exeption: %s\" % (str(error)))\n return gen_response(\"Internal server error\")\n\n # Build the response list\n rlist = [to_dict(record) for record in records]\n return gen_response(rlist)",
"def get_all_records(self, all_dates=False):\n query = ('SELECT * FROM data_record_view '\n 'WHERE NOT %(all_dates)s OR \"Date\" = CURRENT_DATE '\n 'ORDER BY \"Date\", \"Time\", \"Lab\", \"Plot\"')\n return self.query(query, {'all_dates': all_dates})",
"def get(self):\n\n response = getAllKekRecords(request)\n return response, 200",
"def get_records(self):\n logging.debug('Return all records in table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n self._cursor.execute(\"\"\"SELECT * FROM {}\"\"\".format(self._name))\n rows = self._cursor.fetchall()\n\n records = []\n for r in rows:\n record = {'date': r['date'],\n 'time': r['time'],\n 'location': r['location'],\n 'nodeID': r['nodeID']}\n logging.info('{}|{}|{}|{}'.format(r['date'],r['time'],r['location'],r['nodeID']))\n records.append(record)\n\n return records",
"async def get(self):\r\n try:\r\n query = System.select().execute()\r\n table = []\r\n for facility in query:\r\n table.append(facility)\r\n return web.Response(body=str(table), status=200)\r\n except Exception as ex:\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(error_message), status=404)",
"def lifedata_hpi():\n\n # A constant that defines the record fields that we wish to retrieve.\n FIELDS = {\n '_id': False, 'Country': True, 'CountryCode': True, 'Life Expectancy': True, 'Well-being(0-10)': True, 'Happy Life Years': True, 'Happy Planet Index': True,\n 'Population': True, 'GDP/capita': True, 'Governance Rank(1 - highest gov.)': True\n }\n\n # Open a connection to MongoDB using a with statement such that the\n # connection will be closed as soon as we exit the with statement\n with MongoClient(MONGODB_HOST, MONGODB_PORT) as conn:\n # Define which collection we wish to access\n collection = conn[DBS_NAME][COLLECTION_NAME]\n # Retrieve a result set only with the fields defined in FIELDS\n # and limit the the results to 55000\n projects = collection.find(projection=FIELDS, limit=55000)\n # Convert projects to a list in a JSON object and return the JSON data\n return json.dumps(list(projects))",
"def get(self, *, recording_id):\n\n response = openvidu().get_recording(recording_id)\n\n if response.status_code == 200:\n return response.json()\n elif response.status_code == 404:\n abort(NotFound, query=f\"Recording `{recording_id}` does not exist\")\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n abort(response)",
"def retrieve_resource_records(self):\n log('Retrieving records for {}'.format(self.domain))\n current_records = self._api_connection('dnsListRecords')\n for current_resource_record in current_records.iter('resource_record'):\n self.current_records.append(\n dict(\n (resource_record.tag, resource_record.text)\n for resource_record\n in current_resource_record.iter()\n )\n )\n log('{} records retrieved for {}'.format(len(self.current_records), self.domain))\n log(self.current_records)",
"async def _get_records(self):\n if self.shard_iter is None:\n log.debug(\"Shard %s has been closed, exiting\", self.shard_id)\n raise ShardClosedException\n try:\n resp = await self.client.get_records(ShardIterator=self.shard_iter)\n except ClientError as e:\n code = e.response.get('Error', {}).get('Code')\n if code in RETRY_EXCEPTIONS:\n raise RetryGetRecordsException\n else:\n log.error(\"Client error occurred while reading: %s\", e)\n raise ReaderExitException\n else:\n self.shard_iter = resp.get('NextShardIterator')\n self.millis_behind_latest = resp.get('MillisBehindLatest')\n return resp.get('Records')",
"def records(self):\n return self._records",
"def db_show_all():\n the_list = []\n db = sh.open(the_phone_book_name, flag='c', writeback=True)\n for key in db:\n person = Person()\n person.name = key\n person.phone = db[key]\n the_list.append(person)\n display_list(the_list)\n db.close()",
"def read_trackings():\n return analytics.select_rows(\n analytics.trackings_table(),\n 0,\n 3)",
"def get_recordrange(self):\r\n if self.version >= 10.1:\r\n querystr = \"\"\"?where=&outFields=*&returnGeometry=false&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=[{%0D%0A++++\"statisticType\"%3A+\"count\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidcount\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"min\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmin\"%0D%0A++}%2C{%0D%0A++++\"statisticType\"%3A+\"max\"%2C%0D%0A++++\"onStatisticField\"%3A+\"objectid\"%2C+++++\"outStatisticFieldName\"%3A+\"oidmax\"%0D%0A++}]&returnZ=false&returnM=false&returnDistinctValues=false&f=pjson\"\"\"\r\n req = requests.get(self.endpointurl + querystr)\r\n self.recordinfo = req.json()[\"features\"][0][\"attributes\"]\r\n\r\n elif self.version < 10.1:\r\n querystr = \"\"\"?text=&geometry=&geometryType=esriGeometryPoint&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&objectIds=&where=objectid+>+-1&time=&returnCountOnly=true&returnIdsOnly=false&returnGeometry=false&maxAllowableOffset=&outSR=&outFields=&f=pjson\"\"\"\r\n req = requests.get(self.endpontquerystr + qs)\r\n self.recordinfo = {\"oidmin\": 0, \"oidmax\": req.json()[\"count\"]}\r\n\r\n [\r\n self.iterlist.append([x, x + 999])\r\n for x in range(\r\n self.recordinfo[\"oidmin\"]\r\n if self.recordinfo[\"oidmin\"] != self.recordinfo[\"oidmax\"]\r\n else 1 - self.recordinfo[\"oidmin\"],\r\n self.recordinfo[\"oidmax\"],\r\n 1000,\r\n )\r\n ]",
"def sound_recordings(request):\n recordings = SoundRecording.objects.all()\n context = {\n 'recordings': recordings,\n }\n return render(request, 'sound_recordings.html', context)",
"def recordings(self):\r\n return recordings.Recordings(self)",
"def recordings(self):\r\n return recordings.Recordings(self)",
"def records(self):\r\n raise NotImplementedError()",
"def getRecordByUUID(uuid):\n\n url = GEONETWORK.csw_url\n tpl = tplEnv.get_template('geonetwork/get_record_by_uuid_kvp.json')\n kvps = json.loads(tpl.render(uuid=uuid))\n\n try:\n gnosresp = requests.get(url, params=kvps, auth=auth(GEONETWORK))\n except requests.exceptions.ConnectionError:\n return None\n\n try:\n parsedresp = xmltodict.parse(gnosresp.content)\n records = json.dumps(parsedresp)\n return records\n except Exception:\n return None",
"def get_all(self):\n url = self._dbname + '/_all'\n return self._connection.get(url).json()",
"def read_all_records(self, view=None):\n\n if view is None:\n view = DataBreachRecordService.data_breach_records_view()\n path = '/api/data-breach-record/find'\n provider = self._scrolling_request(path,\n method='POST',\n body=view)\n return DSPaginationGroupingIterator(provider, DataBreachRecord)"
] | [
"0.6345349",
"0.59497505",
"0.5949022",
"0.5938353",
"0.5870377",
"0.5860587",
"0.5783985",
"0.5765751",
"0.57480633",
"0.5731185",
"0.56760776",
"0.5669972",
"0.56515956",
"0.5623034",
"0.5570114",
"0.55670875",
"0.55388093",
"0.5510589",
"0.5506595",
"0.5493533",
"0.54669464",
"0.5466714",
"0.5428358",
"0.5421688",
"0.5414714",
"0.5414714",
"0.5413654",
"0.54132336",
"0.5393943",
"0.53933036"
] | 0.7766611 | 0 |
Retrieve a Recording from OpenVidu Server. Only available if OpenVidu is enabled. | def get(self, *, recording_id):
response = openvidu().get_recording(recording_id)
if response.status_code == 200:
return response.json()
elif response.status_code == 404:
abort(NotFound, query=f"Recording `{recording_id}` does not exist")
elif response.status_code == 501:
abort(NotImplemented, query="OpenVidu Server recording module is disabled")
abort(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self):\n\n response = openvidu().list_recordings()\n\n if response.status_code == 200:\n return response.json()[\"items\"]\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n abort(response)",
"def get(self, *, recording_id):\n\n response = openvidu().get_recording(recording_id)\n\n if response.status_code == 200:\n recording = response.json()\n elif response.status_code == 404:\n abort(NotFound, query=f\"Recording `{recording_id}` does not exist\")\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n else:\n abort(response)\n\n if recording[\"url\"] is None:\n abort(\n Conflict,\n query=\"The recording has not finished\",\n )\n request = openvidu().request.get(recording[\"url\"], stream=True)\n\n # Response does not accept `headers=request.headers` so we create them ourself\n headers = {}\n for header in request.headers:\n headers[header] = request.headers[header]\n\n return Response(\n stream_with_context(request.iter_content(chunk_size=2048)),\n headers=headers,\n )",
"def handle_recording():\n\n recording_url = request.values.get(\"RecordingUrl\", None)\n\n resp = VoiceResponse()\n resp.say(\"Listen to your recorded message.\")\n resp.play(recording_url)\n resp.say(\"Goodbye.\")\n return str(resp)",
"def GetRecording(self, id):\n\n return self.__GetJson(\"/updates/\"+str(id)+\"/recording\", False)",
"def getRecordByUUID(uuid):\n\n try:\n url = GEONETWORK.csw_url\n tpl = tplEnv.get_template('geonetwork/get_record_by_uuid_kvp.json')\n kvps = json.loads(tpl.render(uuid=uuid))\n except Exception as e:\n log.error('Could not set needed variable')\n log.error(e)\n return None\n\n try:\n gnosresp = requests.get(url, params=kvps, auth=auth(GEONETWORK))\n return gnosresp.content\n except requests.exceptions.ConnectionError:\n log.error('Could not connect to gnos')\n return None",
"async def fetch_recording(meeting_id: str) -> dict:\n return await zoom_service.zoom.get_recording(meeting_id)",
"def recording(self, sid):\r\n return recordings.Recording(self, sid)",
"def getRecordByUUID(uuid):\n\n url = GEONETWORK.csw_url\n tpl = tplEnv.get_template('geonetwork/get_record_by_uuid_kvp.json')\n kvps = json.loads(tpl.render(uuid=uuid))\n\n try:\n gnosresp = requests.get(url, params=kvps, auth=auth(GEONETWORK))\n except requests.exceptions.ConnectionError:\n return None\n\n try:\n parsedresp = xmltodict.parse(gnosresp.content)\n records = json.dumps(parsedresp)\n return records\n except Exception:\n return None",
"def voice(request):\n call_sid = None\n call_from = None\n if request.method == \"POST\":\n call_sid = request.POST.get('CallSid', None)\n call_from = request.POST.get('From', None)\n elif request.method == \"GET\":\n call_sid = request.GET.get('CallSid', None)\n call_from = request.GET.get('From', None)\n if call_from:\n call_detail = CallDetail(call_sid=call_sid, call_from=call_from)\n call_detail.save()\n resp = VoiceResponse()\n resp.play('http://roelofvandijk.com/mp33/IVR/PNGK-whereAreYouFrom.mp3')\n resp.record(method='GET', max_length=5, action='/VoiceTwo', timeout=15)\n # resp.play('http://roelofvandijk.com/mp33/IVR/Thank-You-IVR.mp3')\n return HttpResponse(str(resp))",
"def record_audio():\n voiceObj = voice_rec()\n text = voiceObj.start() \n return text",
"def recording_data(self):\n return self._get('recording/data')",
"def get_arxiv_record(report_nr):\n arxiv_base_url = \"http://export.arxiv.org/oai2\"\n params = {\n \"verb\": \"GetRecord\",\n \"identifier\": \"oai:arXiv.org:{}\".format(report_nr),\n \"metadataPrefix\": \"arXiv\",\n }\n url = furl(arxiv_base_url).add(params).url\n print(\"Querying the arXiv API, report_nr \" + report_nr)\n import time; time.sleep(5)\n arxiv_response = requests.get(url)\n if not arxiv_response.ok:\n # FIXME: there could be an exception\n return None\n\n return arxiv_response.content",
"def handle_recording():\n \n recording_url = request.values.get(\"RecordingUrl\", None)\n \n resp = twilio.twiml.Response()\n resp.say(\"Thanks for howling... take a listen to what you howled.\")\n resp.play(recording_url)\n resp.say(\"Goodbye.\")\n return str(resp)",
"def voice_two(request):\n call_sid = None\n recording_url = None\n\n if request.method == \"POST\":\n call_sid = request.POST.get('CallSid', '')\n recording_url = request.POST.get('RecordingUrl', None)\n if request.method == \"GET\":\n call_sid = request.GET.get('CallSid', '')\n recording_url = request.GET.get('RecordingUrl', None)\n if recording_url:\n call_detail = CallDetail.objects.get(call_sid=call_sid)\n call_detail.country_name = recording_url\n call_detail.save()\n resp = VoiceResponse()\n resp.play('http://roelofvandijk.com/mp33/IVR/ThanksComments.mp3')\n resp.record(method='GET', max_length=10, action='/VoiceThree', timeout=15)\n return HttpResponse(str(resp))",
"def post(self, *, recording_id):\n\n response = openvidu().stop_recording(recording_id)\n\n if response.status_code == 200:\n data = response.json()\n room = (\n current_app.session.query(Room)\n .filter_by(openvidu_session_id=data[\"sessionId\"])\n .one_or_none()\n )\n Log.add(\"recording_stopped\", data=data, room=room)\n return data\n elif response.status_code == 404:\n abort(NotFound, query=f\"Recording `{recording_id}` does not exist\")\n elif response.status_code == 406:\n abort(\n NotAcceptable,\n query=\"Recording has starting status. Wait until started status before stopping the recording\",\n )\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n abort(response)",
"def get_record(oid):\n\n username = _authenticate_admin_from_session(request)\n\n if username:\n\n # execute raw MongoDB query and return the record with the specified oid.\n recs = Metadata.objects.get_or_404(pk=oid)\n return jsonify(dict(results=recs))\n\n else:\n return Response('Bad or missing session id.', status=401)",
"def capture_recording(self, data={}):\n pass",
"def isRecording(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALVideoRecorder\")\n return self.proxy.isRecording()",
"def post(self, args, *, session_id):\n\n response = openvidu().start_recording(session_id, json=args)\n\n if response.status_code == 200:\n data = response.json()\n room = (\n current_app.session.query(Room)\n .filter_by(openvidu_session_id=data[\"sessionId\"])\n .one_or_none()\n )\n Log.add(\"recording_started\", data=data, room=room)\n return data\n elif response.status_code == 400:\n abort(UnprocessableEntity, json=response.json().get(\"message\"))\n elif response.status_code == 422:\n if not args[\"hasAudio\"] and not args[\"hasVideo\"]:\n err = \"Either `has_audio` or `has_video` must not be false\"\n abort(UnprocessableEntity, json=dict(has_video=err, has_audio=err))\n abort(response)\n elif response.status_code == 404:\n abort(NotFound, query=f\"Session `{session_id}` does not exist\")\n elif response.status_code == 406:\n abort(NotAcceptable, query=\"The session has no connected participants\")\n elif response.status_code == 409:\n abort(\n Conflict,\n query=\"The session is not configured for using MediaMode ROUTED or it is already being recorded\",\n )\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n abort(response)",
"def get(self):\n\n response = openvidu().config()\n if response.status_code != 200:\n abort(response)\n config = response.json()\n return dict(\n version=config[\"VERSION\"],\n domain_or_public_ip=config[\"DOMAIN_OR_PUBLIC_IP\"],\n https_port=config[\"HTTPS_PORT\"],\n public_url=config[\"OPENVIDU_PUBLICURL\"],\n cdr=config[\"OPENVIDU_CDR\"],\n streams=dict(\n videoMinSendBandwidth=config[\n \"OPENVIDU_STREAMS_VIDEO_MIN_SEND_BANDWIDTH\"\n ],\n videoMaxSendBandwidth=config[\n \"OPENVIDU_STREAMS_VIDEO_MAX_SEND_BANDWIDTH\"\n ],\n videoMinRecvBandwidth=config[\n \"OPENVIDU_STREAMS_VIDEO_MIN_RECV_BANDWIDTH\"\n ],\n videoMaxRecvBandwidth=config[\n \"OPENVIDU_STREAMS_VIDEO_MAX_RECV_BANDWIDTH\"\n ],\n ),\n sessions=dict(\n garbage_interval=config[\"OPENVIDU_SESSIONS_GARBAGE_INTERVAL\"],\n garbage_threshold=config[\"OPENVIDU_SESSIONS_GARBAGE_THRESHOLD\"],\n ),\n recording=dict(\n version=config[\"OPENVIDU_RECORDING_VERSION\"],\n path=config[\"OPENVIDU_RECORDING_PATH\"],\n public_access=config[\"OPENVIDU_RECORDING_PUBLIC_ACCESS\"],\n notification=config[\"OPENVIDU_RECORDING_NOTIFICATION\"],\n custom_layout=config[\"OPENVIDU_RECORDING_CUSTOM_LAYOUT\"],\n autostop_timeout=config[\"OPENVIDU_RECORDING_AUTOSTOP_TIMEOUT\"],\n )\n if config[\"OPENVIDU_RECORDING\"]\n else None,\n webhook=dict(\n endpoint=config[\"OPENVIDU_WEBHOOK_ENDPOINT\"],\n headers=config[\"OPENVIDU_WEBHOOK_HEADERS\"],\n events=config[\"OPENVIDU_WEBHOOK_EVENTS\"],\n )\n if config[\"OPENVIDU_WEBHOOK\"]\n else None,\n )",
"def get_record(self, id: uplink.Path):\n pass",
"def recording(self) -> bool:\n\t\treturn self._raw_result['data']['recording']",
"def recording_status(self):\n return self._get('recording/status')",
"def view_recorder(recorder_name):\n enc = sys.getfilesystemencoding()\n # get recorder by name\n recorder = get_recorder_by_name(recorder_name)\n recorder_dict = recorder_view_data(recorder, width=800, height=600)\n content = {\n \"charset\" : enc,\n \"title\" : f\"Camera: {recorder_name}\",\n \"recorder_name\": recorder_name,\n }\n return render_template('recorder.html', content=content, recorder=recorder_dict)",
"def handle_recording():\n \n count = increment_count()\n\n recording_url = request.form.get(\"RecordingUrl\", None)\n call_sid = request.form.get('CallSid', None)\n\n print \"handle-recording. url: \" + str( recording_url )\n\n from_number = phones[call_sid]\n print \"from_number: \" + str( from_number )\n \n if from_number:\n sample_id = from_number\n else:\n sample_id = call_sid\n\n filename = sample_id + \".mp3\"\n\n rec_file = \"static/\" + filename;\n print \"rec file: \" + str( rec_file )\n\n if recording_url:\n urllib.urlretrieve( recording_url, rec_file )\n samples[call_sid] = url_for('static', filename=filename)\n\n resp = twilio.twiml.Response()\n resp.say(\"Thanks for shouting.\")\n # resp.play(recording_url)\n\n push_to_pusher(\"twilio\", str(from_number), str(sample_id), str(samples[call_sid]) )\n\n resp.say(\"Check the app for your shout.\")\n\n resp.say(\"Goodbye...\")\n\n return str(resp)",
"def get(id, username=None, password=None):\n elink = requests.get(url + '2416api?osti_id=' + str(id),\n auth=(username, password))\n\n if elink.status_code == 200:\n xml = ET.fromstring(elink.content)\n\n return etree_to_dict(xml)['records']\n elif elink.status_code == 403:\n raise ForbiddenException('User does not have access to this record.')\n elif elink.status_code == 404:\n raise NotFoundException('Record is not on file.')\n else:\n raise ServerException('Unknown HTTP status code: ' + str(elink.status_code))",
"def play(self, request):\n return self.get_recorder().playback(request)",
"def sound_recording(request, id):\n recording = get_object_or_404(SoundRecording, id=id)\n context = {\n 'recording': recording,\n }\n return render(request, 'sound_recording.html', context)",
"def recording(bool): #py:recording\n RUR._recording_(bool)",
"async def get_recording_mode(secspy, camera_id):\n _LOGGER.info(\"GET RECORDING MODE:\")\n\n result = await secspy.get_recording_mode(camera_id)\n for row in result:\n _LOGGER.info(f\"C: {row.mode_always} - M: {row.mode_motion} - A: {row.mode_action} - R: {row.is_recording} \")"
] | [
"0.7211836",
"0.72032636",
"0.6159613",
"0.6069545",
"0.5994706",
"0.5908121",
"0.58630157",
"0.5737009",
"0.57243073",
"0.56887484",
"0.5688493",
"0.5659468",
"0.56548274",
"0.55429417",
"0.54919446",
"0.5488744",
"0.54677695",
"0.5453038",
"0.5440263",
"0.54098064",
"0.53806597",
"0.5318399",
"0.5260827",
"0.52481335",
"0.5236049",
"0.51673096",
"0.51425964",
"0.5135891",
"0.5112209",
"0.5107899"
] | 0.7719126 | 0 |
Delete a Recording. This will delete all of the recording files from disk. Only available if OpenVidu is enabled. | def delete(self, *, recording_id):
response = openvidu().delete_recording(recording_id)
if response.status_code == 204:
return
elif response.status_code == 404:
abort(NotFound, query=f"Recording `{recording_id}` does not exist")
elif response.status_code == 409:
abort(
Conflict,
query="The recording has started status. Stop it before deletion",
)
elif response.status_code == 501:
abort(NotImplemented, query="OpenVidu Server recording module is disabled")
abort(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_record_file(self, record_file, logStat):\n result = self.storage_delete_file(record_file.group, record_file.storage)\n if result:\n logStat(deleted=True, file_obj=record_file)\n record_file.delete()\n return result",
"def __del__(self): \n self.stop_record_microphone()\n \n if not self.save_audio_file:\n self.__clear_audio_files()",
"def delete_exam_recording(exam_recording_id):\n try:\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n if examiner:\n exam_recording = ExamRecording.query.get(exam_recording_id)\n if exam_recording:\n db.session.delete(exam_recording)\n db.session.commit()\n return jsonify(exam_recording.to_dict()), 200\n return jsonify({'message':'Exam recording with id {} could not be found'.format(exam_recording_id)}), 404\n return jsonify({'user_id': user_id, 'message': ['access denied, not examiner']}), 403\n except exc.SQLAlchemyError as e:\n db.session.rollback()\n return jsonify({ 'message': e.args }), 500\n except Exception as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 500",
"def delete_record_files(self, record, logStat):\n from corrdb.common.models import FileModel\n final_result = True\n for _file_id in record.resources:\n _file = FileModel.objects.with_id(_file_id)\n result = self.delete_record_file(_file, logStat)\n if not result:\n final_result = result\n return final_result",
"def delete_records(self, records_to_delete):\n for record in records_to_delete:\n self.records.remove(record)\n self._store_writer.remove_img_file(record)\n\n self._process_change()",
"def delete(self, filename):\n pass",
"def delete(self):\n os.remove(self.file_path)\n super(VideoFile, self).delete()",
"def storage_delete_report_file(self, report_pk):\n self._get_queryset(pk=report_pk).delete()",
"def delete(self):\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_delete_v2, self.path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n pdbox.info(\"Deleted %s\" % self.uri)",
"def _clear_audio_files(self):\n try:\n shutil.rmtree(self.audio_file_folder)\n except:\n print('Failure to clear audio files in {self.audio_file_folder}')",
"def on_recording_remove(self, action, value=None):\n if show_dialog(DialogType.QUESTION, self._app._main_window) != Gtk.ResponseType.OK:\n return\n\n rows = self._recordings_list_box.get_selected_rows()\n if rows:\n settings = self._app._settings\n\n with UtfFTP(host=settings.host, user=settings.user, passwd=settings.password) as ftp:\n ftp.encoding = \"utf-8\"\n for r in rows:\n resp = ftp.delete_file(r.file)\n if resp.startswith(\"2\"):\n GLib.idle_add(self._recordings_list_box.remove, r)\n else:\n show_dialog(DialogType.ERROR, transient=self._app._main_window, text=resp)\n break",
"def delete_record(self, record_id):\r\n self.record.deleteObject(id=record_id)",
"def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass",
"def stop_recording(self):\n\n\t\tself.eyetribe.stop_recording()\n\t\tself.recording = False",
"def delete(self):\n\t\t#self.log.info(\"Deleting file {}\".format(self._filepath))\n\t\tos.remove(self._filepath)",
"def stop_recording(self):\n\n if self.recorder_thread is not None:\n self.recorder_thread.stop()\n self.recorder_thread = None",
"def delete(self, filename):\n raise NotImplementedError",
"def delete_record(records):\n delete_record()",
"def stop_recording(self):\n self.timer.stop()\n self.camera.release()",
"def delete(self):\n\n try:\n remove(self.file)\n except OSError:\n pass",
"def delete(self, remote):\n self.target.ttbd_iface_call(\"store\", \"file\", method = \"DELETE\",\n file_path = remote)",
"def stopRecording(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALVideoRecorder\")\n return self.proxy.stopRecording()",
"def predio_delete(sender, instance, **kwargs):\n instance.dataFile.delete(False)",
"def delete(self, store, uuid):\n\n session = get_session()\n session.begin()\n\n stored_file = self._retrieve(store.object_type, uuid)\n\n try:\n session.delete(stored_file)\n session.commit()\n finally:\n session.close()",
"def delete_file(sender, instance, **kwargs):\n if bool(instance.exam_file): # check if exam file exists\n try:\n instance.exam_file.delete()\n except OSError:\n pass\n # if exam file has already been deleted, then do nothing and continue\n # with deleting the exam model",
"def stop_recording(self, *args, **kwargs):\n return self.recorder.stop_recording(*args, **kwargs)",
"def delete_file(file_id):\n file_obj = Data.objects.get(id=file_id)\n print(\"Removing file: \", file_obj.name)\n print(file_obj.file.path)\n file_dir = file_obj.file.path\n os.remove(file_dir)\n print(\"Done.\")",
"async def delete_file(location_id: LocationID, file_id: StorageFileID, user_id: UserID):",
"def delete(self, *args, **kwargs):\n self.file.storage.delete(self.file.name)\n super().delete(*args, **kwargs)",
"def delete_db(self):\n import os.path\n os.remove(self.filepath)"
] | [
"0.65803087",
"0.65568966",
"0.6367536",
"0.6111408",
"0.60413176",
"0.60305023",
"0.6022",
"0.60196805",
"0.60104823",
"0.5969963",
"0.5963714",
"0.59381646",
"0.59376913",
"0.59030396",
"0.5889724",
"0.5853442",
"0.58431613",
"0.5831818",
"0.58182937",
"0.58172864",
"0.57728624",
"0.5772404",
"0.5755739",
"0.5751938",
"0.5751257",
"0.571596",
"0.5712009",
"0.5706719",
"0.567874",
"0.5668161"
] | 0.78824013 | 0 |
Download a Recording from OpenVidu Server. Only available if OpenVidu is enabled. | def get(self, *, recording_id):
response = openvidu().get_recording(recording_id)
if response.status_code == 200:
recording = response.json()
elif response.status_code == 404:
abort(NotFound, query=f"Recording `{recording_id}` does not exist")
elif response.status_code == 501:
abort(NotImplemented, query="OpenVidu Server recording module is disabled")
else:
abort(response)
if recording["url"] is None:
abort(
Conflict,
query="The recording has not finished",
)
request = openvidu().request.get(recording["url"], stream=True)
# Response does not accept `headers=request.headers` so we create them ourselves
headers = {}
for header in request.headers:
headers[header] = request.headers[header]
return Response(
stream_with_context(request.iter_content(chunk_size=2048)),
headers=headers,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_ostrich_video(download_to_path):\n urlretrieve(REMOTE_OSTRICH_VID_PATH, download_to_path)",
"def download_skateline_video(download_to_path=None):\n urlretrieve(REMOTE_SKATELINE_VID_PATH, download_to_path)",
"def get(self, *, recording_id):\n\n response = openvidu().get_recording(recording_id)\n\n if response.status_code == 200:\n return response.json()\n elif response.status_code == 404:\n abort(NotFound, query=f\"Recording `{recording_id}` does not exist\")\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n abort(response)",
"def downloadvideo(filename):\n url = \"http://openings.moe/video/\" + filename\n f = getfile(url)\n safeprint(Colors.PURPLE + url + Colors.END + \":\\nSaving to --> \" + Colors.YELLOW + filename + Colors.END)\n with open(os.path.basename(url), \"wb\") as local_file:\n try:\n local_file.write(f.read())\n except IOError as e:\n safeprint(\"An error occurred while saving the file, try again. \" + str(e))",
"def download(self, release: DoabRelease, **kwargs):\n # Download release\n release.download()",
"def download(track_id, ext):\n\n if ext != 'mp3':\n return Response('', status=404)\n\n track = models.Track.query.get(track_id)\n if track is None:\n abort(404)\n\n track_file = open(track.get_path(), 'r')\n filename_header = (\n 'Content-Disposition', 'attachment; filename=\"%s.mp3\"' % track.title\n )\n\n return Response(response=track_file.read(), mimetype='audio/mpeg',\n headers=[filename_header])",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download():\n return response.download(request,db)",
"def download(s, snap):\n\n id = snap['id']\n name = snap['sender']\n ts = str(snap['sent']).replace(':', '-')\n\n result = s.get_media(id)\n\n if not result:\n return False\n\n ext = s.is_media(result)\n filename = '{}+{}+{}.{}'.format(ts, name, id, ext)\n path = PATH + filename\n with open(path, 'wb') as fout:\n fout.write(result)\n return True",
"def download():\n raise NotImplementedError",
"def download():\n\treturn response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)",
"def download():\n return response.download(request, db)"
] | [
"0.660001",
"0.63986474",
"0.6218075",
"0.59306",
"0.5901923",
"0.5869856",
"0.5765283",
"0.5765283",
"0.5765283",
"0.5765283",
"0.5765283",
"0.5765283",
"0.5758184",
"0.5756224",
"0.57226616",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057",
"0.57060057"
] | 0.6501813 | 1 |
Start the recording of a session. Only available if OpenVidu is enabled. | def post(self, args, *, session_id):
response = openvidu().start_recording(session_id, json=args)
if response.status_code == 200:
data = response.json()
room = (
current_app.session.query(Room)
.filter_by(openvidu_session_id=data["sessionId"])
.one_or_none()
)
Log.add("recording_started", data=data, room=room)
return data
elif response.status_code == 400:
abort(UnprocessableEntity, json=response.json().get("message"))
elif response.status_code == 422:
if not args["hasAudio"] and not args["hasVideo"]:
err = "Either `has_audio` or `has_video` must not be false"
abort(UnprocessableEntity, json=dict(has_video=err, has_audio=err))
abort(response)
elif response.status_code == 404:
abort(NotFound, query=f"Session `{session_id}` does not exist")
elif response.status_code == 406:
abort(NotAcceptable, query="The session has no connected participants")
elif response.status_code == 409:
abort(
Conflict,
query="The session is not configured for using MediaMode ROUTED or it is already being recorded",
)
elif response.status_code == 501:
abort(NotImplemented, query="OpenVidu Server recording module is disabled")
abort(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_recording(self):\n\n\t\tself.eyetribe.start_recording()\n\t\tself.recording = True",
"def recording_start(self):\n self._post('recording/start')",
"def start_recording(self, *args, **kwargs):\n return self.recorder.start_recording(*args, **kwargs)",
"def start_recording(self):\n\n self.stop_recording()\n self.recorder_thread = RecorderThread()\n self.recorder_thread.start()",
"def start(self):\n self.recording = True",
"def start_recording(self):\n self.start_recording_and_saving_data(self.eeg_file_path)",
"def start_recording(self):\n self.flag_event.set()\n self.statusBar().showMessage('Starting the Recording')\n startThread = threading.Thread(name='record', target=self.record)\n startThread.start()\n self.statusBar().showMessage('Recording')",
"def start_recording(VD, CD):\n \n if VD['inference_process'] == None:\n VD['terminal_output'] += \"Cannot record and infer, no inference process is active.\\n\"\n else:\n create_folders(CD)\n VD['record_and_infer'] = True # Indicates that recording and inference is active\n VD['terminal_output'] += \"\\nRecording speech and inferring text ...\\n\"",
"def start_recording(self) -> None:\n # Clear the internal ring buffer.\n self._buffer.fill(0)\n\n # Start recording using sounddevice's InputStream.\n self._stream.start()",
"def start_record_microphone(self):\n if not os.path.exists(self.audio_file_folder):\n os.makedirs(self.audio_file_folder)\n\n self.microphone_handler.start_recording()\n self.current_session.put(self.microphone_handler.current_session)",
"def startRecording2(self, folderPath, fileName, overwrite):\n if not self.proxy:\n self.proxy = self.session.service(\"ALVideoRecorder\")\n return self.proxy.startRecording(folderPath, fileName, overwrite)",
"def startRecording(self, folderPath, fileName):\n if not self.proxy:\n self.proxy = self.session.service(\"ALVideoRecorder\")\n return self.proxy.startRecording(folderPath, fileName)",
"async def start_session(self):\n\t\t...",
"def testStartRecordAlreadyRecording(self):\n self.mgr.isRecording = True\n self.mgr.captureMode = CAPTURE_MODE_VIDEO\n self.mgr.handleRecordCommand( CAPTURE_MODE_VIDEO, RECORD_COMMAND_START )\n self.assertTrue(self.mgr.isRecording)\n assert not self.mgr.sendGoProCommand.called\n assert not self.mgr.sendState.called",
"def start(self):\n\t\tself.create_app_folders()\n\n\t\tself._logger.info('Starting the recording.')\n\n\t\tself._video_thread = Thread(target=self._video_manager.run)\n\t\tself._audio_thread = Thread(target=self._audio_manager.run)\n\t\tself._input_thread = Thread(target=self._run)\n\t\tself._video_thread.start()\n\t\tself._audio_thread.start()\n\t\tself.is_running = True\n\t\tself._input_thread.start()",
"def startSession(self):\n if(self.verb >= DLS_VERB_HIGH):\n print \"--Starting session with %s (no action)\" % (self.server)",
"def _device_start_capture(self):\n\n # TODO: we may want to provide an option to flush the SDRam buffer here before capture stops?\n self._start_capture_to_ram()\n self._start_streaming_ram_to_host()",
"def start_recording(self, capture_duration):\n self.camera = cv2.VideoCapture(0)\n\n # Create timer to enforce how long the camera records for\n self.start_time = time.time()\n self.capture_duration = capture_duration\n\n self.timer.start(0, self)",
"def __macroStartRecording(self):\n self.activeWindow().macroRecordingStart()",
"def start_single_record(self):\r\n self.autoRecordWidget.set_recording(True)\r\n self.autoRecordWidget.set_display_message()\r\n self.autoRecordWidget.start_timer(self.timeUntilEnd)\r\n if self.controller.record_talk_id(self.singleID):\r\n log.debug(\"Auto-recording for the current talk started.\")\r\n self.recorded = True\r\n self.beforeEndTimer.setInterval((self.timeUntilEnd + 1) * 1000)\r\n self.beforeEndTimer.setSingleShot(True)\r\n self.beforeEndTimer.start()",
"def StartRecording( self ):\r\n\r\n self._socket.write( 'B' ) \r\n \r\n return self.GetServerResponse()",
"def start_record(cr):\r\n \"\"\"Emulate the keyboard \"\"\"\r\n _player = input_playback.InputPlayback()\r\n _player.emulate(input_type='keyboard')\r\n _player.find_connected_inputs()\r\n \"\"\"To get list of UI elements\"\"\"\r\n ui = ui_utils.UI_Handler()\r\n ui.start_ui_root(cr)\r\n list=ui.get_name_role_list()\r\n \"\"\"To Open status tray and click on Screen Recording option\"\"\"\r\n logging.info(\"Opening status tray\")\r\n ui.doDefault_on_obj(STATUS_TRAY_REGEXP, True, role='button')\r\n time.sleep(WAIT)\r\n ui.doDefault_on_obj('/Close/i', True, role='button')\r\n ui.doDefault_on_obj('/Screen capture/i', True, role='button')\r\n ui.doDefault_on_obj('/Screen record/i', True,role='toggleButton')\r\n ui.doDefault_on_obj('/Record full screen/i', True,role='toggleButton')\r\n _player.blocking_playback_of_default_file(input_type='keyboard', filename='keyboard_enter')\r\n \"\"\"To open Chrome Page\"\"\"\r\n _player.blocking_playback_of_default_file(input_type='keyboard', filename='keyboard_ctrl+t')\r\n time.sleep(WAIT)\r\n logging.info(\"Recording Started\")\r\n return ui",
"def __start_session(self):\n sdk_version = ConfigHelper.get_sdk_version()\n\n logging.info(f\"SDK version: {sdk_version}\")\n\n self._request_session_from_agent()\n\n AgentClient.__agent_version = self._agent_response.agent_version\n\n self._agent_session = AgentSession(\n self._agent_response.server_address,\n self._agent_response.session_id,\n self._agent_response.dialect,\n self._agent_response.capabilities,\n )\n\n SocketManager.instance().open_socket(\n urlparse(self._remote_address).hostname,\n self._agent_response.dev_socket_port,\n )\n\n logging.info(\"Development session started...\")",
"def start_loop_recording(self, track):\n pass",
"async def recording_mode(secspy, camera_id, recording_mode):\n _LOGGER.info(\"SET RECORDING MODE:\")\n\n result = await secspy.set_recording_mode(camera_id, recording_mode)\n _LOGGER.info(result)",
"def StartRecordEnv(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def recording(bool): #py:recording\n RUR._recording_(bool)",
"def start_offline_recording(self, live=True):\n threading.Thread(target=self._record).start()",
"def trigger_recording_started(_):\n log_threadsafe(obs.LOG_DEBUG, 'Recording started')\n \n global state\n with mutex_state_sending:\n state = int(time.time())\n pipe_send_state()",
"def testStartRecord(self):\n self.mgr.handleRecordCommand( CAPTURE_MODE_VIDEO, RECORD_COMMAND_START )\n self.mgr.sendGoProCommand.assert_called_with(mavutil.mavlink.GOPRO_COMMAND_SHUTTER, (1, 0, 0, 0))"
] | [
"0.69981426",
"0.69059336",
"0.6835243",
"0.67142385",
"0.6643458",
"0.6552146",
"0.6403572",
"0.6286545",
"0.62625855",
"0.62147516",
"0.61870366",
"0.6178485",
"0.6161698",
"0.6149195",
"0.61226386",
"0.6110635",
"0.600413",
"0.5947124",
"0.5946636",
"0.59133965",
"0.59057724",
"0.5881868",
"0.5834944",
"0.5818174",
"0.5807457",
"0.57821065",
"0.57600766",
"0.5755672",
"0.56979394",
"0.5652275"
] | 0.69072986 | 1 |
Start asynchronous input processing | def start(self):
self.has_event = False
self.running = True
self._condition.acquire()
self._thread = threading.Thread(target=read_input, args=(self,))
self._thread.start() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def async_process_input(self, inp: inputs.Input) -> None:\n raise NotImplementedError()",
"def process_input(self, inp: inputs.Input) -> None:\n self.task_registry.create_task(self.async_process_input(inp))",
"def run(self) -> None:\n\n while True:\n try:\n input_element = self.input_queue.get_nowait()\n self.process(input_element)\n except Empty:\n return",
"async def __bufferedReader():\n while True:\n # Get char and then append to prevent a race condition caused by the async await\n charIn = await __terminalState.osSupport.getInputChar()\n\n wasHandled = False\n for key, handlers in __terminalState.inputHandlers.items():\n if key is None or charIn in key:\n for handler in handlers:\n asyncio.get_event_loop().call_soon(handler, charIn)\n wasHandled = True\n\n if not wasHandled:\n __terminalState.inputBuffer += charIn",
"async def async_start(self, event: Event) -> None:\n await self._async_scan_serial()",
"async def _async_scan(self) -> None:\n for callback in self._request_callbacks:\n callback()\n await self._async_scan_serial()",
"def wait() -> None:\n\n process_input(input())",
"def listen(self):\n self.processor_thread = Thread(target = self.event_loop, name=\"InputThread-\"+str(self.thread_index), args=(self.thread_index, ))\n self.thread_index += 1\n self.processor_thread.daemon = True\n self.processor_thread.start()",
"def run(self):\n while True:\n line = self.stream.readline()\n if not len(line):\n # EOF, stop!\n break\n else:\n # Put the text on the queue, along with the time it was read.\n self.callback_queue.put(line)",
"def _input_callback(self, _, blocks: numpy.array) -> None:\n try:\n self._output_queue.put_nowait(blocks)\n except queue.Full:\n self._output_queue.get_nowait()\n self._output_queue.put_nowait(blocks)",
"def start(self):\n\n\t\twhile True:\n\t\t\tinputReady, outputReady, exceptReady = select.select(\n\t\t\t\t[self.s],\n\t\t\t\t[],\n\t\t\t\t[],\n\t\t\t\t3\n\t\t\t)\n\n\t\t\t# Ready for receiving\n\t\t\tif len(inputReady) > 0 and inputReady[0] == self.s:\n\t\t\t\t# Read lines until input buffer is empty\n\t\t\t\tfor line in self.receiveLines():\n\t\t\t\t\tif len(line) > 0:\n\t\t\t\t\t\tprint(line)\n\n\t\t\t\t\tself.handle(line)\n\n\t\t\t# Only send if there is something to send\n\t\t\tif not self.outQueue.empty():\n\t\t\t\tm = self.outQueue.get_nowait()\n\n\t\t\t\tprint(\"Sending '{}'\".format(m.rstrip(\"\\r\\n\")))\n\t\t\t\tself.s.send(bytes(m, \"utf-8\"))\n\t\t\t\tself.outQueue.task_done()",
"def start_processing(self):",
"def stdin_thread(self):\n while True:\n if not self.is_running():\n time.sleep(0.1)\n continue\n msg = self._stdin_queue.get()\n if msg is None:\n break # Ask to stop\n self._say(msg)",
"async def start(self):\n\n while True:\n try:\n data = await self.reader.read(8192)\n\n if self._trace_enabled:\n self._logger.trace(\n \"Received %d bytes from remote server:\\n%s\",\n len(data),\n msg.dump(data),\n )\n await self.process(data)\n except asyncio.CancelledError:\n return\n except:\n logging.exception(\"Unhandled error in Message Reader\")\n raise",
"def wait_for_input(self):\n pass",
"def run(self):\n def target():\n # Pass these inputs to STDIN with delays\n for i in self.delayed_inputs:\n if type(i) is int or type(i) is float:\n time.sleep(i)\n elif type(i) is bytes:\n try:\n self.process.stdin.write(i) \n except IOError as e:\n lg.info(\n \"Input: {} failed to write to stdin due to\\n{}\".format(i, e)\n )\n break\n if self.disable_communicate:\n self.process.wait()\n else:\n self.stdout_res, self.stderr_res = self.process.communicate(\n input=self.inputs)\n\n try:\n self.process = Popen(self.command, stdin=self.stdin,\n stdout=self.stdout, stderr=self.stderr,\n start_new_session=True, cwd=self.cwd, env=self.env)\n except OSError:\n lg.error(\"Couldn't Popen command {}\".format(self.command))\n raise\n self.thread = Thread(target=target)\n self.thread.start()",
"def run(self):\n self.read_from_serial()",
"def run(self):\n while True:\n job_id, job_service, = self.input_queue.get()\n if job_id is None:\n break\n # Process the job\n with self.app.app_context():\n self.process(job_id, job_service)\n self.input_queue.task_done()\n time.sleep(1)\n # Done\n self.input_queue.task_done()\n return",
"def startLoop():\n patchAsyncio()",
"def run(self):\r\n while True:\r\n try:\r\n processor, iprot, oprot, otrans, callback = self.queue.get()\r\n if processor is None:\r\n break\r\n processor.process(iprot, oprot)\r\n callback(True, otrans.getvalue())\r\n except Exception:\r\n logging.exception(\"Exception while processing request\")\r\n callback(False, '')",
"async def setup(self) -> None:\n if self.args.sync:\n self._processing = threading.Thread(target=self.start_events_sync)\n self._processing.daemon = True\n self._processing.start()\n else:\n self._processing = asyncio.ensure_future(self.start_events_async())",
"def run(self):\n while True:\n try:\n data = self._read()\n except IOError:\n break\n\n if len(data) == 0:\n self.finalize(\"Connection closed.\")\n break\n\n gevent.spawn(self.process_data, data)",
"def run(self):\n while True:\n try:\n data = self._read()\n except IOError:\n break\n\n if len(data) == 0:\n self.finalize(\"Connection closed.\")\n break\n\n gevent.spawn(self.process_data, data)",
"async def start(self) -> None:",
"def _read_thread(proc, ready_event):\n ready = False\n while True:\n line = proc.stdout.readline()\n if not line:\n break\n\n if output_lines is not None:\n output_lines.append(line)\n\n if not ready and indicator in line:\n ready = True\n ready_event.set()",
"def listen(self):\n while self.active:\n self.handle_input()",
"async def start(self):",
"async def start(self):",
"def Start(self):\n for unused_i in range(0, self.args.message_count):\n self.CallClient(\n standard.ReadBuffer, offset=0, length=100, next_state=\"Process\")",
"def process_input(self):\n print('--- IxaMedTagger: processing input ---')\n start_time = time.time()\n os.system('java -jar {0} {1}'.format(self.__ixamedtagger, self.__input_formatted_filepath))\n end_time = time.time() - start_time\n print('--- {} seconds ---'.format(end_time))"
] | [
"0.71012735",
"0.6623622",
"0.652197",
"0.6454346",
"0.64008456",
"0.6354719",
"0.634934",
"0.63402873",
"0.6291486",
"0.6217243",
"0.62050235",
"0.62028325",
"0.6196018",
"0.6141272",
"0.6123779",
"0.6099275",
"0.6010271",
"0.59452224",
"0.59437335",
"0.5911982",
"0.5861372",
"0.5855286",
"0.5855286",
"0.58346426",
"0.5834131",
"0.5828085",
"0.5813871",
"0.5813871",
"0.5810775",
"0.5801137"
] | 0.6959562 | 1 |
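A minimal illustrative sketch of the pattern in the record above (a `start` method that hands input reading off to a background thread). The class and method names here are hypothetical stand-ins, not the dataset's original module:

```python
import queue
import threading
import time

class InputReader:
    """Minimal background reader: a worker thread feeds events into a queue."""

    def __init__(self):
        self.running = False
        self.events = queue.Queue()
        self._thread = None

    def _poll_source(self):
        # Placeholder for real input polling (e.g. sys.stdin or a socket).
        return None

    def _read_loop(self):
        while self.running:
            line = self._poll_source()
            if line is None:
                time.sleep(0.05)  # avoid a hot spin when nothing is available
            else:
                self.events.put(line)

    def start(self):
        self.running = True
        self._thread = threading.Thread(target=self._read_loop, daemon=True)
        self._thread.start()

    def stop(self):
        self.running = False
        self._thread.join()

reader = InputReader()
reader.start()
reader.stop()
```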
Execute callback action for an incoming event | def callback(self, event):
button = event["button"]
cmd = self._callbacks.get(self._uuidstr(self.global_id, button), None)
cmd = self._callbacks.get(self._uuidstr(event["name"], button), cmd)
cmd = self._callbacks.get(self._uuidstr(event["instance"], button), cmd)
if cmd is None:
return
if callable(cmd):
cmd(event)
else:
bumblebee.util.execute(cmd, False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def event_in_cb(self, msg):\n self.event = msg.data",
"def onEvent(self, eventName, callBack):\n self.msg_namespace.on('ops', callBack)",
"def perform_callback(self, *args, **kwargs):\n pass",
"def onAction(*args):",
"def onAction(*args):",
"def onAction(*args):",
"def onAction(*args):",
"def process_event(self, event):\r\n pass",
"def runEventCallbacks(self, event, *args):\n\n if not event in self.EVENT_TYPES:\n raise Exception(\"XnatIo (onEvent): invalid event type '%s'\"%(\\\n event))\n if not hasattr(self, 'eventCallbacks__'):\n print('self has no attribute eventCallbacks__')\n return\n\n for callback in self.eventCallbacks__[event]:\n #print(f\"EVENT CALLBACK {event}\")\n callback(*args)",
"def invoke(self, event_args, *args, **kwargs):\n pass # pragma: no cover",
"def _notify_callback_event(self, event, data):\n # Notify callbacks\n for cback in self.callbacks:\n cback.invoke(self, event, data)",
"def dispatchEvent(self, event):\n # See if any callbacks have been registered for this event type:\n if event.event_type in self.callbacks:\n # Yes, at least one has been registered. Call them in order:\n for callback in self.callbacks[event.event_type]:\n # Call the function with the event as an argument:\n callback(event)",
"def event_queue_proc(self,event):\r\n event()",
"def _emited(self, *args):\n\t\tdebug(\"OnEventDeferred : event catched\")\n\t\tself.callback(*args)\n\t\tself._clean()",
"def callback(self, *args):\n raise NotImplementedError()",
"def __call__(self, args, kwargs):\n callback = self._callback_ref()\n if callback is not None:\n callback(*args, **kwargs)",
"def on_action(self, connection, event):\r\n print('[{}] OnAction from {}' .format(event.type.upper(), event.source))",
"def callevent_handler(data):\n return CallEventHandler(data)",
"def event_receive(self,event):\n\n pass",
"def handler(event, context):\n if event['Records'][0]['Sns']['Message'] is None:\n _print_info('Unrecognized event, function will not be executed. Enable debug to log the actual event.')\n _print_debug('event: {}'.format(event))\n return\n\n message = event['Records'][0]['Sns']['Message']\n _print_debug('message received: {}'.format(message))\n\n event = json.loads(message)\n _print_info('event: {}'.format(json.dumps(event)))\n\n if event[ACTION] in ALLOWED_ACTIONS:\n\n _print_info('Requested action: {}'.format(event[ACTION]))\n\n _print_info('Initializing.')\n _init_vars_()\n\n # create a hive cursor which can be passed around and then closed when done.\n cursor = _create_hive_cursor()\n\n if event[ACTION] == FULL_SYNC:\n _sync_all(cursor)\n if event[ACTION] == DELTA_SYNC:\n if event[USER] and event[NAMESPACE]:\n _sync_delta(cursor, event[USER], event[NAMESPACE])\n else:\n _print_error(\n 'Invalid request. Expecting both: a valid \\'{}\\' and a valid \\'{}\\''.format(\n USER, NAMESPACE))\n\n # close the hive cursor when done\n _close_hive_cursor(cursor)\n else:\n _print_error(\n 'Unknown action. Expecting one of: \\'{}\\', \\'{}\\''.format(FULL_SYNC,\n DELTA_SYNC))",
"def fire_event(self, event = None):\n for e in self.events:\n if e[\"event\"] == event:\n if type(e[\"args\"]) == type([]):\n e[\"callback\"](*e[\"args\"])\n elif type(e[\"args\"]) == type({}):\n e[\"callback\"](**e[\"args\"])\n elif e[\"args\"] == None:\n e[\"callback\"]()\n else:\n e[\"callback\"](e[\"args\"])\n return True",
"def eventInCallback(self, msg):\n rospy.loginfo(\"event_in msg received\")\n self.event_in = msg.data",
"def dispatch(self, sender, event, *args, **kwargs):\n pass # pragma: no cover",
"def callback(self):\n pass # pragma: no cover",
"def lambda_handler(event, context):\n return dispatch(event)",
"def process(self, event):\n pass",
"def trigger(self, callback_type, *args):\n if self.callbacks.has_key(callback_type):\n for cb in self.callbacks[callback_type]:\n cb(*args)",
"def _callback(self, channel, callback):\n # warte eine zufaellige Zeit\n sleep(randint(5, 10))\n # falls das Programm noch laueft,\n # der channel noch nicht gecleart wurde\n # und der Channel noch nicht entfernt wurde\n # (zum Beispiel durch einen Wechsel des Channels zu einem Ausgang)\n # rufe die Callbackfunktion auf\n if self.mode is not None and channel in self.channels and channel in self.events:\n callback(channel)",
"def handle_event(self, event_type, event):\n\n leg = self.create_update_leg(event)\n\n waiting = leg._waiting.get(event_type)\n if waiting:\n while True:\n try:\n f = waiting.pop()\n f.set_result(event)\n except IndexError:\n break\n else:\n f = getattr(self, f\"on_{event_type.replace('.', '_')}\",\n getattr(self, 'on_unknown', None))\n if f:\n asyncio.ensure_future(f(event, leg))\n else:\n raise Exception(f'no callback: {event}')",
"def kbaction_callback(self, kb_event):\n evtype = kb_event.event_type\n keyname = kb_event.name\n self.lastmesg = db2_movement_convert(evtype=evtype, kname=keyname)\n if self.lastmesg is None:\n return\n self.sock.send(self.lastmesg.encode())"
] | [
"0.72822505",
"0.70127434",
"0.700589",
"0.6768786",
"0.6768786",
"0.6768786",
"0.6768786",
"0.6697053",
"0.6596345",
"0.65904605",
"0.6590431",
"0.6574243",
"0.6534539",
"0.65215206",
"0.64999133",
"0.6488703",
"0.6477715",
"0.6464448",
"0.646269",
"0.6445889",
"0.64355564",
"0.6409472",
"0.6409058",
"0.6395859",
"0.6374339",
"0.6368126",
"0.6350005",
"0.63492763",
"0.63466305",
"0.6311342"
] | 0.72343135 | 1 |
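A small sketch of the callback-dispatch idea shown in the record above: resolve an incoming event to a registered action and either call it or run it as a shell command. The registry API below is an illustrative assumption, not the original module's interface:

```python
import subprocess

class CallbackRegistry:
    def __init__(self):
        self._callbacks = {}

    def register(self, key, action):
        # `action` may be a Python callable or a command string.
        self._callbacks[key] = action

    def dispatch(self, event):
        action = self._callbacks.get(event.get("name"))
        if action is None:
            return
        if callable(action):
            action(event)
        else:
            subprocess.Popen(action, shell=True)

registry = CallbackRegistry()
registry.register("volume", lambda ev: print("clicked button", ev["button"]))
registry.dispatch({"name": "volume", "button": 1})
```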
Return the sum of the square of the digits of a number, e.g. 32 -> 13. | def square_digit_sum(number):
return sum(precomputed_digit_squares[digit] for digit in str(number)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sum_squares(num):\n sum = 0\n while (num != 0):\n sum += math.pow((num % 10), 2)\n num = num/10\n return int(sum)",
"def sum_of_digits_in_number(n: int) -> int:\n return sum(int(digit) for digit in str(n))",
"def sum_digits(n):\n num = n\n incTen = 1\n summy = 0\n if num > 10:\n while incTen * 10 < num:\n incTen = incTen * 10\n while incTen >= 10:\n summy += num // incTen\n num = num % incTen\n incTen = incTen // 10\n summy += num\n return summy\n elif num == 10:\n return 1\n else:\n return num",
"def square_digits(num):\n nums = ''\n for n in str(num):\n nums += str(int(n)* int(n))\n return int(nums)",
"def get_sum_of_digits(number):\n return sum(int(digit) for digit in str(number))",
"def sum_of_digits(n):\n return sum(int(c) for c in str(n))",
"def sum_digits(n):\n digits = [int(i) for i in str(n)]\n return sum(digits)",
"def digit_sum(n):\n s = 0\n while n:\n s += n % 10\n n //= 10\n return s",
"def calculate_digits_sum(number: int) -> int:\n\n # Return value\n ret = 0\n\n while number != 0:\n # Extract the last digit number and add it to ret\n ret += number % 10\n\n # Delete the last digit of the number\n number //= 10\n\n return ret",
"def sum_digits(n):\n sum = 0\n while n > 0:\n num = n % 10\n sum += num\n n //= 10\n return sum",
"def sum_of_digits(number):\n # Students: While you are welcome to try to understand this\n # function definition, all you have to do is trust\n # that the green doc-string is correct (it is!).\n if number < 0:\n number = -number\n\n digit_sum = 0\n while True:\n if number == 0:\n break\n digit = number % 10 # Get the digit\n digit_sum = digit_sum + digit # Accumulate it into the sum\n number = number // 10 # Get ready for the next digit\n\n return digit_sum",
"def calculate(x: int) -> int:\n\n digits = list(map(int, list(str(x))))\n return sum(list(map(lambda a: a**2, digits)))",
"def digitSum ( n ) :\n return sum ( map ( int , str ( n ) ) )",
"def digit_sum(x):\n s = 0\n while x>0:\n s = s+(x%10)\n x = x//10\n\n return s",
"def sum_of_digits(n):\n rest_of_num, last_num = split(n)\n if rest_of_num < 10:\n \treturn last_num + rest_of_num\n return last_num + sum_of_digits(rest_of_num)",
"def squareOfSum(num):\n return sum(range(1, num + 1)) ** 2",
"def sum_of_digits(number):\n \n if number < 0:\n number = -number\n \n digit_sum = 0\n while True:\n if number == 0:\n break\n digit = number % 10 # Get the digit\n digit_sum = digit_sum + digit # Accumulate it into the sum\n number = number // 10 # Get ready for the next digit\n \n return digit_sum",
"def digit_sum(n):\n\treturn sum(int(c) for c in str(n))",
"def Sum_Numbers_x_Power_Digits(x):\n totalSum = 0 \n for i in xrange(10, 999999):\n if i == sum([int(j)**x for j in str(i)]):\n totalSum += i\n return totalSum",
"def sum_digits(n):\n if (n < 10):\n return n\n else:\n all_but_last, last = split(n)\n return sum_digits(all_but_last) + last",
"def sumOfSquares(num):\n sum = 0\n for i in range(1, num + 1):\n sum += i ** 2\n return sum",
"def sum_digits(n):\n \"*** YOUR CODE HERE ***\"\n count=0\n length=len(str(n))\n last=0\n sum=0\n while count<length:\n last=n%10\n n//=10\n sum+=last\n count+=1\n return sum",
"def sumDigit():",
"def digital_sum(n):\n r = 0\n while n:\n r, n = r + n % 10, n // 10\n return r",
"def digit_sum(n):\n sum_of_digits = 0\n for c in str(n):\n sum_of_digits += int(c)\n return sum_of_digits",
"def sum_of_digit_powers_in_number(n: int, power: int) -> int:\n return sum(int(digit) ** power for digit in str(n))",
"def square_of_sum(n):\n return ((n * (n+1)) / 2)**2",
"def factorial_digit_sum(n):\n sum = 0\n factStr = str(factorial(n))\n for digit in factStr:\n sum += int(digit)\n return sum",
"def count_digits(num):\n total = 0\n while num is not 0:\n total += num % 10\n num //= 10\n return total",
"def sum_of_squares(n):\n return (n * (n+1) * (2*n + 1)) / 6"
] | [
"0.8314024",
"0.7841442",
"0.77029717",
"0.7654653",
"0.7648175",
"0.76243734",
"0.7467898",
"0.74651736",
"0.74583864",
"0.74521124",
"0.74133635",
"0.7342469",
"0.73378193",
"0.73087245",
"0.72862816",
"0.7282983",
"0.7277899",
"0.7254383",
"0.7231389",
"0.7196259",
"0.7186361",
"0.7134909",
"0.70007545",
"0.70002997",
"0.6987232",
"0.6982078",
"0.6843299",
"0.673299",
"0.6723736",
"0.6703411"
] | 0.8486469 | 0 |
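For reference, a straightforward standalone version of the digit-square sum described by the query above (32 -> 3^2 + 2^2 = 13), written without the precomputed lookup table used in the retrieved document:

```python
def square_digit_sum(number: int) -> int:
    # Sum the squares of the decimal digits of |number|.
    return sum(int(d) ** 2 for d in str(abs(number)))

assert square_digit_sum(32) == 13
assert square_digit_sum(145) == 42
```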
Scales the input features of each intermediate layer in order of increasing stride. | def forward(self, x):
# The scaled feature maps for each interim layer
scaled_features = OrderedDict()
for (_, module), (layer_name, features) in zip(self.adaptor.items(), x.items()):
patches_per_axis = int(sqrt(features.shape[1]))
# Reshape to 2d and reorder dimensions to traditional convolution dims. (B, C, H, W)
features_2d = features.reshape(features.shape[0], patches_per_axis, patches_per_axis, features.shape[2]) \
.permute(0, 3, 1, 2)
scaled_features[layer_name] = module(features_2d)
return scaled_features | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _scale_features(self, features):\n assert isinstance(features, np.ndarray), \"Input is not a numpy array!\"\n\n return self.scaler.transform(features.reshape(1, -1))",
"def scale(step_size: float) -> GradientTransformation:\n return _scale(step_size=step_size, already_flattened=False)",
"def scale(self, scale_factor: float) -> None:\n self.tensor[:, :3] *= scale_factor",
"def ScaleParameters(net, indToScale, delta_vector):\n i = 0\n for index, param in enumerate(net.parameters()):\n if index in indToScale:\n l1Norm = torch.sum(torch.abs(param.data), dim =1)\n rows, _ = param.data.size()\n with torch.no_grad():\n for j in range(int(rows)):\n param.data[j,:] *= delta_vector[i]/l1Norm[j]\n i += 1",
"def scale(self, factor):\n for a in self.symbol_attributes:\n a.scale(factor)",
"def forward(self, x):\n x = x.permute(0, 3, 1, 2) # [batch_size*num_slots, features, height, width]\n x = self.dec(x)\n return x",
"def forward(self, x):\n x = x.permute(0, 3, 1, 2) # [batch_size*num_slots, features, height, width]\n x = self.dec(x)\n return x",
"def scale(self, factors):\n if isinstance(factors, numbers.Number):\n factors = np.ones(self.dim) * factors;\n self.raw_wires.scale(factors);",
"def scale_in(self, count):\n pass",
"def rescale(self):\n # Get the L1 norm of data and scale correction for each fiber\n data_dims = self.data_dims\n if data_dims is ():\n tens_scale = self.data.abs()\n else:\n tens_scale = torch.sum(self.data.abs(), dim=data_dims, keepdim=True)\n log_shift = torch.floor(TARGET_SCALE(self.shape, data_dims) - \n torch.log2(tens_scale))\n\n # Keep the scale for zero fibers unchanged\n if torch.any(torch.isinf(log_shift)):\n log_shift = torch.where(torch.isfinite(log_shift), log_shift,\n torch.zeros_like(log_shift))\n\n return STensor(self.data*(2**log_shift), \n self.scale-log_shift)",
"def _fold_to_scale(conv_wrapper: QcQuantizeWrapper, bn_wrapper: QcQuantizeWrapper):\n # pylint: disable=protected-access, too-many-locals, too-many-branches, bad-whitespace, too-many-statements\n conv = conv_wrapper._module_to_wrap\n bn = bn_wrapper._module_to_wrap\n\n weight_quantizer = conv_wrapper.param_quantizers[\"weight\"]\n\n if not isinstance(weight_quantizer, LearnedGridTensorQuantizer):\n raise _BatchNormFoldingNotSupported(\n \"BatchNorm folding to scale supports LearnedGridTensorQuantizer only; \"\n f\"got {type(weight_quantizer)}.\"\n )\n\n output_quantizer = conv_wrapper.output_quantizers[0]\n\n if output_quantizer.enabled:\n raise _BatchNormFoldingNotSupported(\n \"BatchNorm should belong to the same supergroup with the layer to be folded to.\"\n )\n\n if \"bias\" in conv_wrapper.param_quantizers:\n bias_quantizer = conv_wrapper.param_quantizers[\"bias\"]\n if bias_quantizer.enabled:\n raise _BatchNormFoldingNotSupported(\n \"Can't fold BatchNorm to scale if bias quantizer is enabled.\"\n )\n\n encodings = weight_quantizer.encoding\n\n if encodings is None:\n raise RuntimeError\n\n if isinstance(encodings, libpymo.TfEncoding):\n encodings = [encodings]\n\n if isinstance(conv, _ConvTransposeNd) and conv.groups != 1:\n raise _BatchNormFoldingNotSupported(\n \"BatchNorm folding to scale is not supported for grouped ConvTransposeNd.\"\n )\n\n # Add quantization noise to the BN params (bn weight & bn bias) before folding.\n # NOTE: Quantization of foldable batchnorms is automatically disabled when\n # initializing quantsim. However, it is still safer to call _quantize_params here\n # as we can't guarantee this is always the case.\n # For example, the user can manually enable quantization of batchnorms, etc...\n # (FYI: _quantize_params takes effect only when the parameter quantizers are enabled)\n with bn_wrapper._quantize_params():\n _fold_to_weight(conv, bn, fold_backward=True)\n\n gamma = bn.weight\n sigma = torch.sqrt(bn.running_var + bn.eps)\n\n new_encodings = []\n for old_encoding, c in zip(encodings, gamma/sigma):\n new_encoding = libpymo.TfEncoding()\n new_encoding.delta = old_encoding.delta * abs(c)\n if c >= 0:\n new_encoding.max = old_encoding.max * c\n new_encoding.min = old_encoding.min * c\n else:\n new_encoding.max = old_encoding.min * c\n new_encoding.min = old_encoding.max * c\n new_encoding.offset = old_encoding.offset\n new_encoding.bw = old_encoding.bw\n new_encodings.append(new_encoding)\n\n weight_quantizer.encoding = new_encodings\n\n # Copy batchnorm's output quantizers to conv output quantizers\n for conv_output_quantizer, bn_output_quantizer in\\\n zip(conv_wrapper.output_quantizers, bn_wrapper.output_quantizers):\n conv_output_quantizer.enabled = bn_output_quantizer.enabled\n\n if bn_output_quantizer.encoding is not None:\n encoding = libpymo.TfEncoding()\n encoding.delta = bn_output_quantizer.encoding.delta\n encoding.max = bn_output_quantizer.encoding.max\n encoding.min = bn_output_quantizer.encoding.min\n encoding.offset = bn_output_quantizer.encoding.offset\n encoding.bw = bn_output_quantizer.encoding.bw\n conv_output_quantizer.encoding = encoding\n\n bn_output_quantizer.enabled = False\n\n if \"bias\" not in conv_wrapper.param_quantizers:\n bias_quantizer = LearnedGridTensorQuantizer(weight_quantizer.bitwidth,\n weight_quantizer.round_mode,\n weight_quantizer.quant_scheme,\n weight_quantizer.use_symmetric_encodings,\n enabled_by_default=False,\n data_type=weight_quantizer.data_type)\n bias_quantizer._ch_axis = weight_quantizer._ch_axis\n 
conv_wrapper.param_quantizers[\"bias\"] = bias_quantizer",
"def scale_dataset(ds):\n for i in range(0,ds.dims):\n fmax = ds.data[0][i]\n for j in range(1,len(ds)):\n curr = ds.data[j][i]\n if curr > fmax:\n fmax = curr \n if fmax > 0:\n for j in range(0,len(ds)):\n ds.data[j][i] /= fmax",
"def scaling():\n \n for i in range(cfg.nfea):\n dm = 0\n var = 0\n for j in range(cfg.ntrain):\n dm += cfg.a[j,i]\n dm = dm/cfg.ntrain\n \n for j in range(cfg.ntrain):\n var += (cfg.a[j,i]-dm)**2\n\n var = var/cfg.ntrain\n var = np.sqrt(var)\n \n if var >= 10**(-5):\n cfg.clin[i] = 1.0/var \n cfg.dlin[i] = -dm/var \n \n else: \n if np.abs(dm)<=1.0:\n cfg.clin[i] = 1.0\n cfg.dlin[i] = 0.0 \n else: \n cfg.clin[i] = 1.0/dm\n cfg.dlin[i] = 0.0 \n \n for j in range(cfg.ntrain):\n cfg.a_scaled[j,i] = cfg.clin[i]*cfg.a[j,i] + cfg.dlin[i]\n \n return",
"def forward(self, xs, ilens, masks):\n if isinstance(self.embed, Conv2dSubsampling):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n xs, _ = self.encoders(xs, masks)\n if self.normalize_before:\n xs = self.after_norm(xs)\n hlens = [xs.size(1) for i in range(xs.size(0))]\n return xs, hlens",
"def _transform_inputs(self, inputs):\n\n if self.input_transform == 'resize_concat':\n inputs = [inputs[i] for i in self.in_index]\n upsampled_inputs = [\n resize(\n input=x,\n size=inputs[0].shape[2:],\n mode='bilinear',\n align_corners=self.align_corners) for x in inputs\n ]\n inputs = torch.cat(upsampled_inputs, dim=1)\n elif self.input_transform == 'multiple_select':\n inputs = [inputs[i] for i in self.in_index]\n else:\n inputs = inputs[self.in_index]\n\n return inputs",
"def __scale(data, max_value_list, min_value_list, scale_value_list, process_cols_list):\n features = np.array(data.features, dtype=float)\n for i in process_cols_list:\n value = features[i]\n if value > max_value_list[i]:\n value = max_value_list[i]\n elif value < min_value_list[i]:\n value = min_value_list[i]\n\n features[i] = (value - min_value_list[i]) / scale_value_list[i]\n _data = copy.deepcopy(data)\n _data.features = features\n return _data",
"def scale(X, *, axis=..., with_mean=..., with_std=..., copy=...):\n ...",
"def scale(data, factor):\n\n if np.ndim(data) != 2: # only process one IV dataset at a time\n raise IndexError('Incorrect data format')\n\n if np.size(data, 0) < np.size(data, 1):\n data = data.T # make sure data is in columns\n\n # match data types for float multiplication/division\n new_data = data.copy().astype(float)\n\n new_data[:, 1] *= factor\n\n return new_data",
"def scale(self, factor):\n self.x *= factor\n self.y *= factor\n for a in self.annotations:\n a.scale(factor)",
"def batches(self, batch_size): \n if self.shuffle:\n idx = np.arange(len(dataset.train_x))\n np.random.shuffle(idx)\n self.train_x = self.train_x[idx]\n \n n_batches = len(self.train_x) // batch_size\n for ii in range(0, len(self.train_x), batch_size):\n x = self.train_x[ii:ii+batch_size]\n \n yield self.scaler(x)",
"def scale(x, feature_range=(-1, 1)):\n \n # scale from 0-1 to feature_range\n min, max = feature_range\n #x = x * (max - min) + min\n #x = torch.add(torch.mul(x, (max-min)), min)\n x = x.mul(max-min).add_(min)\n return x",
"def __call__(self, features: List[List[float]]) -> List[List[float]]:\n self.count += 1\n if self.count <= 1:\n self.feature_fix = features\n self.gmin = np.amin(features, axis=0) \n self.gmax = np.amax(features, axis=0) \n self.gdiff = np.subtract(self.gmax,self.gmin)\n \n features_scale = np.full((np.array(features).shape[0],np.array(features).shape[1]), np.nan)\n for i in range(np.array(features).shape[0]):\n for j in range(np.array(features).shape[1]):\n features_scale[i,j] = (np.array(features)[i,j] - self.gmin[j])/self.gdiff[j]\n \n return features_scale",
"def force_rescale(self,rescaleFactor):\n if not self.built:\n raise Exception(\"model should be built before calling this function\")\n for l in self.layerList:\n l.rescale(rescaleFactor)\n self.rescaleFactor.assign(rescaleFactor)",
"def forward(self, x):\n\n x = F.max_pool2d(F.relu(self.batch_norm1(self.conv1(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm2(self.conv2(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm3_b(self.conv3_b(F.relu(self.batch_norm3_a(self.conv3_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm4_b(self.conv4_b(F.relu(self.batch_norm4_a(self.conv4_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm5_b(self.conv5_b(F.relu(self.batch_norm5_a(self.conv5_a(x)))))), 3, stride=2, padding=1)\n x = self.avg_pool(x).view(-1,512)\n out = self.linear(x)\n\n return out",
"def scaling_tf(X, input_range_type):\n\n if input_range_type == 1:\n # The input data range is [0, 1]. \n # Convert to [0, 255] by multiplying 255\n X = X*255\n elif input_range_type == 2:\n # The input data range is [-0.5, 0.5]. Convert to [0,255] by adding 0.5 element-wise.\n X = (X+0.5)*255\n elif input_range_type == 3:\n # The input data range is [-1, 1]. Convert to [0,1] by x/2+0.5.\n X = (X/2+0.5)*255\n\n # Caution: Resulting in zero gradients.\n # X_uint8 = tf.clip_by_value(tf.rint(X), 0, 255)\n red, green, blue = tf.split(X, 3, 3)\n X_bgr = tf.concat([\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n # TODO: swap 0 and 2. should be 2,1,0 according to Keras' original code.\n ], 3)\n\n # x[:, :, :, 0] -= 103.939\n # x[:, :, :, 1] -= 116.779\n # x[:, :, :, 2] -= 123.68\n return X_bgr",
"def scaling_tf(X, input_range_type):\n\n if input_range_type == 1:\n # The input data range is [0, 1]. \n # Convert to [0, 255] by multiplying 255\n X = X*255\n elif input_range_type == 2:\n # The input data range is [-0.5, 0.5]. Convert to [0,255] by adding 0.5 element-wise.\n X = (X+0.5)*255\n elif input_range_type == 3:\n # The input data range is [-1, 1]. Convert to [0,1] by x/2+0.5.\n X = (X/2+0.5)*255\n\n # Caution: Resulting in zero gradients.\n # X_uint8 = tf.clip_by_value(tf.rint(X), 0, 255)\n red, green, blue = tf.split(X, 3, 3)\n X_bgr = tf.concat([\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n # TODO: swap 0 and 2. should be 2,1,0 according to Keras' original code.\n ], 3)\n\n # x[:, :, :, 0] -= 103.939\n # x[:, :, :, 1] -= 116.779\n # x[:, :, :, 2] -= 123.68\n return X_bgr",
"def scale(x, feature_range=(-1,1)):\r\n x = x * 2 - 1\r\n return x",
"def forward(self, inp):\n outp = []\n x = inp\n\n if self.resize_input:\n x = F.interpolate(x, size=(299, 299), mode=\"bicubic\",)\n\n if self.normalize_input:\n x = denormalize(x)\n x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)\n if torch.cuda.is_available():\n x = x.to(\"cuda\")\n\n for idx, block in enumerate(self.blocks):\n x = block(x)\n if idx in self.output_blocks:\n outp.append(x)\n\n if idx == self.last_needed_block:\n break\n\n return outp",
"def subsample(inputs, factor, scope=None):\n if factor == 1:\n return inputs\n else:\n return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)",
"def _rescale_module(module):\n for sub in module.modules():\n if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)):\n std = sub.weight.std().detach()\n scale = (std / 0.1) ** 0.5\n sub.weight.data /= scale\n if sub.bias is not None:\n sub.bias.data /= scale"
] | [
"0.6069742",
"0.6027195",
"0.59086466",
"0.58238685",
"0.58129877",
"0.58056754",
"0.58056754",
"0.58042914",
"0.57350945",
"0.57300484",
"0.57218236",
"0.57141936",
"0.568602",
"0.5638847",
"0.56379384",
"0.563669",
"0.562825",
"0.5611087",
"0.56021976",
"0.5591613",
"0.55675614",
"0.55359983",
"0.55350524",
"0.55136573",
"0.55001926",
"0.55001926",
"0.54994494",
"0.5488665",
"0.54873323",
"0.5466724"
] | 0.6467156 | 0 |
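A sketch of the reshaping pattern in the record above: patch tokens of shape (batch, num_patches, channels) are folded back into 2D feature maps and passed through a per-layer rescaling module. It assumes PyTorch is available; the layer names, channel sizes, and the choice of a transposed convolution as the scaler are illustrative assumptions:

```python
import math
from collections import OrderedDict

import torch
import torch.nn as nn

class FeatureAdaptor(nn.Module):
    def __init__(self, channels_per_layer):
        super().__init__()
        # One upscaling module per intermediate layer (hypothetical choice).
        self.adaptor = nn.ModuleDict({
            name: nn.ConvTranspose2d(ch, ch, kernel_size=2, stride=2)
            for name, ch in channels_per_layer.items()
        })

    def forward(self, features_by_layer):
        out = OrderedDict()
        for name, feats in features_by_layer.items():   # feats: (B, N, C)
            side = int(math.sqrt(feats.shape[1]))
            # Fold tokens into (B, C, H, W) before the convolution.
            feats_2d = feats.reshape(feats.shape[0], side, side, feats.shape[2]) \
                            .permute(0, 3, 1, 2)
            out[name] = self.adaptor[name](feats_2d)
        return out

adaptor = FeatureAdaptor({"layer1": 768})
maps = adaptor({"layer1": torch.randn(2, 196, 768)})
print(maps["layer1"].shape)  # torch.Size([2, 768, 28, 28])
```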
Extract the module file name and line by inspection. | def filename_line(skip: int = 2) -> Tuple[str, int]:
stack = inspect.stack()
start = skip
parentframe = stack[start][0]
filename = 'N/A'
module = inspect.getmodule(parentframe)
if module:
filename = os.path.basename(os.path.realpath(module.__file__))
return filename, parentframe.f_lineno | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_definition(self, info):\r\n token = info.obj\r\n lines = info.lines\r\n source_code = info.source_code\r\n filename = info.filename\r\n\r\n line_nr = None\r\n if '.' in token:\r\n token = token.split('.')[-1]\r\n\r\n line_nr = get_definition_with_regex(source_code, token,\r\n len(lines))\r\n if line_nr is None:\r\n return\r\n line = info.line\r\n exts = python_like_exts()\r\n if not osp.splitext(filename)[-1] in exts:\r\n return filename, line_nr\r\n if line.startswith('import ') or line.startswith('from '):\r\n alt_path = osp.dirname(filename)\r\n source_file = python_like_mod_finder(line, alt_path=alt_path,\r\n stop_token=token)\r\n if (not source_file or\r\n not osp.splitext(source_file)[-1] in exts):\r\n line_nr = get_definition_with_regex(source_code, token,\r\n line_nr)\r\n return filename, line_nr\r\n mod_name = osp.basename(source_file).split('.')[0]\r\n if mod_name == token or mod_name == '__init__':\r\n return source_file, 1\r\n else:\r\n with open(filename, 'rb') as fid:\r\n code = fid.read()\r\n code = encoding.decode(code)[0]\r\n line_nr = get_definition_with_regex(code, token)\r\n\r\n return filename, line_nr",
"def extract_detail():\r\n tb = sys.exc_info()[-1]\r\n stk = traceback.extract_tb(tb, -1)[0]\r\n return \"{} in {} line num {} on line {} \".format(\r\n stk.name, stk.filename, stk.lineno, stk.line\r\n )",
"def caller_info(self):\n\n frames = traceback.extract_stack()\n frames.reverse()\n try:\n (_, mod_name) = __name__.rsplit('.', 1)\n except ValueError:\n mod_name = __name__\n for (fpath, lnum, _, _) in frames:\n (fname, _) = os.path.basename(fpath).rsplit('.', 1)\n if fname != mod_name:\n break\n\n return (fname, lnum)",
"def lineno():\n linenum = inspect.currentframe().f_back.f_lineno\n frameinfo = inspect.getframeinfo(inspect.currentframe())\n filename = frameinfo.filename\n return str(\"File: \" + str(filename) + \" Line: \" + str(linenum))",
"def trace():\n import traceback, inspect\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n filename = inspect.getfile(inspect.currentframe())\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, filename, synerror",
"def trace():\n import traceback\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, __file__, synerror",
"def __extract_module(log):\n module = \"UNKNOWN\"\n if \"module\" in log:\n module = log[\"module\"]\n elif \"executorName\" in log:\n module = log[\"executorName\"]\n elif \"http_uri\" in log:\n module = Transformer.__extract_module_from_url(log[\"http_uri\"])\n if module == \"UNKNOWN\" and \"header_referer\" in log:\n module = Transformer.__extract_module_from_url(log[\"header_referer\"])\n return module",
"def lineno():\n return \"line \" + str(inspect.currentframe().f_back.f_lineno) + \": \"",
"def get_code_file_and_lines(obj, pyccel_folder, mod_name = None):\n if not pyccel_folder:\n pyccel_folder = os.getcwd()\n\n obj_parts = obj.split('.')\n if mod_name is None:\n idx = len(obj_parts)\n print(pyccel_folder, obj)\n filename = os.path.join(pyccel_folder, '/'.join(obj_parts[:idx])+'.py')\n while idx > 0 and not os.path.isfile(filename):\n idx -= 1\n filename = os.path.join(pyccel_folder, '/'.join(obj_parts[:idx])+'.py')\n assert idx != 0\n mod_name = '.'.join(obj_parts[:idx])\n obj_parts = obj_parts[idx:]\n\n mod = importlib.import_module(mod_name)\n filename = mod.__file__.split('/')\n file = os.path.relpath(mod.__file__, pyccel_folder)\n\n if obj_parts:\n # Get the object\n obj = mod\n for o in obj_parts:\n obj = getattr(obj, o)\n\n # If the object is a class property, get the underlying function\n obj = getattr(obj, 'fget', obj)\n\n source, start_line = inspect.getsourcelines(obj)\n length = len(source)\n return file, start_line, start_line+length-1\n else:\n # Module\n return file, 1, 1",
"def find_actual_caller(self):\n\n # Gleaned from code in the logging module itself...\n try:\n f = sys._getframe(1)\n ##f = inspect.currentframe(1)\n except Exception:\n f = None\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown module)\", \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n mod = inspect.getmodule(f)\n\n if mod is None:\n modname = '__main__'\n else:\n modname = mod.__name__\n\n if modname == __name__:\n # Crawl back until the first frame outside of this module\n f = f.f_back\n continue\n\n rv = (modname, filename, f.f_lineno, co.co_name)\n break\n return rv",
"def _parse_line(line):\n if line.startswith('import '):\n line = line.replace('import ', '')\n return line",
"def info(target):\n co = target.__code__\n return (co.co_name, co.co_filename, co.co_firstlineno)",
"def get_err_source_info(original_traceback=None) -> dict:\n try: # carefully try to get the actual place where the error happened\n if not original_traceback:\n original_traceback = sys.exc_info()[2] # class, exc, traceback\n first_call = traceback.extract_tb(original_traceback)[-1]\n return dict(\n src_module=first_call[0],\n src_linenr=first_call[1],\n src_func=first_call[2],\n src_code=first_call[3],\n )\n except Exception as e:\n current_app.warning(\n \"I was unable to retrieve error source information: %s.\" % str(e)\n )\n return dict(module=\"\", linenr=0, method=\"\", src_code=\"\")",
"def module_info():\n pass",
"def __extract_fullname(self, line):\n if not line.startswith(DOCTEST_DOCUMENT_BEGIN):\n raise ValueError(\"First line of output text should be a line \"\n \"beginning '%s'\" % DOCTEST_DOCUMENT_BEGIN)\n return line.replace(DOCTEST_DOCUMENT_BEGIN, \"\").strip()",
"def extract_function_name():\n tb = sys.exc_info()[-1]\n stk = traceback.extract_tb(tb, 1)\n fname = stk[0][3]\n return fname",
"def _get_source(self, fullmodname):\n submodname, is_package, relpath = self._get_info(fullmodname)\n fullpath = self.path_entry + relpath\n source = self.datablocks[relpath]\n if hasattr(source, \"decode\"):\n source = source.decode(\"UTF-8\")\n source = source.replace('\\r\\n', '\\n')\n source = source.replace('\\r', '\\n')\n return submodname, is_package, fullpath, source",
"def getLineInformation(line):\n \n pass",
"def extract_line_information(line_information):\n file_and_line = line_information.split(\":\")\n # This is a dirty windows specific hack to deal with drive letters in the\n # start of the file-path, i.e D:\\\n if len(file_and_line[0]) == 1:\n # If the first component is just one letter, we did an accidental split\n file_and_line[1] = file_and_line[0] + \":\" + file_and_line[1]\n # Join the first component back up with the second and discard it.\n file_and_line = file_and_line[1:]\n\n if len(file_and_line) != 2 and len(file_and_line) != 3:\n return None\n # The case where we have no line number, in this case we return the line\n # number as 1 to mark the whole file.\n if len(file_and_line) == 2:\n line_num = 1\n if len(file_and_line) == 3:\n try:\n line_num = int(file_and_line[1])\n except ValueError:\n return None\n\n file_name = os.path.relpath(file_and_line[0])\n return file_name, line_num",
"def getsourcelines(object):\n lines, lnum = findsource(object)\n\n if inspect.ismodule(object): return lines, 0\n else: return inspect.getblock(lines[lnum:]), lnum + 1",
"def desc_with_module(self):\n if self.module_path.endswith('.py') \\\n and not isinstance(self.definition, parsing.Module):\n position = '@%s' % (self.line_nr)\n else:\n # is a builtin or module\n position = ''\n return \"%s:%s%s\" % (self.module_name, self.description, position)",
"def parse_mod_entry(self, line):\n split_line = line.split(' ')\n enable = split_line.pop(0)\n path = split_line.pop()\n version = split_line.pop()\n name = split_line.pop()\n return enable, name, version, path",
"def findCaller(self):\n frames = inspect.stack()\n thisfile = os.path.normcase(frames[0][1])\n for frame in frames:\n filename = os.path.normcase(frame[1])\n if filename != thisfile and filename != logging._srcfile:\n major, minor, micro, _, _ = sys.version_info\n if (major, minor, micro) >= (2, 4, 2):\n return filename, frame[2], frame[3]\n else:\n return filename, frame[2]",
"def get_line(cls, frame, sys_context=None):\n\t\tcode = cls._dispatch_frame(frame)\n\n\t\tif not code: \n\t\t\treturn ''\n\t\t\n\t\treturn code.splitlines()[frame.f_lineno]",
"def get_source_line (self):\n\n # pylint: disable=no-member\n if 'meta' in self:\n return os.path.join (self.meta.path, self.meta.filename), self.meta.lineno\n if self.parent:\n return self.parent.get_source_line ()\n return '<unknown>', 0",
"def _get_relevant_line(self):\n # () -> (Phi.Line)\n line_name = self._get_line_name()\n print(\"looking for \"+str(line_name))\n return Phi.findLine(line_name)",
"def _path_and_line(self):\n path, line = (re.match(r'-r (.*) \\(line (\\d+)\\)$',\n self._req.comes_from).groups())\n return path, int(line)",
"def lineno():\n return str(' - Principal - line number: '+str(inspect.currentframe().f_back.f_lineno))",
"def __call__(self, line):\n marker = self.marker\n stripped_line = line.strip()\n if marker == stripped_line:\n assert not self.traceback_section\n self.traceback_section = True\n # print(\"XXX: TRACEBACK-START\")\n elif self.traceback_section:\n matched = self.file_pattern.match(line)\n if matched:\n # matched_range = matched.regs[1]\n filename = matched.groups()[0]\n new_filename = posixpath_normpath(filename)\n if new_filename != filename:\n # print(\"XXX: %r => %r\" % (filename, new_filename))\n line = line.replace(filename, new_filename)\n elif not stripped_line or line[0].isalpha():\n # -- DETECTED TRCAEBACK-END: exception-description\n # print(\"XXX: TRACEBACK-END\")\n self.traceback_section = False\n return line",
"def modulename():\n from inspect import getmodulename,getfile\n return getmodulename(getfile(lambda x:x))"
] | [
"0.6546095",
"0.64204025",
"0.6286522",
"0.6074252",
"0.604478",
"0.59372175",
"0.59232354",
"0.58754164",
"0.58697623",
"0.5837855",
"0.57866216",
"0.57653844",
"0.5745258",
"0.574496",
"0.5744206",
"0.5693252",
"0.5683909",
"0.56837696",
"0.5660498",
"0.5617504",
"0.56070703",
"0.5598407",
"0.55950177",
"0.5582164",
"0.5560064",
"0.54903984",
"0.548391",
"0.5483442",
"0.5475263",
"0.5472497"
] | 0.65381515 | 1 |
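A compact sketch of the stack-inspection technique from the record above, locating the caller's file name and line number. The helper names and the `skip` convention are illustrative, not the original API:

```python
import inspect
import os

def caller_location(skip: int = 1):
    # Walk `skip` frames up the stack and report that frame's file and line.
    frame = inspect.stack()[skip][0]
    module = inspect.getmodule(frame)
    path = getattr(module, "__file__", None) if module else None
    filename = os.path.basename(path) if path else "N/A"
    return filename, frame.f_lineno

def some_function():
    # skip=2: report the location of whoever called some_function
    return caller_location(skip=2)

print(some_function())
```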
Return generator over check results. | def check_results(self):
while True:
# If no checks left, stop
if len(self._check_results) == 0:
break
# Return earliest result and remove from list
yield self._check_results.pop(0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iterate(self): # pragma: no mccabe\n for case_result in self.suite_result.passed:\n for scenario_result in case_result.passed:\n yield scenario_result, True, True\n for scenario_result in case_result.failed:\n yield scenario_result, False, True # pragma: no cover\n\n for case_result in self.suite_result.failed:\n for scenario_result in case_result.passed:\n yield scenario_result, True, False # pragma: no cover\n for scenario_result in case_result.failed:\n yield scenario_result, False, False",
"def run(self) -> Generator[CheckResult, None, None]:\n for visitor_class in self._visitors:\n visiter = visitor_class()\n visiter.visit(self.tree)\n\n for error in visiter.errors:\n lineno, col_offset, message = error.node_items()\n yield lineno, col_offset, message, type(self)",
"def get_results():\n result = self._recv_result() # blocks\n del self._tasks_in_progress[result.task_id]\n del self._task_results_waiting[result.task_id]\n yield result.value",
"def all_results(self):\n res = [(True, result) for result in self.successes]\n res.extend([(False, result) for result in self.failures])\n return res",
"def run(self):\n checks = [\n self.check_files_permissions,\n self.check_suid_bin,\n self.check_nfs_root_squashing,\n self.is_docker_installed,\n self.check_sudo_rules,\n self.get_possible_exploit,\n ]\n\n for check in checks:\n yield check()",
"def run(self):\n validator_predicates = {\n validator_predicate\n for constraint_block in self._validator_predicates_deriver()\n for validator_predicate in constraint_block\n }\n for node in ast.walk(self.__ast):\n if all([validator_predicate(\n node=node, knowledge=self.__knowledge_template\n ) for validator_predicate in validator_predicates]):\n try:\n yield Result(\n name=self.__name,\n line=node.lineno, column=node.col_offset\n )\n except AttributeError:\n pass",
"def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x",
"def process_results_iter(self):\n while self.results: # iteratively process each cue in cues\n msgs = bytearray()\n r = self.results.popleft()\n log.info(\"%s got result: %s\\n\", self.name, r)\n msgs.extend(r[\"result\"])\n yield r[\"label\"], msgs",
"def get_result(self):\n check_result_list = []\n for check in self.monitoring_checks:\n try:\n result = check.execute()\n except ForbiddenCheckError as err:\n logger.error(err)\n else:\n check_result_list.append(result)\n if check_result_list:\n return check_result_list\n else:\n logger.error(\"Empty check result list\")",
"def check_valves(self):\n for name in self._pipes:\n if self._data[name].check_valve:\n yield name",
"def __iter__(self):\n for classresult in self.classresults:\n yield classresult",
"def results(self, checkid):\r\n return results.Results(self, checkid)",
"def __iter__(self):\n for instresult in self.instresults:\n yield instresult",
"def iter_existing_hashes(results):\n for result in iter_results(results):\n yield result.commit_hash, result.date",
"def check(self, *args, low: int, high: int) -> Generator:\n for element in args:\n yield self.validate(element, low, high)",
"def result_generator(results, metadata):\n try:\n yield '{\"result\": [\\n'\n for line in interleave(['\\n, '], results):\n yield line\n yield '\\n],\\n'\n yield '\"metadata\": {0},\\n'.format(json.dumps(metadata()))\n yield '\"status\": \"success\"}\\n'\n except Exception:\n exc = traceback.format_exc()\n yield '---\\n'\n yield exc",
"def get_results():\n self._recv_result() # blocks\n tasks = self._tasks_in_progress\n results = self._task_results_waiting\n\n for task_id in tasks.keys():\n if task_id not in results:\n break\n\n del tasks[task_id]\n result = results.pop(task_id)\n yield result.value",
"def _get_validation_generator(self):\n for data_element in self.validation:\n if self.debug: \n print(\"validating on: {}\".format(data_element))\n \n image, heatmap = self._generate_input_tuple(data_element)\n \n yield (image, heatmap)",
"def test_normal_basic():\r\n yield check_normal_basic, False\r\n yield check_normal_basic, False, True\r\n yield check_normal_basic, True",
"def test_generator_method(self):\n for i in range(0, 4):\n yield self.try_odd, i",
"def getResults():",
"def getTestResults():",
"def __iter__(self):\n while True:\n results = self.poll()\n for x in results:\n yield x\n if not results:\n time.sleep(self.poll_delay)",
"def _checker_worker(self):\n results = {}\n for cmd in self.check_cmds:\n res = subprocess.call(cmd.split(), stdout=open('/dev/null', 'w'))\n self.log(\"'%s' finished, result: %s\" % (cmd, res))\n results[cmd] = res\n if rospy.is_shutdown():\n return\n with self._lock:\n # just add results into the data structure\n self._results.add(results)",
"def get_xml_of_checkresults(doc, checks, run_time):\n check_results = doc.createElement('checkresults')\n doc.appendChild(check_results)\n\n for check in checks:\n if check.needs_to_run():\n element = Handler.make_xml(check)\n check.set_next_run(run_time)\n check_results.appendChild(element)\n\n return doc",
"def iterator(self):\n yield",
"def _test_generator(get_output, get_expected, input_filename, **options):\n def test(self):\n output_docs, output_errors = get_output(input_filename, **options)\n expect_docs, expect_errors = get_expected(input_filename, **options)\n\n self.assertEqual(expect_docs, output_docs)\n self.assertEqual(expect_errors, output_errors)\n\n return test",
"def get_test_results(self, action_instance_id, filters=None):\n yield",
"def test_check(self):\n return self._testCheck()",
"def executeSafetyCheck(self, state):\n logAllChecksDescription(\"SAFETY\", \"GENERATOR %s\" % self.name, indentation=2)\n checkStatus = dict()\n try:\n checkStatus[\"R1\"] = self.safetyCheckR1(state)\n checkStatus[\"R2\"] = self.safetyCheckR2(state)\n checkStatus[\"R3\"] = self.safetyCheckR3(state)\n checkStatus[\"R4\"] = self.safetyCheckR4(state)\n checkStatus[\"R8a\"] = self.safetyCheckR8a(state)\n checkStatus[\"R8b\"] = self.safetyCheckR8b(state)\n checkStatus[\"R9a\"] = self.safetyCheckR9a(state)\n checkStatus[\"R9b\"] = self.safetyCheckR9b(state)\n logAllChecksPassed(\"SAFETY\", \"GENERATOR %s\" % self.name, all(checkStatus.values()), indentation=2)\n except Exception, e:\n logError(\"Unknown exception or error: %s\" % e.message, indentation=2)\n return checkStatus"
] | [
"0.66472244",
"0.6500041",
"0.6259373",
"0.6240636",
"0.6147593",
"0.59843",
"0.5965806",
"0.5961395",
"0.5939978",
"0.59119546",
"0.58932894",
"0.58822715",
"0.5831415",
"0.58069813",
"0.57816905",
"0.5770246",
"0.57701373",
"0.5715555",
"0.570003",
"0.5638769",
"0.56072867",
"0.55088973",
"0.5497617",
"0.5471317",
"0.5455615",
"0.5438781",
"0.54298216",
"0.54271245",
"0.5426023",
"0.5415685"
] | 0.7698506 | 0 |
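A minimal usage sketch of the drain-as-you-go generator pattern from the record above; the `Checker` class here is a hypothetical wrapper used only to show how such a generator is consumed:

```python
class Checker:
    def __init__(self):
        self._check_results = []

    def add_result(self, result):
        self._check_results.append(result)

    def check_results(self):
        # Yield the earliest result and remove it until none are left.
        while self._check_results:
            yield self._check_results.pop(0)

checker = Checker()
checker.add_result("disk ok")
checker.add_result("memory ok")
for result in checker.check_results():
    print(result)
```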
Do preconfiguration checks. Return a list of CheckResult items, one for each check. | def pre_config_checks(self):
if self.host is not None:
self.tell("Doing pre-config checks")
self.do_checklist([]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perform_checks(self):\n retval = []\n retval.extend(self.check_slick_status())\n retval.extend(self.check_java_processes())\n retval.extend(self.check_firefox_processes())\n retval.extend(self.check_disk_space())\n return retval",
"def __runChecks(self):\n runnedChecks = []\n runnedChecks.append(Checks.checksFilesInstalled(self.__targetPath, verbose=True))\n return all(runnedChecks)",
"def getChecks(self):\r\n raise AbstractError\r\n return []",
"def check_results(self):\n\n\t\twhile True:\n\n\t\t\t# If no checks left, stop\n\t\t\tif len(self._check_results) == 0:\n\t\t\t\tbreak\n\n\t\t\t# Return earliest result and remove from list\n\t\t\tyield self._check_results.pop(0)",
"def get_result(self):\n check_result_list = []\n for check in self.monitoring_checks:\n try:\n result = check.execute()\n except ForbiddenCheckError as err:\n logger.error(err)\n else:\n check_result_list.append(result)\n if check_result_list:\n return check_result_list\n else:\n logger.error(\"Empty check result list\")",
"def run_checks(self):\n\n try:\n check_obj = self.metadata.get_described_element()\n except ObjectDoesNotExist:\n pass\n\n if self.metadata.is_service_metadata:\n if self.metadata.is_service_type(OGCServiceEnum.WMS):\n self.check_wms(check_obj)\n elif self.metadata.is_service_type(OGCServiceEnum.WFS):\n self.check_wfs(check_obj)\n\n elif self.metadata.is_layer_metadata:\n self.check_layer(check_obj)\n elif self.metadata.is_featuretype_metadata:\n self.check_featuretype(check_obj)\n elif self.metadata.is_dataset_metadata:\n self.check_dataset()\n\n # all checks are done. Calculate the health state for all monitoring results\n health_state = HealthState.objects.create(monitoring_run=self.monitoring_run, metadata=self.metadata)\n health_state.run_health_state()",
"def RunChecks(self):\n results = []\n\n affected_files = self.input_api.AffectedFiles(\n file_filter=self.file_filter, include_deletes=False)\n affected_js_files = filter(\n lambda f: f.LocalPath().endswith('.js'), affected_files)\n\n if affected_js_files:\n self.input_api.logging.info(\n 'Running appengine eslint on %d JS file(s)', len(affected_js_files))\n results += self.RunESLintChecks(affected_js_files)\n\n\n if results:\n results.append(self.output_api.PresubmitNotifyResult(\n 'See the JavaScript style guide at https://goo.gl/Ld1CqR.'))\n\n return results",
"def _checkin_resources(self):\n\n for test_prep in self._test_preps.values():\n test_prep.sut.checkin()",
"def all(\n command,\n):\n # If we get to this point all tests listed in 'pre' have passed\n # unless we have run the task with the --warn flag\n if not command.config.run.warn:\n print(\n \"\"\"\nAll Checks Passed Successfully\n==========================================\n\"\"\"\n )",
"def check(self, rendered: str) -> List[str]:\n errors = []\n\n if self.check_prefix is None:\n raise AttributeError(\"The class attribute `check_prefix` must be non-null!\")\n\n # Load all YAML documents from string `rendered`\n loaded = self._load_rendered_yaml(rendered)\n\n checks = self._get_all_checks()\n for check in checks:\n result = check(loaded)\n if result:\n errors.append(result)\n\n return errors",
"def test_run_all_sql_data_checks(self):\n\n # we should run all the data checks through the main function to support\n # different sql data checker classes we may create in the future\n results = sql_data_checker.main()\n\n # if any data check threw an exception, its value in the dict will be None\n failed_data_check_ids = []\n for data_check_type in sorted(results, key=lambda key: key.data_check_type_id):\n if results[data_check_type] is None:\n failed_data_check_ids.append(data_check_type.data_check_type_id)\n\n # I want it to display all failed checks so I'm not doing a self.assertEqual(0, len(failed_data_check_ids))\n if len(failed_data_check_ids) is not 0:\n self.fail('Failed SQL Data Check IDs: %s' % [str(s) for s in failed_data_check_ids])",
"def run(self):\n checks = [\n self.check_files_permissions,\n self.check_suid_bin,\n self.check_nfs_root_squashing,\n self.is_docker_installed,\n self.check_sudo_rules,\n self.get_possible_exploit,\n ]\n\n for check in checks:\n yield check()",
"def precheck(ctx):\n rc = run_playbook(precheck_cmd(ctx.obj))\n if rc != 0:\n print_error_msg(\"Upgrade prechecks failed!!!\")\n sys.exit(1)\n print_success_msg(\"Upgrade prechecks ran successfully\")",
"def _get_all_checks(self):\n this_class = self.__class__\n\n check_list = [\n getattr(self, func)\n for func in dir(self.__class__)\n if callable(getattr(this_class, func))\n and func.startswith(self.check_prefix)\n ]\n\n return check_list",
"def run(self) -> None:\n self._does_apply = self.run_precondition()\n if not self._does_apply:\n self._check_result.status = CheckStatus.DOES_NOT_APPLY\n return\n\n self.calc_statistics()\n\n for statistic in self._check_result.statistics:\n\n if statistic.statistic_type == CheckStatisticType.UNDEFINED:\n capture_message('Warning: check statistics type is undefined')\n\n if self._check_result.status == CheckStatus.UNDEFINED:\n self._check_result.status = CheckStatus.PASS\n\n if statistic.value is not None:\n if statistic.thresholds.failure is not None and \\\n statistic.value > statistic.thresholds.failure:\n self._check_result.status = CheckStatus.FAIL\n if statistic.thresholds.warning is not None and \\\n statistic.value > statistic.thresholds.warning:\n if self._check_result.status != CheckStatus.FAIL:\n self._check_result.status = CheckStatus.WARNING",
"def on_sanity_check_start(self):\n for callback in self.callbacks:\n callback.on_sanity_check_start(self, self.get_model())",
"def checks(self):\r\n return checks.Checks(self)",
"def prechecks(verbose_level=1, hostnames=[], servicenames=[]):\n # type: (int, List[str], List[str]) -> Job\n check_arg(hostnames, u._('Host names'), list,\n empty_ok=True, none_ok=True)\n check_arg(verbose_level, u._('Verbose level'), int)\n check_arg(servicenames, u._('Service names'), list,\n empty_ok=True, none_ok=True)\n\n check_kolla_args(hostnames=hostnames,\n servicenames=servicenames)\n\n hostnames = safe_decode(hostnames)\n servicenames = safe_decode(servicenames)\n action = KollaAction(verbose_level=verbose_level,\n playbook_name='site.yml')\n ansible_job = action.precheck(hostnames, servicenames)\n return Job(ansible_job)",
"def post_config_checks(self):\n\n\t\tif self.host is not None:\n\t\t\tself.tell(\"Doing post-config checks\")\n\n\t\tself.do_checklist([])",
"def check_all(self):\n # TODO: this should use functions in execute.py to run tests in-sequence so that variable\n # name collisions are accounted for\n self._log_event(EventType.BEGIN_CHECK_ALL)\n\n # TODO: this is a janky way of resolving where the tests are. Formalize a method of \n # determining this and put it into a method in e.g. utils.py\n tests = [os.path.split(file)[1][:-3] for file in glob(os.path.join(self._path, \"*.py\")) \\\n if \"__init__.py\" not in file]\n if len(tests) == 0:\n nb_path = self._resolve_nb_path(None)\n with open(nb_path, encoding=\"utf-8\") as f:\n nb = json.load(f)\n tests = list(nb[\"metadata\"][NOTEBOOK_METADATA_KEY][\"tests\"].keys())\n\n global_env = inspect.currentframe().f_back.f_back.f_globals\n results = []\n if not _SHELVE:\n for test_name in sorted(tests):\n results.append(self.check(test_name, global_env))\n else:\n log = Log.from_file(_OTTER_LOG_FILENAME, ascending=False)\n for file in sorted(tests):\n if \"__init__.py\" not in file:\n test_name = os.path.splitext(os.path.split(file)[1])[0]\n\n entry = log.get_question_entry(test_name)\n env = entry.unshelve()\n global_env.update(env)\n del locals()[\"env\"]\n\n result = self.check(test_name, global_env)\n results.append((test_name, result))\n\n return GradingResults(results)",
"def precheck_cmd(constants: dict) -> list:\n playbook_dir = constants[\"playbooks\"]\n return AnsiblePlay(\n playbook=f\"{playbook_dir}/main.yml\",\n tags=['upgrade_precheck'],\n extra_vars=constants,\n )",
"def checklists(self):\n return self.pods.all().checklists",
"def process_all_checks(self, code, option):\n return code",
"async def items(self):\n response = await self._api.get(\"/v1/agent/checks\")\n return response.body",
"def _CommonChecks(input_api, output_api):\n result = []\n result.extend(_CheckChromeUpdateTriggerRule(input_api, output_api))\n result.extend(_CheckCurrentVersionIncreaseRule(input_api, output_api))\n result.extend(_CheckNoOverlappingFileNamesInResourceDirsRule(input_api,\n output_api))\n\n return result",
"def create_checkers(config):\n\n checkers = []\n if 'checkers' in config:\n for checker_name, checker_config in config['checkers'].iteritems():\n if checker_name in __checkers:\n configs = None\n if type(checker_config) == list:\n configs = checker_config\n else:\n configs = [checker_config]\n for config in configs:\n ch = __checkers[checker_name]()\n ch.set_config(config)\n if ch:\n checkers.append(ch)\n return checkers",
"def do_check(self, change):\n\n return []",
"def _check(self):\n if self.action_on_failure not in self.ACTION_ON_FAILURE:\n raise type_utils.TestListError(\n 'action_on_failure must be one of \"NEXT\", \"PARENT\", \"STOP\"')\n\n if self.parallel:\n if not self.subtests:\n raise type_utils.TestListError(\n '`parallel` should be set on test group')\n for subtest in self.subtests:\n if not subtest.IsLeaf():\n raise type_utils.TestListError(\n 'Test %s: all subtests in a parallel test should be leaf nodes' %\n self.id)\n if subtest.enable_services or subtest.disable_services:\n raise type_utils.TestListError(\n 'Test %s cannot be parallel with enable_services or '\n 'disable_services specified.' % subtest.id)\n\n # all subtests should come before teardown tests\n it = iter(self.subtests)\n if not self.teardown:\n # find first teardown test\n it = itertools.dropwhile(lambda subtest: not subtest.teardown, it)\n for subtest in it:\n if not subtest.teardown:\n raise type_utils.TestListError(\n '%s: all subtests should come before teardown tests' % self.id)\n\n for subtest in self.subtests:\n subtest._check() # pylint: disable=protected-access",
"def load_checks_to_execute(\n bulk_checks_metadata: dict,\n bulk_compliance_frameworks: dict,\n checks_file: str,\n check_list: list,\n service_list: list,\n severities: list,\n compliance_frameworks: list,\n categories: set,\n provider: str,\n) -> set:\n checks_to_execute = set()\n\n # Handle if there are checks passed using -c/--checks\n if check_list:\n for check_name in check_list:\n checks_to_execute.add(check_name)\n\n # Handle if there are some severities passed using --severity\n elif severities:\n for check in bulk_checks_metadata:\n # Check check's severity\n if bulk_checks_metadata[check].Severity in severities:\n checks_to_execute.add(check)\n\n # Handle if there are checks passed using -C/--checks-file\n elif checks_file:\n try:\n checks_to_execute = parse_checks_from_file(checks_file, provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are services passed using -s/--services\n elif service_list:\n checks_to_execute = recover_checks_from_service(service_list, provider)\n\n # Handle if there are compliance frameworks passed using --compliance\n elif compliance_frameworks:\n try:\n checks_to_execute = parse_checks_from_compliance_framework(\n compliance_frameworks, bulk_compliance_frameworks\n )\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are categories passed using --categories\n elif categories:\n for cat in categories:\n for check in bulk_checks_metadata:\n # Check check's categories\n if cat in bulk_checks_metadata[check].Categories:\n checks_to_execute.add(check)\n\n # If there are no checks passed as argument\n else:\n try:\n # Get all check modules to run with the specific provider\n checks = recover_checks_from_provider(provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n else:\n for check_info in checks:\n # Recover check name from import path (last part)\n # Format: \"providers.{provider}.services.{service}.{check_name}.{check_name}\"\n check_name = check_info[0]\n checks_to_execute.add(check_name)\n\n return checks_to_execute",
"def health_checks(self):\n return [self.check_device_connected, self.check_clear_flags]"
] | [
"0.6579942",
"0.61347735",
"0.5918062",
"0.5915588",
"0.5889537",
"0.58700436",
"0.58586943",
"0.5851722",
"0.572591",
"0.57106316",
"0.5689189",
"0.5632314",
"0.5584315",
"0.5511301",
"0.54948235",
"0.54906636",
"0.5459327",
"0.542329",
"0.5401744",
"0.5384123",
"0.53488743",
"0.53367096",
"0.5311734",
"0.5253316",
"0.5198254",
"0.5178151",
"0.51617193",
"0.51473576",
"0.51342803",
"0.51129705"
] | 0.6837133 | 0 |
Do post-configuration checks. Return a list of CheckResult items, one for each check. | def post_config_checks(self):
if self.host is not None:
self.tell("Doing post-config checks")
self.do_checklist([]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perform_checks(self):\n retval = []\n retval.extend(self.check_slick_status())\n retval.extend(self.check_java_processes())\n retval.extend(self.check_firefox_processes())\n retval.extend(self.check_disk_space())\n return retval",
"def test_run_all_sql_data_checks(self):\n\n # we should run all the data checks through the main function to support\n # different sql data checker classes we may create in the future\n results = sql_data_checker.main()\n\n # if any data check threw an exception, its value in the dict will be None\n failed_data_check_ids = []\n for data_check_type in sorted(results, key=lambda key: key.data_check_type_id):\n if results[data_check_type] is None:\n failed_data_check_ids.append(data_check_type.data_check_type_id)\n\n # I want it to display all failed checks so I'm not doing a self.assertEqual(0, len(failed_data_check_ids))\n if len(failed_data_check_ids) is not 0:\n self.fail('Failed SQL Data Check IDs: %s' % [str(s) for s in failed_data_check_ids])",
"def get_result(self):\n check_result_list = []\n for check in self.monitoring_checks:\n try:\n result = check.execute()\n except ForbiddenCheckError as err:\n logger.error(err)\n else:\n check_result_list.append(result)\n if check_result_list:\n return check_result_list\n else:\n logger.error(\"Empty check result list\")",
"def get_postprocess_config_files(self):\n return list(\n resources.get_files_in_folder(\n \"config/tests/postprocessing/postprocess_test_configs\"))",
"def __runChecks(self):\n runnedChecks = []\n runnedChecks.append(Checks.checksFilesInstalled(self.__targetPath, verbose=True))\n return all(runnedChecks)",
"def run_checks(self):\n\n try:\n check_obj = self.metadata.get_described_element()\n except ObjectDoesNotExist:\n pass\n\n if self.metadata.is_service_metadata:\n if self.metadata.is_service_type(OGCServiceEnum.WMS):\n self.check_wms(check_obj)\n elif self.metadata.is_service_type(OGCServiceEnum.WFS):\n self.check_wfs(check_obj)\n\n elif self.metadata.is_layer_metadata:\n self.check_layer(check_obj)\n elif self.metadata.is_featuretype_metadata:\n self.check_featuretype(check_obj)\n elif self.metadata.is_dataset_metadata:\n self.check_dataset()\n\n # all checks are done. Calculate the health state for all monitoring results\n health_state = HealthState.objects.create(monitoring_run=self.monitoring_run, metadata=self.metadata)\n health_state.run_health_state()",
"def run(self) -> None:\n self._does_apply = self.run_precondition()\n if not self._does_apply:\n self._check_result.status = CheckStatus.DOES_NOT_APPLY\n return\n\n self.calc_statistics()\n\n for statistic in self._check_result.statistics:\n\n if statistic.statistic_type == CheckStatisticType.UNDEFINED:\n capture_message('Warning: check statistics type is undefined')\n\n if self._check_result.status == CheckStatus.UNDEFINED:\n self._check_result.status = CheckStatus.PASS\n\n if statistic.value is not None:\n if statistic.thresholds.failure is not None and \\\n statistic.value > statistic.thresholds.failure:\n self._check_result.status = CheckStatus.FAIL\n if statistic.thresholds.warning is not None and \\\n statistic.value > statistic.thresholds.warning:\n if self._check_result.status != CheckStatus.FAIL:\n self._check_result.status = CheckStatus.WARNING",
"def RunChecks(self):\n results = []\n\n affected_files = self.input_api.AffectedFiles(\n file_filter=self.file_filter, include_deletes=False)\n affected_js_files = filter(\n lambda f: f.LocalPath().endswith('.js'), affected_files)\n\n if affected_js_files:\n self.input_api.logging.info(\n 'Running appengine eslint on %d JS file(s)', len(affected_js_files))\n results += self.RunESLintChecks(affected_js_files)\n\n\n if results:\n results.append(self.output_api.PresubmitNotifyResult(\n 'See the JavaScript style guide at https://goo.gl/Ld1CqR.'))\n\n return results",
"def after_make_averaged_dataset(msg, config, checklist):\n return []",
"def after_archive_tarball(msg, config, checklist):\n return []",
"def getChecks(self):\r\n raise AbstractError\r\n return []",
"def update_results(failures, errors, case_):\n for check in case_.checks:\n if check.result == FAILURE:\n failures.append(check)\n elif check.result == ERROR:\n errors.append(check)",
"def all(\n command,\n):\n # If we get to this point all tests listed in 'pre' have passed\n # unless we have run the task with the --warn flag\n if not command.config.run.warn:\n print(\n \"\"\"\nAll Checks Passed Successfully\n==========================================\n\"\"\"\n )",
"def submit_to_nagios(self, checkresults):\n\n try:\n server = self.config.get('nrdp', 'parent')\n token = self.config.get('nrdp', 'token')\n except Exception as ex:\n logging.exception(ex)\n\n # Get the list of servers (and tokens, if available)\n servers = server.split(',')\n tokens = token.split(',')\n\n for i, server in enumerate(servers):\n\n # Grab a token, or the last token\n try:\n tmp_token = tokens[i]\n token = tmp_token\n except IndexError:\n pass\n\n # The POST requests don't follow redirects, so we have to make sure\n # the address is completely accurate.\n if not server.endswith('/'):\n server += '/'\n\n logging.debug('XML to be submitted: %s', checkresults)\n ret_xml = utils.send_request(url=server, token=token, XMLDATA=checkresults, cmd='submitcheck')\n\n if ret_xml is not None:\n try:\n Handler.log_result(server, ret_xml)\n except Exception as ex:\n logging.debug(ret_xml)\n logging.exception(ex)",
"def postProcess_check() -> dict:\n nonlocal d_test, b_status, d_filter, d_stats\n\n if len(self.args['fileFilter']) or len(self.args['dirFilter']):\n d_filter = self.filterFileHitList()\n b_status = d_filter['status']\n if self.b_test:\n d_test = self.test_run(*args, **kwargs)\n b_status = b_status and d_test['status']\n if self.b_stats or self.b_statsReverse or \\\n self.b_jsonStats or self.args['du'] or self.args['duf']:\n d_stats = stats_process()\n b_status = b_status and d_stats['status']\n self.verbosityLevel = 1\n if self.toConsole():\n if not self.args['du'] and not self.args['duf']:\n print(d_stats['filterLog'].border_draw())\n print(d_stats['bodyLog'].border_draw())\n elif self.args['du'] or self.args['duf']:\n print(d_stats['bodyLog'])\n else:\n d_stats['filterLog'] = d_stats['filterLog'].json_dump()\n d_stats['bodyLog'] = d_stats['bodyLog'].json_dump()\n\n return {\n 'status': b_status,\n 'filter': d_filter,\n 'test': d_test,\n 'stats': d_stats\n }",
"def pre_config_checks(self):\n\n\t\tif self.host is not None:\n\t\t\tself.tell(\"Doing pre-config checks\")\n\n\t\tself.do_checklist([])",
"async def items(self):\n response = await self._api.get(\"/v1/agent/checks\")\n return response.body",
"def after_launch_remote_worker(msg, config, checklist):\n return []",
"def load_checks_to_execute(\n bulk_checks_metadata: dict,\n bulk_compliance_frameworks: dict,\n checks_file: str,\n check_list: list,\n service_list: list,\n severities: list,\n compliance_frameworks: list,\n categories: set,\n provider: str,\n) -> set:\n checks_to_execute = set()\n\n # Handle if there are checks passed using -c/--checks\n if check_list:\n for check_name in check_list:\n checks_to_execute.add(check_name)\n\n # Handle if there are some severities passed using --severity\n elif severities:\n for check in bulk_checks_metadata:\n # Check check's severity\n if bulk_checks_metadata[check].Severity in severities:\n checks_to_execute.add(check)\n\n # Handle if there are checks passed using -C/--checks-file\n elif checks_file:\n try:\n checks_to_execute = parse_checks_from_file(checks_file, provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are services passed using -s/--services\n elif service_list:\n checks_to_execute = recover_checks_from_service(service_list, provider)\n\n # Handle if there are compliance frameworks passed using --compliance\n elif compliance_frameworks:\n try:\n checks_to_execute = parse_checks_from_compliance_framework(\n compliance_frameworks, bulk_compliance_frameworks\n )\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n\n # Handle if there are categories passed using --categories\n elif categories:\n for cat in categories:\n for check in bulk_checks_metadata:\n # Check check's categories\n if cat in bulk_checks_metadata[check].Categories:\n checks_to_execute.add(check)\n\n # If there are no checks passed as argument\n else:\n try:\n # Get all check modules to run with the specific provider\n checks = recover_checks_from_provider(provider)\n except Exception as e:\n logger.error(f\"{e.__class__.__name__}[{e.__traceback__.tb_lineno}] -- {e}\")\n else:\n for check_info in checks:\n # Recover check name from import path (last part)\n # Format: \"providers.{provider}.services.{service}.{check_name}.{check_name}\"\n check_name = check_info[0]\n checks_to_execute.add(check_name)\n\n return checks_to_execute",
"def on_sanity_check_end(self):\n for callback in self.callbacks:\n callback.on_sanity_check_end(self, self.get_model())",
"def _get_check_run_results(\n self, commits: List[dict]) -> List[str]:\n failed_status = {'failure', 'cancelled', 'timed_out', 'action_required'}\n check_run_results = []\n for commit in commits:\n commit_ref = commit['sha']\n commit_check_run_results = get_commit_check_runs(\n self._repo_name, commit_ref, self._auth)\n if not commit_check_run_results:\n continue\n num_check_runs = commit_check_run_results['total_count']\n if num_check_runs == 0:\n check_run_results.append('none')\n continue\n status = 'passed'\n for commit_check_run_result in commit_check_run_results[\n 'check_runs']:\n conclusion = commit_check_run_result['conclusion']\n if conclusion in failed_status:\n status = 'failed'\n break\n check_run_results.append(status)\n return check_run_results",
"def process_all_checks(self, code, option):\n return code",
"def execute(checks, logger, concurrency=1):\n\n epoll = select.epoll()\n todo = checks[::-1]\n doing = {}\n\n done = []\n success = 0\n\n def handle_failure(job, msg, kill=True):\n logger.warning(msg)\n handle_done(job)\n\n if kill:\n try:\n job.kill()\n except OSError:\n pass\n\n def handle_done(job):\n epoll.unregister(job.fd)\n if job.fd in doing:\n del doing[job.fd]\n\n try:\n while todo or doing:\n # Schedule new jobs up to the concurrency level\n while todo and len(doing) < concurrency:\n check = todo.pop()\n logger.info(\"Executing check '{}'\".format(check.name))\n\n job = check.run()\n doing[job.fd] = job\n epoll.register(job.fd, select.EPOLLIN)\n\n # Poll for stdout events\n for (fd, event) in epoll.poll(0.01):\n job = doing[fd]\n\n # Handle job completion\n if job.poll() is not None:\n job.wait()\n\n if job.isfailure():\n msg = \"Check '{}' failed: {}\"\n msg = msg.format(job.check.name, job.output)\n handle_failure(job, msg, kill=False)\n else:\n handle_done(job)\n success += 1\n\n done.append(job)\n\n # Enforce timeout on running jobs\n for job in doing.values():\n if job.timedout():\n msg = \"Check '{}' exceeded {}s timeout\"\n msg = msg.format(job.check.name, job.check.timeout)\n handle_failure(job, msg)\n\n finally:\n for job in doing.values():\n msg = \"Error running check '{}'\".format(job.check.name)\n handle_failure(job, msg)\n\n epoll.close()\n\n return (len(done) == len(checks) == success, done)",
"def doCleanups(self):\r\n result = self._resultForDoCleanups\r\n ok = True\r\n while self._cleanups:\r\n function, args, kwargs = self._cleanups.pop(-1)\r\n try:\r\n function(*args, **kwargs)\r\n except Exception:\r\n ok = False\r\n result.addError(self, sys.exc_info())\r\n return ok",
"def run_through_checklists(uncompleted_checklists, expected_responses = OrderedDict(), actual_responses = OrderedDict()):\n\tcurrent_checklist = uncompleted_checklists.keys()[0]\n\t\n\tif current_checklist == \"Context\":\n\t\tresponse = \"y\"\n\telse:\n\t\tprint (\"\\nWould you like to check \" + current_checklist + \"?\")\n\t\tresponse = raw_input(\"(y/n) > \")\n\n\tif response == \"y\":\n\t\texpected_responses[current_checklist] = uncompleted_checklists[current_checklist]\n\t\tactual_responses[current_checklist] = run_checklist(uncompleted_checklists[current_checklist])\n\telse:\n\t\tpass\n\n\t#Remove this checklist\n\tdel uncompleted_checklists[current_checklist]\n\n\t#Check if checklists exist\n\tif not uncompleted_checklists:\n\t\t#No more checklists found\n\t\tpass\n\telse:\n\t\trun_through_checklists(uncompleted_checklists)\n\t\n\tcompleted_checklists = {\"expected_responses\": expected_responses, \"actual_responses\": actual_responses}\n\treturn (completed_checklists)",
"def check_all(self, exit_on_error=False):\n \n \"\"\"Returns if all checks passed\"\"\"\n checks = [\n self.check_all_objects_have_names,\n self.test_entity_hierarchy,\n self.check_current_states,\n self.check_transition_sanity,\n self.check_update_sanity,\n self.check_action_sanity,\n self.check_influence_sanity,\n self.check_objects_have_parents_and_are_not_referenced_twice,\n self.check_port_connections,\n ]\n\n no_problems = True\n\n for check in checks:\n try:\n logger.debug(f\"Starting check {check.__name__}\")\n check()\n except AssertionError as exc:\n logger.error(f\"Problem in check '{check.__name__}': {str(exc)}\")\n if exit_on_error:\n raise exc\n no_problems = False\n else:\n logger.info(f\"Check {check.__name__} passed without problems\")\n\n logger.info(\"Finished all checks.\")\n return no_problems",
"def __execute_tests(self, lst_tests):\n tests_pass = tests_fail = 0\n queue_of_result = multiprocessing.Queue()\n for test in lst_tests:\n process = multiprocessing.Process(\n target=TestRunner.__helper_execute_test,\n kwargs={\"test_cls\": test,\n \"time_out\": self.__args.timeout,\n \"channel\": queue_of_result})\n process.start()\n process.join()\n temp_result = {}\n if not queue_of_result.empty():\n temp_result = queue_of_result.get_nowait()\n\n if \"status\" in temp_result:\n if temp_result[\"status\"] == result.Status.PASSED:\n tests_pass += 1\n else:\n tests_fail += 1\n\n if \"json_path\" in temp_result:\n self.__lst_json_files.append(temp_result[\"json_path\"])\n\n if \"log_path\" in temp_result:\n self.__lst_log_files.append(temp_result[\"log_path\"])\n\n return tests_pass, tests_fail",
"def run_pr_checks(pr_tests, ghprb_actual_commit, sha1):\n # Ensure we save off the current HEAD to revert to\n current_pr_head = run_cmd([\"git\", \"rev-parse\", \"HEAD\"], return_output=True).strip()\n pr_results = list()\n\n for pr_test in pr_tests:\n test_name = pr_test + \".sh\"\n pr_results.append(\n run_cmd(\n [\n \"bash\",\n os.path.join(SPARK_HOME, \"dev\", \"tests\", test_name),\n ghprb_actual_commit,\n sha1,\n ],\n return_output=True,\n ).rstrip()\n )\n # Ensure, after each test, that we're back on the current PR\n run_cmd([\"git\", \"checkout\", \"-f\", current_pr_head])\n return pr_results",
"def _send_batch(self, service_checks: list):\n for service_check in service_checks:\n self._send(service_check)",
"def _checker_worker(self):\n results = {}\n for cmd in self.check_cmds:\n res = subprocess.call(cmd.split(), stdout=open('/dev/null', 'w'))\n self.log(\"'%s' finished, result: %s\" % (cmd, res))\n results[cmd] = res\n if rospy.is_shutdown():\n return\n with self._lock:\n # just add results into the data structure\n self._results.add(results)"
] | [
"0.5916621",
"0.5734533",
"0.5659765",
"0.54654485",
"0.5404088",
"0.53832567",
"0.5361073",
"0.53190947",
"0.5267433",
"0.5263102",
"0.524129",
"0.52240616",
"0.5184659",
"0.514193",
"0.5114651",
"0.51029646",
"0.5099988",
"0.50570494",
"0.5046535",
"0.5043843",
"0.5043554",
"0.4960393",
"0.49587882",
"0.4955442",
"0.49475768",
"0.49424672",
"0.49204236",
"0.48899302",
"0.48794958",
"0.48735398"
] | 0.6934056 | 0 |
Return the configuration of the actual device. | def device_config(self):
try:
return self._dev
except:
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def config(self) -> 'outputs.DeviceConfigResponse':\n return pulumi.get(self, \"config\")",
"def get_configuration(self):\n\t\tdevice = DeviceBase(self.name)\n\n\t\tif len(self.master_url) > 0:\n\t\t\tdevice.master_url = self.master_url\n\t\t\tr = requests.get(self.master_url + '/configuration/' + self.name)\n\n\t\t\tif r.status_code == 200:\n\t\t\t\ttry:\n\t\t\t\t\t#Request success\n\t\t\t\t\tconfig = json.loads(r.text)\n\t\t\t\t\tif config['deviceType'] == 1:\n\t\t\t\t\t\t\"\"\" HID Reader \"\"\"\n\t\t\t\t\t\tdevice = HIDReader(self.name)\n\t\t\t\t\tif config['deviceType'] == 2:\n\t\t\t\t\t\t\"\"\" ZK45Reader \"\"\"\n\t\t\t\t\t\tdevice = ZK45Reader(self.name)\n\t\t\t\t\tif config['deviceType'] == 4:\n\t\t\t\t\t\t\"\"\" ZFM20Reader \"\"\"\n\t\t\t\t\t\tdevice = ZFM20Reader(self.name)\n\t\t\t\t\tif config['deviceType'] == 5:\n\t\t\t\t\t\t\"\"\" IOEcho \"\"\"\n\t\t\t\t\t\tdevice = IOEcho(name=self.name, pin_and_label_matrix='')\n\t\t\t\t\telif config['deviceType'] == 0:\n\t\t\t\t\t\t\"\"\" None \"\"\"\n\t\t\t\t\t\tdevice = DeviceBase(name=self.name)\n\t\t\t\t\telse:\n\t\t\t\t\t\t\"\"\" Disable \"\"\"\n\t\t\t\t\t\tdevice = DeviceBase(self.name)\n\n\t\t\t\t\tdevice.zone_id = config['zone']\n\n\t\t\t\t\tdevice.is_zone_enabled = config['enabled']\n\t\t\t\t\tdevice.is_zone_day_time_only = config['dayTimeOnly']\n\t\t\t\t\tdevice.is_configuration_loaded = True\n\n\t\t\t\t\tdevice.master_secret = config['secret']\n\t\t\t\t\tdevice.master_url = self.master_url\n\n\t\t\t\t\tdevice.is_in_error = False\n\t\t\t\t\tdevice.error_status = \"OK\"\n\t\t\t\t\tdevice.type = config['deviceType']\n\n\t\t\t\t\tprint(\"Configuration loaded.\")\n\t\t\t\texcept Exception as e:\n\t\t\t\t\terror_message = \"Device type not supported by current platform. Configuration aborted. (\" + str(e) + \")\"\n\t\t\t\t\tprint(error_message)\n\t\t\t\t\tdevice.zone_id = 1\n\n\t\t\t\t\tdevice.is_zone_enabled = False\n\t\t\t\t\tdevice.is_zone_day_time_only = False\n\t\t\t\t\tdevice.is_in_error = True\n\t\t\t\t\tdevice.error_status = error_message\n\t\t\telse:\n\t\t\t\tprint(\"Configuration loading failed. (Server response : \" + str(r.status_code) + \")\")\n\t\t\t\tdevice.zone_id = 1\n\t\t\t\tdevice.is_zone_enabled = False\n\t\t\t\tdevice.is_zone_day_time_only = False\n\t\t\t\tdevice.is_in_error = True\n\t\t\t\tdevice.error_status = \"Configuration loading failed. (Server response : \" + str(r.status_code) + \")\"\n\t\telse:\n\t\t\tself.zone_id = 1\n\t\t\tself.is_zone_enabled = True\n\t\t\tself.is_zone_day_time_only = True\n\t\t\tdevice.is_in_error = True\n\t\t\tdevice.error_status = \"No master URL defined\"\n\n\t\tdevice.report_state()\n\t\treturn device",
"def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)",
"async def get_config(self, timeout=None):\n\n # Display info message\n log.info(\"get_config\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Get config\n output = await self.send_command(self.cmd_get_config, timeout=timeout)\n\n # Return de configuration of the device\n return output",
"def get_configuration(self) -> dict:\n return self.config",
"def device_get_config(self, filters={}):\n return {}",
"def get_config(self):\n return self.config",
"def get_config(self):\n return {\"name\": self.name, \"tunable\": self.tunable}",
"def config(self):\n return self[CONFIG_KEY]",
"def get_full_config(self):\n return self._read_config()",
"def getConfiguration(self):\n raise NotImplementedError",
"def config(self) -> pulumi.Output['outputs.ConfigResponse']:\n return pulumi.get(self, \"config\")",
"def get_config(self):\n if self.faucet is not None:\n return self.faucet.get_config()\n return None",
"def get_config(self):\n return super().get_config()",
"def configuration(self):\n return self._config",
"def getConfig(self):\n \n return self.config",
"def getConfiguration(self):\n return self._configuration",
"def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})",
"def get_config():\n return CONFIG",
"def config(self):\r\n return self._config",
"def configuration_info(self) -> Optional['outputs.ConfigurationInfoResponse']:\n return pulumi.get(self, \"configuration_info\")",
"def config(self):\n return self._cfg",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config"
] | [
"0.85367155",
"0.76415837",
"0.76286024",
"0.74827415",
"0.74146307",
"0.7380526",
"0.72712225",
"0.72463226",
"0.72262603",
"0.71948266",
"0.7149988",
"0.70847535",
"0.70729476",
"0.7020076",
"0.70090455",
"0.7004731",
"0.70004076",
"0.69863135",
"0.6963824",
"0.6931717",
"0.69226074",
"0.691941",
"0.69044834",
"0.69044834",
"0.69044834",
"0.69044834",
"0.69044834",
"0.69044834",
"0.69044834",
"0.69044834"
] | 0.81186765 | 1 |
Compare device and object configurations. If dev is not None, use the given configuration instead of the current device configuration. If obj is not None, use the given configuration instead of the current object configuration. Keyword arguments: dev, the device configuration (default None); obj, the object configuration (default None). Returns True if the two configurations are identical, otherwise return False. | def config_match(self, dev=None, obj=None):
if dev is None:
dev = self.device_config
if obj is None:
obj = self.object_config
return dev == obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def device_matches_object(self, obj=None):\n\n\t\treturn self.device_is_configured and self.config_match(obj=obj)",
"def __eq__(self, other_object):\n if type(self) != type(other_object):\n return False\n\n #check the important parameters\n #print \"TESTing\", self.config, other_object.config\n return self.config == other_object.config",
"def __eq__(self, other):\n if not isinstance(other, PodConfig):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return (type(other) == type(self)\n and self.using_default_schema == other.using_default_schema\n and self.settings == other.settings\n and self.schema == other.schema\n and self.comment == other.comment)",
"def __eq__(self, other):\n if not isinstance(other, Context):\n return False\n if self.device_typeid == other.device_typeid and \\\n self.device_id == other.device_id:\n return True\n return False",
"def __eq__(self, other):\n if not isinstance(other, DocumentConfig):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, WritableDeviceType):\n return False\n\n return self.__dict__ == other.__dict__",
"def is_dev(self):\n\n return self.dev",
"def __eq__(self, other):\n if not isinstance(other, DeviceRequest):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, LogicalRouterConfig):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return isinstance(other, Context) and \\\n self.device_typeid == other.device_typeid and \\\n self.device_id == other.device_id",
"def __eq__(self, other):\n if not isinstance(other, ShowSsoConfigResponse):\n return False\n\n return self.__dict__ == other.__dict__",
"def equals(self, obj: object) -> bool:\n ...",
"def __eq__(self, other):\n if not isinstance(other, Lti13PlatformConfigurationSchema):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, StoreUrlConfiguration):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, Setting):\n return False\n\n return self.__dict__ == other.__dict__",
"def _check_object_config(self):\n src_cfg_format = self.config[\"source\"][\"config\"]\n\n job = self.active_queue[0]\n src_cfg_file = os.path.join(os.path.dirname(job[\"objects_filename\"]),\n src_cfg_format.replace(\"$id\", job[\"id\"]))\n job[\"config_filename\"] = src_cfg_file\n\n return os.path.isfile(src_cfg_file), src_cfg_file",
"def isExistingSameDevice(config_db, deviceName, table):\n settings = config_db.get_table(table)\n for key,values in settings.items():\n if \"remote_device\" in values and deviceName == values[\"remote_device\"]:\n return True\n\n return False",
"def __eq__(self, other):\n if not isinstance(other, DomainCdnConfig):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, other):\n if not isinstance(other, VolumeParameterConfigJson):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other: 'GatewayChangeRequestGatewayClientGatewayUpdateAttributes') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, Clan):\n return False\n\n return self.__dict__ == other.__dict__",
"def compare_settings(lst):\n\n # Check settings are the same across list of given objects\n for ind, f_obj in enumerate(lst[:-1]):\n if get_settings(f_obj) != get_settings(lst[ind+1]):\n return False\n\n # If no settings fail comparison, return that objects have consistent settings\n return True",
"def __eq__(self, other: 'Gateway') -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, ComAdobeFdFpConfigFormsPortalDraftsandSubmissionConfigServiceProperties):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other: Any) -> bool:\n if isinstance(other, Device):\n return self.device_type == other.device_type and self.device_id == other.device_id\n elif isinstance(other, torch.device):\n return self.device_type == other.type and self.device_id == other.index\n else:\n return NotImplemented",
"def __eq__(self, other):\n if not isinstance(other, PoliciesPeripheralsUsbDeviceCommon):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, ListDevicesRequest):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, XmlNs0RunConfigAllOf):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, obj: \"Property\") -> bool:\n return self.name == obj.name and self.property_type == obj.property_type"
] | [
"0.65390724",
"0.6130971",
"0.5525185",
"0.5412156",
"0.5403968",
"0.5375348",
"0.53177214",
"0.5304435",
"0.5297185",
"0.52221376",
"0.52200085",
"0.5219425",
"0.52145475",
"0.5206618",
"0.51980466",
"0.5179718",
"0.5171252",
"0.51585585",
"0.515688",
"0.5137976",
"0.5131275",
"0.50733465",
"0.50693434",
"0.5052435",
"0.50506026",
"0.504925",
"0.5048824",
"0.50464076",
"0.50395685",
"0.5027788"
] | 0.81821424 | 0 |
Check if device is configured. | def device_is_configured(self):
try:
_ = self._dev
except:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_device_state(self):",
"def is_configured(self):\n pass",
"def system_valid(self):\n return self.udev.devices_exist",
"def is_configured(self):\n return True",
"async def is_configured(hass: HomeAssistant) -> bool:\n manager = await async_get_manager(hass)\n if manager.data is None:\n return False\n return bool(manager.data != manager.default_preferences())",
"def is_available() -> bool:\n # This function never throws and returns 0 if driver is missing or can't\n # be initialized\n return device_count() > 0",
"def antenny_config_check(self):\n return self.antenny_config.check()",
"def check_connectivity(self):\n r = self.run_cmd(\"get-state\")\n return r.startswith(\"device\")",
"def is_connected(cls, device_config):\n if \"console_port_name\" in device_config[\"persistent\"]:\n address = device_config[\"persistent\"][\"console_port_name\"]\n else:\n address = device_config[\"persistent\"][\"hub_port_name\"]\n return os.path.exists(address)",
"async def connected(self) -> bool:\n args = ['-t', f\"DEVICE INFO,{self.conf['device_address']}\"]\n output = await self.run_vh(args)\n return \"IN USE BY: NO ONE\" not in output",
"def device_matches_object(self, obj=None):\n\n\t\treturn self.device_is_configured and self.config_match(obj=obj)",
"def check_configuration(self):\n self.ensure_one()\n getattr(self, '%s_check_configuration' % self.provider, lambda: None)()",
"def device_config(self):\n\t\ttry:\n\t\t\treturn self._dev\n\t\texcept:\n\t\t\treturn 0",
"def _check_config(self):",
"def __check_config(self):\n if not os.path.exists(self.__config_path):\n return False\n else:\n return True",
"def check_config_mode(self):\n return False",
"def checkstatus(self):\n # define cross-platform /dev/null\n devnull = open(os.devnull, 'w')\n\n # if the OS is windows\n if os.name == 'nt':\n ping = ['ping', '-n', '1', self.device]\n\n # if the OS is posix\n else:\n ping = ['ping', '-c', '1', self.device]\n\n print(self.device + ' Checking for device availability', end='', flush=True)\n time.sleep(5)\n count = 0\n while count < 2:\n print('.', end='', flush=True)\n ping_call = subprocess.Popen(ping, stdout=devnull)\n returncode = ping_call.wait()\n if returncode == 0:\n break\n time.sleep(1)\n count = count + 1\n\n print('')\n if count == 2:\n print(self.device + ' Device is not up')\n print(self.device + ' Exiting...')\n return 'FAIL'\n else:\n print(self.device + ' Device is Online')\n print(self.device + ' Please wait for script initialization')\n time.sleep(5)",
"def _check(self):\n\t\tif not self._raven:\n\t\t\traise NoDeviceFoundException",
"def is_configured(self):\n return self._session is not None",
"def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)",
"def _check_configured(cls):\r\n if not cls._CONFIGURED:\r\n raise RuntimeError('Registry not configured')",
"def __connect(self):\n try:\n self._device = usb.core.find(idVendor=self.usb_vendor_id, idProduct=self.usb_product_id)\n self._configuration = self._device.get_active_configuration()\n except Exception as e:\n return False\n self.reset()\n return True",
"def is_config_exist(self) -> bool:\n pass",
"def assert_has_feature(self, feature_name):\n if not self.features.get(\"has_{}\".format(feature_name), False):\n self.raise_config_error(\"Platform {} does not support to configure {feature_name}. \"\n \"Please make sure the platform \"\n \"you configured for {feature_name} actually supports that type \"\n \"of devices.\".format(self.__class__, feature_name=feature_name), 99)",
"def devices_exist(self):\n return all(r.sys_path_exists for r in self.rules)",
"def _check_min_required(self):\n self._adb_available = False\n try:\n adb_version = utils.do_shell_command('adb --version')\n if adb_version:\n if adb_version and 'Android Debug Bridge' in adb_version:\n self._adb_available = True\n else:\n self._adb_available = False\n\n if self._adb_available:\n self._adb_available = False\n adb_devices = utils.do_shell_command('adb devices')\n\n try:\n if adb_devices:\n adb_devices = adb_devices.split(os.linesep)\n\n for i, adb_device in enumerate(adb_devices):\n if not adb_device: # skip empty lines at bottom\n continue\n if i == 0: # skip first line 'List of devices attached'\n continue\n if adb_device.startswith('*'): # skip these lines '* daemon started successfully *'\n continue\n\n self._adb_available = True\n\n if not self._adb_available:\n print('No Devices! Make sure \\'Usb-Debugging\\' is enabled in DeveloperSettings')\n\n except Exception as e:\n print(e)\n\n # io error is handled here not in do_shell_command\n # if adb isnt there it gives file not found\n except IOError as io_error:\n # file not found\n if io_error.errno == 2:\n self._adb_available = False",
"def is_configured(self):\n if \"isConfigured\" in self._prop_dict:\n return self._prop_dict[\"isConfigured\"]\n else:\n return None",
"def detect(self):\n # Get PCI devices\n lines = subprocess.check_output([\"lspci\", \"-n\"]).decode().split(\"\\n\")\n for line in lines:\n if len(line) > 0:\n class_id = \"0x{0}\".format(line.split()[1].rstrip(\":\")[0:2])\n if class_id == self.class_id:\n dev = line.split()[2].split(\":\")\n vendor_id = \"0x{0}\".format(dev[0])\n product_id = \"0x{0}\".format(dev[1])\n if vendor_id == self.vendor_id and product_id in self.devices:\n return True\n return False",
"def is_config_exist(self) -> bool:\n return True",
"def is_configured(self) -> bool:\n return self._configured"
] | [
"0.7194064",
"0.7099172",
"0.7063311",
"0.70046985",
"0.6912286",
"0.6897651",
"0.683083",
"0.67324764",
"0.668593",
"0.66775596",
"0.6595061",
"0.65944046",
"0.6571044",
"0.65219057",
"0.64957625",
"0.6487861",
"0.6476712",
"0.64276296",
"0.64197534",
"0.64023596",
"0.6393062",
"0.6337627",
"0.63213813",
"0.63133734",
"0.6284692",
"0.6283001",
"0.62643397",
"0.62551135",
"0.6239051",
"0.6231843"
] | 0.859375 | 0 |
Check if device is configured to match object configuration. If obj is not None, use the given configuration instead of the current object configuration. Keyword arguments: obj, the object configuration (default None). Returns True if the device is configured and if that configuration matches the object configuration. | def device_matches_object(self, obj=None):
return self.device_is_configured and self.config_match(obj=obj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def config_match(self, dev=None, obj=None):\n\n\t\tif dev is None:\n\t\t\tdev = self.device_config\n\t\tif obj is None:\n\t\t\tobj = self.object_config\n\n\t\treturn dev == obj",
"def _check_object_config(self):\n src_cfg_format = self.config[\"source\"][\"config\"]\n\n job = self.active_queue[0]\n src_cfg_file = os.path.join(os.path.dirname(job[\"objects_filename\"]),\n src_cfg_format.replace(\"$id\", job[\"id\"]))\n job[\"config_filename\"] = src_cfg_file\n\n return os.path.isfile(src_cfg_file), src_cfg_file",
"def device_is_configured(self):\n\n\t\ttry:\n\t\t\t_ = self._dev\n\t\texcept:\n\t\t\treturn False\n\n\t\treturn True",
"def equals(self, obj: object) -> bool:\n ...",
"def determine_obj(self, obj):\n if type(obj) is Ohm:\n self._ohm_exists = self._ohm_exists ^ True\n if type(obj) is Amp:\n self._amp_exists = self._amp_exists ^ True\n if type(obj) is Volt:\n self._volt_exists = self._volt_exists ^ True\n if type(obj) is Power:\n self._power_exists = self._power_exists ^ True",
"def __is_hard_match(self, obj):\n for attr in self.list:\n try:\n if getattr(obj, attr) != getattr(self, attr):\n return False\n except AttributeError:\n pass\n return True",
"def object_config(self):\n\t\ttry:\n\t\t\treturn self._obj\n\t\texcept:\n\t\t\treturn 1",
"def exists(self, obj):\n return False",
"def applies(cls, obj):\n return type(obj) in cls.types",
"def __eq__(self, other_object):\n if type(self) != type(other_object):\n return False\n\n #check the important parameters\n #print \"TESTing\", self.config, other_object.config\n return self.config == other_object.config",
"def check(self):\r\n self._check_object(self._config.name)",
"def is_config_object_created() -> bool:\n return len(G_CONFIG_OBJECT) == 1",
"def bool(self, obj):\n return True",
"def bool(self, obj):\n return True",
"def get_ufp_enabled(self, obj: T) -> bool:\n if (ufp_enabled := self.ufp_enabled) is not None:\n if TYPE_CHECKING:\n # `ufp_enabled` is defined as a `str` in the dataclass, but\n # `__post_init__` converts it to a `tuple[str, ...]` to avoid\n # doing it at run time in `get_nested_attr` which is usually called\n # millions of times per day. This tells mypy that it's a tuple.\n assert isinstance(ufp_enabled, tuple)\n return bool(get_nested_attr(obj, ufp_enabled))\n return True",
"def __eq__(self, obj: \"Property\") -> bool:\n return self.name == obj.name and self.property_type == obj.property_type",
"def has_object_permission(self, request, view, obj):\n if request.method == \"GET\":\n return self.model_admin_config.has_view_permission(self, request, obj=obj)\n if request.method == \"PUT\":\n return self.model_admin_config.has_change_permission(self, request, obj=obj)\n if request.method == \"DELETE\":\n return self.model_admin_config.has_delete_permission(self, request, obj=obj)",
"def isinstance_blender_object(self, b_obj):\n # lame and slow, but functional\n return b_obj in Blender.Object.Get()",
"def in_(self, obj):\r\n return assert_(self.obj in obj, '%r not in %r' % (self.obj, obj))",
"def find_model(config, obj, mods):\n for mod in mods:\n if mod[0] != config:\n continue\n\n if len(mod) == 2:\n return mod[1]\n\n if len(mod) == 3 and mod[1] in obj:\n return mod[2]\n\n return None",
"def find_model(config, obj, mods):\n for mod in mods:\n if mod[0] != config:\n continue\n\n if len(mod) == 2:\n return mod[1]\n\n if len(mod) == 3 and mod[1] in obj:\n return mod[2]\n\n return None",
"def is_live(self, obj):\n most_appropriate_object = get_appropriate_object_from_model(self.model)\n if most_appropriate_object == obj:\n return True\n return False",
"def isConfigClass(cls, tmpcls, session=None):\n if session is not None:\n cls.getclsoptions(tmpcls, session)\n if 'POST' in optionsdict[tmpcls]['OPTIONS']:\n return True\n elif 'PUT' in optionsdict[tmpcls]['OPTIONS']:\n return True\n elif 'PATCH' in optionsdict[tmpcls]['OPTIONS']:\n return True\n elif 'DELETE' in optionsdict[tmpcls]['OPTIONS']:\n return True\n return False",
"def __contains__(self, obj):\n if isinstance(obj, self):\n query = self.where(**obj.data).select()\n result = query.execute()\n if result.count:\n return True\n return False",
"def has_object_permission(self, request, view, obj):\n if request.user.is_manager or request.user == obj.family or obj.family is None:\n return True\n return False",
"def __contains__(self, obj):\n\n if isinstance(obj, str):\n return obj in FileStorage.__objects\n return key(type(obj), obj.id) in FileStorage.__objects",
"def supports_protocol(self, obj, protocol):\n\n return self.adapt(obj, protocol, None) is not None",
"def has_object_permission(self, request, view, obj):\n if request.user == obj.family or obj.family is None:\n return True\n return False",
"def _is_this_color(cls, obj: Any) -> bool:\n\n return type(obj) is cls",
"def match(self, obj):\n\n return self._match(self.rule, obj)"
] | [
"0.8256742",
"0.60175085",
"0.57257104",
"0.5673974",
"0.56703717",
"0.5573148",
"0.55559886",
"0.5523081",
"0.5520244",
"0.5481249",
"0.541949",
"0.53592515",
"0.52480066",
"0.52480066",
"0.5242481",
"0.5224062",
"0.5213584",
"0.5182861",
"0.517725",
"0.5157547",
"0.5157547",
"0.51519185",
"0.5150543",
"0.5131692",
"0.51230466",
"0.51059175",
"0.5096602",
"0.5089693",
"0.5063939",
"0.50540537"
] | 0.86104596 | 0 |
Initializes the controller with the view & model. | def __init__(self, view, model):
self.view = view
self.view.set_controller(self)
self.model = model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()",
"def __init__(self, controller):\n self._controller = controller",
"def on_init(self):\n self.controller = gameController.Controller()",
"def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()",
"def initialize_model(self):\n pass",
"def initialize(self, model):\n pass",
"def initialize(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view",
"def setUp(self):\n self.theView = View()",
"def __init__(self, controller):\n super().__init__(controller)\n\n # The hovered input when entering this View.\n self.first_inp = \"s\"\n\n # Initialize selected variable.\n self.selected = None\n\n # Make background graphics.\n self.make_background_graphics()\n\n # Make Buttons.\n self.make_buttons()\n\n # Make the information box. This explains each Button.\n self.make_info_box()\n\n # Initializes popup.\n self.make_popup()\n\n # Map of input to functions.\n enter = self.graphics.ENTER_KEY\n self.controls = {\n # Pressing \"q\" will go back to the main menu.\n \"q\": lambda: Action(\"goto main menu view\", []),\n\n # Movement keys.\n \"w\": lambda: self.move_cursor(Direction.U),\n \"a\": lambda: self.move_cursor(Direction.L),\n \"s\": lambda: self.move_cursor(Direction.D),\n \"d\": lambda: self.move_cursor(Direction.R),\n\n # Repeat the last valid input.\n enter: self.repeat_last_valid_input,\n\n # Click the selected UIElement.\n \"m\": self.click\n }",
"def prepareController(self):\n pass",
"def init_view(self):\n self.view_map = self.ctx.clientmap",
"def init_model(self):\n pass",
"def __init__(self):\n self.model = Model()\n self.view = View()\n\n self.server = Server(msg_handler=self.msg_handler,\n err_handler=self.err_handler,\n conn_handler=self.conn_handler,\n quit_handler=self.quit_handler)\n self.server.start()\n\n self.view.frame.onclose(self.server.close)",
"def __init__(self):\n self.model = gameModel.Model()\n self.view = gameView.View()",
"def __init__(self, model, view):\n self._model = model\n self._view = view\n\n self._connectModel()\n self._connectView()\n\n self.valid = True\n\n self._model.user = PipelineHelper.getCurrentUser()\n if self._model.user is None:\n DisplayMayaDialog.displayMayaDialog(\n 'Not logged',\n 'Please login in Fenix and try again.',\n severity=DisplayMayaDialog.SeverityTypes.CRITICAL\n )\n self.valid = False",
"def init_model(self, model_name, config=None):\n ModelDirectory.init_model(model_name, pipeline=self, config=config)\n return self\n #self._action_list.append({'name': INIT_MODEL_ID, 'model_name': model_name, 'config': config})\n #return self.append_action()",
"def __init__(self):\n self.game_model = ScrollerModel(1280, 480)\n self.view = ScrollerView(self.game_model, 1280, 480)",
"def __init__(self, model, **kwargs):\n self.model = model\n\n super(RecordView, self).__init__(**kwargs)",
"def initView(self):\n return {}",
"def test_init(self):\n self.view.__init__()\n self.assertIsInstance(self.view.questionnaire, Questionnaire)\n self.assertEqual(self.view.questionnaire, self.questionnaire)",
"def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()",
"def initialize(self) -> None:\n self.model = load(self.path)",
"def __init__(self, model):\n self._model = model",
"def __init__(self):\n self.view = GuiView(self)\n return",
"def __init__(self):\n self.model = None",
"def __init__(self):\n self.model = None",
"def initialize_scene(self):\n if Time.now() - self.initial_time > 0.45 and self.should_initialize:\n self.should_initialize = False\n self.background_particle_controller = BackgroundParticlesController()\n self.player_controller = PlayerController()\n self.obstacle_controller_wrapper = ObstacleControllerWrapper()\n self.items_controller = ItemsControllerWrapper()\n self.score_controller = ScoreController()",
"def __init__(self, view, interactor, model, observer):\n\n # Set view and interactor to couple and process events\n self.view = view\n self.interactor= interactor\n\n # Set model and observer to couple and process events\n self.model = model\n self.observer = observer",
"def __init__(self, *args, **kwargs):\n\n\t\tassert ltrace(TRACE_USERS, '> UsersController.__init__(%s)' %\n\t\t\tUsersController.init_ok)\n\n\t\tif UsersController.init_ok:\n\t\t\treturn\n\n\t\tsuper(self.__class__, self).__init__(name='users')\n\n\t\tUsersController.init_ok = True\n\t\tassert ltrace(TRACE_USERS, '< UsersController.__init__(%s)' %\n\t\t\tUsersController.init_ok)",
"def __init__(self):\n\n self.controller = None\n\n self.game_running = False\n self.menu_view_running = False\n self.end_game_running = False"
] | [
"0.7051387",
"0.68626446",
"0.676445",
"0.67608416",
"0.6730788",
"0.66994655",
"0.66494745",
"0.662967",
"0.66260207",
"0.65950286",
"0.6576748",
"0.654844",
"0.6439896",
"0.64361614",
"0.6435332",
"0.6284416",
"0.62685275",
"0.62328696",
"0.6160774",
"0.6102789",
"0.6099398",
"0.6098826",
"0.60700434",
"0.5979176",
"0.5960245",
"0.5960245",
"0.5936289",
"0.5921174",
"0.5875528",
"0.5874308"
] | 0.80433726 | 0 |
Relationships for user. Returns a list of friends, people you are following, and followers, people that are following you but you are not following. | def relationships_for_user(self, user):
# List of users who follow "user".
followers_users_list = [ relationship.from_user for relationship in self.filter(to_user=user) ]
# List of relationships for users "user" follows, who also follow "user".
friend_list = self.filter(from_user=user, to_user__in=followers_users_list)
# List of users "user" is friends with.
friends_users_list = [ relationship.to_user for relationship in friend_list ]
# List of relatiosnhips for users who follow "user", but "user" does not follow back.
follower_list = self.filter(to_user=user).exclude(from_user__in=friends_users_list)
# List of relationships for users "user" follows, but do not follow "user" back.
following_list = self.filter(from_user=user).exclude(to_user__in=friends_users_list)
relationships = {
'friends': friend_list,
'followers': follower_list,
'following': following_list,
}
return relationships | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def follows(self):\r\n return relationships.Follows(self)",
"def follows(self):\n return relationships.Follows(self)",
"def user_following(username, max: int = None):\n for user_dict in client.user_relationships(username, max=max, type=\"following\"):\n print(json.dumps(user_dict))",
"def get_user_relationships(user):\n transactions = {}\n\n for transaction in Transaction.ready.filter(Q(created_by=user) | Q(sent_to=user)):\n other_user_handle = transaction.created_by.handle\n\n if user == transaction.created_by:\n other_user_handle = transaction.sent_to.handle\n\n if other_user_handle not in transactions:\n transactions[other_user_handle] = []\n\n transactions[other_user_handle].append(transaction)\n\n return transactions",
"def followed_by(self):\r\n return relationships.FollowedBy(self)",
"def get_followings_of_a_user(tx: Transaction, email: str) -> BoltStatementResult:\n query = f\"\"\"\n MATCH (:Person {{email: '{email}'}})-[:FOLLOWS]->(follower) RETURN follower.full_name AS full_name, follower.email AS email, follower.profile_image AS profile_image\n \"\"\"\n return tx.run(query)",
"def user_relationships(id, related_collection_name):\n response = None\n if request.method == 'POST':\n response = User.create_relationships(id, related_collection_name, eval(request.data))\n elif request.method == 'PATCH':\n response = User.update_relationship(id, related_collection_name, json.loads(request.data))\n elif request.method == 'DELETE':\n response = User.disconnect_relationship(id, related_collection_name, eval(request.data))\n elif request.method == 'GET':\n response = User.get_relationship(request.args, id, related_collection_name)\n return response",
"def followed_by(self):\n return relationships.FollowedBy(self)",
"def get_friends(self):\n edges = DirectedUserToUserEdge.all().filter(\n 'owner_user_id =', self.key().id()).run()\n return db.get([db.Key.from_path('User', edge.friend_user_id) for edge in\n edges])",
"def resolve_following(self, info):\n user = info.context.user\n follow_request = FollowRequest.objects.filter(follower=user.id, pending=False)\n return [follow.following for follow in follow_request]",
"def doesfollow(user):\n return jsonify({\n 'follows': isFollowed(g.user,user)\n })",
"def user_follow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.append(following)\n db.session.commit()\n return {'followed': True}",
"def user_following_gql(self, user_id: int, amount: int = 0) -> list:\n user_id = int(user_id)\n end_cursor = None\n users = []\n variables = {\n \"id\": user_id,\n \"include_reel\": True,\n \"fetch_mutual\": False,\n \"first\": 24\n }\n while True:\n if end_cursor:\n variables[\"after\"] = end_cursor\n data = self.public_graphql_request(\n variables, query_hash=\"e7e2f4da4b02303f74f0841279e52d76\"\n )\n if not data[\"user\"] and not users:\n raise UserNotFound(user_id=user_id, **data)\n page_info = json_value(\n data, \"user\", \"edge_follow\", \"page_info\", default={}\n )\n edges = json_value(\n data, \"user\", \"edge_follow\", \"edges\", default=[]\n )\n for edge in edges:\n users.append(extract_user_short(edge[\"node\"]))\n end_cursor = page_info.get(\"end_cursor\")\n if not page_info.get(\"has_next_page\") or not end_cursor:\n break\n if amount and len(users) >= amount:\n break\n # time.sleep(sleep)\n if amount:\n users = users[:amount]\n return users",
"def follow_user(cls, user, following):\r\n pass",
"def get_following_by_user(request):\n response, status_code = get_followings(request)\n if status_code != 200:\n return JsonResponse(response, status=status_code)\n serialize_data = FollowingSerializer(response, many=False).data\n return JsonResponse(serialize_data, status=status_code, safe=False)",
"def get_posts_of_followings_of_a_user(tx: Transaction, email: str) -> BoltStatementResult:\n query = f\"\"\"\n MATCH (:Person {{email: '{email}'}})\n -[:FOLLOWS]->(user:Person)\n -[:POSTED]->(post:Post)\n RETURN DISTINCT {{content:post.content, modified:post.modified, created:post.created, uuid:post.uuid, user_email:user.email}} AS posts\"\"\"\n return tx.run(query)",
"def follow_user(cls, user, following):\n pass",
"def relationships(self):",
"def get_followers(user_id):\n return list(Backward.objects.filter(destination_id=user_id) \\\n .values_list('source_id', flat=True))",
"def user_followers(username, max: int = None):\n for user_dict in client.user_relationships(username, max=max, type=\"followers\"):\n print(json.dumps(user_dict))",
"def get_followings(request):\n user_id = request.GET.get(\"user_id\")\n if not user_id:\n return {\"error\": \"User Id should be provided\"}, 400\n following_data = Following.objects.filter(user_profile_id=user_id, is_active=True).first()\n return following_data, 200",
"def get_friends(user, data):\n setA = list(\n data.loc[data.user == user].user_friend_list.values)\n setB = list(\n data.loc[data.user_friend_list == user].user\n .values)\n friends = list(set(set(setA).union(setB)))\n return friends",
"def get_following(user_id):\n return list(Forward.objects.filter(source_id=user_id).values_list(\n 'destination_id', flat=True))",
"def follow(user, people):\n api = get_api(user)\n current_screen_name = api.VerifyCredentials().GetScreenName()\n\n # don't let a user follow themselves\n screen_names = [person.twitter_screen_name for person in people]\n if current_screen_name in screen_names: screen_names.remove(current_screen_name)\n\n followed = []\n not_followed = []\n\n for screen_name in screen_names:\n try:\n api.CreateFriendship(screen_name=screen_name)\n followed.append(screen_name)\n except twitter.TwitterError:\n not_followed.append(screen_name)\n\n return 'followed %s people' % len(followed)",
"def resolve_followers(self, info):\n user = info.context.user\n follow_request = FollowRequest.objects.filter(following=user.id, pending=False)\n return [follow.follower for follow in follow_request]",
"def get_friends(user_id):\n return list(set(get_following(user_id)) &\n set(get_followers(user_id)))",
"def _user_following_info(self, uid: int = 0) -> List[_InstagramUser]:\n # If no uid was specified, use the authenticated user's uid\n if uid == 0:\n uid = self.uid\n\n followings: List[Dict[str, Any]] = self.api.getTotalFollowings(uid)\n user_followings = list([_InstagramUser(x) for x in followings])\n return user_followings",
"def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))",
"def get_people_followed(user_id, ignore_exceptions=False, return_type=list, start_index=0):\n def _get_followed(_user_id, _ignore_exceptions=False, _start_index=0, _count=100):\n \"\"\"This function performs the API call to get the users followed from a single GET request.\n\n .. versionchanged:: 3.1.0\n Renamed the function to only have a single underscore prefix and added parenthesis to the exception\n classes.\n\n :param _user_id: The User ID for the user against which to check\n :type _user_id: int\n :param _ignore_exceptions: Determines whether non-200 responses should raise an exception (Default: ``False``)\n :type _ignore_exceptions: bool\n :param _start_index: The startIndex for the API call (Default: ``0``)\n :type _start_index: int\n :param _count: The maximum number of results to return in the API call (Default: ``100``)\n :type _count: int\n :returns: The data from the @following sub-endpoint in JSON format\n :raises: :py:exc:`khorosjx.errors.exceptions.UserQueryError`,\n :py:exc:`khorosjx.errors.exceptions.UserNotFoundError`,\n \"\"\"\n _following_url = f\"{base_url}/people/{_user_id}/@following?count={_count}\" + \\\n f\"&startIndex={_start_index}\"\n _response = core.get_request_with_retries(_following_url)\n if _response.status_code == 200:\n _following_data = _response.json()\n else:\n if _ignore_exceptions:\n _empty_response = {\"list\": []}\n _following_data = core_utils.convert_dict_to_json(_empty_response)\n else:\n if _response.status_code == 404:\n raise errors.exceptions.UserNotFoundError()\n else:\n raise errors.exceptions.UserQueryError()\n return _following_data\n\n # Verify that the core connection has been established\n verify_core_connection()\n\n # Perform the initial API call\n people_followed = []\n following_data = _get_followed(user_id, ignore_exceptions)\n\n # Continue looping through the data from subsequent calls until an empty list is found in the JSON response\n while following_data.get('list'):\n for user_followed in following_data.get('list'):\n # Append reach User ID to the list\n people_followed.append(user_followed.get('id'))\n\n # Perform the next API call for the next 100 users\n start_index += 100\n following_data = _get_followed(user_id, ignore_exceptions, start_index)\n\n # Convert the list to a comma-separated string and return the value\n if return_type == str:\n people_followed = ','.join(people_followed)\n elif return_type == tuple:\n people_followed = tuple(people_followed)\n return people_followed",
"def get(self, request):\n # Retrieve the user from the request if they have been authenticated\n current_user = request.user\n # Get the following & followers username list\n # And the following & followers count for the current user\n user_following_data = get_user_following_data(current_user)\n # Return the follower details for the current user\n return Response(\n {\n \"message\": FOLLOW_USER_MSGS['MY_FOLLOWERS_SUCCESSFUL'],\n \"following\": user_following_data[\"following\"],\n \"followers\": user_following_data[\"followers\"],\n \"followingCount\": user_following_data[\"followingCount\"],\n \"followersCount\": user_following_data[\"followersCount\"]\n },\n status=status.HTTP_200_OK\n )"
] | [
"0.6660938",
"0.65419674",
"0.6477425",
"0.6456748",
"0.63984066",
"0.62829083",
"0.627599",
"0.6252226",
"0.62037635",
"0.61852443",
"0.61262274",
"0.6106134",
"0.60990894",
"0.6072475",
"0.6068187",
"0.6049861",
"0.6044976",
"0.60012174",
"0.5986694",
"0.59622574",
"0.59373134",
"0.59001017",
"0.5897292",
"0.58821684",
"0.5867463",
"0.5854855",
"0.5853938",
"0.5841044",
"0.58344764",
"0.5782839"
] | 0.82885265 | 0 |
Checks if flags is in args, and if not it adds the flag to args. | def override_if_not_in_args(flag, argument, args):
if flag not in args:
args.extend([flag, argument]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_flags(args: Sequence[str]) -> Dict[str, bool]:\n flags = {}\n for arg in args:\n if arg.startswith(FLAG_MARKER):\n flag_name = arg[len(FLAG_MARKER):]\n if flag_name and flag_name not in OMIT_FLAGS:\n flags[flag_name] = True\n else:\n break # Ignore flags after initial CLI call\n return flags",
"def try_add_flag(args, compiler, flag, ext=None):\n if try_compile(compiler, flags=args+[flag], ext=ext):\n args.append(flag)",
"def parse_xcodebuild_flags(args):\n result = {}\n key = None\n for arg in args:\n if arg.startswith('-'):\n if arg in INTERESTING_FLAGS:\n key = arg\n elif key is not None:\n result[key] = arg\n key = None\n\n return result",
"def parse_xcodebuild_flags(args):\n result = {}\n key = None\n for arg in args:\n if arg.startswith('-'):\n if arg in INTERESTING_FLAGS:\n key = arg\n elif key is not None:\n result[key] = arg\n key = None\n\n return result",
"def flag(x):\n if x in sys.argv:\n sys.argv.remove(x)\n return True\n else:\n return False",
"def get_flags(self, args):\n\n\t\tpositional = []\n\n\t\tfor argument in args:\n\t\t\t# A flag is an instance of a subclass of\n\t\t\t# flags.Flags if it was passed alone\n\t\t\tif isinstance(argument, flags.Flags):\n\t\t\t\tpositional.append(argument)\n\n\t\t\t# or is an integer if it was (bitwise) OR'd\n\t\t\t# with another flag (a \"flag combination\")\n\t\t\telif isinstance(argument, int):\n\t\t\t\tif argument < 0 or argument >= flags.LIMIT:\n\t\t\t\t\traise errors.FlagError(\"Flag value '{0}' is out of range \"\n\t\t\t\t\t\t\t\t\t\t \"!\".format(argument))\n\t\t\t\tpositional.append(argument)\n\n\t\t\t# Dictionaries store 'always'-arguments\n\t\t\telif isinstance(argument, dict):\n\t\t\t\tfor key, value in argument.items():\n\t\t\t\t\t# Simple 'always'-argument where one string\n\t\t\t\t\t# is mapped to one formatting flag-combination\n\t\t\t\t\tif isinstance(key, str):\n\t\t\t\t\t\tself.always[key] = value\n\n\t\t\t\t\t# Complex 'always'-argument with a\n\t\t\t\t\t# tuple containing strings, each with the same\n\t\t\t\t\t# flag-combination (same value)\n\t\t\t\t\telif isinstance(key, tuple):\n\t\t\t\t\t\tfor i in key:\n\t\t\t\t\t\t\tself.always[i] = value\n\t\t\t\t\telse:\n\t\t\t\t\t\traise errors.EcstasyError(\"Key '{0}' in dictionary \"\n\t\t\t\t\t\t\t\t\t\t\t\t \"argument passed is neither \"\n\t\t\t\t\t\t\t\t\t\t\t\t \"a string nor a tuple \"\n\t\t\t\t\t\t\t\t\t\t\t\t \"of strings!\".format(key))\n\n\t\t\telif isinstance(argument, collections.Iterable):\n\t\t\t\tpositional += self.get_flags(argument)\n\n\t\t\telse:\n\t\t\t\traise errors.EcstasyError(\"Argument '{0}' is neither a flag, a \"\n\t\t\t\t\t\t\t\t\t\t \"(bitwise) OR'd flag-combination, a \"\n\t\t\t\t\t\t\t\t\t\t \"dictionary nor an iterable of \"\n\t\t\t\t\t\t\t\t\t\t \"positional arguments \"\n\t\t\t\t\t\t\t\t\t\t \"!\".format(argument))\n\n\t\treturn positional",
"def add_arg(self, *args: Any, **kwargs: Any) -> None:\n # Normalize\n if len(args) == 1 and isinstance(args[0], Argument):\n arg = args[0]\n else:\n arg = Argument(*args, **kwargs)\n # Uniqueness constraint: no name collisions\n for name in arg.names:\n if name in self.args:\n msg = \"Tried to add an argument named {!r} but one already exists!\" # noqa\n raise ValueError(msg.format(name))\n # First name used as \"main\" name for purposes of aliasing\n main = arg.names[0] # NOT arg.name\n self.args[main] = arg\n # Note positionals in distinct, ordered list attribute\n if arg.positional:\n self.positional_args.append(arg)\n # Add names & nicknames to flags, args\n self.flags[to_flag(main)] = arg\n for name in arg.nicknames:\n self.args.alias(name, to=main)\n self.flags.alias(to_flag(name), to=to_flag(main))\n # Add attr_name to args, but not flags\n if arg.attr_name:\n self.args.alias(arg.attr_name, to=main)\n # Add to inverse_flags if required\n if arg.kind == bool and arg.default is True:\n # Invert the 'main' flag name here, which will be a dashed version\n # of the primary argument name if underscore-to-dash transformation\n # occurred.\n inverse_name = to_flag(\"no-{}\".format(main))\n self.inverse_flags[inverse_name] = to_flag(main)",
"def read_flags():\n return flag_args",
"def _is_args_added(parser: CoreParser, custom_args: List[str]) -> bool:\n namespace, _ = parser.parser.parse_known_args()\n namespace_args = vars(namespace).keys()\n\n for arg in custom_args:\n if arg not in namespace_args:\n return False\n\n return True",
"def checkargs_wprefresh(cmdargs):\n flags = []\n # Collect Static for wprefresh\n yesforced = False\n noforced = False\n restartonly = False\n if cmdargs:\n for yesarg in ('-c', '--collect'):\n if yesarg in cmdargs:\n yesforced = True\n break\n for noarg in ('-C', '--nocollect'):\n if noarg in cmdargs:\n noforced = True\n break\n if yesforced and noforced:\n print('\\nBoth \\'--collect\\' and \\'--nocollect\\' '\n 'args used, this won\\'t work.')\n sys.exit(1)\n for restartarg in ('-r', '--restart'):\n if restartarg in cmdargs:\n restartonly = True\n break\n\n if (not yesforced) and (not noforced) and (not restartonly):\n collectstatic = input('Would you like to collect static files? '\n '(yes/no): ')\n if collectstatic.lower().startswith('y'):\n flags.append('--collect')\n else:\n flags.append('--nocollect')\n # Get live/skip args\n liveforced = False\n skipforced = False\n if cmdargs:\n # live args used?\n for livearg in ('-l', '--live'):\n if livearg in cmdargs:\n liveforced = True\n break\n # skip args used?\n for skiparg in ('-R', '--norestart'):\n if skiparg in cmdargs:\n skipforced = True\n break\n\n # Ambiguos args.\n if skipforced and restartonly:\n print('\\nBoth \\'--restart\\' and \\'--norestart\\' args used. '\n 'this won\\'t work.')\n sys.exit(1)\n\n if skipforced and liveforced:\n print('\\nBoth \\'--live\\' and \\'--norestart\\' args used, '\n 'this won\\'t work.')\n sys.exit(1)\n\n needlive = (not liveforced) and (not skipforced)\n if (not is_test_site()) and (needlive):\n livesite = confirm('*** This is the LIVE site! ***\\n'\n 'Would you like to restart apache?')\n if livesite:\n flags.append('--live')\n else:\n flags.append('--norestart')\n return flags",
"def parse_args(self, parse_args_request: ParseArgsRequest) -> OptionValueContainer:\n\n flag_value_map = parse_args_request.flag_value_map\n namespace = parse_args_request.namespace\n\n mutex_map: DefaultDict[str, list[str]] = defaultdict(list)\n for args, kwargs in self._option_registrations:\n self._validate(args, kwargs)\n dest = self.parse_dest(*args, **kwargs)\n\n # Compute the values provided on the command line for this option. Note that there may be\n # multiple values, for any combination of the following reasons:\n # - The user used the same flag multiple times.\n # - The user specified a boolean flag (--foo) and its inverse (--no-foo).\n # - The option has multiple names, and the user used more than one of them.\n #\n # We also check if the option is deprecated, but we only do so if the option is explicitly\n # specified as a command-line flag, so we don't spam users with deprecated option values\n # specified in config, which isn't something they control.\n implicit_value = kwargs.get(\"implicit_value\")\n if implicit_value is None and self.is_bool(kwargs):\n implicit_value = True # Allows --foo to mean --foo=true.\n\n flag_vals: list[int | float | bool | str] = []\n\n def add_flag_val(v: int | float | bool | str | None) -> None:\n if v is None:\n if implicit_value is None:\n raise ParseError(\n f\"Missing value for command line flag {arg} in {self._scope_str()}\"\n )\n flag_vals.append(implicit_value)\n else:\n flag_vals.append(v)\n\n for arg in args:\n # If the user specified --no-foo on the cmd line, treat it as if the user specified\n # --foo, but with the inverse value.\n if self.is_bool(kwargs):\n inverse_arg = self._inverse_arg(arg)\n if inverse_arg in flag_value_map:\n flag_value_map[arg] = [self._invert(v) for v in flag_value_map[inverse_arg]]\n implicit_value = self._invert(implicit_value)\n del flag_value_map[inverse_arg]\n\n if arg in flag_value_map:\n for v in flag_value_map[arg]:\n add_flag_val(v)\n del flag_value_map[arg]\n\n # Get the value for this option, falling back to defaults as needed.\n try:\n value_history = self._compute_value(\n dest, kwargs, flag_vals, parse_args_request.passthrough_args\n )\n self._history[dest] = value_history\n val = value_history.final_value\n except ParseError as e:\n # Reraise a new exception with context on the option being processed at the time of error.\n # Note that other exception types can be raised here that are caught by ParseError (e.g.\n # BooleanConversionError), hence we reference the original exception type as type(e).\n args_str = \", \".join(args)\n raise type(e)(\n softwrap(\n f\"\"\"\n Error computing value for {args_str} in {self._scope_str()} (may also be\n from PANTS_* environment variables). Caused by:\n\n {e}\n \"\"\"\n )\n )\n\n # If the option is explicitly given, check deprecation and mutual exclusion.\n if val.rank > Rank.HARDCODED:\n self._check_deprecated(dest, kwargs)\n mutex_dest = kwargs.get(\"mutually_exclusive_group\")\n mutex_map_key = mutex_dest or dest\n mutex_map[mutex_map_key].append(dest)\n if len(mutex_map[mutex_map_key]) > 1:\n raise MutuallyExclusiveOptionError(\n softwrap(\n f\"\"\"\n Can only provide one of these mutually exclusive options in\n {self._scope_str()}, but multiple given:\n {', '.join(mutex_map[mutex_map_key])}\n \"\"\"\n )\n )\n\n setattr(namespace, dest, val)\n\n if not parse_args_request.allow_unknown_flags and flag_value_map:\n # There were unconsumed flags.\n raise UnknownFlagsError(tuple(flag_value_map.keys()), self.scope)\n return namespace.build()",
"def test_addFlags(self):\n self._flagsTest('addFlags', b'+FLAGS')",
"def add_args(parser):\n parser.add_argument(\n \"--zero-infinity\", action=\"store_true\", help=\"zero inf loss\"\n )\n try:\n parser.add_argument(\n \"--remove-bpe\",\n \"--post-process\",\n default=\"letter\",\n help=\"remove BPE tokens before scoring (can be set to sentencepiece, letter, and more)\",\n )\n except:\n pass # this option might have been added from eval args ",
"def take_action_on_flags(self, *args, **kwargs):\r\n pass",
"def add_custom_argument(self, *name_or_flags, **options):\n self._specific_args_group.add_argument(*name_or_flags, **options)",
"def get_flags(args):\r\n\r\n flags = 0\r\n\r\n if args.regexfilepattern is not None:\r\n flags |= pygrep.FILE_REGEX_MATCH\r\n\r\n if not args.regexp:\r\n flags |= pygrep.LITERAL\r\n elif args.dotall:\r\n flags |= pygrep.DOTALL\r\n\r\n if args.ignore_case:\r\n flags |= pygrep.IGNORECASE\r\n\r\n if args.recursive:\r\n flags |= pygrep.RECURSIVE\r\n\r\n if args.regexdirpattern:\r\n flags |= pygrep.DIR_REGEX_MATCH\r\n\r\n return flags",
"def normalize_flags(argv: List[str]) -> List[str]:\n bolean_flag_patern = re.compile(r'--[\\w_]+=(true|false)')\n\n def _normalize_flag(arg: str) -> str:\n if not bolean_flag_patern.match(arg):\n return arg\n if arg.endswith('=true'):\n return arg[: -len('=true')] # `--flag=true` -> `--flag`\n elif arg.endswith('=false'):\n # `--flag=false` -> `--noflag`\n return '--no' + arg[len('--') : -len('=false')]\n else:\n raise AssertionError(f'Unrecognized arg: {arg}')\n\n return [_normalize_flag(a) for a in argv]",
"def add_args(parser, args):\n for arg in args:\n parser.add_argument('--' + arg, **global_args_dict[arg])\n return parser",
"def parse(self, args, config_uri=None):\n unparsed = []\n\n skip_parse = False\n\n if config_uri is not None:\n # Add flags from config_uri. Unknown flags are accumulated in unparsed.\n unparsed.extend(self._dict_to_args(self._parse_from_config_uri(config_uri)))\n\n for arg in args:\n if skip_parse:\n unparsed.append(arg)\n continue\n\n if arg == '--':\n skip_parse = True\n continue\n\n match = RE_FLAG.match(arg)\n if match is None:\n unparsed.append(arg)\n continue\n\n key = match.group(1).replace('-', '_')\n value = match.group(2)\n\n flag_desc = self._defs.get(key, None)\n if flag_desc is not None:\n flag_desc.parse(value)\n elif key == self._CONFIG_FLAG.name:\n # --config is a special case to trigger loading parse flag values\n # from a JSON dictionary in a uri.\n #\n # It is handled outside of self._defs for 2 reasons:\n # - Unknown flags from the configuration are accumulated in the local variable\n # unparsed.\n # - Users may override and handle --config themselves if they choose.\n\n # Add flags from uri. Unknown flags are accumulated in unparsed.\n unparsed.extend(self._dict_to_args(self._parse_from_config_uri(value)))\n else:\n unparsed.append(arg)\n\n self._unparsed = tuple(unparsed)\n return True",
"def _add_argument(self, args=''):\n\n sys.argv += args.split(' ')",
"def cli(arg_dict):\n\n keys = list(arg_dict.keys())\n for key in keys:\n v = arg_dict[key]\n usr_args_ls = sys.argv\n for ind in range(len(usr_args_ls)):\n val = usr_args_ls[ind]\n if val == \"-\" + key[0] or val == \"--\" + key:\n if type(v).__name__ == \"bool\":\n v = not v\n else:\n v = usr_args_ls[ind + 1]\n\n arg_dict[key] = v",
"def _add_flag(self, mbox, msgset, flag):\n self.select_mailbox(mbox, False)\n self._cmd(\"STORE\", msgset, \"+FLAGS\", flag)",
"def AddFlags(arg_parser):\n common_flags.DefineAppsDomainFlagWithDefault(arg_parser)\n common_flags.DefineVerboseFlagWithDefaultFalse(arg_parser)\n\n arg_parser.add_argument(\n '--long_list', '-l', action='store_true', default=False,\n help='Show more columns of output.')\n arg_parser.add_argument(\n '--plus_domains', '-p', action='store_true', default=False,\n help='Show output from Google Plus Domains Profile.')\n arg_parser.add_argument(\n '--user_email', '-u', required=True,\n help='User email address [REQUIRED].',\n type=validators.EmailValidatorType())",
"def test_addFlagsSilently(self):\n self._flagsSilentlyTest('addFlags', b'+FLAGS.SILENT')",
"def _ParseFlags(argv=sys.argv):\n try:\n argv = FLAGS(argv)\n logging.debug('Parsed command line flags: {}'.format(FLAGS.input_file))\n except flags.Error as e:\n logging.error(e)\n sys.exit(1)",
"def argument(self, *name_or_flags, **kwargs):\n return self.parser.add_argument(*name_or_flags, **kwargs)",
"def with_arg(self, flag, value=\"\"):\n self._args[flag] = value\n return self",
"def Parse(self, args):\n unparsed = []\n\n skip_parse = False\n\n for arg in args:\n if arg == '--':\n skip_parse = True\n continue\n\n if skip_parse:\n unparsed.append(arg)\n continue\n\n match = RE_FLAG.match(arg)\n if match is None:\n unparsed.append(arg)\n continue\n\n key = match.group(1)\n value = match.group(2)\n\n if key not in self._defs:\n unparsed.append(arg)\n continue\n\n self._defs[key].Parse(value)\n\n self._unparsed = tuple(unparsed)\n return True",
"def _check_args(self, args_):\n\n pass",
"def test_options2args():\n args, kwargs = util.options2args([\"--arg1\", \"-arg2\", \"--arg3=10\"])\n assert all([\"--arg1\" in args, \"-arg2\" in args, \"arg3\" in kwargs.keys()])"
] | [
"0.6677477",
"0.6645882",
"0.60863894",
"0.60863894",
"0.60808325",
"0.60373974",
"0.60074294",
"0.59921616",
"0.59623545",
"0.5952936",
"0.5949942",
"0.5934693",
"0.5869397",
"0.58683527",
"0.5840151",
"0.5837697",
"0.5830563",
"0.5817184",
"0.58051646",
"0.5788879",
"0.5758011",
"0.5743646",
"0.5735064",
"0.5732392",
"0.5726702",
"0.57166374",
"0.5703944",
"0.5683265",
"0.56687033",
"0.5636021"
] | 0.76882064 | 0 |
Builds the evaluation VAEGAN graph. | def build_eval_graph(self, data_dir, batch_size):
return self.build_graph(data_dir, batch_size, mode=EVAL) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def buildGraph(self):\r\n\r\n print 'Building graph...'\r\n\r\n self.buildQ()\r\n self.buildP()\r\n self.buildReconstructionTerm()\r\n self.buildConditionalPriorTerm()\r\n self.buildWPriorTerm()\r\n self.buildZPriorTerm()\r\n\r\n self.buildObjective()\r\n self.buildGrad()",
"def build_eval_graph(self):\n saver = tf.train.import_meta_graph('./model_check/embedding.ckpt-15.meta')\n saver.restore(self.sess, tf.train.latest_checkpoint('./model_check'))\n emb = self.sess.run('embeddings:0')\n \n nemb = tf.nn.l2_normalize(emb, 1)\n \n analogy_a = tf.placeholder(dtype=tf.int32)\n analogy_b = tf.placeholder(dtype=tf.int32)\n analogy_c = tf.placeholder(dtype=tf.int32)\n\n a_emb = tf.gather(nemb, analogy_a)\n b_emb = tf.gather(nemb, analogy_b)\n c_emb = tf.gather(nemb, analogy_c)\n\n target = c_emb + (a_emb - b_emb)\n dist = tf.matmul(target, nemb, transpose_b=True)\n _, pred_idx = tf.nn.top_k(dist, 4)\n\n nearby_word = tf.placeholder(dtype=tf.int32)\n nearby_emb = tf.gather(nemb, nearby_word)\n nearyby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)\n nearby_val, nearby_idx = tf.nn.top_k(nearyby_dist, min(1000, nemb.shape[0]))\n\n\n self._analogy_a = analogy_a\n self._analogy_b = analogy_b\n self._analogy_c = analogy_c\n self._analogy_pred_idx = pred_idx\n self._nearby_word = nearby_word\n self._nearby_val = nearby_val\n self._nearby_idx = nearby_idx\n\n tf.global_variables_initializer().run()\n self.saver = tf.train.Saver()",
"def _build_and_compile_vae(self):\n z_dimension = int(self.n_dims//2)+1\n\n self.encoder, inputs, z_mean, z_var = self._encoder(z_dimension, self.mcd)\n\n self.decoder, outputs = self._decoder(inputs, z_dimension, self.mcd)\n\n self.model = Model(inputs, outputs, name=\"VAE IMPUTER\")\n\n loss = self.vae_loss(self.n_dims, z_mean, z_var)\n\n self.model.compile(optimizer=self.optimizer, loss=loss)",
"def _build_graph(self):\n pass",
"def build_graph(self):\n pass",
"def build_inference_graph(self):\n self.build_train_graph()",
"def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()",
"def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()",
"def _build_graph(self):\n self._setup_placeholders()\n self._embed()\n self.p_emb = tf.concat([self.p_emb, tf.expand_dims(self.em, -1)], -1)\n self._encode()\n self._match()\n self._fuse()\n\n with tf.variable_scope('boundary'):\n self._decode()\n with tf.variable_scope('content'):\n self._content()\n with tf.variable_scope('verif'):\n self._verify()\n\n self._compute_loss()",
"def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()",
"def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.regularizer_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)",
"def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)",
"def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.cluster_layer = Clustering(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()+self.gamma*self.cluster_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()",
"def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.cluster_layer = Clustering(self.args)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.gamma*self.cluster_layer(self.factorization_layer)+self.regularizer_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)",
"def _build_graph(self, train_data, test_data):\n\n # Network for testing / evaluation\n # As before, we define placeholders for the input. These here now can be fed\n # directly, e.g. with a feed_dict created by _evaluation_food\n self.expert_outputs = {m: test_pipeline(test_data[m], self.config['prefixes'][m],\n **self.config)\n for m in self.modalities}\n self.prediction = self._fusion(self.expert_outputs)",
"def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()",
"def build(self): \n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.factorization_layer = Factorization(self.args, self.vocab_size)\n self.cluster_layer = Clustering(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.factorization_layer()+self.gamma*self.cluster_layer(self.factorization_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n \n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n \n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss, global_step = self.batch)\n \n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)",
"def _build_computation_graph(self):\n raise NotImplementedError",
"def build_graph(self):\n raise NotImplementedError",
"def build_eval_graph(self):\n # Eval graph\n opts = self._options\n\n # Each analogy task is to predict the 4th word (d) given three\n # words: a, b, c. E.g., a=italy, b=rome, c=france, we should\n # predict d=paris.\n\n # The eval feeds three vectors of word ids for a, b, c, each of\n # which is of size N, where N is the number of analogies we want to\n # evaluate in one batch.\n analogy_a = tf.placeholder(dtype=tf.int32) # [N]\n analogy_b = tf.placeholder(dtype=tf.int32) # [N]\n analogy_c = tf.placeholder(dtype=tf.int32) # [N]\n\n # Normalized word embeddings of shape [vocab_size, emb_dim].\n nemb = tf.nn.l2_normalize(self.embeddings, 1)\n\n # Each row of a_emb, b_emb, c_emb is a word's embedding vector.\n # They all have the shape [N, emb_dim]\n a_emb = tf.gather(nemb, analogy_a) # a's embs\n b_emb = tf.gather(nemb, analogy_b) # b's embs\n c_emb = tf.gather(nemb, analogy_c) # c's embs\n\n # We expect that d's embedding vectors on the unit hyper-sphere is\n # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].\n target = c_emb + (b_emb - a_emb)\n\n # Compute cosine distance between each pair of target and vocab.\n # dist has shape [N, vocab_size].\n dist = tf.matmul(target, nemb, transpose_b=True)\n\n # For each question (row in dist), find the top 4 words.\n _, pred_idx = tf.nn.top_k(dist, 5)\n\n # Nodes for computing neighbors for a given word according to\n # their cosine distance.\n nearby_word = tf.placeholder(dtype=tf.int32) # word id\n nearby_emb = tf.gather(nemb, nearby_word)\n nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)\n nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,\n min(1000, opts.vocabulary_size))\n\n field_cates = tf.placeholder(dtype=tf.int32)\n field_embs = tf.gather(self.embeddings, field_cates)\n center_point = tf.reduce_mean(field_embs, 0)\n avg_distance = tf.reduce_mean(tf.sqrt(tf.reduce_sum(\n tf.pow(tf.sub(center_point, field_embs), 2), 1)), 0)\n\n self._avg_distance = avg_distance\n self._field_cates = field_cates\n # Nodes in the construct graph which are used by training and\n # evaluation to run/feed/fetch.\n self._analogy_a = analogy_a\n self._analogy_b = analogy_b\n self._analogy_c = analogy_c\n self._analogy_pred_idx = pred_idx\n self._nearby_word = nearby_word\n self._nearby_val = nearby_val\n self._nearby_idx = nearby_idx\n\n # Properly initialize all variables.\n # tf.initialize_all_variables().run()\n\n # self.saver = tf.train.Saver()",
"def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n self._encode()\n self._match()\n self._fuse()\n self._decode()\n self._passage_rank()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))",
"def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()+self.regularizer_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)",
"def build_graph(self, data_dir, batch_size, mode):\r\n tensors = GraphReferences()\r\n assert batch_size > 0\r\n self.batch_size = batch_size\r\n \r\n if mode in (TRAIN, EVAL):\r\n trainData = TFDataLoaderUtil(data_dir, 'train2014')\r\n trainQuestions = [value['question'] for key, value in trainData.dataset.items()]\r\n self.MAX_QUES_PAD_LEN = max(list(map(lambda x: len(self.tokenizer.split_sentence(x)), trainQuestions)))\r\n self.tokenizer.generate_vocabulary(trainQuestions)\r\n logging.info('Size of Question Vectors: %d', self.MAX_QUES_PAD_LEN)\r\n \r\n self.trainTFDataset = trainData.genTFDatasetObject(self.tokenizer, \r\n self.MAX_QUES_PAD_LEN, \r\n self.batch_size, \r\n self.NUM_PARALLEL_CALLS, \r\n self.BUFFER_SIZE)\r\n tensors.quesVec = self.trainTFDataset.get_next()[0]\r\n tensors.posImg = self.trainTFDataset.get_next()[1]\r\n tensors.negImg = self.trainTFDataset.get_next()[2]\r\n \r\n if mode is EVAL:\r\n evalData = TFDataLoaderUtil(data_dir, 'val2014') \r\n self.evalTFDataset = evalData.genTFDatasetObject(self.tokenizer, \r\n self.MAX_QUES_PAD_LEN, \r\n self.batch_size, \r\n self.NUM_PARALLEL_CALLS, \r\n self.BUFFER_SIZE)\r\n \r\n tensors.quesVec = self.evalTFDataset.get_next()[0]\r\n tensors.posImg = self.evalTFDataset.get_next()[1]\r\n tensors.negImg = self.evalTFDataset.get_next()[2] \r\n \r\n siamGAN = SiamGan()\r\n quesEmbeds = QuestionEmbedding().stackedLSTMWordEmbedding(\r\n vocab_size=self.VOCAB_SIZE, \r\n embed_size=self.WORD_EMBED_SIZE, \r\n INP_SIZE=self.QUES_SIZE)\r\n \r\n tensors.posImgEmbeds = siamGAN.getDiscriminator(self.IMG_SHAPE)(tensors.posImage)\r\n \r\n tensors.negImgEmbeds = siamGAN.getDiscriminator(self.IMG_SHAPE)(tensors.negImg)\r\n\r\n tensors.genImgdata = siamGAN.getGenerator(self.QUES_EMBED_SIZE)(quesEmbeds(self.quesVec))\r\n\r\n tensors.genImgEmbeds = siamGAN.getDiscriminator(self.IMG_SHAPE)(tensors.genImgdata)\r\n \r\n \r\n if mode in (EVAL, TRAIN):\r\n\r\n tensors.discLoss, tensors.genLoss = siamGAN.tripletLoss(\r\n tensors.genImgEmbeds, \r\n tensors.posImgEmbeds, \r\n tensors.negImgEmbeds)\r\n #regularize\r\n \r\n tf.summary.scalar('cost_generator', tensors.genLoss)\r\n tf.summary.scalar('cost_discriminator', tensors.discLoss)\r\n tf.summary.tensor_summary('disc_pos', tensors.posImgEmbeds)\r\n tf.summary.tensor_summary('disc_neg', tensors.negImgEmbeds)\r\n tf.summary.scalar('mean_disc_pos', tf.reduce_mean(tensors.posImgEmbeds))\r\n tf.summary.scalar('mean_disc_neg', tf.reduce_mean(tensors.negImgEmbeds))\r\n \r\n # Cost of Decoder/Generator is VAE network cost and cost of generator\r\n # being detected by the discriminator.\r\n tensors.global_step = tf.Variable(0, name='global_step', trainable=False)\r\n t_vars = tf.trainable_variables()\r\n \r\n with tf.variable_scope(tf.get_variable_scope(), reuse=None):\r\n generator_vars = [var for var in t_vars if var.name.startswith('gen_')]\r\n discriminator_vars = [\r\n var for var in t_vars if var.name.startswith('disc_')\r\n ]\r\n \r\n tensors.discOptimizer = tf.train.GradientDescentOptimizer(\r\n self.DISC_LR).minimize(\r\n tensors.discLoss,\r\n var_list = discriminator_vars,\r\n global_step = tensors.global_step)\r\n \r\n tensors.genOptimizer = tf.train.AdamOptimizer(\r\n learning_rate = self.GEN_LR, \r\n beta1 = self.GEN_BETA1, \r\n beta2 = self.GEN_BETA2).minimize(\r\n tensors.genLoss,\r\n var_list = generator_vars,\r\n global_step = tensors.global_step)\r\n\r\n return tensors",
"def build(self):\n self.computation_graph = tf.Graph()\n with self.computation_graph.as_default():\n\n self.walker_layer = DeepWalker(self.args, self.vocab_size, self.degrees)\n self.cluster_layer = Clustering(self.args)\n self.regularizer_layer = Regularization(self.args)\n\n self.gamma = tf.placeholder(\"float\")\n self.loss = self.walker_layer()\n self.loss = self.loss + self.gamma*self.cluster_layer(self.walker_layer)\n self.loss = self.loss + self.regularizer_layer(self.walker_layer)\n\n self.batch = tf.Variable(0)\n self.step = tf.placeholder(\"float\")\n\n self.learning_rate_new = tf.train.polynomial_decay(self.args.initial_learning_rate,\n self.batch,\n self.true_step_size,\n self.args.minimal_learning_rate,\n self.args.annealing_factor)\n\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_new).minimize(self.loss,\n global_step=self.batch)\n\n self.init = tf.global_variables_initializer()\n\n self.weights = overlap_generator(self.args, self.graph)",
"def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n self._encode()\n self._match()\n self._fuse()\n self._decode()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = total_params(tf.trainable_variables())\n self.logger.info('There are {} parameters in the model'.format(param_num))",
"def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n self._encode_back()\n self._encode()\n self._match()\n self._fuse()\n self._decode()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))",
"def build(self, graph, name_scopes, training):\n raise NotImplementedError('Must be overridden by concrete subclass')",
"def _build_graph(self):\n start_t = time.time()\n self.load_data()\n self.get_train_data()\n self.plot_data()\n self._setup_placeholders()\n self.lstm()\n self._compute_loss()\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))",
"def build_eval_graph(self, name, emb_ten, query_ten, knn=100, en_knn=True, en_mean=True):\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n # Look Up the words\n emb_lut = tf.nn.embedding_lookup(emb_ten, self.src_ph, name=\"EvalSrcLut\")\n query_lut = tf.nn.embedding_lookup(query_ten, self.tgt_ph, name=\"EvalTgtLut\")\n # Cast\n emb = tf.cast(emb_lut, tf.float32)\n query = tf.cast(query_lut, tf.float32)\n # MM\n sim_scores = tf.matmul(emb, tf.transpose(query))\n # Topk\n if en_knn and not en_mean:\n top_matches = tf.nn.top_k(sim_scores, knn)\n return top_matches\n if en_knn and en_mean:\n top_matches = tf.nn.top_k(sim_scores, knn)\n best_distances = tf.reduce_mean(top_matches[0], axis=1)\n return best_distances\n return sim_scores",
"def gen_graph(self):"
] | [
"0.66330874",
"0.65755147",
"0.64188087",
"0.6387675",
"0.6357557",
"0.629013",
"0.62466234",
"0.619727",
"0.61953604",
"0.61843127",
"0.61041623",
"0.6084243",
"0.6071744",
"0.6046951",
"0.6042873",
"0.60207635",
"0.6010319",
"0.6009588",
"0.5993697",
"0.5976763",
"0.59427863",
"0.59367794",
"0.5921881",
"0.59040356",
"0.58896005",
"0.58624375",
"0.5833217",
"0.5739939",
"0.57201546",
"0.5717531"
] | 0.6601584 | 1 |
Builds the prediction VAEGAN graph for image input. | def build_prediction_image_graph(self):
tensors = self.build_graph(1, PREDICT_IMAGE_IN)
keys_p = tf.placeholder('float32', [None, self.QUES_SIZE])
inputs = {'key': keys_p, 'ques_vector': tensors.quesVec}
keys = tf.identity(keys_p)
outputs = {'key': keys, 'prediction': tensors.genImgData}
return inputs, outputs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_inference_graph(self):\n self.build_train_graph()",
"def _build_and_compile_vae(self):\n z_dimension = int(self.n_dims//2)+1\n\n self.encoder, inputs, z_mean, z_var = self._encoder(z_dimension, self.mcd)\n\n self.decoder, outputs = self._decoder(inputs, z_dimension, self.mcd)\n\n self.model = Model(inputs, outputs, name=\"VAE IMPUTER\")\n\n loss = self.vae_loss(self.n_dims, z_mean, z_var)\n\n self.model.compile(optimizer=self.optimizer, loss=loss)",
"def _build_graph(self):\n self._setup_placeholders()\n self._embed()\n self.p_emb = tf.concat([self.p_emb, tf.expand_dims(self.em, -1)], -1)\n self._encode()\n self._match()\n self._fuse()\n\n with tf.variable_scope('boundary'):\n self._decode()\n with tf.variable_scope('content'):\n self._content()\n with tf.variable_scope('verif'):\n self._verify()\n\n self._compute_loss()",
"def _build_graph(self, train_data, test_data):\n\n # Network for testing / evaluation\n # As before, we define placeholders for the input. These here now can be fed\n # directly, e.g. with a feed_dict created by _evaluation_food\n self.expert_outputs = {m: test_pipeline(test_data[m], self.config['prefixes'][m],\n **self.config)\n for m in self.modalities}\n self.prediction = self._fusion(self.expert_outputs)",
"def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()",
"def build_graph(self):\n n_classes = self.n_classes\n\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n self.compute_rDeRF() # dummy\n\n # Classification\n (self.feed('conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool6')\n .reshape(shape=(-1, 7, 7, 512), name='pool6_reshape')\n .fc(4096, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(4096, name='fc7')\n .dropout(0.5, name='drop7')\n # .make_time(name='drop7_reduced')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n pass",
"def post(self):\n result = {'status': 'error'}\n\n args = input_parser.parse_args()\n input_data = args['image'].read()\n image = self.model_wrapper._read_image(input_data)\n preds = self.model_wrapper._predict(image)\n\n # Modify this code if the schema is changed\n label_preds = [{'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in [x for x in preds]]\n result['predictions'] = label_preds\n result['status'] = 'ok'\n\n return result",
"def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()",
"def _build_predict_edges(self, N=100):\n with tf.device(self.device):\n self.pred_edges_ph = tf.placeholder(dtype=tf.int32)\n # MC estimate\n self.predict_edges = tf.reduce_mean(self._edge_prob_samples(self.pred_edges_ph, N=N), axis=0)",
"def create_graph(self, feature, **kwargs):\n self.input_size = feature.shape[1:3]\n\n net = PSPNet101({'data': feature}, is_training=True, num_classes=self.class_num)\n self.pred = net.layers['conv6']\n pred = tf.image.resize_bilinear(self.pred, self.input_size)\n self.output_size = pred.shape[1:3]\n self.output = tf.nn.softmax(pred)",
"def build_model(self):\n \n start_time = time.time()\n print(\"build model started\")\n # label\n self.FA = tf.placeholder(dtype=tf.int32, shape=[None])\n self.ges = tf.placeholder(dtype=tf.int32, shape=[None])\n self.obj = tf.placeholder(dtype=tf.int32, shape=[None])\n \n self.images = tf.placeholder(dtype=tf.float32, shape=[None, height, width, 3])\n batch_size = tf.shape(self.images)[0]\n rgb_scaled = self.images * 255.0\n\n # Convert RGB to BGR\n VGG_MEAN = [103.939, 116.779, 123.68]\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n \n with tf.variable_scope(\"vgg19\"):\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.conv3_4 = self.conv_layer(self.conv3_3, \"conv3_4\")\n self.pool3 = self.max_pool(self.conv3_4, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.conv4_4 = self.conv_layer(self.conv4_3, \"conv4_4\")\n self.pool4 = self.max_pool(self.conv4_4, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.conv5_4 = self.conv_layer(self.conv5_3, \"conv5_4\")\n self.pool5 = self.max_pool(self.conv5_4, 'pool5')\n\n \n shape = self.pool5.get_shape()\n size = 1\n for dim in shape[1:]:\n size *= dim.value\n \n # dense\n with tf.variable_scope('dense') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(self.pool5, [-1, size])\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[size, 192]))\n biases = tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))\n dense = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n\n # linear layer(WX + b),\n with tf.variable_scope('softmax_linear_FA') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 2]))\n biases = tf.get_variable('biases', [2], initializer=tf.constant_initializer(0.1))\n softmax_linear_FA = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_FA = tf.nn.softmax(softmax_linear_FA)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.FA, logits=softmax_linear_FA, name='cross_entropy')\n cross_entropy_mean_FA = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_ges') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 13]))\n biases = tf.get_variable('biases', [13], initializer=tf.constant_initializer(0.1))\n softmax_linear_ges = 
tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_ges = tf.nn.softmax(softmax_linear_ges)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.ges, logits=softmax_linear_ges, name='cross_entropy')\n cross_entropy_mean_ges = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_obj') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 24]))\n biases = tf.get_variable('biases', [24], initializer=tf.constant_initializer(0.1))\n softmax_linear_obj = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_obj = tf.nn.softmax(softmax_linear_obj)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.obj, logits=softmax_linear_obj, name='cross_entropy')\n cross_entropy_mean_obj = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n self.loss = cross_entropy_mean_FA + cross_entropy_mean_ges + cross_entropy_mean_obj\n self.lr = tf.placeholder(tf.float32, [])\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(self.lr)\n grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)\n self.data_dict = None\n print((\"build model finished: %ds\" % (time.time() - start_time)))",
"def build_graph(self):\n self._build_model()\n if self.mode == 'train':\n self._build_train_op()",
"def prediction(self, query_image_path_list, evaluation_method, demonstration_subdir, evaluation_subdir):\n semantic_tag_unique_list = list(set(self.node_semantic_tag_list))\n semantic_tag_total_count = np.array([self.node_semantic_tag_list.count(semantic_tag) for semantic_tag in semantic_tag_unique_list])\n\n es_semantic_tag_list = []\n es_target_index_list = []\n es_target_index_rank_list = []\n query_feature_dist = []\n node_index_list = []\n for query_image_path in query_image_path_list:\n query_image = cv2.imread(query_image_path)\n query_image_batch = self.process_batch([query_image])\n query_rolling_features_list = self.sess.run(self.rolling_features_list,\n feed_dict = {self.images_placeholder: query_image_batch})\n\n metric_distance_list = []\n for node_index in range(self.node_number):\n node_metric_feature = self.node_metric_feature_list[node_index]\n rolling_distance_list = []\n for query_rolling_features in query_rolling_features_list:\n rolling_distance = np.sqrt(np.sum(np.square(node_metric_feature - query_rolling_features)))\n rolling_distance_list.append(rolling_distance)\n metric_distance = min(rolling_distance_list)\n metric_distance_list.append(metric_distance)\n metric_distance_list = np.array(metric_distance_list)\n\n query_feature_dist.append(metric_distance_list)\n\n\n node_index_rank = np.argsort(metric_distance_list)\n node_index_list.append(node_index_rank[0])\n es_target_index_rank = [self.node_id_list[node_index] for node_index in node_index_rank]\n es_target_index_rank_list.append(es_target_index_rank)\n es_target_index_list.append(es_target_index_rank[0])\n es_semantic_tag_list.append(set([self.node_semantic_tag_list[node_index_rank[0]]]))\n\n if evaluation_method == 'find_target_position':\n print(\"TARGET INDEX: \"+str(es_target_index))\n print(\"semantic tag: \"+str(self.node_semantic_tag_list[es_target_index]))\n print(\"\\n\")\n return es_semantic_tag_list, es_target_index_list\n\n elif evaluation_method == 'target_position_precision-error':\n return es_target_index_list\n elif evaluation_method == 'target_position_distance-accuracy':\n return es_target_index_list\n elif evaluation_method == 'target_position_recall-N':\n return es_target_index_rank_list\n elif evaluation_method == 'target_position_precision-recall':\n return [node_index_list, query_feature_dist]\n else:\n raise ValueError(\"unknown evaluation method\")",
"def _build_graph(self):\n pass",
"def predict(image_path):\n global graph\n with graph.as_default():\n image_size = (299, 299)\n img = image.load_img(image_path, target_size=image_size)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n predictions = model.predict(x)\n print('Predicted:', decode_predictions(predictions, top=1)[0])\n return decode_predictions(predictions, top=1)[0]",
"def build_model(self):\n input_pencil = tf.keras.Input((128,128,3))\n # generator's output\n gen_image = self.gan_generator.model(input_pencil)\n # generator's output\n x = self.gan_discriminator.model([input_pencil,gen_image])\n model = tf.keras.Model(input_pencil,[x,gen_image])\n # compiling the model\n model.compile(loss=['hinge', 'mae'], optimizer = self.optimizer,loss_weights=[1,100], metrics=['accuracy'])\n self.model = model",
"def build_graph(self):\n pass",
"def build_eval_graph(self):\n saver = tf.train.import_meta_graph('./model_check/embedding.ckpt-15.meta')\n saver.restore(self.sess, tf.train.latest_checkpoint('./model_check'))\n emb = self.sess.run('embeddings:0')\n \n nemb = tf.nn.l2_normalize(emb, 1)\n \n analogy_a = tf.placeholder(dtype=tf.int32)\n analogy_b = tf.placeholder(dtype=tf.int32)\n analogy_c = tf.placeholder(dtype=tf.int32)\n\n a_emb = tf.gather(nemb, analogy_a)\n b_emb = tf.gather(nemb, analogy_b)\n c_emb = tf.gather(nemb, analogy_c)\n\n target = c_emb + (a_emb - b_emb)\n dist = tf.matmul(target, nemb, transpose_b=True)\n _, pred_idx = tf.nn.top_k(dist, 4)\n\n nearby_word = tf.placeholder(dtype=tf.int32)\n nearby_emb = tf.gather(nemb, nearby_word)\n nearyby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)\n nearby_val, nearby_idx = tf.nn.top_k(nearyby_dist, min(1000, nemb.shape[0]))\n\n\n self._analogy_a = analogy_a\n self._analogy_b = analogy_b\n self._analogy_c = analogy_c\n self._analogy_pred_idx = pred_idx\n self._nearby_word = nearby_word\n self._nearby_val = nearby_val\n self._nearby_idx = nearby_idx\n\n tf.global_variables_initializer().run()\n self.saver = tf.train.Saver()",
"def run_inference_on_image(image):\n if not tf.gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n\n # Read an image\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n img_data_jpg = tf.image.decode_jpeg(image_data) # Decode image\n img_data_jpg = tf.image.convert_image_dtype(img_data_jpg, dtype=tf.float32) # Convert uint8 to float32\n img_data_jpg = tf.image.resize_image_with_crop_or_pad(img_data_jpg,IMAGE_SIZE,IMAGE_SIZE)\n\n # Creates graph from saved GraphDef.\n create_graph()\n\n with tf.Session() as sess:\n image_data = img_data_jpg.eval().reshape(-1,IMAGE_SIZE,IMAGE_SIZE,CHANNEL)\n softmax_tensor = sess.graph.get_tensor_by_name('lg/InceptionV3/Predictions/Reshape_1:0')\n predictions = sess.run(softmax_tensor, {'lg/Placeholder:0': image_data})\n predictions = np.squeeze(predictions)\n print('predictions: ',predictions)\n # Read the labels from label.txt.\n label_path = os.path.join(FLAGS.model_dir, '/home/lg/projects/labels.txt')\n label = np.loadtxt(fname=label_path,dtype=str)\n\n top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n for node_id in top_k:\n label_string = label[node_id]\n score = predictions[node_id]\n print('%s (score = %.5f)' % (label_string, score))",
"def build_vgg():\n input_shape = (256, 256, 3)\n\n vgg = keras.applications.VGG19(include_top = False , input_shape = input_shape , weights=\"imagenet\")\n features = vgg.get_layer(index = 9).output\n\n model = keras.Model(inputs=[vgg.inputs], outputs=[features])\n return model",
"def run_inference_on_image(image):\n if not tf.gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n\n # Creates graph from saved GraphDef.\n #create_graph()\n\n with tf.Session() as sess:\n # Some useful tensors:\n # 'softmax:0': A tensor containing the normalized prediction across\n # 1000 labels.\n # 'pool_3:0': A tensor containing the next-to-last layer containing 2048\n # float description of the image.\n # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG\n # encoding of the image.\n # Runs the softmax tensor by feeding the image_data as input to the graph.\n global RESULTS_ANALYSIS\n softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')\n\t\n predictions = sess.run(softmax_tensor,\n {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n # Creates node ID --> English string lookup.\n node_lookup = NodeLookup()\n #top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n top_k = predictions.argsort()[-3:][::-1]\n RESULTS_ANALYSIS=''\n for node_id in top_k:\n human_string = node_lookup.id_to_string(node_id)\n score = predictions[node_id]\n print('%s (score = %.5f)' % (human_string, score))\n RESULTS_ANALYSIS=RESULTS_ANALYSIS+'%s (score = %.5f)' % (human_string, score)+';'",
"def make_inference(ndvi_image, model):\n ndvi_image_exp = np.expand_dims(ndvi_image, axis=0)\n ndvi_image_exp = ndvi_image_exp / 255.0\n prediction = model.predict(ndvi_image_exp)\n return prediction",
"def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = map(np.argmax, Y_hat_test) # classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, column=2, padx=5, pady=5)",
"def main(image, model_dir):\n model_file, signature = get_model_and_sig(model_dir)\n interpreter = load_model(model_dir + model_file)\n prediction = get_prediction(image, interpreter, signature)\n # get list of confidences from prediction\n confidences = list(prediction.values())[0]\n # get the label name for the predicted class\n labels = signature.get(\"classes\").get(\"Label\")\n max_confidence = max(confidences)\n prediction[\"Prediction\"] = labels[confidences.index(max_confidence)]\n return prediction",
"def run_inference_on_image(image):\n if not tf.gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n\n\n\n with tf.Session() as sess:\n # Some useful tensors:\n # 'softmax:0': A tensor containing the normalized prediction across\n # 1000 labels.\n # 'pool_3:0': A tensor containing the next-to-last layer containing 2048\n # float description of the image.\n # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG\n # encoding of the image.\n # Runs the softmax tensor by feeding the image_data as input to the graph.\n\n # Creates node ID --> English string lookup.\n node_lookup = NodeLookup()\n\n softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')\n predictions = sess.run(softmax_tensor,\n {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n\n\n top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n result =[]\n for node_id in top_k:\n human_string = node_lookup.id_to_string(node_id)\n score = predictions[node_id]\n\n point = collections.namedtuple('Point', ['humanString', 'score'])\n point.humanString = human_string\n point.score = score\n result.append(point)\n return result",
"def inference(image, keep_prob):\n print('setting up vgg model initialized params')\n model_data = utils.get_model_data(\"data\", MODEL_URL)\n mean = model_data['normalization'][0][0][0]\n mean_pixel = np.mean(mean, axis=(0, 1))\n weights = np.squeeze(model_data['layers'])\n\n processed_image = utils.process_image(image, mean_pixel)\n\n with tf.name_scope('inference'):\n image_net = vgg_net(weights, processed_image)\n conv_final_layer = image_net['conv5_3']\n\n pool5 = utils.max_pool_2x2(conv_final_layer)\n\n W6 = utils.weights_variable([7, 7, 512, 4096], name=\"W6\")\n b6 = utils.bias_variable([4096], name='b6')\n conv6 = utils.conv2d_basic(pool5, W6, b6)\n relu6 = tf.nn.relu(conv6, name='relu6')\n\n relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)\n\n W7 = utils.weights_variable([1, 1, 4096, 4096], name=\"W7\")\n b7 = utils.bias_variable([4096], name=\"b7\")\n conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)\n relu7 = tf.nn.relu(conv7, name=\"relu7\")\n\n relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)\n\n W8 = utils.weights_variable([1, 1, 4096, NUM_OF_CLASSESS], name='W8')\n b8 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b8\")\n conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)\n\n #unsampling to actual image size\n deconv_shape1 = image_net['pool4'].get_shape()\n W_t1 = utils.weights_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name='W_t1')\n b_t1 = utils.bias_variable([deconv_shape1[3].value], name=\"b_t1\")\n conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net['pool4']))\n fuse_1 = tf.add(conv_t1, image_net['pool4'], name='fuse_1')\n\n deconv_shape2 = image_net['pool3'].get_shape()\n W_t2 = utils.weights_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name='W_t2')\n b_t2 = utils.bias_variable([deconv_shape2[3].value], name=\"b_t2\")\n conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net['pool3']))\n fuse_2 = tf.add(conv_t2, image_net[\"pool3\"], name=\"fuse_2\")\n\n shape = tf.shape(image)\n output_shape = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])\n W_t3 = utils.weights_variable([7, 7, NUM_OF_CLASSESS, deconv_shape2[3].value], name='W_t3')\n b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b_t3\")\n conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=output_shape)\n\n annotation_pre = tf.argmax(conv_t3, dimension=3, name='prediction')\n\n return tf.expand_dims(annotation_pre, dim=3), conv_t3",
"def buildGraph(self):\r\n\r\n print 'Building graph...'\r\n\r\n self.buildQ()\r\n self.buildP()\r\n self.buildReconstructionTerm()\r\n self.buildConditionalPriorTerm()\r\n self.buildWPriorTerm()\r\n self.buildZPriorTerm()\r\n\r\n self.buildObjective()\r\n self.buildGrad()",
"def prediction(self, v, imu_meas):\n # YOUR CODE HERE\n pass",
"def model(inputs):\n outputs = {}\n\n # First, build the encoder\n encoder_fn = _get_network(params.encoder_name)\n with tf.variable_scope('encoder', reuse=reuse):\n # Produces id/pose units\n enc_outputs = encoder_fn(inputs['images_1'], params, is_training)\n outputs['ids_1'] = enc_outputs['ids']\n\n # Second, build the decoder and projector\n decoder_fn = _get_network(params.decoder_name)\n with tf.variable_scope('decoder', reuse=reuse):\n outputs['voxels_1'] = decoder_fn(outputs['ids_1'], params, is_training)\n if run_projection:\n projector_fn = _get_network(params.projector_name)\n with tf.variable_scope('projector', reuse=reuse):\n outputs['projs_1'] = projector_fn(\n outputs['voxels_1'], inputs['matrix_1'], params, is_training)\n # Infer the ground-truth mask\n with tf.variable_scope('oracle', reuse=reuse):\n outputs['masks_1'] = projector_fn(inputs['voxels'], inputs['matrix_1'],\n params, False)\n\n # Third, build the entire graph (bundled strategy described in PTN paper)\n for k in range(1, params.step_size):\n with tf.variable_scope('projector', reuse=True):\n outputs['projs_%d' % (k + 1)] = projector_fn(\n outputs['voxels_1'], inputs['matrix_%d' %\n (k + 1)], params, is_training)\n with tf.variable_scope('oracle', reuse=True):\n outputs['masks_%d' % (k + 1)] = projector_fn(\n inputs['voxels'], inputs['matrix_%d' % (k + 1)], params, False)\n\n return outputs",
"def build_vgg(self, weights=\"imagenet\"): \n \n # Input image to extract features from\n img = Input(shape=(self.img_rows, self.img_cols, 3))\n\n # Mean center and rescale by variance as in PyTorch\n processed = Lambda(lambda x: (x-self.mean) / self.std)(img)\n \n # If inference only, just return empty model \n if self.inference_only:\n model = Model(inputs=img, outputs=[img for _ in range(len(self.vgg_layers))])\n model.trainable = False\n model.compile(loss='mse', optimizer='adam')\n return model\n \n # Get the vgg network from Keras applications\n if weights in ['imagenet', None]:\n vgg = VGG16(weights=weights, include_top=False)\n else:\n vgg = VGG16(weights=None, include_top=False)\n vgg.load_weights(weights, by_name=True)\n\n # Output the first three pooling layers\n vgg.outputs = [vgg.layers[i].output for i in self.vgg_layers] \n \n # Create model and compile\n model = Model(inputs=img, outputs=vgg(processed))\n model.trainable = False\n model.compile(loss='mse', optimizer='adam')\n\n return model"
] | [
"0.61238796",
"0.6017474",
"0.5863091",
"0.56766254",
"0.5661419",
"0.5589735",
"0.5516368",
"0.5507306",
"0.5503744",
"0.5484291",
"0.54819345",
"0.5471904",
"0.5432435",
"0.54278654",
"0.54207414",
"0.54012424",
"0.53967243",
"0.53850025",
"0.5355296",
"0.535415",
"0.5329042",
"0.53261536",
"0.5323461",
"0.53070134",
"0.5302241",
"0.5301017",
"0.5257672",
"0.5247207",
"0.5242307",
"0.5239011"
] | 0.63652086 | 0 |
Exports the prediction graph. | def export(self, last_checkpoint, output_dir):
with tf.Session(graph=tf.Graph()) as sess:
inputs, outputs = self.build_prediction_image_graph()
init_op = tf.global_variables_initializer()
sess.run(init_op)
trained_saver = tf.train.Saver()
trained_saver.restore(sess, last_checkpoint)
predict_signature_def = build_signature(inputs, outputs)
# Create a saver for writing SavedModel training checkpoints.
build = builder.SavedModelBuilder(
os.path.join(output_dir, 'saved_model_image_in'))
build.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
predict_signature_def
},
assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS))
self.has_exported_image_in = True
build.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export(self):\n if self.model.algorithm == 'DecisionTree':\n dot_data = tree.export_graphviz(self.model.clf, out_file=None)\n graph = graphviz.Source(dot_data)\n graph.render(\"exports/DecisionTreeRegressor\")",
"def export_freeze_model(self, export_dir='.', version=1):\n self.feed_dict.update({self.training_phase: False})\n tf.keras.backend.set_learning_phase(0) \n\n self.outputs = tf.identity_n(self.outputs, name='output/hr')\n sess = tf.get_default_session()\n export_path = Path(export_dir) / str(version)\n while export_path.exists():\n version += 1 # step ahead 1 version\n export_path = Path(export_dir) / str(version)\n export_path = str(export_path)\n graph = sess.graph.as_graph_def()\n graph = tf.graph_util.remove_training_nodes(graph)\n graph = tf.graph_util.convert_variables_to_constants(\n sess, graph, [outp.name.split(':')[0] for outp in self.outputs])\n # fcarrio\n for node in graph.node:\n print (node.name)\n\n\n for op in tf.get_default_graph().get_operations():\n print(str(op.name))\n\n tf.train.write_graph(graph, export_path, self.name, as_text=False)\n LOG.info(\"Model exported to {}/{}.\".format(export_path, self.name))",
"def save(self, output_path):\r\n self.graph.cleanup().toposort()\r\n model = gs.export_onnx(self.graph)\r\n output_path = os.path.realpath(output_path)\r\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\r\n onnx.save(model, output_path)\r\n log.info(\"Saved ONNX model to {}\".format(output_path))",
"def save_prediction(self):\n if DataLoader.data is None:\n messagebox.showerror(\"Information\", \"Data file is empty, please load the data first.\")\n return\n if Trainer.y_pred is None:\n messagebox.showerror(\"Information\", \"Preciction has not been made, please train a new model and predict or \"\n \"load a model and predict.\")\n return\n\n path = filedialog.asksaveasfile(mode='w', defaultextension=\".csv\", filetypes=[(\"csv files\", '*.csv'),\n (\"xlsx files\", '*.xlsx'),\n (\"dat files\", '*.dat')])\n\n copy_data = DataLoader.data.copy()\n copy_data['prediction'] = Trainer.y_pred\n copy_data.to_csv(path, index=False)\n\n # Clears memory\n copy_data.drop(copy_data.index, inplace=True)\n del copy_data",
"def save_predictions(model, dataset, output_dir):\n preds = model.predict(dataset, verbose=1)\n preds = scipy.special.softmax(preds, 1) # Apply softmax\n with tf.io.gfile.GFile(os.path.join(output_dir, 'test_preds.pkl'), 'wb') as f:\n pickle.dump(preds, f)",
"def save_prediction(self, meta, y_pred, y, filename):\n df = pd.DataFrame(meta)\n df['y_pred'] = y_pred\n df['y'] = y\n print(df)\n df.loc[:, 'id'] = df.index\n self.df_to_csv(df, filename, store_header=False)",
"def dump(pred_out_path, xyz_pred_list, verts_pred_list):\n # make sure its only lists\n xyz_pred_list = [x.tolist() for x in xyz_pred_list]\n verts_pred_list = [x.tolist() for x in verts_pred_list]\n #import pdb; pdb.set_trace()\n # save to a json\n with open(pred_out_path, 'w') as fo:\n json.dump(\n [\n xyz_pred_list,\n verts_pred_list\n ], fo)\n print('Dumped %d joints and %d verts predictions to %s' % (len(xyz_pred_list), len(verts_pred_list), pred_out_path))",
"def write_predictions(estimator, vertical, source_website, target_website):\n score_dir_path = os.path.join(\n FLAGS.result_path, \"{}/{}-results/score\".format(vertical, source_website))\n\n tf.gfile.MakeDirs(score_dir_path)\n pred_filename = os.path.join(\n FLAGS.result_path,\n \"{}/{}-results/score/{}.preds.txt\".format(vertical, source_website,\n target_website))\n node_emb_filename = os.path.join(\n FLAGS.result_path,\n \"{}/{}-results/score/{}.node_emb.npz\".format(vertical, source_website,\n target_website))\n print(\"Writing predictions to file: %s\" % pred_filename, file=sys.stderr)\n golds_gen = model_util.joint_generator_fn(\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=False),\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=True),\n vertical,\n mode=\"all\")\n transfer_eval_input_function = functools.partial(\n model_util.joint_input_fn,\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=False),\n get_data_path(\n vertical=vertical, website=target_website, dev=False, goldmine=True),\n vertical,\n mode=\"all\")\n preds_gen = estimator.predict(transfer_eval_input_function)\n prediction_str = \"\"\n if FLAGS.extract_node_emb:\n node_embs = []\n for gold, pred in zip(golds_gen, preds_gen):\n if FLAGS.circle_features:\n ((nnodes), (_), (words_list, words_len), (_, _), (_, _),\n (partner_words, _), (friend_words, _), (_, _), (_, _),\n (html_path, xpath_list), (_, _), (_, _), (_)), tags = gold\n\n for index in range(nnodes):\n normalized_partner = []\n for w in partner_words[index]:\n normalized_partner.append(normalize_text(w))\n\n if FLAGS.match_keywords:\n normalized_word = [\n normalize_text(w)\n for w in words_list[index][:words_len[index]]\n ]\n candicate_labels = constants.ATTRIBUTES[vertical]\n print(\"Partner: %s, Words: %s, Pred: %s\" %\n (\" \".join(normalized_partner), \" \".join(normalized_word),\n pred[\"tags\"][index]))\n normalized_partner = \" \".join(normalized_partner)\n for i, l in enumerate(candicate_labels):\n l = str(l).lower().replace(\"tor\", \"t\").split(\"_\")\n status = all([x in normalized_partner for x in l])\n if status:\n print(\"OLD:\", pred[\"tags\"][index])\n print(\"NEW:\", candicate_labels[i].encode())\n pred[\"tags\"][index] = candicate_labels[i].encode()\n\n if FLAGS.friend_encoder:\n normalized_friend = []\n for w in friend_words[index]:\n normalized_friend.append(normalize_text(w))\n print(normalized_friend)\n print(pred[\"friends_embs\"][index])\n\n else:\n ((nnodes), (words_list, words_len), (_, _), (_, _), (_, _),\n (html_path, xpath_list), (_, _), (_), (_)), tags = gold\n assert nnodes == len(words_list) == len(tags)\n for index in range(nnodes):\n s = \"\\t\".join([\n str(html_path, \"utf-8\"),\n str(xpath_list[index], \"utf-8\"),\n \" \".join([\n str(w, \"utf-8\") for w in words_list[index][:int(words_len[index])]\n ]),\n str(tags[index], \"utf-8\"),\n str(pred[\"tags\"][index], \"utf-8\"),\n \",\".join([str(score) for score in pred[\"raw_scores\"][index]]),\n ]) + \"\\n\"\n prediction_str += s\n if FLAGS.extract_node_emb:\n node_embs.append([float(i) for i in pred[\"node_embs\"][index]])\n\n with tf.gfile.Open(pred_filename, \"w\") as f:\n f.write(prediction_str)\n\n node_embs = np.array(node_embs)\n # Save np.array to file.\n with tf.gfile.Open(node_emb_filename, \"wb\") as gfo:\n print(\"Writing node emb pickle: %s\" % node_emb_filename, file=sys.stderr)\n pickle.dump(node_embs, gfo)\n print(\"Node Representation Save- done.\", 
file=sys.stderr)",
"def export_prediction_to_example(filename, pred_geo, pred_sem):\n with tf.python_io.TFRecordWriter(filename) as writer:\n out_feature = {\n 'prediction_df/dim': util.int64_feature(pred_geo.shape),\n 'prediction_df': util.float_feature(pred_geo.flatten().tolist())\n }\n if FLAGS.predict_semantics:\n out_feature['prediction_sem'] = util.bytes_feature(\n pred_sem.flatten().tobytes())\n example = tf.train.Example(features=tf.train.Features(feature=out_feature))\n writer.write(example.SerializeToString())",
"def create_visual_graph(self):\n if self.predict_new and self.prediction_without_covid_case:\n self.predict_co2_emission_future()\n self.save_prediction_df()\n else:\n self.restore_prediction_df()\n if not self.analysis_plot:\n self.predict_co2_emission_future()\n self.save_prediction_df()\n\n self.do_plot()\n self.output_graph_file = OUTPUT_GRAPH_PATH\n return self.output_graph_file",
"def save_predicted_results(predicted_results):\n # Save the model\n with open(\"predicted_results\", \"wb\") as predicted_results_file:\n pickle.dump(predicted_results, predicted_results_file)",
"def save_predictions(prediction_maps, output_file, dataset_names):\n assert len(prediction_maps) == len(dataset_names), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n\n with h5py.File(output_file, \"w\") as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map, compression=\"gzip\")",
"def _export_model(\n self,\n precision: ModelPrecision = ModelPrecision.FP32,\n export_format: ExportType = ExportType.ONNX,\n dump_features: bool = True,\n ):\n # copied from OTX inference_task.py\n self._data_cfg = ConfigDict(\n data=ConfigDict(\n train=ConfigDict(\n otx_dataset=None,\n labels=self._labels,\n ),\n test=ConfigDict(\n otx_dataset=None,\n labels=self._labels,\n ),\n )\n )\n self._init_task(export=True)\n\n cfg = self.configure(False, None)\n\n self._precision[0] = precision\n export_options: Dict[str, Any] = {}\n export_options[\"deploy_cfg\"] = self._init_deploy_cfg(cfg)\n assert len(self._precision) == 1\n export_options[\"precision\"] = str(self._precision[0])\n export_options[\"type\"] = str(export_format)\n\n export_options[\"deploy_cfg\"][\"dump_features\"] = dump_features\n if dump_features:\n output_names = export_options[\"deploy_cfg\"][\"ir_config\"][\"output_names\"]\n if \"feature_vector\" not in output_names:\n output_names.append(\"feature_vector\")\n if export_options[\"deploy_cfg\"][\"codebase_config\"][\"task\"] != \"Segmentation\":\n if \"saliency_map\" not in output_names:\n output_names.append(\"saliency_map\")\n export_options[\"model_builder\"] = getattr(self, \"model_builder\", build_segmentor)\n\n if self._precision[0] == ModelPrecision.FP16:\n export_options[\"deploy_cfg\"][\"backend_config\"][\"mo_options\"][\"flags\"].append(\"--compress_to_fp16\")\n\n backend_cfg_backup = {}\n if export_format == ExportType.ONNX:\n backend_cfg_backup = export_options[\"deploy_cfg\"][\"backend_config\"]\n export_options[\"deploy_cfg\"][\"backend_config\"] = {\"type\": \"onnxruntime\"}\n export_options[\"deploy_cfg\"][\"ir_config\"][\"dynamic_axes\"][\"input\"] = {0: \"batch\"}\n\n exporter = SegmentationExporter()\n results = exporter.run(\n cfg,\n **export_options,\n )\n\n if export_format == ExportType.ONNX:\n results[\"inference_parameters\"] = {}\n results[\"inference_parameters\"][\"mean_values\"] = \" \".join(\n map(str, backend_cfg_backup[\"mo_options\"][\"args\"][\"--mean_values\"])\n )\n results[\"inference_parameters\"][\"scale_values\"] = \" \".join(\n map(str, backend_cfg_backup[\"mo_options\"][\"args\"][\"--scale_values\"])\n )\n\n return results",
"def export(self, path):\r\n # Save plot as png\r\n dt = m.get_instance().dt\r\n self.perception_history = m.get_instance().larvae[0].history\r\n t = np.arange(0,len(self.perception_history)*dt,dt)\r\n plt.plot(t,self.perception_history)\r\n plt.title('Perception History')\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Perception (uM)')\r\n\r\n plt.gca().set_aspect('equal', adjustable='box')\r\n plt.savefig(path + '.png')",
"def write_out_prediction_objects(model, lookups):\n neighbours = get_neighbours(lookups, model)\n id_to_movie = get_reverse_movie_lookup(lookups)\n popular_films = get_popular(lookups)\n\n output = {\n \"neighbours\": neighbours,\n \"id_to_movie\": id_to_movie,\n \"popular_films\": popular_films,\n }\n\n with open(PICKLE_FILENAME, \"wb\") as f:\n pickle.dump(output, f)\n\n return output",
"def export_experiment(session, saver, last_step, global_step, output_dir,\n eval_set, features, labels, images, route):\n output_filename = 'output_%s_%s_%d.h5' % (\n FLAGS.dataset, eval_set, global_step)\n output_directory = os.path.join(output_dir, 'classify', 'output')\n\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n with h5py.File(os.path.join(output_directory, output_filename), 'w') as hf:\n hf.create_dataset('features', data=features, compression='lzf')\n hf.create_dataset('labels', data=labels, compression='lzf')\n hf.create_dataset('images', data=images, compression='lzf')\n hf.create_dataset('route', data=route, compression='lzf')\n\n session_directory = os.path.join(\n output_dir, 'eval', FLAGS.dataset, eval_set)\n saver.save(session, os.path.join(session_directory, 'model.ckpt'),\n global_step=last_step)",
"def write(self, predictions, filename):\n driver = self.dataset.GetDriver()\n dst_ds = driver.CreateCopy(filename, self.dataset)\n\n prediction_array = np.zeros_like(self.segmentation)\n for prediction, y, x in predictions:\n prediction_array[y:y + self.size, x:x + self.size] = prediction\n\n # Overwrite the raster band with the predicted labels\n band = dst_ds.GetRasterBand(1)\n band.WriteArray(prediction_array)",
"def write(self, outfilename):\n\n nx.write_gpickle(self.graph, outfilename)",
"def export_graph(graph, name_file, format_export):\n im_name = ('{}.' + format_export).format('./' + name_file)\n if (format_export == \"png\"):\n graph.write_png(im_name)\n elif (format_export == \"dot\"):\n graph.write_dot(im_name)\n else:\n raise LookupError",
"def save_tf_export(self, session):\n raise NotImplementedError(\"Implement save_tf_export() method\")",
"def write_predictions(self, file: str, out_file: str, fee_only: bool = False):\n\n if not fee_only and self.network is None:\n raise Exception(\"No network found! Train or load a network.\")\n\n xs, ys = get_dataset([file])\n\n if not fee_only:\n dataset_iter = self.prepare_dataset(xs, ys, 1)\n predictions = self.network.predict(dataset_iter)\n prediction = iter(predictions)\n\n out_data = []\n sent_count = 0\n last_sentence = []\n\n for x in xs:\n if last_sentence != x[1:]:\n if not sent_count == 0:\n out_data.append(data_dict)\n\n data_dict = dict()\n data_dict[\"sentence\"] = x[1:]\n data_dict[\"sentence_id\"] = sent_count\n data_dict[\"prediction\"] = []\n last_sentence = x[1:]\n sent_count += 1\n frame_count = 0\n\n prediction_dict = dict()\n prediction_dict[\"id\"] = frame_count\n prediction_dict[\"fee\"] = x[0]\n if not fee_only:\n prediction_dict[\"frame\"] = self.output_field.vocab.itos[next(prediction).item()]\n\n data_dict[\"prediction\"].append(prediction_dict)\n\n frame_count += 1\n\n out_data.append(data_dict)\n\n with open(out_file, \"w\") as out:\n json.dump(out_data, out, indent=4)",
"def export_saved_model(self, export_dir='.', version=1):\n\n sess = tf.get_default_session()\n export_path = Path(export_dir) / str(version)\n while export_path.exists():\n version += 1 # step ahead 1 version\n export_path = Path(export_dir) / str(version)\n export_path = str(export_path)\n LOG.debug(\"exporting to {}\".format(export_path))\n builder = tf.saved_model.builder.SavedModelBuilder(export_path)\n # build the signature_def_map\n inputs, outputs = {}, {}\n for n, inp in enumerate(self.inputs):\n tag = 'input_' + str(n)\n inputs[tag] = tf.saved_model.utils.build_tensor_info(inp)\n for n, outp in enumerate(self.outputs):\n tag = 'output_' + str(n)\n outputs[tag] = tf.saved_model.utils.build_tensor_info(outp)\n sig = tf.saved_model.signature_def_utils.build_signature_def(\n inputs=inputs, outputs=outputs,\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)\n\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: sig\n },\n strip_default_attrs=True)\n builder.save()",
"def save_prediction(predictions, image_file, path):\n\t\n\tsave_file = convert_file_extension_to_txt(image_file)\n\t\n\twith open(os.path.join(path, save_file), 'w') as f:\n\t\tfor prediction in predictions:\n\t\t\tf.write(str(prediction) + \"\\n\")",
"def save_graph(self):\n with tf.Session(graph=self.graph) as sess:\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n\n save_path = saver.save(sess, os.path.join(MODELS_PATH, \"model\"))\n print(\"Model saved in path: %s\" % save_path)\n\n with open(os.path.join(MODELS_PATH, \".model.inputs\"), \"w\") as file:\n for v in self.inputs.values():\n file.write(v.name + \"\\n\")\n with open(os.path.join(MODELS_PATH, \".model.output\"), \"w\") as file:\n file.write(self.output.name)",
"def export_graph(cls, graph, filename):\n edges = {}\n for node in graph.values():\n for neighbour, dist in node.distances.items():\n if (node.id, neighbour) in edges or (neighbour, node.id) in edges:\n continue\n edges[(node.id, neighbour)] = dist\n\n file_string = '{}\\n'.format(len(graph))\n for edge, dist in edges.items():\n file_string = file_string + '{} {} {}\\n'.format(edge[0], edge[1], dist)\n file_string = file_string[:-1] # Strip the last \\n\n\n with open(filename, 'w') as file:\n file.write(file_string)",
"def export_prediction_to_mesh(outprefix, input_sdf, output_df, output_sem,\n target_df, target_sem):\n # Add back (below floor) padding for vis (creates the surface on the bottom).\n (scene_dim_z, scene_dim_y, scene_dim_x) = input_sdf.shape\n save_input_sdf = constants.TRUNCATION * np.ones(\n [scene_dim_z, 2 * FLAGS.pad_test + scene_dim_y, scene_dim_x])\n save_prediction = np.copy(save_input_sdf)\n save_target = None if target_df is None else np.copy(save_input_sdf)\n save_input_sdf[:, FLAGS.pad_test:FLAGS.pad_test + scene_dim_y, :] = input_sdf\n save_prediction[:, FLAGS.pad_test:FLAGS.pad_test + scene_dim_y, :] = output_df\n if target_df is not None:\n save_target[:, FLAGS.pad_test:FLAGS.pad_test + scene_dim_y, :] = target_df\n # For error visualization as colors on mesh.\n save_errors = np.zeros(shape=save_prediction.shape)\n save_errors[:, FLAGS.pad_test:FLAGS.pad_test + scene_dim_y, :] = np.abs(\n output_df - target_df)\n if FLAGS.predict_semantics:\n save_pred_sem = np.zeros(shape=save_prediction.shape, dtype=np.uint8)\n save_pred_sem[:, FLAGS.pad_test:\n FLAGS.pad_test + scene_dim_y, :] = output_sem\n save_pred_sem[np.greater(save_prediction, 1)] = 0\n if target_sem is not None:\n save_target_sem = np.zeros(shape=save_prediction.shape, dtype=np.uint8)\n save_target_sem[:, FLAGS.pad_test:\n FLAGS.pad_test + scene_dim_y, :] = target_sem\n\n # Save as mesh.\n util.save_iso_meshes(\n [save_input_sdf, save_prediction, save_target],\n [None, save_errors, save_errors], [None, save_pred_sem, save_target_sem],\n [\n outprefix + 'input.obj', outprefix + 'pred.obj',\n outprefix + 'target.obj'\n ],\n isoval=1)",
"def export_model(module_spec, class_count, saved_model_dir):\n # SavedModel should hold the eval graph\n sess, in_image, _, _, _, _ = build_eval_session(module_spec, class_count)\n graph = sess.graph\n with graph.as_default():\n inputs = {'image': tf.saved_model.utils.build_tensor_info(in_image)}\n\n out_classes = sess.graph.get_tensor_by_name('profile_image_verifier:0')\n outputs = {\n 'prediction': tf.saved_model.utils.build_tensor_info(out_classes)\n }\n\n signature = tf.saved_model.signature_def_utils.build_signature_def(\n inputs=inputs,\n outputs=outputs,\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME\n )\n\n legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')\n\n # Save out the SavedModel\n builder = tf.saved_model.builder.SavedModelBuilder(saved_model_dir)\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature\n },\n legacy_init_op=legacy_init_op\n )\n builder.save()",
"def get_export_outputs_prediction_dict_smith_de(\n seq_embed_1, seq_embed_2, predicted_score, predicted_class,\n documents_match_labels, input_sent_embed_1, input_sent_embed_2,\n output_sent_embed_1, output_sent_embed_2):\n export_outputs = {\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n tf_estimator.export.PredictOutput(predicted_score),\n \"seq_embed_1\":\n tf_estimator.export.PredictOutput(seq_embed_1),\n \"seq_embed_2\":\n tf_estimator.export.PredictOutput(seq_embed_2),\n \"input_sent_embed_1\":\n tf_estimator.export.PredictOutput(input_sent_embed_1),\n \"input_sent_embed_2\":\n tf_estimator.export.PredictOutput(input_sent_embed_2),\n \"output_sent_embed_1\":\n tf_estimator.export.PredictOutput(output_sent_embed_1),\n \"output_sent_embed_2\":\n tf_estimator.export.PredictOutput(output_sent_embed_2),\n \"predicted_class\":\n tf_estimator.export.PredictOutput(predicted_class),\n \"documents_match_labels\":\n tf_estimator.export.PredictOutput(documents_match_labels)\n }\n\n prediction_dict = {\n \"predicted_score\": predicted_score,\n \"predicted_class\": predicted_class,\n \"documents_match_labels\": documents_match_labels,\n \"seq_embed_1\": seq_embed_1,\n \"seq_embed_2\": seq_embed_2,\n \"input_sent_embed_1\": input_sent_embed_1,\n \"input_sent_embed_2\": input_sent_embed_2,\n \"output_sent_embed_1\": output_sent_embed_1,\n \"output_sent_embed_2\": output_sent_embed_2\n }\n return (export_outputs, prediction_dict)",
"def write_predictions_to_file(predictor, testDataFname, enc, outputFname, features=None):\n\n testData, _, testDataIds, _ = make_data(testDataFname, features=features, enc=enc)\n\n dt = datetime.now()\n predictions = predictor.predict(testData)\n print 'predicting took', datetime.now() - dt\n\n featureSelectionOutput = np.transpose(np.vstack((testDataIds, predictions.round().astype(int))))\n\n with open(outputFname, 'wb') as outputFile:\n writer = csv.writer(outputFile)\n writer.writerow(['id', 'loss'])\n writer.writerows(featureSelectionOutput)",
"def saveGraph(self, filename):\n nx.write_yaml(self.G,filename)"
] | [
"0.69779485",
"0.6514851",
"0.6343418",
"0.63088596",
"0.6273102",
"0.6150119",
"0.61405104",
"0.60842127",
"0.6041941",
"0.6040545",
"0.59748113",
"0.5972487",
"0.5959718",
"0.59432715",
"0.59266734",
"0.59050053",
"0.5880715",
"0.58521193",
"0.5832841",
"0.58324564",
"0.583085",
"0.58294517",
"0.58153373",
"0.58128387",
"0.58084565",
"0.5803314",
"0.5765701",
"0.5751476",
"0.5739995",
"0.5736711"
] | 0.67879766 | 1 |
grid vector quantity over pair separation calculate drij[i, j] dot vec[i] and count for each bin see usage in grid_force_difference | def gofv(myr, drij, vec):
# !!!! assume linear grid
nr = len(myr)
rmin = myr[0]
dr = myr[1]-myr[0]
if not np.isclose(myr[2]-myr[1], dr):
raise RuntimeError('not linear grid')
# histogram
ysum = np.zeros(nr)
counts = np.zeros(nr, dtype=int)
rij = np.linalg.norm(drij, axis=-1)
nmol = rij.shape[0]
for i in range(nmol):
for j in range(i+1, nmol):
deno = rij[i, j]
ir = int((deno-rmin)//dr) # bin index
if (0 <= ir) and (ir < nr):
rhat = -drij[i, j] # drij is dr_i - dr_j NOT ri->rj
nume = np.dot(rhat, vec[i])
val = nume/deno
ysum[ir] += val
counts[ir] += 1
return ysum, counts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc(self):\n np = 0\n for cell in self.cells:\n n = self.cell_np[cell]\n np += n\n self.dnp = np - self.np\n self.np = np",
"def span_vector_pdb(self) -> None:\n pnts = self.dir_vec * np.mgrid[-7.5:7.5:2j][:, np.newaxis]\n pnts += self.mean_vec\n pnt0 = pnts[0]\n pnt1 = pnts[1]\n mean = self.mean_vec\n atom0 = mp.Atom(header='ATOM', serial_num=self.span_num*3-2,\n name='%i' % self.span_num,\n res_type_3='T%i' % self.span_num,\n chain='M', res_seq_num=self.span_num,\n x=pnt0[0], y=pnt0[1], z=pnt0[2])\n atom1 = mp.Atom(header='ATOM', serial_num=self.span_num*3-1,\n name='%i' % self.span_num,\n res_type_3='T%i' % self.span_num,\n chain='M', res_seq_num=self.span_num,\n x=pnt1[0], y=pnt1[1], z=pnt1[2])\n atom2 = mp.Atom(header='ATOM', serial_num=self.span_num*3,\n name='%i' % self.span_num,\n res_type_3='T%i' % self.span_num,\n chain='M', res_seq_num=self.span_num,\n x=mean[0], y=mean[1], z=mean[2])\n\n print(atom0)\n print(atom1)\n print(atom2)",
"def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count",
"def _computeStikeDip(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n norm_vec = Vector(0, 0, 0)\n north_vec = Vector(0, 0, 0)\n up_vec = Vector(0, 0, 0)\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n P0 = Point(self._toplons[ind],\n self._toplats[ind],\n self._topdeps[ind])\n P1 = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1])\n P2 = Point(self._botlons[ind + 1],\n self._botlats[ind + 1],\n self._botdeps[ind + 1])\n P3 = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind])\n P1up = Point(self._toplons[ind + 1],\n self._toplats[ind + 1],\n self._topdeps[ind + 1] - 1.0)\n P1N = Point(self._toplons[ind + 1],\n self._toplats[ind + 1] + 0.001,\n self._topdeps[ind + 1])\n P3up = Point(self._botlons[ind],\n self._botlats[ind],\n self._botdeps[ind] - 1.0)\n P3N = Point(self._botlons[ind],\n self._botlats[ind] + 0.001,\n self._botdeps[ind])\n p0 = Vector.fromPoint(P0)\n p1 = Vector.fromPoint(P1)\n p2 = Vector.fromPoint(P2)\n p3 = Vector.fromPoint(P3)\n p1up = Vector.fromPoint(P1up)\n p1N = Vector.fromPoint(P1N)\n p3up = Vector.fromPoint(P3up)\n p3N = Vector.fromPoint(P3N)\n\n # Sides\n s01 = p1 - p0\n s02 = p2 - p0\n s03 = p3 - p0\n s21 = p1 - p2\n s23 = p3 - p2\n\n # First triangle\n t1norm = (s02.cross(s01)).norm()\n a = s01.mag()\n b = s02.mag()\n c = s21.mag()\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Second triangle\n t2norm = (s03.cross(s02)).norm()\n a = s03.mag()\n b = s23.mag()\n c = s02.mag()\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c)) / 1000\n\n # Up and North\n p1up = (p1up - p1).norm()\n p3up = (p3up - p3).norm()\n p1N = (p1N - p1).norm()\n p3N = (p3N - p3).norm()\n\n # Combine\n norm_vec = norm_vec + A1 * t1norm + A2 * t2norm\n north_vec = north_vec + A1 * p1N + A2 * p3N\n up_vec = up_vec + A1 * p1up + A2 * p3up\n\n norm_vec = norm_vec.norm()\n north_vec = north_vec.norm()\n up_vec = up_vec.norm()\n\n # Do I need to flip the vector because it is pointing down (i.e.,\n # right-hand rule is violated)?\n flip = np.sign(up_vec.dot(norm_vec))\n norm_vec = flip * norm_vec\n\n # Angle between up_vec and norm_vec is dip\n self._dip = np.arcsin(up_vec.cross(norm_vec).mag()) * 180 / np.pi\n\n # Normal vector projected to horizontal plane\n nvph = (norm_vec - up_vec.dot(norm_vec) * up_vec).norm()\n\n # Dip direction is angle between nvph and north; strike is orthogonal.\n cp = nvph.cross(north_vec)\n sign = np.sign(cp.dot(up_vec))\n dp = nvph.dot(north_vec)\n strike = np.arctan2(sign * cp.mag(), dp) * 180 / np.pi - 90\n if strike < -180:\n strike = strike + 360\n self._strike = strike",
"def calc_bin_volumes(self):\n\n if self._log_p_flag:\n P_diffs = np.diff(np.log10(self._P_boundaries))\n else:\n P_diffs = np.diff(self._P_boundaries)\n\n if self._log_r_flag:\n R_diffs = np.diff(np.log10(self._R_boundaries))\n else:\n R_diffs = np.diff(self._R_boundaries)\n\n return np.outer(R_diffs, P_diffs)\n\n # return np.outer(np.diff(self._R_boundaries),\n # np.diff(np.log10(self._P_boundaries)))\n #else:\n # return np.outer(np.diff(self._R_boundaries),\n # np.diff(self._P_boundaries))",
"def computation_gr(particles,p_types,dist,i,j,nbins, rmax):\n i=np.where(p_types == i)[0][0]\n j=np.where(p_types == j)[0][0]\n\n\n if len(p_types)>1:\n #indexes to delete if there is more than one type of particles\n i_axis0=[]\n i_axis1=[]\n for k in range(len(p_types)):\n if k!=i:\n i_axis0.append(particles[k])\n if k!=j:\n i_axis1.append(particles[k])\n dist = np.delete(dist,np.hstack(i_axis0), axis=0)\n dist = np.delete(dist,np.hstack(i_axis1), axis=1)\n\n\n\n bin_count = np.zeros((nbins,3))\n bin_ends = -rmax*np.cos(np.linspace(np.pi/2,np.pi,num=nbins+1))\n\n vol_old=0\n for i in range(nbins):\n bin_count[i,0]=0.5*(bin_ends[i+1]+bin_ends[i]) #Count position in the middle of the bin only needed in the first\n rmax_bin=bin_ends[i+1]\n indexes=np.where(dist<=rmax_bin)\n dist[indexes]=1000\n bin_count[i,1]=len(indexes[0])/len(particles[j])\n print(len(particles[j]))\n vol_new=4/3*np.pi*rmax_bin**3\n bin_count[i,2]=bin_count[i,1]/(vol_new-vol_old)\n\n rho_ave=256/6.71838**3 #np.sum(bin_count[:,1])/(4/3*np.pi*rmax**3)\n\n print(rho_ave)\n\n bin_count[:,2]=bin_count[:,2]/rho_ave**2 #g(r)=rho(r)/rho_ave\n\n return bin_count",
"def _dipole_gof(uu, sing, vv, B, B2):\n ncomp = 3 if sing[2] / sing[0] > 0.2 else 2\n one = np.dot(vv[:ncomp], B)\n Bm2 = np.sum(one ** 2)\n return Bm2 / B2, one",
"def getBondVectors(struct,tol,prec): \n \n \n binary_matrix= getDistMat(struct,tol)\n bond_dir = {}\n distance_matrix = struct.distance_matrix\n lattice = np.array(struct.lattice.as_dict()['matrix'])\n iterations = list(itertools.product([1,0,-1],repeat=3))\n # Loop over list of atoms\n for i in range(len(binary_matrix)):\n for j in range(i+1,len(binary_matrix)):\n # Proceed if the entries are listed as \"bonded\" \n if binary_matrix[i][j]==1: \n s1 = struct.species[i]\n s2 = struct.species[j]\n # Organize dictionary so it is always in order of increasing\n # atomic number\n if s1.number>s2.number:\n s1 = struct.species[j]\n s2 = struct.species[i] \n if s1 not in bond_dir:\n bond_dir[s1]={}\n if s2 not in bond_dir[s1]:\n bond_dir[s1][s2]=[]\n valid_vs = []\n \n # Get the vector between atomic positions\n \n bond_vector = np.array(struct.sites[j].coords-\n struct.sites[i].coords) \n \n # The positions of the atoms may not be in the right locations\n # to be the minimum distance from each other. As a result,\n # a translation is applied to the resulting \"bond vector\" \n # (alternatively, one of the atoms is translated)\n for shift in iterations:\n bondShift = bond_vector + np.dot(lattice.T,shift)\n if abs(distance_matrix[i][j]-magni(bondShift))<=prec:\n valid_vs.append(bondShift)\n break\n # See if the vector is already present in the collection of \n # vectors. If so, add the coordinates to the entry. Else,\n # create a new entry for the direction of the bond.\n for v in valid_vs:\n if np.any([magni(v-x[0])<=prec for x in bond_dir[s1][s2]]):\n for k in range(len(bond_dir[s1][s2])):\n if magni(v-bond_dir[s1][s2][k][0])<=prec:\n bond_dir[s1][s2][k][1].append([i,j])\n break\n \n else:\n bond_dir[s1][s2].append([v,[[i,j]]])\n return(bond_dir)",
"def vector(self,\n i: int,\n j: int) -> np.ndarray:\n return self[j].coord - self[i].coord",
"def get_nb_vals(i, pnts, dem, top_left_cor, cellsize, rows, cols):\n nb_x = np.zeros((5,5)) # this 5 by 5 max would contain the x coordinate of 16 neighbor pixels of a sample point\n nb_y = np.zeros((5,5)) # this 5 by 5 matrix would contain the y coordinate of 16 neighbor pixels of a sample point\n nb_z = np.zeros((5,5))\n # get index and value of cell in DEM containing current point\n (cell_X, cell_Y, cell_Z) = misc.getCellValue(pnts[i], \n dem, \n top_left_cor, \n cellsize)\n #Deal with sample points near boundary of the DEM\n point_within_dem = (cell_X-2) >=0 and (cell_Y-2>=0) and (cell_X+3)<=cols and (cell_Y+3)<=rows\n if point_within_dem:\n nb_z[0:5,0:5] = misc.RasterSubset(dem,(cell_Y-2),(cell_Y+3),(cell_X-2),(cell_X+3))\n else:\n #Get the part of moving window within the DEM domain\n in_data= misc.RasterSubset(dem,max((cell_Y-2),0),min((cell_Y+3),rows),max((cell_X-2),0),min((cell_X+3),cols))\n #in_data=dem[\"array\"][max((cell_Y-2),0):min((cell_Y+3),rows),max((cell_X-2),0):min((cell_X+3),cols)]\n nb_z[max((2-cell_Y),0):min((5-(cell_Y+3-rows)),5),max((2-cell_X),0):min((5-(cell_X+3-cols)),5)]=in_data[0:in_data.shape[0],0:in_data.shape[1]]\n in_data_avg=np.mean(in_data[in_data>-3.4e+10])\n nb_z[nb_z==0]=in_data_avg\n nb_z[nb_z<-3.4e+10]=in_data_avg\n\n\n \n # If there is missing data in the neighborhood of the sample point \n # use neighborhood average to replace the missing value \n has_missing_data = (nb_z>8848).sum()>0 or (nb_z<-413).sum()>0\n if has_missing_data:\n avgValue=np.mean(nb_z[np.where(np.logical_and(nb_z<8848, nb_z>-413))])\n nb_z[nb_z>8848]=avgValue\n nb_z[nb_z<-413]=avgValue\n \n # Obtain the coordinate of cell centroid of a 5*5 neighborhood around the sample point\n for ii in [0,1,2,3,4]:\n cor_y=ii-2\n dy = (cell_Y+cor_y+0.5) * cellsize[1]\n nb_y[ii,:] = top_left_cor[1] + dy\n for jj in [0,1,2,3,4]:\n cor_x=jj-2\n dx = (cell_X+cor_x+0.5) * cellsize[0]\n nb_x [:,jj] = top_left_cor[0] + dx\n return nb_x, nb_y, nb_z",
"def _bond_dist(geom, a1, a2):\n if isinstance(geom, np.ndarray):\n geom = geom.flatten().tolist()\n a13 = a1 * 3\n a23 = a2 * 3\n\n xd = (geom[a13] - geom[a23])**2\n yd = (geom[a13 + 1] - geom[a23 + 1])**2\n zd = (geom[a13 + 2] - geom[a23 + 2])**2\n\n return (xd + yd + zd)**0.5",
"def __beinflumatgrid(axis):\n len_axis = len(axis)\n vec = np.zeros((1, len_axis))\n vec[0, :] = axis\n vertical_ax = np.zeros((len_axis, 1))\n vertical_ax[:, 0] = axis\n grid = np.repeat(vec, len_axis, axis=0)\n return np.absolute(np.subtract(grid, vertical_ax))",
"def v(i, j, d):\n return 81 * (i - 1) + 9 * (j - 1) + d",
"def nvector(self,\n i: int,\n j: int) -> np.ndarray:\n vec = self.vector(i, j)\n return vec / np.linalg.norm(vec)",
"def delta_r_c(r_i,r_j,latticevec):\n r_ij = r_j - r_i\n\n\n r_x = r_ij[0] - latticevec[0][0] * round( r_ij[0]/ latticevec[0][0] )\n r_y = r_ij[1] - latticevec[1][1] * round( r_ij[1]/ latticevec[1][1] )\n r_z = r_ij[2] - latticevec[2][2] * round( r_ij[2]/ latticevec[2][2] )\n\n dr_pbc = np.array( [r_x,r_y,r_z] )\n\n return dr_pbc",
"def calculate_dvec_spin(self) -> Tuple['Nparray', 'Nparray']:\n return self._calculate_dvec_spin_with_coeff(self.coeff)",
"def calc_dist(self, neighboring_pos):\n vec = np.array([i[1] - i[0] for i in zip(self.pos, neighboring_pos)])\n dist = np.linalg.norm(vec)\n return vec, dist",
"def meshup2d(self, ind='ij'):\r\n\r\n xv, yv, _ = self.vec()\r\n x_reg, y_reg = np.meshgrid(xv, yv, indexing=ind)\r\n\r\n return x_reg, y_reg",
"def calculate_dop(points, iscore=np.array([0])):\n N = points.shape[0]\n if iscore.size == 1:\n iscore = np.ones(N)\n row_norm = np.linalg.norm(points, axis=1)\n coord_norm = np.broadcast_to(np.atleast_2d(row_norm).T, [N, 3])\n unit_vect = points/coord_norm\n G_norm = np.hstack((unit_vect, np.ones([N, 1])))\n G_dash = np.atleast_2d(iscore).T*G_norm\n H_dash = np.linalg.inv(np.matmul(G_dash.T, G_dash))\n IDOP = np.sqrt(np.sum(np.diag(H_dash)))\n return IDOP, np.sum(iscore)",
"def vec_coords(label_coords, LAMBDA=1, spacing=1):\n\n #LAMBDA = 1\n #SPACING = 8\n SPACING = spacing\n \n coords_pial = np.array(label_coords[0])\n coords_gwb = np.array(label_coords[6]) #[::SPACING]\n\n\n ##### Normal Vector Pial\n #derivatives and velocity\n x_der = np.gradient(coords_pial[:,0])\n y_der = np.gradient(coords_pial[:,1]) #col slicing, R, np.array, [:,0]\n velo = np.array([[x_der[i], y_der[i]] for i in range(x_der.size)])\n\n #displacement, tangent\n displ = np.sqrt( x_der * x_der + y_der * y_der ) #speed, time\n tang = np.array([1/displ] *2 ).transpose() * velo\n\n #outward point surface normal, from tang flip, make first neg, opv\n pial_normal = [ [y*-1, x] for x, y in zip(tang[:,0], tang[:,1]) ]\n\n\n ##### Normal Vector GWB\n #derivatives and velocity\n x_der = np.gradient(coords_gwb[:,0])\n y_der = np.gradient(coords_gwb[:,1]) \n velo = np.array([[x_der[i], y_der[i]] for i in range(x_der.size)])\n\n #displacement, tangent\n displ = np.sqrt( x_der * x_der + y_der * y_der ) \n tang = np.array([1/displ] *2 ).transpose() * velo\n\n #outward point surface normal, owv\n gwb_normal = [ [y*-1, x] for x, y in zip(tang[:,0], tang[:,1]) ]\n\n\n\n plot_coords_lst = []\n used_energy_lst = []\n ##### FIND ENERGY\n # for each coord on the pial surface, x\n for x in range(len(coords_pial)):\n pial = coords_pial[x]\n \n #find vector pial to gwb, unit length, tv\n if x == 0:\n min_energy = []\n normal_term_lst = []\n vec_dist_lst = []\n parallel_term_lst = []\n vec_dist_lst = []\n for v in range(len(coords_gwb)):\n #find vector distance from pial to gwb\n gwb = coords_gwb[v]\n vec_pial_gwb = np.array(gwb) - np.array(pial)\n vec_mag = np.array(vec_pial_gwb[0]**2 + vec_pial_gwb[1]**2)\n unit_vec_dist = vec_pial_gwb/vec_mag\n vec_dist_lst.append(unit_vec_dist)\n\n #find dot product for tv and owhite, tv and opial\n dot_prod1 = np.dot(vec_dist_lst[v], gwb_normal[v])\n dot_prod2 = np.dot(vec_dist_lst[v], pial_normal[x])\n\n #normal term for each v\n normal_term_v = (1 - np.abs(dot_prod1)) + (1 - np.abs(dot_prod2))\n normal_term_lst.append(normal_term_v)\n\n #parallel term for each v \n # if x == 0:\n \n #find dot product, using self distance\n dot_prod3 = np.dot(vec_dist_lst[v], vec_dist_lst[v])\n parallel_term_v = (1 - np.abs(dot_prod3))\n parallel_term_lst.append(parallel_term_v)\n \n #energy, no summation\n ind_energy = list(enumerate(np.array([((1-LAMBDA)*n) + (LAMBDA*p) for n, p in \\\n zip(normal_term_lst, parallel_term_lst)]).T))\n \n #find local minima energy\n for i in range(len(ind_energy)):\n curr = ind_energy[i]\n fut = ind_energy[i+1]\n if fut[1] > curr[1]:\n min_energy.append(curr)\n used_energy_lst.append(curr)\n break\n\n # append coordinates to plot straight vector from pial to gwb, min energy\n gwb_idx = min_energy.pop()[0]\n # gwb_idx = min_energy[-1][0]\n plot_coords_lst.append([pial, list(coords_gwb[gwb_idx])])\n\n elif x > 0:\n min_energy = []\n normal_term_lst = []\n vec_dist_lst = []\n parallel_term_lst = []\n vec_dist_lst = []\n \n \n # used_start = int(used_energy_lst[-1][0])+20\n used_start = used_energy_lst[-1][0]\n\n for v in list( range(used_start, len(coords_gwb)-1) ):\n #find vector distance from pial to gwb\n gwb = coords_gwb[v]\n vec_pial_gwb = np.array(gwb) - np.array(pial)\n vec_mag = np.array(vec_pial_gwb[0]**2 + vec_pial_gwb[1]**2)\n unit_vec_dist = vec_pial_gwb/vec_mag\n vec_dist_lst.append(unit_vec_dist)\n\n #find dot product for tv and owhite, tv and opial\n dot_prod1 = np.dot(vec_dist_lst[-1], gwb_normal[v])\n dot_prod2 = np.dot(vec_dist_lst[-1], 
pial_normal[x])\n\n #normal term for each v\n normal_term_v = (1 - np.abs(dot_prod1)) + (1 - np.abs(dot_prod2))\n normal_term_lst.append(normal_term_v)\n\n #parallel term for each v \n #find dot product, using neighbour vector distance\n knear_vec_dist = np.array(plot_coords_lst[-1][1]) - np.array(plot_coords_lst[-1][0])\n dot_prod3 = np.dot(vec_dist_lst[-1], knear_vec_dist)\n parallel_term_v = (1 - np.abs(dot_prod3))\n parallel_term_lst.append(parallel_term_v) \n\n #energy, no summation\n ind_energy = list( enumerate(np.array([ ((1-LAMBDA)*n) + (LAMBDA*p) for n, p in \\\n zip(normal_term_lst, parallel_term_lst)]).T, used_energy_lst[-1][0])) #v\n\n #find local minima energy, and associated coordinate\n for i in range(len(ind_energy)):\n try:\n curr = ind_energy[i]\n fut = ind_energy[i+1]\n except(IndexError):\n continue\n \n if fut[1] > curr[1]:\n min_energy.append(curr)\n used_energy_lst.append(curr)\n # print(\"curr energy = \", curr)\n break\n\n try:\n gwb_idx = min_energy.pop()[0] #+ 20 #atleast deltaX apart\n plot_coords_lst.append([pial, list(coords_gwb[gwb_idx])])\n # print(\"energy coordinates = \", list( map(list, [pial, coords_gwb[gwb_idx]])) )\n except(IndexError):\n continue\n\n\n \"\"\"\n #encourage atleast one space between each end point coordinate\n energy_idx = [i[0] for i in used_energy_lst]\n new_energy_idx = []\n energy_idx_cp = energy_idx.copy()\n\n count = 0\n same_count = 0\n # loop to remove repeat indices, makes list two short\n while count < len(energy_idx):\n energy_concat = []\n i = count\n curr = energy_idx_cp[i]\n if energy_idx_cp[i] not in new_energy_idx:\n new_energy_idx.append(curr)\n same_count = 0\n else: \n energy_idx_cp = energy_idx_cp[:i] + list((np.array(energy_idx_cp[i:]) \\\n + same_count))\n\n same_count+=1\n \n count+=1\n \"\"\"\n\n\n #encourage even space between each end point coordinate\n energy_idx = [i[0] for i in used_energy_lst]\n new_energy_idx = list(map(math.floor , np.linspace(energy_idx[0] , \\\n len(coords_gwb[energy_idx[0]: len(coords_gwb)]), num=len(energy_idx)))) \n\n # new_plot_coords_lst = [[list(i[0]), list(coords_gwb[j])] for i, j in \\\n # zip(plot_coords_lst, new_energy_idx)]\n\n new_plot_coords_lst = []\n for i, j in zip(plot_coords_lst, new_energy_idx):\n try:\n pial_gwb_plot = [list(i[0]), list(coords_gwb[j])]\n new_plot_coords_lst.append(pial_gwb_plot) \n except(IndexError):\n continue\n\n #space vectors according to SPACING var\n new_plot_coords_lst = new_plot_coords_lst[::SPACING] \n\n return(new_plot_coords_lst)",
"def neighbors(i , j) :\n ns = []\n # vector de direction\n dx = [+1, +1, 0, 1]\n dy = [0, +1, 1, -1]\n for d in range(4) :\n ns.append((i + dx[d], j + dy[d]))\n #remove neagative element\n ns = [i for i in ns if i[0] >= 0 and i[1] >= 0]\n return ns",
"def get_distvec2(self, i, j,exclude_self=True):\n leni = True\n lenj = True\n try:\n l=len(i)\n if l > 1:\n ri = np.array(i)\n else:\n leni = False\n ri = self.xyz[i]\n except:\n ri = self.xyz[i]\n try:\n l=len(j)\n if l > 1:\n rj = np.array(j)\n else:\n rj = self.xyz[j]\n except:\n rj = self.xyz[j]\n lenj = False\n if 1:\n all_rj = rj + self.images_cellvec\n all_r = all_rj - ri\n all_d = np.sqrt(np.add.reduce(all_r*all_r,1))\n d_sort = np.argsort(all_d)\n if exclude_self and (np.linalg.norm(ri-rj) <= 0.001):\n d_sort = d_sort[1:]\n closest = d_sort[0]\n closest=[closest]\n if (abs(all_d[closest[0]]-all_d[d_sort[1]]) < SMALL_DIST):\n for k in d_sort[1:]:\n if (abs(all_d[d_sort[0]]-all_d[k]) < SMALL_DIST):\n closest.append(k)\n d = all_d[closest[0]]\n r = all_r[closest[0]]\n return d, r, closest",
"def _qij_vec_inner(a: int, b: int, i: int, j: int):\n vec_dagger = _qij_vec_dagger(a, b)\n vec = _qij_vec(i, j)\n sum_result = FermionOperator()\n for idx, term in enumerate(vec):\n sum_result += term * vec_dagger[idx]\n return sum_result",
"def get_lig_dihedrals(np_xyz, lig_ndx, close_ndxs, inp):\n n_at1, n_at2 = np.sum(inp.lig1_n_per_bead), np.sum(inp.lig2_n_per_bead)\n n_core = int(len(np_xyz) - inp.lig1_num*n_at1 - inp.lig2_num*n_at2)\n core_xyz = np_xyz[:n_core]\n\n lig1_dihedrals, lig2_dihedrals = [], []\n\n if n_at1 >= 3:\n for i in range(inp.lig1_num):\n ndx0 = n_core + i*n_at1\n ndx1 = ndx0*1\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = close_ndxs[lig_ndx[0][i]]#np.argsort(cdist([np_xyz[ndx1]], core_xyz))[0,0]\n dihedral = [ndx4, ndx1, ndx2, ndx3]\n lig1_dihedrals.append(dihedral)\n for j in range(n_at1-4):\n ndx1 = ndx0 + j\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = ndx1 + 3\n dihedral = [ndx1, ndx2, ndx3, ndx4]\n lig1_dihedrals.append(dihedral)\n\n if n_at2 >= 3:\n for i in range(inp.lig2_num):\n ndx0 = n_core + n_at1*inp.lig1_num + i*n_at2\n ndx1 = ndx0*1\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = close_ndxs[lig_ndx[1][i]]#np.argsort(cdist([np_xyz[ndx1]], core_xyz))[0,0]\n dihedral = [ndx4, ndx1, ndx2, ndx3]\n lig2_dihedrals.append(dihedral)\n for j in range(n_at2-4):\n ndx1 = ndx0 + j\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = ndx1 + 3\n dihedral = [ndx1, ndx2, ndx3, ndx4]\n lig2_dihedrals.append(dihedral)\n\n return (lig1_dihedrals, lig2_dihedrals)",
"def _compute_ddistortion_dgnomic(self, gnomic: ARRAY_LIKE,\n radius2: float, radius4: float, radius6: float) -> np.ndarray:\n\n row = gnomic[1]\n col = gnomic[0]\n\n vector_part = ((self.k1 * radius2 + self.k2 * radius4 + self.k3 * radius6) * np.eye(2) +\n np.array([[2 * self.p1 * row + 4 * self.p2 * col, 2 * self.p1 * col],\n [2 * self.p2 * row, 4 * self.p1 * row + 2 * self.p2 * col]]))\n\n scalar_part = ((2 * self.k1 + 4 * self.k2 * radius2 + 6 * self.k3 * radius4) * np.outer(gnomic, gnomic) +\n 2 * np.outer([self.p2, self.p1], gnomic))\n\n return vector_part + scalar_part",
"def computeNodeVolumes(self):\n for i in np.arange(0,self.ni):\n for j in np.arange(0,self.nj):\n for k in np.arange(0,self.nk):\n \n V = self.dh[0]*self.dh[1]*self.dh[2]\n if (i==0 or i==self.ni-1): V*=0.5\n if (j==0 or j==self.nj-1): V*=0.5\n if (k==0 or k==self.nk-1): V*=0.5\n \n self.node_vol[i][j][k] = V",
"def multiply_ggn_unweighted(self, vector: jnp.ndarray) -> jnp.ndarray:\n pass",
"def cal_gc_ij(_i1, _i2, _j1, _j2):\n _gc_ij = 0.0\n for i in range(_i1, _i2):\n for j in range(_j1, _j2):\n _gc_ij += np.dot(R_diff_3d[i, j, :] / np.linalg.norm(R_diff_3d[i, j, :]) ** 3, dR_cross_3d[i, j, :])\n _gc_ij = _gc_ij / (4 * np.pi)\n return _gc_ij, _i1, _i2, _j1, _j2",
"def compute_mesh(nrow, ncol, nele):\n tri_index = np.zeros((nele, 3))\n for i in range(nrow-1):\n for j in range(NUM):\n if j == 0:\n tri_index[i*4*NUM+j*4, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4, 1] = (i+1)\n tri_index[i*4*NUM+j*4, 2] = (i+2)\n\n tri_index[i*4*NUM+j*4+1, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4+1, 1] = (i+2)\n tri_index[i*4*NUM+j*4+1, 2] = (i+2)+(2*j+1)*nrow\n else:\n tri_index[i*4*NUM+j*4, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4, 1] = (i+1)+(2*j-1)*nrow\n tri_index[i*4*NUM+j*4, 2] = (i+2)+(2*j-1)*nrow\n\n tri_index[i*4*NUM+j*4+1, 0] = (i+1)+(2*j+1)*nrow\n tri_index[i*4*NUM+j*4+1, 1] = (i+2)+(2*j-1)*nrow\n tri_index[i*4*NUM+j*4+1, 2] = (i+2)+(2*j+1)*nrow\n \n tri_index[i*4*NUM+j*4+2, 0] = (i+1)+2*j*nrow\n tri_index[i*4*NUM+j*4+2, 1] = (i+1)+2*(j+1)*nrow\n tri_index[i*4*NUM+j*4+2, 2] = (i+2)+2*(j+1)*nrow\n\n tri_index[i*4*NUM+j*4+3, 0] = (i+1)+2*j*nrow\n tri_index[i*4*NUM+j*4+3, 1] = (i+2)+2*(j+1)*nrow\n tri_index[i*4*NUM+j*4+3, 2] = (i+2)+2*j*nrow\n return tri_index",
"def proVec(*args):\r\n resultado = []\r\n i,j,k = (args[0][1] * args[1][2]) - (args[0][2] * args[1][1]) , ((args[0][0] * args[1][2]) - (args[0][2] * args[1][0])) * (-1) , (args[0][0] * args[1][1]) - (args[0][1] * args[1][0])\r\n resultado.append(i)\r\n resultado.append(j)\r\n resultado.append(k)\r\n return resultado"
] | [
"0.5644964",
"0.5616432",
"0.55806136",
"0.555067",
"0.5532535",
"0.55313903",
"0.55079514",
"0.54220074",
"0.5410216",
"0.5404453",
"0.5369077",
"0.5359384",
"0.535771",
"0.53471506",
"0.533369",
"0.5333605",
"0.531024",
"0.52742636",
"0.52707136",
"0.5268251",
"0.5246245",
"0.5233908",
"0.5229182",
"0.5227153",
"0.52140296",
"0.52107304",
"0.5192087",
"0.51668626",
"0.51595306",
"0.5156015"
] | 0.59597963 | 0 |
Sets the group of a given user. | def set_user_group(self, user_id, group_no):
user_sn = self.id_to_sn(user_id)
grp_chg = bytearray(struct.pack('<I', user_sn)+bytes([group_no]))
self.send_command(cmd=DEFS.CMD_USERGRP_WRQ, data=grp_chg)
self.recv_reply()
self.refresh_data() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setGroups(self, user):\n self.grouplist.setGroups(user)",
"def set_group(self, group):\n self._group = group",
"def set_group(self, group: str) -> None:\n self.group = group",
"def set_group(self, address, group):\n self.groups[address] = group",
"def set_group(self, group):\n # Implemented from template for osid.resource.ResourceForm.set_group_template\n if self.get_group_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_boolean(group):\n raise errors.InvalidArgument()\n self._my_map['group'] = group",
"def add_user_to_group(user, group):\n Command.run(['usermod', '-a', '-G', user, group])",
"def set_group(self, id_: str, player: str, group: list):\n self._groups[id_] = {\n 'player': player,\n 'group': group\n }",
"def user_groups(self, user_groups):\n\n self._user_groups = user_groups",
"def setGroups(self, user):\n groups = gui.getGroups()\n usergroups = gui.getUserGroups(user)\n uid, gid = gui.getUserInfo(user)\n self.gidnm = grp.getgrgid(gid)[0]\n self.liststore.clear()\n for g in groups:\n gn = grp.getgrnam(g)[2]\n if (g == self.gidnm):\n self.liststore.append([g, True, False, gn])\n else:\n enable = (user != 'root') and (g not in ('root', 'bin',\n 'daemon', 'sys', 'adm'))\n self.liststore.append([g, g in usergroups, enable, gn])",
"def setGroup(self, group):\n\t\tself.config.GROUP = group",
"def addUserToGroup(self, user, group):\n return self.pm_getUserManager().addUserToGroup(self._unbox(user), self._unbox(group))",
"def set_group(self, bot, update, args):\n username = str(update.message.from_user['username'])\n chat_id = str(update.message.from_user['id'])\n\n try:\n group_name = self.format_group(str(args[0]))\n\n if self.is_group(group_name):\n self.user_db.add_new_user(username, group_name, chat_id)\n bot.send_message(update.message.chat_id,\n 'Расписание для группы *{}* успешно установлено!\\n'\n '/today\\n'\n '/tomorrow\\n'\n '/week\\n'\n '/nextweek\\n'\n '/full\\n'\n '/timetable\\n'\n '/keyboard\\n'.format(group_name),\n parse_mode='Markdown')\n else:\n raise Exception(\"Group is not exists.\")\n except (Exception, IndexError):\n bot.send_message(update.message.chat_id,\n 'Группы с таким именем не существует, проверьте корректность введенного имени.',\n parse_mode='Markdown')",
"def setgid():\n config = Config()\n try:\n gid = grp.getgrnam(config.group).gr_gid\n os.setgid(gid)\n except KeyError:\n logger.error(\"Group '%s' does not exist.\", config.group)\n raise SystemExit(os.EX_USAGE)\n except PermissionError:\n logger.error(\n \"You do not have permission to switch to group '%s'.\", config.group\n )\n raise SystemExit(os.EX_NOPERM)",
"def add_to_group(user: User, group: Group) -> Result:\n if user.pw_name in group.gr_mem:\n return Result(State.unchanged)\n command([\"/usr/sbin/addgroup\", user.pw_name, group.gr_name])\n group.gr_mem.append(user.pw_name)\n return Result(State.success)",
"def group(self, group):\n self._group = group",
"def group(self, group):\n\n self._group = group",
"def group(self, group):\n\n self._group = group",
"def group(self, group):\n\n self._group = group",
"def set_user(self, user):\n self._user = user",
"def set(self, name_group, key, value):\n self.psettings.beginGroup(name_group)\n self.psettings.setValue(key, value)\n self.closeGroup()",
"def set_user(self, user):\r\n self.user = user",
"def setGatingGroup(self, channel, group, unitCode=0):\n resp = self.XAPCommand('GRPSEL', channel, group, unitCode=unitCode)\n return resp",
"def AddMemberToGroup(group_id,user_id):\r\n Group.AddMemberToGroup(group_id,user_id)",
"def register(self, user):\n if not self.get():\n user_node = user.get() # transform user object to user node object\n usergroup_node = Node(\"Usergroup\",\n groupname=self.groupname,\n id=uuid4().hex)\n graph.create(usergroup_node)\n ownership = Relationship(user_node, 'owns', usergroup_node)\n membership = Relationship(user_node, 'in', usergroup_node)\n graph.create(ownership)\n graph.create(membership)\n self.usergroup_node = usergroup_node\n self.id = usergroup_node['id']\n return usergroup_node\n return self",
"def set_group(group_name):\n group_config = env.groups[group_name]\n set_role_defs(\n web=group_config['servers'][WEB_ROLE],\n db=group_config['servers'][DB_ROLE],\n )\n env.branch = group_config['branch']\n env.subdomain = group_config.get('subdomain', 'www')",
"def set_group_owners(self, group, users):\n \n # Ensure that if we are removing an existing owner, he remains a member\n for u in group.owners:\n if u not in users:\n group.add_member(u)\n self._flush_user_data_caches(u)\n\n group.set_owner(users)\n for u in users:\n self._flush_user_data_caches(u)",
"def assign_TestUserGroup(test_case, # type: AnyMagpieTestCaseType\n override_user_name=null, # type: Optional[Str]\n override_group_name=null, # type: Optional[Str]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) -> None\n app_or_url = get_app_or_url(test_case)\n usr_name = override_user_name if override_user_name is not null else test_case.test_user_name\n grp_name = override_group_name if override_group_name is not null else test_case.test_group_name\n path = \"/groups/{grp}/users\".format(grp=grp_name)\n resp = test_request(app_or_url, \"GET\", path,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n body = check_response_basic_info(resp, 200, expected_method=\"GET\")\n if usr_name not in body[\"user_names\"]:\n path = \"/users/{usr}/groups\".format(usr=usr_name)\n data = {\"group_name\": grp_name}\n resp = test_request(app_or_url, \"POST\", path, json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n check_response_basic_info(resp, 201, expected_method=\"POST\")\n TestSetup.check_UserGroupMembership(test_case, override_user_name=usr_name, override_group_name=grp_name,\n override_headers=override_headers, override_cookies=override_cookies)",
"def set_groups(self, principal, groups):\n self._check_groups(groups)\n self._groups[principal] = tuple(groups)",
"def set_group_name(self, name):\n self.groupname = name",
"def set_user(self, user: User):\n self.__user = user"
] | [
"0.7851728",
"0.7180732",
"0.70697165",
"0.6988296",
"0.6928173",
"0.69139653",
"0.67774695",
"0.6773321",
"0.67713976",
"0.6747184",
"0.66019046",
"0.65300983",
"0.65245444",
"0.6508961",
"0.64286745",
"0.6407589",
"0.6407589",
"0.6407589",
"0.63585025",
"0.6344296",
"0.6303067",
"0.6250056",
"0.62493557",
"0.6204448",
"0.6183275",
"0.6178613",
"0.6170183",
"0.61679375",
"0.61425805",
"0.6111284"
] | 0.7689402 | 1 |
Set/create an unlock combination. | def set_unlock_comb(self, comb_no, ulg_comb):
wreq_ulg = bytearray([0x00]*8)
wreq_ulg[0] = comb_no
wreq_ulg[6:8] = struct.pack('<H', len(ulg_comb))
for n in range(len(ulg_comb)):
wreq_ulg[1+n] = ulg_comb[n]
self.send_command(cmd=DEFS.CMD_ULG_WRQ, data=wreq_ulg)
self.recv_reply()
self.refresh_data() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_lock_unlock(self):\n my_thing_id = str(uuid.uuid4())\n dweepy.lock(my_thing_id, test_lock, test_key)\n dweepy.unlock(my_thing_id, test_key)",
"def unlock(*args):",
"def unlock(self, volume, _cfg=None) :\n name_or_id = self.get_name_or_id(volume)\n CmdList=[_cfg.binaries[\"vos\"], \"unlock\",\"-id\" ,\"%s\" % name_or_id, \"-cell\", \"%s\" % _cfg.cell]\n return CmdList,PM.unlock",
"def f_unlock(self):\n self._locked = False",
"def unlock_password(self, unlock_password):\n\n self._unlock_password = unlock_password",
"def testUnlock(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n res = c.lock_file(t.code, fh, stateid, 20, 100)\n check(res, msg=\"Locking file %s\" % t.code)\n ops = c.use_obj(fh)\n ops += [c.locku_op(READ_LT, 1, res.lockid, 0, 0xffffffffffffffff)]\n _replay(c, ops)",
"def wallet_unlock(timeout, password):\r\n return make_request({\"method\": \"wallet_unlock\",\r\n \"params\": [timeout, password],\r\n \"jsonrpc\": \"2.0\",\r\n \"id\": 0, })",
"def before_unlock_actions(self):\n for command in self.before_unlock_commands:\n addr = command[\"address\"]\n prop = command[\"property\"]\n if len(command[\"argument\"]) == 0:\n arg = [0]\n else:\n try:\n arg = [eval(command[\"argument\"])]\n except:\n arg = [command[\"argument\"]]\n if command[\"type\"] == \"set\":\n tine.set(addr, prop, arg)\n elif command[\"type\"] == \"query\":\n tine.query(addr, prop, arg[0])",
"def set_locked(self, *args):\n return _ida_hexrays.vdui_t_set_locked(self, *args)",
"def test_unlockroom(run, mocker):\n mocked_unlock = mocker.patch('dork.cli.unlock_room')\n with open('./dork/yaml/default.yml') as file:\n # Should not call load directly\n data = yaml.safe_load(file.read())\n\n # unlock_room uses game, user_action, and direction (in order)\n game = types.Game(data)\n game.player.inventory = ['key']\n user_action = ['use key']\n direction = 'Jail hallway'\n run(dork.cli.unlock_room, game, user_action, direction)\n assert mocked_unlock.call_count == 1",
"def unlock(self, item_type):",
"def rpc_unlock(self, session, rpc, target):\n del rpc, session, target # avoid unused errors from pylint\n return",
"async def async_unlock(self, **kwargs: Any) -> None:\n if not await self._node.secure_unlock():\n raise HomeAssistantError(f\"Unable to unlock device {self._node.address}\")",
"def unlock(self):\n raise NotImplementedError",
"def create_passlocker(username, userpasslock, email):\n new_passlocker = passlocker(username, userpasslock, email)",
"def svn_client_unlock(apr_array_header_t_targets, svn_boolean_t_break_lock, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def unlock_vault():\n print()\n password_vault = [\n {\n 'type': 'password',\n 'message': 'Enter your vault password:',\n 'name': 'password',\n 'validate': NotEmpty\n }\n ]\n password_answer = prompt(password_vault, style=style)\n passwd = password_answer[\"password\"]\n v = vault.unlock(passwd)\n if v == False:\n unlock_vault()",
"async def unlock(self, product_type: ProductType, serial_no: str) -> None:\n await self._send_message_get_response(OutgoingMessage(OutgoingMessageType.unlock, serial_no=serial_no))",
"def door_unlock(self, delay):\n self.send_command(cmd=DEFS.CMD_UNLOCK, data=struct.pack('<I', delay))\n self.recv_reply()",
"def unlock(self, password):\n if self.locked:\n self._privkey = keys.decode_keystore_json(self.keystore, password)\n self.locked = False\n self.address # get address such that it stays accessible after a subsequent lock",
"def unlock_menu_cust(self):\n intro = \"Here are the options available for you to choose from\"\n option1 = \"[1] UNLOCK BY CREDENTIALS\"\n option2 = \"[2] UNLOCK BY FACIAL RECOGNITION\"\n option3 = \"[3] BACK\"\n print(intro, option1, option2, option3, sep='\\n')",
"def display_cust_unlock(self):\n self.clear_terminal()\n self.unlock_menu_cust()\n self.handle_selection_cust_unlock()",
"def __createLock(self):\n lockUrl = self.metaData.getLink(\"lock\")\n assert lockUrl is not None\n\n lockBody = json.dumps({\"lockIntent\" : \"lockedForEdit\"})\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n lockResponse = self._adapter.postRequest(lockUrl, header, lockBody)\n\n return lockResponse",
"def testUnlockWait(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n res = c.lock_file(t.code, fh, stateid, 20, 100)\n check(res, msg=\"Locking file %s\" % t.code)\n sleeptime = c.getLeaseTime() * 2\n env.sleep(sleeptime)\n ops = c.use_obj(fh)\n ops += [c.locku_op(READ_LT, 1, res.lockid, 0, 0xffffffffffffffff)]\n _replay(c, ops, [NFS4_OK, NFS4ERR_EXPIRED])",
"def unlock(self):\n print(\"DEPRECATED unlock\")\n return self._operations.unlock()",
"def lock(self):\n self._privkey = None\n self.locked = True",
"async def test_unlocking(hass: HomeAssistant) -> None:\n client_mock = await init_integration(hass)\n\n await hass.services.async_call(\n LOCK_DOMAIN,\n SERVICE_UNLOCK,\n {ATTR_ENTITY_ID: \"lock.my_mazda3_lock\"},\n blocking=True,\n )\n await hass.async_block_till_done()\n\n client_mock.unlock_doors.assert_called_once()",
"def unlock_attributes(pynode, attr_name_list = None):\n if attr_name_list is None:\n attr_name_list = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']\n\n _do_attributes_key_lock_hide(pynode, attr_name_list, lock = False)",
"def unlock(self, lock_handle):\n assert isinstance(lock_handle, tuple)\n lock_key, schluessel = lock_handle\n sqldict = self.tabledict\n sqldict.update({'lock_key': lock_key, 'schluessel': schluessel})\n if self.update(\"%(dffeld)s=''\" % sqldict,\n \"%(dffeld)s='%(lock_key)s' AND %(schluesselfeld)s='%(schluessel)s'\" % sqldict) != 1:\n raise RuntimeError(\"SQL UPDATE failed.\")",
"def create_modad_lock(self):\n\n Logger.create_lock_file()\n\n with open(\"modad.lock\", \"w\") as file:\n file.write(json.dumps(self.commit_hashes))"
] | [
"0.6183068",
"0.6134914",
"0.59802043",
"0.5789322",
"0.5731215",
"0.56940514",
"0.5689925",
"0.5592284",
"0.5524211",
"0.54851586",
"0.54664135",
"0.546573",
"0.54345644",
"0.5430829",
"0.5415139",
"0.5396441",
"0.53864837",
"0.5340268",
"0.53338486",
"0.5332258",
"0.52412295",
"0.52244157",
"0.5202389",
"0.51774114",
"0.5169177",
"0.5167089",
"0.5090967",
"0.50807667",
"0.5041344",
"0.5038947"
] | 0.6149357 | 1 |
Unlocks the door for a given delay. | def door_unlock(self, delay):
self.send_command(cmd=DEFS.CMD_UNLOCK, data=struct.pack('<I', delay))
self.recv_reply() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unlock_door_interlock(self):\n if self.detector_distance_hwobj.getPosition() < 340:\n self.detector_distance_hwobj.move(500)\n gevent.sleep(1)\n\n if not self.use_door_interlock:\n logging.getLogger().info('Door interlock is disabled')\n return\n\n if self.door_interlock_state:\n gevent.spawn(self.unlock_doors_thread)\n else:\n logging.getLogger().info('Door is Interlocked')",
"def wallet_unlock(timeout, password):\r\n return make_request({\"method\": \"wallet_unlock\",\r\n \"params\": [timeout, password],\r\n \"jsonrpc\": \"2.0\",\r\n \"id\": 0, })",
"def unlock_doors_thread(self):\n if self.door_interlock_breakabled:\n try:\n self.before_unlock_actions()\n except:\n pass\n if self.cmd_break_interlock is None:\n self.cmd_break_interlock = self.getCommandObject('cmdBreakInterlock')\n self.cmd_break_interlock()\n else:\n msg = \"Door Interlock cannot be broken at the moment \" + \\\n \"please check its status and try again.\"\n logging.getLogger(\"user_level_log\").error(msg)",
"def delay_off(self, minutes: int):\n return self.send(\"delay_off\", [minutes])",
"def unlock(self):\n\n self.wait = False",
"def put_down(robot):\n log(robot, \"Putting Down Token.\")\n close_arms(robot)\n\n extend_arms(robot)\n sleep(0.1)\n\n lower_arms(robot)\n sleep(0.3)\n\n open_arms(robot)\n sleep(0.2)",
"def set_delay(delay):\r\n inst.write(\"PULS:DEL %f\" %(delay))",
"async def async_unlock(self, **kwargs: Any) -> None:\n if not await self._node.secure_unlock():\n raise HomeAssistantError(f\"Unable to unlock device {self._node.address}\")",
"async def async_turn_off(self) -> None:\n self._zone.power = False",
"def power_off(timeout: int = 0) -> None:",
"def stop_alarm(self):\n self.out_power.pulse()",
"async def async_turn_off(self, **kwargs) -> None:\n await self._zone.set_mode(\"timer\")",
"def _DelayedStop(self, delay):\n blue.synchro.SleepSim(delay)\n if self.playerEffect is not None:\n self.RemoveFromScene(self.playerEffect)\n self.playerEffect = None\n if self.gfx is not None:\n ShipEffect.Stop(self)",
"def unlock(self, with_spell=False):\n\n if not with_spell:\n print(\"You need to cast a spell first.\")\n else:\n print(with_spell)\n super(DoorNeedingSpell, self).unlock()",
"def buzz(self, timeout: float, rate: float) -> None:\n # FIXME - the close() method should stop any buzz in progress\n\n output_pin = settings.DOOR_LOCK[\"CONFIG\"][\"output_pin\"]\n\n time_end = time.time() + timeout\n while time.time() < time_end:\n GPIO.output(output_pin, True)\n time.sleep(rate)\n GPIO.output(output_pin, False)\n time.sleep(rate)",
"def close_door(self):\n\t\tio.set_bit(OUTPUT.DOOR_OPEN, 0)\n\t\tself.should_drive()",
"def f_unlock(self):\n self._locked = False",
"async def leave(self, room_id, *, delay=0, lifespan=math.inf):\n assert type(room_id) is str, \"Parameter room_id should be a string.\"\n await self.add_output(\n \"{}|/leave\".format(room_id), delay=delay, lifespan=lifespan\n )",
"def unlock_clock(self):\n self.sem.release()",
"def _api_release_lock_with_timer(self):\n\n if self._apt_timer.is_alive():\n self._apt_timer.cancel()\n\n if self._api_lock.locked():\n self._api_lock.release()",
"async def async_turn_off(self, **kwargs: Any) -> None:\n await self.stop_watering()",
"def unlock(self):\n self.mainloop().unlock()",
"async def async_turn_off(self) -> None:\n await self._device.enter_standby()",
"def release_lock():\r\n get_lock.n_lock -= 1\r\n assert get_lock.n_lock >= 0\r\n # Only really release lock once all lock requests have ended.\r\n if get_lock.lock_is_enabled and get_lock.n_lock == 0:\r\n get_lock.start_time = None\r\n get_lock.unlocker.unlock()",
"async def async_unlock(hass, cluster, entity_id):\n from zigpy.zcl.foundation import Status\n with patch(\n 'zigpy.zcl.Cluster.request',\n return_value=mock_coro([Status.SUCCESS, ])):\n # lock via UI\n await hass.services.async_call(DOMAIN, 'unlock', {\n 'entity_id': entity_id\n }, blocking=True)\n assert cluster.request.call_count == 1\n assert cluster.request.call_args[0][0] is False\n assert cluster.request.call_args[0][1] == UNLOCK_DOOR",
"def force_unlock():\r\n global timeout_before_override\r\n timeout_backup = timeout_before_override\r\n timeout_before_override = 0\r\n try:\r\n get_lock(min_wait=0, max_wait=0.001)\r\n release_lock()\r\n finally:\r\n timeout_before_override = timeout_backup",
"def unlock_gate(self):\n self.fsm_gate.set()",
"async def async_turn_off(self, **kwargs):\n self._wrap_device.device.set_duct_zone(self._zone, False)",
"def _stop_after(delay):\n timer = CFRunLoopTimerCreate(\n None, # allocator\n CFAbsoluteTimeGetCurrent() + delay, # fireDate\n 0, # interval\n 0, # flags\n 0, # order\n _c_stop_callback,\n None,\n )\n CFRunLoopAddTimer(\n CFRunLoopGetMain(),\n timer,\n kCFRunLoopCommonModes,\n )",
"def unlock(*args):"
] | [
"0.66657245",
"0.62961185",
"0.61680007",
"0.5932932",
"0.5875496",
"0.5805225",
"0.57764405",
"0.5750556",
"0.56625706",
"0.56502044",
"0.56499124",
"0.5626006",
"0.5624048",
"0.5610247",
"0.55794007",
"0.556739",
"0.55218613",
"0.55032533",
"0.5485194",
"0.54710114",
"0.5449367",
"0.5448718",
"0.5448019",
"0.5395904",
"0.53866035",
"0.5357449",
"0.53437275",
"0.53280866",
"0.5313165",
"0.5306404"
] | 0.8951721 | 0 |
Find sections in the given xml document which have a match for the name attribute | def _get_named_arrs_from_xml(self, xml, name, is_expected=True):
try:
doc = minidom.parseString(xml)
except ExpatError, e:
raise ProviderContentMalformedError("Content parse provider supplied XML document")
arrs = doc.getElementsByTagName('arr')
matching_arrs = [elem for elem in arrs if elem.attributes['name'].value == name]
if (is_expected and (len(matching_arrs) == 0)):
raise ProviderContentMalformedError("Did not find expected number of matching arr blocks")
return matching_arrs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scan_section(name, o):\n\n for n in dir(o):\n scan(name + n, getattr(o, n))",
"def find(self, elements, name=None):\n\n for element in elements:\n if element[\"name\"] == name or element[\"cm\"][\"name\"] == name:\n return element\n return None",
"def getChildrenByName(rootNode, name):\n return [e for e in rootNode.childNodes if e.localName == name]",
"def getChildrenByNameNS(rootNode, ns, name):\n return [e for e in rootNode.childNodes if e.localName == name and e.namespaceURI == ns]",
"def find_all_by_name ( self, name, **kw ):\n return self.find_all (\n lambda s, n: s.name == n, c_args=( name, ), **kw\n )",
"def find_lost_section(amdpar_xml):\n reg_text = amdpar_xml.getparent()\n reg_text_siblings = [s for s in reg_text.itersiblings()\n if s.tag == 'REGTEXT']\n if len(reg_text_siblings) > 0:\n candidate_reg_text = reg_text_siblings[0]\n amdpars = [a for a in candidate_reg_text if a.tag == 'AMDPAR']\n if len(amdpars) == 0:\n # Only do this if there are not AMDPARS\n for c in candidate_reg_text:\n if c.tag == 'SECTION':\n return c",
"def find_section(amdpar_xml):\n siblings = [s for s in amdpar_xml.itersiblings()]\n\n if len(siblings) == 0:\n return find_lost_section(amdpar_xml)\n\n for sibling in siblings:\n if sibling.tag == 'SECTION':\n return sibling\n\n paragraphs = [s for s in siblings if s.tag == 'P']\n if len(paragraphs) > 0:\n return fix_section_node(paragraphs, amdpar_xml)",
"def get(node: md.Document, name: str) -> mc.Nodelist:\n return node.getElementsByTagName(name)",
"def node_find_by_name( fdt, node_name, starting_node = 0, multi_match=False ):\n\n matching_nodes = []\n matching_node = None\n\n search_active = False\n if starting_node == \"/\" or starting_node == 0:\n search_active = True\n\n for node in fdt.node_iter():\n if not search_active:\n if node.path == starting_node:\n search_active = True\n\n if search_active:\n if node.name == node_name:\n if not matching_nodes:\n matching_node = node\n matching_nodes.append( node )\n\n return matching_node, matching_nodes",
"def get_node_by_name(self, name):\r\n root = self.get_xml_root()\r\n return root.find(name)",
"def search(self, name):\n\t\tmatching_contacts = []\n\t\tfor contact in self:\n\t\t\tif name in contact.name:\n\t\t\t\tmatching_contacts.append(contact)\t\n\t\treturn matching_contacts",
"def search_by_name(self, name):\r\n return self.__filter(self.get_all_persons(), lambda x: name.lower().strip() in x.name.lower().strip())",
"def get_section_by_name(self, section_name):\n sections = self.unravel_sections(self.get_sections())\n for section in sections:\n if section['name'] == section_name:\n return section['groupId'], section\n return None, None",
"def test_search_subnode_attribute(self):\n\n lista = []\n for (_, value) in self.parser.search(self.xml, 'node@id'):\n lista.append(value)\n self.assertEqual(lista, ['1', '2', '3'])",
"def cxfind(self, namepart):\n names = [name for name in self if namepart in name]\n names.sort()\n return names",
"def contains(name):",
"def _extract_node_by_name(self, node_name: str) -> None:\n for node in self.soup(node_name):\n node.extract()",
"def name_startswith(self, name):\n matches = [\n entry\n for entry in self\n if entry is not None and entry.name.startswith(name)\n ]\n return matches",
"def find(self, name=None, pattern=None, extension=None, **kwargs):\n elements = []\n context = getContext()\n for path in self.findPaths(name=name, pattern=pattern, extension=extension):\n if os.path.exists(path):\n e = elementFromPath(path, context=context, name=name, **kwargs)\n if e is not None:\n elements.append(e)\n return elements",
"def getElements(self, name=\"\"):\n\n if not name:\n return self.children\n else:\n elements = []\n for element in self.children:\n if element.name == name:\n elements.append(element)\n return elements",
"def get_section_with_name(section_names, doc):\n text = ''\n for section in section_names:\n try: \n text = ' '.join([text, doc.sections[section]['text']])\n except:\n pass\n \n try:\n text = ' '.join([text, doc.sections['narrative'][section]])\n except:\n pass\n \n try:\n text = ' '.join([text, doc.sections['findings'][section]])\n except:\n pass\n \n try:\n text = ' '.join([text, doc.sections[section]]) # some general sections are sections themselves\n except:\n pass\n \n return ' '.join(text.split())",
"def read_xml(self, xml_name):\n\t\tlogging.info('INICIANDO: leitura do arquivo xml de consultas')\n\t\tdoc = xml.dom.minidom.parse(xml_name)\n\t\tquery_number = doc.getElementsByTagName(\"QueryNumber\")\n\t\tquery_text = doc.getElementsByTagName(\"QueryText\")\n\t\tresults = doc.getElementsByTagName(\"Results\")\n\t\trecords = doc.getElementsByTagName(\"Records\")\n\t\tcontent = {'QueryNumber':[], 'QueryText':[], 'Results':[], 'Records':[]}\n\t\tfor nquery in query_number: content['QueryNumber'].append(nquery.firstChild.nodeValue)\n\t\tfor tquery in query_text: content['QueryText'].append(tquery.firstChild.nodeValue)\n\t\tfor res in results: content['Results'].append(res.firstChild.nodeValue)\n\t\tfor rec in records: \n\t\t\tdocs_results = []\n\t\t\tfor item in rec.getElementsByTagName(\"Item\"):\n\t\t\t\tscore = self.define_score(item.getAttribute(\"score\"))\n\t\t\t\tdocs_results.append([int(item.firstChild.nodeValue), score])\n\t\t\tcontent['Records'].append(docs_results)\n\t\tlogging.info('FINALIZADO: leitura do arquivo xml de consultas')\n\t\treturn content",
"def searchGroups(self, xml, sources):\r\n for element in xml.getchildren():\r\n if element.tag == 'group':\r\n self.searchGroups(element, sources)\r\n elif element.tag == 'file':\r\n if not str(element.name).endswith('.s'):\r\n s = str(element.name)\r\n if os.path.sep not in s:\r\n if os.path.sep == '\\\\':\r\n s = s.replace('/', '\\\\')\r\n elif os.path.sep == '/':\r\n s = s.replace('\\\\', '/')\r\n\r\n sources.append(s.replace('$PROJ_DIR$'+os.path.sep+'..', self.path))",
"def find_by_name ( self, name, **kw ):\n try:\n return next ( self.find_all_by_name ( name, **kw ) )\n except StopIteration:\n return None",
"def search_service(self, name_filter):\n rs=search_service(name_filter)\n for el in rs:\n print(el)",
"def get_attrs_with_name(self, name):\n return self.get_matches([lambda attr: attr.name == name])",
"def find_node(self, name):\n for node in self.get_children():\n if node.read_name() == name:\n break\n else:\n node = None\n return node",
"def get_by_tag(self, name):\n return [XmlWrapper(i) for i in self.node.getElementsByTagName(name)]",
"def get_by_tag(self, name):\n return [XmlWrapper(i) for i in self.node.getElementsByTagName(name)]",
"def test_01_FindXml(self):\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n # sprint(PrettyFormatAny.form(self.m_root_xml, 'A3-01-A - Entire Xml'))\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection', 'XML - No Controllers section')\n # print(PrettyFormatAny.form(self.m_xml.controller_sect, 'A3-01-B - All Controllers Xml'))\n self.assertEqual(self.m_xml.controller.tag, 'Controller', 'XML - No Controller section')\n # print(PrettyFormatAny.form(self.m_xml.controller, 'A3-01-C - First Controller Xml'))"
] | [
"0.59783506",
"0.587904",
"0.5748297",
"0.56949323",
"0.5501737",
"0.5487459",
"0.547802",
"0.5442949",
"0.5428993",
"0.5389648",
"0.5362752",
"0.5323829",
"0.53208655",
"0.5281399",
"0.5272895",
"0.52705747",
"0.52676386",
"0.5186042",
"0.51504034",
"0.5145324",
"0.5138944",
"0.5106924",
"0.5046756",
"0.5040875",
"0.5024798",
"0.50225693",
"0.5022551",
"0.50213593",
"0.50213593",
"0.49788997"
] | 0.63427174 | 0 |
Places player's guess on board | def placeGuess(player, xPos, yPos):
# Print hit ship message if other player's board
# has a ship at that position
if(player.otherPlayer.board[yPos][xPos] != '~'):
player.guess[yPos][xPos] = '#'
print("You've hit a ship!\n")
# Print miss message if no ship at that position
else:
player.guess[yPos][xPos] = 'm'
print("You missed!\n")
for i in range(0, 2):
# Set enemy to be the other player
enemy = player.otherPlayer
ship = enemy.ships[i]
# If ship is already sunk, go to next iteration
if(ship.sunk == 1):
continue
bad = 0
sX = ship.startX
sY = ship.startY
ori = ship.orientation
# Check if all of ship in horizontal position is all hit
if(ori == 1):
for y in range(sY, sY + ship.shipLen):
if(player.guess[y][sX] != enemy.board[y][sX]):
bad = 1
break
# Check if all of ship in vertical position is all hit
else:
for x in range(sX, sX + ship.shipLen):
if(player.guess[sY][x] != enemy.board[sY][x]):
bad = 1
break
# If entire ship is hit, sink ship, print ship sunk message
if(bad == 0):
ship.sunk = 1
print("You sank a " + ship.name + "\n")
break | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def _guess(self, ctx):\n reply = '\\n'\n for i, entry in enumerate(db.get_leaderboard(\n ctx.message.server.id,\n 'guess-leaderboard')):\n for key, value in entry.items():\n if key == \"discord_id\":\n name = self.get_name(ctx, value)\n elif key == 'date':\n date = value\n else:\n score = value\n reply += '{}. {} - {} ({})\\n'.format(\n i+1,\n score,\n name,\n datetime.datetime.fromtimestamp(\n int(date)).strftime('%d-%m-%Y')\n )\n await self.bot.say(reply)",
"def guess(self, row, col) -> Tuple[int, Optional[ship.Ship]]:\n my_ship: ship.Ship = self._board_matrix[row][col]\n\n # if my_ship is None the guess is a miss, otherwise its a hit\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.guess, just copy the code over\n\n # --------- END YOUR CODE ----------",
"def prompt_player(self):\n board = self.draw_board()\n print board\n self.player_moves(self.board_values)",
"def guess_atom(self, row, col):\n if not self.valid_guess(row, col):\n return False\n # if row,col in guess list, tells players and returns True\n if self._board[row][col] != \" \":\n print(\"You've already guessed that location!\")\n return True\n # if match found, deducts 1 from atoms list\n if (row, col) in self._a_locations:\n self._atoms -= 1\n # if all atoms guessed, changes game state to win and prints it\n if self._atoms == 0:\n self.change_state(\"WON\")\n print(self.get_game_state())\n # adds guess location to list of guesses made and returns True\n self._board[row][col] = \"A\"\n return True\n # deducts 5 from score if no match and checks if you lost\n self._score -= 5\n if self._score <= 0:\n self.change_state(\"LOST\")\n print(self.get_game_state())\n self._board[row][col] = \"X\"\n return False",
"async def guess(self, ctx):\n server = ctx.message.server.id\n current_streak = 0\n while True:\n if current_streak > 0:\n await self.bot.say('Your current streak is {}'.format(current_streak))\n reply = guessing_game(server, ctx)\n await self.bot.send_file(\n ctx.message.channel,\n 'images/lineup/game_postgame.png',\n content='Guess a hero {} played that game. {}'.format(\n reply[1], reply[2])\n )\n\n def guess_check(m):\n return ctx.message.content\n\n guess = await self.bot.wait_for_message(\n timeout=30.0,\n check=guess_check,\n author=ctx.message.author,\n channel=ctx.message.channel\n )\n answer = reply[0]\n if guess is None:\n fmt = 'Sorry, you took too long. It was {}.\\nGame over. Your score: {}.'\n await self.bot.send_message(\n ctx.message.channel,\n fmt.format(answer, current_streak)\n )\n if current_streak > 0:\n db.add_leaderboard_guess(\n ctx.message.server.id,\n ctx.message.author.id,\n current_streak,\n 'guess-leaderboard'\n )\n break\n if guess.content.lower() == answer.lower():\n\n await self.bot.say('Yay! You are right.')\n current_streak += 1\n else:\n await self.bot.say(\n 'Nope. It is actually {}.\\n Game over. Your score: {}'.format(answer, current_streak))\n if current_streak > 0:\n db.add_leaderboard_guess(\n ctx.message.server.id,\n ctx.message.author.id,\n current_streak,\n 'guess-leaderboard'\n )\n break",
"def get_guess(self):\n guess = self.player.higher_or_lower",
"def take_turn(self, opponent):\n\n # --------- BEGIN YOUR CODE ----------\n\n # 1.) Guess a random space that has not been guessed (or be more clever!)\n\n # Steps 2-4 are the same as Human.take_turn\n\n # 2.) Call opponent.guess() to check whether the guess is a hit or miss\n\n # 3.) Update my_hits, my_misses, and sunk_ships accordingly\n\n # 4.) If the sunk_ships array has 5 ships in it set self.complete to True\n\n # --------- END YOUR CODE ----------\n\n # enforce a short delay to make the computer appear to \"think\" about its guess\n time.sleep(0.5)",
"def guess(mqtt_client, number_to_guess_entry):\n # TODO: 5. Uncomment the line of code below to make guesses with EV3.\n mqtt_client.send_message(\"guess\", [int(number_to_guess_entry.get())])\n number_to_guess_entry.delete(0, 'end')\n # Note: You can play the game with only TO DO 5 complete, but it will be easier to solve if you do TO DO 6 as well.",
"def run_game(player_board, user_guess, computer_board, computer_guess):\n player_turn = 0 # Ensures player goes first\n computer_turn = 1 # Computer can only go once player score is equal\n # Life counter decrements each time a ship is hit\n player_lives = 15\n computer_lives = 15\n while True:\n if player_turn < computer_turn:\n user_guess.print_board()\n column, row = player_board.attack_input()\n if user_guess.board[row][column] == GUESSED:\n print('\\nYOU HAVE ALREADY GUESSED THIS CO-ORDINATE\\n')\n elif user_guess.board[row][column] == HITSHIP:\n print('\\nYOU HAVE ALREADY HIT A SHIP IN THIS CO-ORDINATE\\n')\n elif computer_board.board[row][column] == SHIP:\n print(' ')\n print(PHASE)\n print('\\nCONGRATULATIONS, YOU HIT A SHIP!\\n')\n user_guess.board[row][column] = HITSHIP\n player_turn += 1\n user_guess.lives_counter()\n user_guess.print_board()\n computer_lives -= 1\n print(\"COMPUTER'S TURN TO ATTACK!\")\n time.sleep(3)\n if computer_lives == 0:\n print('\\nTHE COMPUTER HAS NO LIVES LEFT!')\n print('YOU WIN!')\n print(' ')\n print(PHASE)\n break\n else:\n print(' ')\n print(PHASE)\n print('\\nYOU MISSED!\\n')\n user_guess.board[row][column] = GUESSED\n player_turn += 1\n user_guess.print_board()\n print(\"COMPUTER'S TURN TO ATTACK!\")\n time.sleep(3)\n if computer_turn == player_turn:\n row, column = computer_guess.attack_input()\n if computer_guess.board[row][column] == GUESSED:\n pass\n elif computer_guess.board[row][column] == HITSHIP:\n pass\n elif player_board.board[row][column] == SHIP:\n print('THE COMPUTER HIT YOUR SHIP!\\n')\n computer_turn += 1\n player_lives -= 1\n computer_guess.column_arry.append(column)\n computer_guess.row_arry.append(row)\n computer_guess.board[row][column] = HITSHIP\n player_board.board[row][column] = HITSHIP\n player_board.lives_counter()\n player_board.print_board()\n computer_guess.attk_arry.append(0)\n time.sleep(3)\n if player_lives == 0:\n print('\\nYOU HAVE NO LIVES LEFT!')\n print('YOU LOSE!')\n print(' ')\n print(PHASE)\n break\n else:\n print('COMPUTER MISSED!\\n')\n computer_guess.board[row][column] = GUESSED\n computer_turn += 1\n player_board.print_board()\n computer_guess.attk_arry.append(1)\n computer_guess.check_miss_count()\n time.sleep(3)",
"def check_guess(self):\n if self.guess in self.guessed[8:]:\n self.entry.delete(0, END)\n return\n self.guessed += self.guess\n self.guesses.set(self.guessed)\n if self.guessed[-1] not in self.word:\n self.strikes += 1\n self.change_image()\n else:\n self.word_form()\n\n if \"_\" in self.word_underscored:\n if self.strikes == 6:\n self.word_blank.set(self.word)\n self.guesses.set(\"HANGMAN. YOU LOSE.\")\n\n if ''.join(self.word_underscored) == self.word:\n self.guesses.set(\"You WIN!\")\n self.word_blank.set(self.word)\n\n self.entry.delete(0, END)",
"def random_place(board, player):\n available = possibilities(board)\n place(board, player, random.choice(available))",
"def play_best_guess(self, game):\n\n\n # create a list of cells\n cells = [game.board[i][j]\n for i in xrange(game.rows)\n for j in xrange(game.cols)]\n\n first_cell = cells[0]\n game.reveal_cell(first_cell.row, first_cell.col)\n\n # draw updated board and pause for a second\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n\n\n total_flagged = 0\n while not game.lost_game and not game.won_game:\n\n # remember if we've made a move in the while loop\n # so we know whether to make a random move later on\n made_move = False\n\n # look through all revealed cells for any with a number of neighboring mines.\n # if the cell has the same number of unrevealed neighbors as the cell's\n # number of neighboring mines, all the unrevealed neighbors must be mines.\n revealed_numbered_cells = [c for c in cells if c.revealed and (not c.flagged) and (c.neighbors > 0)]\n while revealed_numbered_cells:\n cell = revealed_numbered_cells.pop()\n # cell may have been marked flagged after revealed_numbered_cells was compiled\n if not cell.flagged:\n neighbor_cells = ms.Minesweeper.get_neighbors(cell.row, cell.col, game.board)\n flagged_neighbors = [n for n in neighbor_cells if n.flagged]\n number_remaining_mines = cell.neighbors - len(flagged_neighbors)\n unknown_neighbors = [n for n in neighbor_cells if not n.flagged and not n.revealed]\n if number_remaining_mines > 0 and len(unknown_neighbors) == number_remaining_mines:\n # flag every neighbor\n for c in unknown_neighbors:\n if total_flagged < game.mines:\n total_flagged += 1\n game.flag_cell(c.row, c.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n made_move = True\n\n # we may have won with the flag above so test whether we're still playing\n # before further calculations\n if not game.lost_game and not game.won_game:\n # loop through all unrevealed, unflagged cells and see if we know it's safe to reveal\n for c in cells:\n if not c.revealed and not c.flagged and self.is_cell_safe(c, game.board):\n game.reveal_cell(c.row, c.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(1)\n made_move = True\n\n # assume we've made our best guesses and now have to guess randomly\n # this will prevent us from looping forever if no obvious moves are available\n if not made_move:\n unrevealed = [c for c in cells if not c.revealed and not c.flagged]\n if len(unrevealed) > 0:\n cell = random.choice(unrevealed)\n game.reveal_cell(cell.row, cell.col)\n if (game.test_did_win()):\n game.game_over()\n game.draw_board()\n if PAUSE == True:\n time.sleep(3)",
"def make_move(self,request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n user = game.user.get()\n if game.game_over:\n raise endpoints.ForbiddenException('Illegal action: Game is already over.')\n guess_Word = list(game.target)\n score = []\n [score.append('*') for i in range(len(guess_Word))]\n guess = request.guess.upper()\n # Validation of user entries\n if guess.isalpha() == False:\n msg = 'Please dont enter a number.'\n game.add_game_history(msg,guess)\n elif len(guess) != 1:\n msg = 'Please enter only one character.'\n game.add_game_history(msg,guess)\n # If user didn't get the correct answer. Substract 1.\n else:\n if guess not in guess_Word:\n game.attempts_remaining -=1\n if game.attempts_remaining > 0:\n [set_score_at(score,guess_Word,i) for i in game.correct]\n msg = \"Incorrect, you have %i attempts remaining. %s \" % (game.attempts_remaining, ''.join(score))\n game.add_game_history(msg,guess)\n else:\n msg = \"Game Over!. The answer was %s. Game Over \" % ''.join(guess_Word)\n user.loss +=1\n user.win_ratio = self.analyze_guess(user.win, user.loss)\n user.put()\n game.add_game_history(msg,guess)\n game.end_game()\n elif guess in guess_Word:\n [game.correct.append(i) for i in range(len(guess_Word)) if guess_Word[i] == guess and i not in game.correct]\n game.put()\n [set_score_at(score,guess_Word,i) for i in game.correct]\n msg = ''.join(score)\n game.add_game_history(msg,guess)\n if len(game.correct) == len(guess_Word):\n user.win +=1\n user.win_ratio = self.analyze_guess(user.win, user.loss)\n user.put()\n game.end_game(True)\n msg = \"You've won. The word was %s \" % ''.join(game.target)\n game.add_game_history(msg,guess)\n return game.to_form(msg)",
"def get_input(self, guess):\r\n print\r\n print \"The player guessed = \", guess\r\n result = self.process_player_input(guess)\r\n print result\r\n if ((self.remaining_guesses == 0) or ( result == self.correctguess_message)):\r\n # Start a new game, with same range\r\n self.init(self.num_range)\r\n return result",
"async def numguess(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.numguess', extra={'invoker': ctx.message.author.name})\r\n guess = None\r\n limDn = 0\r\n limUp = 100\r\n tries = 7\r\n secret = random.randint(1, 100)\r\n await ctx.send(\"\"\"Arr! I'm the Dread Pirate Roberts, and I have a secret!\r\nIt's a number from {} to {}. I'll give you {} tries.\r\nSend a number to guess it.\"\"\".format(limDn, limUp, tries))\r\n while guess != secret and tries > 0:\r\n await ctx.send(\"What's yer guess, matey?\")\r\n result = ''\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and re.match('[0-9]+', m.content))\r\n guess = int(guess.content)\r\n if guess == secret:\r\n break\r\n elif guess < limDn or guess > limUp:\r\n result += \"Out of range, ye swab!\\n\"\r\n elif guess < secret:\r\n result += \"Too low, ye scurvy dog!\\n\"\r\n limDn = guess\r\n elif guess > secret:\r\n result += \"Too high, landlubber!\\n\"\r\n limUp = guess\r\n tries -= 1\r\n result += \"Yer range is {} to {}; ye have {} tries left.\".format(limDn, limUp, tries)\r\n await ctx.send(result)\r\n if guess == secret:\r\n await ctx.send(\"Avast! Ye got it! Found my secret, ye did! With {} tries left!\".format(tries))\r\n else:\r\n await ctx.send(\"No more tries, matey! Better luck next time! The secret number was {}.\".format(secret))",
"def checkIfHitOrMiss(guess, opponents_board):\r\n print \"\\n\\n\\n\"\r\n opponents_slot = opponents_board[guess]\r\n if opponents_slot == \" \":\r\n print \"Miss!\"\r\n opponents_board[guess] = \"M\"\r\n return \"miss\"\r\n if opponents_slot == \"M\" or opponents_slot == \"H\":\r\n print \"You've already guess this! Try again.\"\r\n print \"\\n\"\r\n return \"guess again\"\r\n if opponents_slot == \"S\":\r\n print \"You've hit the ship!\"\r\n opponents_board[guess] = \"H\"\r\n return \"hit\"",
"def eval_guess(self, Guess):\n\n\t\t# pulls comparison from win check and assigns peg responses \n\n\t\t# returns a list to be in hint_response\n\n\t\t# displays as part of big display in view.\n\n\t\t\"\"\"Borrow the logic from win_check to implement eval_guess. Use variables right and wrong to \n\t\tevaluate. Right = Black peg. Wrong = no peg. \n\n\t\tWhite will be generated from a third loop to compare the entire list\"\"\"\n\n\n\t\tpass",
"def human_attack():\n while True:\n print_board(user_board)\n coordinate = (raw_input(\"Enter the coordinate you would like to attack (i.e. 'A1'): \")).upper()\n row, col = coordinate[0], coordinate[1:]\n if row in row_label and col in col_label:\n row_index = row_label.index(row)\n col_index = col_label.index(col) \n if user_board[row_index][col_index] == \"-\":\n response = get_response(coordinate)\n if response == \"H\":\n print \"\\nComputer: Hit. Lucky shot.\\n\"\n statistics['hits'] += 1\n statistics['miss_streak'] = 0\n statistics['total_guesses'] += 1\n if statistics['prev_guess'] == \"H\" or statistics['prev_guess'] == \"S\":\n statistics['hit_streak'] +=1\n if statistics['hit_streak'] > statistics['biggest_hit_streak']:\n statistics['biggest_hit_streak'] = statistics['hit_streak']\n statistics['prev_guess'] = \"H\"\n break\n # Update statistics and gameplay for a miss\n elif response == \"M\":\n print \"\\nComputer: HAH! YOU MISSED!! \\n\"\n statistics['misses'] += 1\n statistics['total_guesses'] += 1\n statistics['hit_streak'] = 0\n if statistics['prev_guess'] == \"M\":\n statistics['miss_streak'] +=1\n if statistics['miss_streak'] > statistics['biggest_miss_streak']:\n statistics['biggest_miss_streak'] = statistics['miss_streak']\n statistics['prev_guess'] = \"M\"\n break\n # Update statistics and gameplay when ship is sunk\n elif response == \"S\":\n print ship_info[board[row_index][col_index]]['name'] + \" destroyed!\"\n print \"Computer: You got that one, but you won't get the rest!!\\n\"\n statistics['hits'] += 1\n statistics['miss_streak'] = 0\n statistics['total_guesses'] += 1\n statistics['ships_destroyed'] += 1\n if statistics['prev_guess'] == \"H\" or statistics['prev_guess'] == \"S\":\n statistics['hit_streak'] +=1\n if statistics['hit_streak'] > statistics['biggest_hit_streak']:\n statistics['biggest_hit_streak'] = statistics['hit_streak']\n statistics['prev_guess'] = \"S\"\n break\n else:\n print \"Response returned bad data\"\n else:\n print \"You already guessed there! Try somewhere else.\\n\"\n else:\n print \"Please enter a valid coordinate.\\n\"",
"async def finish_game(self, ctx: commands.Context, score: dict):\n # Print out scoreboard, and winner, then give the winners their points\n scoreboard = sorted(score.items(), key=lambda x:x[1], reverse=True)\n result = \"**SCOREBOARD:**\\n\"\n condition = min(len(scoreboard), 9)\n\n # Find up to the 9th place\n # ranking_index[i] will give the index of the first person in the (i+1)th place\n ranking_index = [0]\n\n for i in range(1, len(scoreboard)):\n if scoreboard[i][1] == scoreboard[i-1][1]:\n continue\n ranking_index.append(i)\n if i >= condition: # Only want up to the 9th place\n break\n else:\n ranking_index.append(len(scoreboard))\n \n # Construct results of game and send to channel\n for i in range(len(ranking_index) - 1):\n result += (self.word_placing[i] \n + f\" place - {scoreboard[i][1]} points - \"\n + ', '.join(str(x[0]) for x in scoreboard[ranking_index[i]:ranking_index[i+1]])\n + '\\n')\n\n # Give points to everyone in first place\n for person in scoreboard[ranking_index[0]:ranking_index[1]]:\n self.data[ctx.guild.id]['score'][person[0].id] += 1\n\n result += \"Players in first place have earned one point each.\"\n await ctx.send(result)\n # Clear the database\n self.games_info[ctx.guild.id] = gamesDict()",
"def main():\n\n board = [[\".\"] * grid_size for i in range(grid_size)]\n ship_row = random_row(board)\n ship_col = random_col(board) - 1\n ships = 0\n turn = 0\n\n print_board(board)\n while turn < total_turns:\n\n guess_col = get_col()\n guess_row = get_row()\n\n print(\"-\" * 35)\n print(\n f\"You entered: {letter_and_index_conversion(guess_col, grid_size)}{guess_row} \\n\"\n )\n\n if guess_row == ship_row and guess_col == ship_col:\n board[guess_row - 1][guess_col - 1] = \"X\"\n print(\"Congratulations Captain! You got a hit!\")\n print_board(board)\n print(\"-\" * 35)\n turn += 1\n ships += 1\n ship_row = random_row(board)\n ship_col = random_col(board)\n if ships == 10:\n print(\"Congratulations Captain! You won!\")\n game_prompt = input(\"Restart? y/n: \\n\")\n game_restart(game_prompt)\n else:\n if (\n board[guess_row - 1][guess_col - 1] == \"X\" or\n board[guess_row - 1][guess_col - 1] == \"*\"\n ):\n print(\"You already guessed this one -_-\")\n print(\"-\" * 35)\n else:\n print(\"Your aim is WAY off! \\n\")\n board[guess_row - 1][guess_col - 1] = \"*\"\n print_board(board)\n print(\"-\" * 35)\n turn += 1\n if turn == total_turns:\n print(\"Game Over! You ran out of turns\")\n print(\"-\" * 35)\n game_prompt = input(\"Restart? y/n: \\n\")\n game_restart(game_prompt)\n\n print(f\"Turn {turn + 1} of {total_turns}\")\n print(f\"You have {10 - ships} ships left\")",
"def handle_turn(player_):\n if player_ == computer:\n print('\\nNow ', player_ + \"'s turn.\")\n position = block_to_win()\n if position == -1:\n position = check_if_computer_can_win()\n if position == -1:\n position = randrange(0, 9)\n while board[position] not in ['_']:\n position = randrange(0, 9)\n board[position] = computer\n display_board()\n if player_ == player:\n print('\\nNow ', player_ + \"'s turn.\")\n position = int(input('Choose a position from 1-9 (available): '))\n while position not in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n position = int(input('Wrong input. Choose a position from 1-9: '))\n position = position - 1\n while board[position] not in ['_']:\n position = int(input('Position is already taken. Choose from available positions: '))\n position = position - 1\n board[position] = player\n display_board()",
"def make_play(board, your_team, last_move):\n \"\"\"\n # a list containing all the entities from all the teams (either Monkeys or Queens)\n entities = board.get_entities()\n\n # just like entities, but into a map (dictionary). The key is a Vec2I object containing the position where you\n # want to get the entity. Use entity_map.get(Vec2I(x, y)) instead of entity_map[Vec2I(x, y)] if you want to avoid\n # raising a KeyError. Vec2I is used for the positions\n entity_map = board.get_entity_map()\n\n # List all the possible legal moves\n all_possible_moves = board.get_legal_moves(your_team)\n\n # You can iterate over all the entities like so:\n for entity in entities:\n position = entity.get_position()\n team = entity.get_team()\n print('Entity at position {}, is from team {}'.format(position, team))\n\n # You can get other information from the board functions.\n your_queen = board.search_queen(your_team)\n\n # There are only two teams, either Team.WHITE or Team.BLACK\n enemy_team = None\n if your_team == Team.WHITE:\n enemy_team = Team.BLACK\n else:\n enemy_team = Team.WHITE\n\n # you can do the same with this one liner\n enemy_team = Team.WHITE if your_team == Team.BLACK else Team.BLACK\n\n # get the enemy queen info from the board\n enemy_queen = board.search_queen(enemy_team)\n\n # Get the position of an entity, for example, with this queen\n # This can also work with Monkeys\n your_queen_position = enemy_queen.get_position()\n\n # Get the queen stack (number of remaining monkeys)\n your_queen_stack = your_queen.get_stack()\n\n # Print the position information, positions use the object Vec2I, defined in the file src/game/geo.py\n print(your_queen_position.x, your_queen_position.y)\n\n # Get all the possible moves for your queen\n possible_moves = your_queen.get_legal_moves()\n\n # We want to move our queen one cell down\n your_queen_x = your_queen_position.x\n your_queen_y = your_queen_position.y\n\n # Again, the game uses the Vec2I object for the positions\n new_position = Vec2I(your_queen_x, your_queen_y + 1)\n\n # As the board is a DEEP COPY of the real board, you can use it to forecast the future, for example, if you\n # want to list all your enemy moves after the move you want to select\n\n # As said, you have to return a tuple of Vec2I from this function, but to make a play you have to put those\n # two Vec2I in a Command object\n move_command = Command(your_queen_position, new_position)\n\n # Make a copy of the current game state\n current_board = board.copy_state()\n\n # Plays the command, now the board is just like you have played your decised move\n board.make_play(move_command)\n\n # Forecast all the legal moves from your opponent\n opponent_possible_responses = board.get_legal_moves()\n\n # We check if the new position is a legal move\n if new_position in possible_moves:\n # We make this play by returning the new_position\n return your_queen_position, new_position\n else:\n new_position = random.choice(possible_moves)\n return your_queen_position, new_position\n \"\"\"\n begin = time()\n np_board = board_translate(board,your_team)\n move = alpha_beta_search(np_board, your_team)\n print(\"Execution time: \" + str(time() - begin))\n move = (Vec2I(move[0][0], move[0][1]),Vec2I(move[1][0],move[1][1]))\n return move",
"def process_player_input(self,guess):\r\n # Step 1 - Catch faulty input, this is not topic of week 2\r\n\r\n # Tell the player the secret number :-)\r\n if (guess == \"Cheat\"):\r\n return \"Secret number = %d\" % (self.secret_number)\r\n \r\n # Step 2 - Verify player's input.\r\n user_input = self.verify_input(guess, self.num_range)\r\n if (type(user_input) != type(0)):\r\n # Verify_input() detected faulty input\r\n # Let's leave here with the error message\r\n return user_input\r\n\r\n # Decrease the number of still available tries\r\n if (self.remaining_guesses>0):\r\n self.remaining_guesses -= 1\r\n print \"Remaining number of tries = \", self.remaining_guesses\r\n \r\n # Step 3 - Give the player a hint for next guess\r\n if ((user_input > self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Lower!\"\r\n elif ((user_input < self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Higher!\"\r\n elif (user_input == self.secret_number):\r\n result_message = self.correctguess_message\r\n else:\r\n # As the guess was wrong and there is no further try anymore,\r\n # tell the player that he/she lost\r\n result_message = \"You tried too often than necessary, You lost!\"\r\n return result_message",
"def player_turn(self):\n if self.turn == \"x\":\n player_name = self.player_1\n else:\n player_name = self.player_2\n\n player_choice = input(f\"{player_name}, pick an open box to put an {self.turn} by entering the column, 'a', 'b',\"\n f\" or 'c' and the cell number, 1, 2, 3. (e.g. 'a1', 'b2' or 'c3'):\\n\").strip().lower()\n\n while player_choice not in valid_choices:\n print(f\"Invalid choice entered! Please submit an open box as listed below\")\n player_choice = input(\n f\"{player_name}, pick an open box to put an {self.turn} by entering the column, 'a', 'b', or \"\n f\"'c' and the cell number, 1, 2, 3. (e.g. 'a1', 'b2' or 'c3'):\\n\").strip().lower()\n\n self.update_board(player_choice)",
"def place_player(self, player, position):\n self.board[position] = player",
"def update_board(board, player):\n while True:\n try:\n position = int(input('Player %s where do you want to place your marker? ' % player))\n if position not in range(1,10):\n raise\n while board[position] != ' ':\n position = int(input('That position is already taken, try another. '))\n if position not in range(1, 10):\n raise\n break\n except (KeyboardInterrupt, SystemExit):\n sys.exit()\n except:\n print('Value must be 1-9, inclusive.')\n\n board[position] = player\n print('')\n print('')\n return board",
"def play(self):\n self.populateGrid()\n self.displayGrid()\n while not self.isGameOver():\n x1, y1 = self.getCoordinates(1) #input(\"Enter coordinates of first card \")\n card1 = self.grid[x1 - 1][y1 - 1]\n x2, y2 = self.getCoordinates(2) #input(\"Enter coordinates of second card \")\n card2 = self.grid[x2 - 1][y2 - 1]\n if (x1, y1) == (x2, y2): #coordinates must be different for the two cards\n print(\" ***Coordinates for the two cards must be different, try again***\")\n continue #continue with while loop\n if card1.rank == card2.rank: #an identical pair\n #print('identical pair')\n card1.face = 'up'\n card2.face = 'up'\n else:\n print(\" Not an identical pair. Found %d at (%d,%d) and %d at (%d,%d)\" \\\n % (card1.rank, x1, y1, card2.rank, x2, y2))\n self.displayGrid()",
"def make_move(self, request):\n game = get_by_urlsafe(request.urlsafe_key, Game)\n guess = request.guess.upper()\n if game.game_over:\n raise endpoints.NotFoundException('This Game Has Ended!')\n if len(guess) != 1 or guess < 'A' or guess > 'Z':\n raise endpoints.BadRequestException('Please enter a valid guess!')\n if guess in game.failed_tries or guess in game.answer:\n return game.to_form(message='You have already tried that!')\n else:\n if guess in game.target:\n game.history.append(guess)\n # Go through target and to add 'guess' character and remove '_'\n for ind, tar in enumerate(game.target):\n if guess == tar:\n game.answer.pop(ind)\n game.answer.insert(ind, guess)\n game.put()\n if game.answer == game.target:\n game.end_game(True)\n return game.to_form(message=\"You Won!!\")\n else:\n return game.to_form(message=\"You guessed correct!\")\n else:\n if game.attempts_remaining <= 1:\n game.attempts_remaining = 0\n game.end_game(False)\n return game.to_form(message=\"Game Over! The word was: %s\"\n % game.target)\n else:\n game.failed_tries.append(guess)\n game.history.append(guess)\n game.attempts_remaining -= 1\n game.put()\n return game.to_form(message='Uh-Oh. Try Again.')",
"def print_message(self):\r\n # print(\"Word : \" + game_instance.get_word())\r\n print(\"\\nCurrent guess : \"+self.current_word)\r\n print(\"\\ng:guess, t:tell me, l:letter guess, q:quit\")",
"async def game_loop(self):\n other, player = self.players\n while not self.boards[other.pn].defeated():\n player, other = other, player\n\n x, y = await player.guess(self.boards[player.pn].lines(), self.boards[other.pn].other_lines())\n\n result = self.boards[other.pn].potshot(x, y)\n if result == Board.MISS:\n await player.print(\"Splash!\")\n elif result == Board.NEAR:\n await player.print(\"KERSPLOOSH!!\")\n else:\n await player.print(\"BOOM!!!\")\n\n\n await player.print(\"The winner is {}\".format(player.pn))\n await other.print(\"The winner is {}\".format(player.pn))\n await player.exit(0)\n await other.exit(0)"
] | [
"0.69176424",
"0.6890433",
"0.6629641",
"0.66096646",
"0.6592822",
"0.65086484",
"0.64996165",
"0.6430656",
"0.64269215",
"0.63308525",
"0.6314818",
"0.6299123",
"0.62588865",
"0.62438065",
"0.6242464",
"0.6218959",
"0.61367047",
"0.61359924",
"0.61223763",
"0.6118807",
"0.61117506",
"0.61028266",
"0.6020755",
"0.6019902",
"0.6012549",
"0.6002581",
"0.5999079",
"0.5976844",
"0.5975505",
"0.59725225"
] | 0.71550655 | 0 |
finds the most common word from a list of words in a piece of text | def most_common_word(words, text):
word_frequency = {w:text.count(w) for w in words}
return sorted(words, key=word_frequency.get)[-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_frequent_words(words, most_frequent): \n \n # common_words = Counter(sorted(words))\n # print common_words\n common_words = Counter(sorted(words)).most_common(most_frequent)\n print (common_words )\n most_common_words = [w for w, w_count in common_words]\n return most_common_words",
"def most_common(data_word):\n stop_words = set(stopwords.words(\"english\"))\n\n #filter out stop words\n data_filtered = [word for word in data_word if word not in stop_words]\n cnt = Counter(data_filtered)\n\n #count most common words\n common = cnt.most_common(100)\n return common",
"def most_common(self, number=10):\n\n words_full_list = []\n\n for string in self.__corpora:\n words_full_list += string.split()\n\n print(Counter(words_full_list).most_common(number))",
"def most_common_word_in_web_page(words, url, user_agent=requests):\n response = user_agent.get(url)\n return most_common_word(words, response.text)",
"def most_repeating_word(words):\n return max(words, key=most_repeating_letter_count)",
"def get_most_common(self, lst):\n data = Counter(lst)\n mc = data.most_common(2) \n #if len(mc) == 1 or (mc[0][1] != (mc[1][1])):\n # return mc[0][0]\n #return \"AMB\"\n return data.most_common(1)[0][0]",
"def most_reducible(wordlist):\n\n\t# We create a memo for reducible words since is_reducible is \n\t# recursive. The keys are the words and the values are the \n\t# number of characters\n\tglobal reducible_words\n\treducible_words = dict()\n\treducible_words['a'], reducible_words['i'] = 1, 1\n\t\n\tword_dict = to_dictionary(wordlist)\n\tfor line in word_dict:\n\t\tis_reducible(line, word_dict)\n\n\t# Varible that will search the memo for the longest word\n\tcurrent_greatest = ''\n\tfor word in reducible_words:\n\t\tif reducible_words[word] > len(current_greatest):\n\t\t\tcurrent_greatest = word\n\tprint(current_greatest)",
"def most_consonants(words):\n cos = [[len(x)-num_vowels(x),x] for x in words]\n best_pair = max(cos)\n return best_pair[1]",
"def find_long_and_common_words(tokens):\n\n return sorted([word for word in set(tokens) if len(word) > 7 and FreqDist(tokens)[word] > 7])",
"def most_common_words(counts, n=-1):\n\n result = sorted(list(counts.items()), key=lambda x: x[1], reverse=True)\n\n if n == -1:\n return result\n else:\n return result[:n]",
"def task2(dictionary):\n word_count = Counter(dictionary)\n ans = word_count.most_common(10)\n print(ans)\n return ans",
"def count_word(self, most_num):\n with open(self.file_name, 'r') as f:\n data = f.read().lower()\n # characters and single quote not split\n words = re.split(r'[^\\w\\']+', data)\n logging.debug(words)\n most_cnts_words = Counter(words).most_common(most_num)\n print(most_cnts_words)",
"def comp_choose_word(hand, word_list):\n maxscore = 0\n maxword = \"\" \n for n in range(calculate_handlen(hand)):\n perms = get_perms(hand, n)\n for word in perms:\n wordscore = get_word_score(word, HAND_SIZE)\n if wordscore > maxscore:\n if word not in word_list:\n continue\n else:\n maxscore = wordscore\n maxword = word\n return maxword\n # TO DO...",
"def most_common_words(self,\r\n words,\r\n number=10,\r\n dictionaryobject=None,\r\n reverse=False):\r\n\r\n if not dictionaryobject:\r\n dictionaryobject = self.word_dict\r\n\r\n temp_words = how_common(entrylist=words,\r\n dictionaryobject=dictionaryobject,\r\n display=display)\r\n number = min([number,len(temp_words)])\r\n\r\n\r\n\r\n if not reverse:\r\n temp_words = temp_words[0:number]\r\n else:\r\n temp_words = temp_words[len(temp_words)-number:len(temp_words)]\r\n\r\n return [x_temp[0] for x_temp in temp_words]",
"def average_word(word_list):\n \n result = ''\n \n #Going through each character position\n word_length = len(word_list[0]['text'])\n for position in range(word_length):\n #How many times each character apears in this position\n frequencies = {}\n \n #Going through all strings\n for word in word_list:\n char = word['text'][position]\n \n if char in frequencies:\n frequencies[char]+= 1\n else:\n frequencies[char] = 1\n \n #Finding the most common character and adding to result\n mx = -1\n mxch = ' '\n for p in frequencies.items():\n if p[1] > mx:\n mxch = p[0]\n mx = p[1]\n result+= mxch\n \n return result",
"def maxcompChooseWord(hand, wordList, n):\n # 电脑给出最优解\n point = 0\n maxword = ''\n for word in wordList:\n newword1 = copy.deepcopy(word)\n newword2 = copy.deepcopy(word)\n if isValidWord(newword1, hand, wordList):\n p = getWordScore(newword2, n)\n if p > point:\n point = p\n maxword = word\n if point == 0:\n return None\n else:\n return maxword, point",
"def most_common_words(visual_fld, num_visualize):\n words = open(os.path.join(visual_fld, 'vocab.tsv'), 'r').readlines()[:num_visualize]\n words = [word for word in words]\n file = open(os.path.join(visual_fld, 'vocab_' + str(num_visualize) + '.tsv'), 'w')\n for word in words:\n file.write(word)\n file.close()",
"def find_largest_freq():\n words_list = {word for line in lines for word in line} # all words possible\n word_freqs = [(find_freq(word), word) for word in words_list] # list of tuples of words and their frequencies\n max_freq = max(word_freqs)\n return max_freq[0], max_freq[1]",
"def most_repeating_letter_count(word):\n return Counter(word.lower()).most_common(1)[0][1]",
"def common_words(self, n=10):\n # remove some really common words\n ignore = ['a', 'i', 'it', 'the', 'and', 'in', 'he', 'she', 'to', 'at', 'of', 'that', 'as', 'is', 'his', 'my', 'for', 'was', 'me', 'we', 'be', 'on', 'so']\n filtered = [s for s in self.text if s not in ignore and len(s) >=3]\n dat = Counter(filtered)\n return dat.most_common(n)",
"def predict_currword_given_lastword(first_word, second_word, top_n=10):\r\n return Counter(\r\n {\r\n w: c\r\n for w, c in model.WORD_TUPLES_MODEL[first_word.lower()].items()\r\n if w.startswith(second_word.lower())\r\n }\r\n ).most_common(top_n)",
"def common_words(filename):\n\n \"\"\"This function assumes that 'words' are strings of alphabetical characters, i.e. this function ignores punctuation\"\"\"\n\n return common_words_min(filename, 0)",
"def most_common(hist, excluding_stopwords=False):\n # imports stopword.txt and creates lists\n common_words = []\n stopwords = open(\"stopword.txt\")\n stopwords_list = []\n # turns stopwords.txt into list\n for line in stopwords:\n lin = line.strip()\n stopwords_list.append(lin)\n # sorts dictionary by frequency and returns list of tupples\n for word, freq in hist.items():\n if not excluding_stopwords or not word in stopwords_list:\n common_words.append((freq, word))\n common_words.sort(reverse=True)\n return common_words",
"def count_words(data, number_word_frequency_results=40):\n current_max_sentence_size = 0\n count_word_frequency = Counter()\n for entry in data:\n print (entry)\n terms_all = [term for term in entry]\n count_word_frequency.update(terms_all)\n return count_word_frequency.most_common(number_word_frequency_results)",
"def most_common(filename,n):\n\tfreq_dict = dictionary_creation(filename)\n\tt = []\n\tfor key, value in freq_dict.items():\n\t\tt.append((value,key))\n\t\tt.sort(reverse=True)\n\twordlist = []\n\tfreqlist = []\n\tprint n, 'most common words:'\n\tfor freq,word in t[0:n]:\n\t\tprint word,'\\t', freq\n\t\twordlist.append(word)\n\t\tfreqlist.append(freq)\n\treturn wordlist,freqlist",
"def commonWords(self):\n #utilize similar code used in stats.py\n exclude = set(('!', '.', '?'))\n freq = Stats()\n fullText = []\n #Parse email\n for x in range(self.getSCount()):\n #Simplify email into string of words separated by single space\n sString = self[x].lower()\n sString = ''.join(char for char in sString if char not in exclude)\n sString = sString.split()\n fullText = fullText + sString\n\n #Call findFreqDic() to find frequencies of words\n freqDict = freq.findFreqDic(fullText)\n\n #Analyze 10 words\n numTopic = 10\n \n #Find most and least common calling topNSort and bottomNSort\n mostCommon = freq.topNSort(freqDict, numTopic)\n leastCommon = freq.bottomNSort(freqDict, numTopic)\n \n most = list(mostCommon.keys())\n least = list(leastCommon.keys())\n \n return most, least",
"def common():\r\n full_song = \"\"\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n full_song += str(song_lyrics(song))\r\n split_lyrics = full_song.lower().split()\r\n counter = collections.Counter(split_lyrics)\r\n most_words = counter.most_common(50)\r\n return most_words",
"def main():\n\n word_count, classification, sums = Counter(), {}, Counter()\n for _ in range(int(input())):\n text = input().split()\n classification[text[0]] = text[2:]\n for word in text[2:]:\n word_count[word] = 1\n\n while True:\n try:\n for word in input().split():\n if word in word_count:\n word_count[word] += 1\n except EOFError:\n break\n\n for word in classification:\n for counting in classification[word]:\n sums[word] += word_count[counting]\n\n results = []\n max_freq = sums.most_common(1)[0][1]\n for word in sums:\n if sums[word] == max_freq:\n results.append(word)\n\n print('\\n'.join(sorted(results)))",
"def get_top_n_words(column, n):\r\n frequencies = Counter()\r\n column.str.lower().str.split().apply(frequencies.update)\r\n return frequencies.most_common(n)",
"def get_10_most_frequent_words(tokens):\n\n return FreqDist(word.lower() for word in tokens).most_common(10)"
] | [
"0.7875434",
"0.7755277",
"0.75181144",
"0.74954784",
"0.7432042",
"0.7310732",
"0.727135",
"0.7123813",
"0.7071797",
"0.70364475",
"0.7005478",
"0.6933868",
"0.690446",
"0.6851607",
"0.6835552",
"0.6816538",
"0.6816106",
"0.6806948",
"0.67923945",
"0.678575",
"0.6782919",
"0.67693096",
"0.676427",
"0.67626274",
"0.67602193",
"0.6723549",
"0.6711251",
"0.6675674",
"0.6654339",
"0.6650724"
] | 0.8461566 | 0