| Column | Type | Values |
| --- | --- | --- |
| query | string | lengths 9 – 9.05k |
| document | string | lengths 10 – 222k |
| metadata | dict | – |
| negatives | sequence | 30 items |
| negative_scores | sequence | 30 items |
| document_score | string | lengths 4 – 10 |
| document_rank | string | 2 classes |
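Each row pairs a query with one positive document and 30 mined negatives, and the per-row metadata declares a single triplet objective over (query, document, negatives). As a rough illustration of how the columns fit together, the sketch below turns one row into (query, positive, negative) training triplets. It is a minimal sketch only: it assumes a row is available as a Python dict keyed by the column names above, and the helper name and score-based filter are illustrative choices, not part of the dataset.

# Minimal sketch (not part of the dataset): build training triplets from one row.
# Assumes `row` is a dict keyed by the columns above; `row_to_triplets` and the
# margin filter are illustrative, not prescribed by the data.
def row_to_triplets(row, margin=0.0):
    positive_score = float(row["document_score"])
    triplets = []
    for negative, score in zip(row["negatives"], row["negative_scores"]):
        # keep negatives that score below the positive document by some margin
        if float(score) <= positive_score - margin:
            triplets.append((row["query"], row["document"], negative))
    return triplets

For the first row below, every negative score stays under the positive's 0.85959244, so with the default margin of 0 all 30 triplets would be kept.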
Calculates the log of the given TF value
def logTF(self, tf): return math.log(tf)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_tf_log(doc):\r\n tf = calc_tf(doc)\r\n max_tf = tf[max(tf, key=tf.get)]\r\n tf_log = {}\r\n for key, val in tf.items():\r\n tf_log[key] = (1 + math.log(val)) / (1 + math.log(max_tf))\r\n return tf_log", "def logp(self, value: TensorType, **kwargs) -> TensorType:", "def logp(self, value: TensorType, **kwargs) -> TensorType:", "def _loglike(self, y, f):\n ll = y * tf.log(pos(f)) + (1 - y) * tf.log(pos(1 - f))\n return ll", "def _loglike(self, y, f):\n ll = -0.5 * (tf.log(2 * self.variance * np.pi) +\n (y - f)**2 / self.variance)\n return ll", "def weight_log(val):\n return val * math.log(val)", "def _loglike(self, y, f):\n bincoef = tf.lgamma(self.n + 1) - tf.lgamma(y + 1) \\\n - tf.lgamma(self.n - y + 1)\n ll = bincoef + y * tf.log(pos(f)) + (self.n - y) * tf.log(pos(1 - f))\n return ll", "def _loglike(self, y, f):\n # sum along last axis, which is assumed to be the `tasks` axis\n ll = tf.reduce_sum(y * tf.log(pos(f)), axis=-1)\n return ll", "def log(self): # just use base?\n return Factor().__build( VarSet(self.v) , np.log(self.t) )", "def logloss(self,tple):\n feats = self.dataset.input_features\n res = 0\n cc = self.class_counts\n fc = self.feature_counts\n for c in range(self.num_classes):\n res += prod(fc[i][feat(tple)][c]\n for (i,feat) in enumerate(feats))/(cc[c]**(len(feats)-1))\n if res>0:\n return -math.log2(res/len(self.dataset.train))\n else:\n return float(\"inf\") #infinity", "def tflog10(x):\n numerator = tf.log(x)\n denominator = tf.log(tf.constant(10, dtype=numerator.dtype))\n return numerator / denominator", "def smart_log(self, value: float) -> float:\n if value > 0:\n return math.log(value, self.log_scale)\n elif value == 0:\n return 0\n elif value < 0:\n return -(math.log(abs(value), self.log_scale))", "def log(tensor, base=np.e):\n if base == np.e:\n return _elementary_op(tensor, np.log, lambda x: 1 / x)\n return log(tensor) / log(base)", "def log10(tensor):\n return log(tensor, base=10)", "def log2(tensor):\n return log(tensor, base=2)", "def log2(self):\n return Factor().__build( VarSet(self.v) , np.log2(self.t) )", "def log2_graph(x):\n return tf.log(x) / tf.log(2.0)", "def log2_graph(x):\n return tf.log(x) / tf.log(2.0)", "def ln(x):\n return log(x, const.e)", "def log_t(u, t):\n\n def _internal_log_t(u, t):\n return (u ** (1.0 - t) - 1.0) / (1.0 - t)\n\n return tf.cond(\n tf.math.equal(t, 1.0), lambda: tf.math.log(u),\n functools.partial(_internal_log_t, u, t))", "def log10(self):\n return Factor().__build( VarSet(self.v) , np.log10(self.t) )", "def log(x, eps=1e-7, name=None):\n return tf.log(x + eps, name=name)", "def logsumexp(value, dim=None, keepdim=False):\n if dim is not None:\n m, _ = torch.max(value, dim=dim, keepdim=True)\n value0 = value - m\n if keepdim is False:\n m = m.squeeze(dim)\n return m + torch.log(torch.sum(torch.exp(value0),\n dim=dim, keepdim=keepdim))\n else:\n m = torch.max(value)\n sum_exp = torch.sum(torch.exp(value - m))\n if isinstance(sum_exp, Number):\n return m + math.log(sum_exp)\n else:\n return m + torch.log(sum_exp)", "def logsumexp(value, dim=None, keepdim=False):\n if dim is not None:\n m, _ = torch.max(value, dim=dim, keepdim=True)\n value0 = value - m\n if keepdim is False:\n m = m.squeeze(dim)\n return m + torch.log(torch.sum(torch.exp(value0),\n dim=dim, keepdim=keepdim))\n else:\n m = torch.max(value)\n sum_exp = torch.sum(torch.exp(value - m))\n if isinstance(sum_exp, Number):\n return m + math.log(sum_exp)\n else:\n return m + torch.log(sum_exp)", "def get_logCRF(train, model):\n word = train[0]\n Y = 
train[1]\n char_count, _ = word.shape\n # calculating forward messages\n alpha = np.zeros((char_count, model.dimY))\n first_term = np.dot(word, model.getW(model.labels))\n second_term = model._T\n for i in range(1, char_count):\n sum_term = (first_term[i-1] + alpha[i-1]) + second_term\n alpha[i] = np.apply_along_axis(logsumexp_trick, 1, sum_term) \n # getting logZ from messages\n logZ = logsumexp_trick(first_term[char_count-1]+alpha[char_count-1])\n w_term = np.sum(model.getW(Y).transpose() * word) # $\\sum_{j=1}^m {W_{yj} . x_j}$\n t_term = np.sum(model.getT(Y[:-1], Y[1:])) #$T_{yj, yj+1}\n value = -logZ + w_term + t_term\n return value", "def logm(self, x):\n\n if K.backend() == 'theano':\n # construct theano tensor operation\n from theano.tensor.nlinalg import svd, diag\n from theano.tensor.elemwise import Elemwise\n from theano.scalar import log\n import theano.tensor as T\n # This implementation would be extremely slow. but efficient?\n u, d, v = svd(x)\n d += self.eps\n inner = diag(T.log(d))\n res = T.dot(u, T.dot(inner, v))\n return res\n else:\n from kyu.tensorflow.ops.svd_gradients import batch_matrix_log\n return batch_matrix_log(x, self.eps)", "def compute_log(tx, index_log, mean=[], std=[]):\n tx_new = np.log10(3+abs(tx[:,index_log]))\n return standardize(tx_new,mean,std)", "def Log(num):\n return math.log(float(num))", "def logistic(val):\n return 1.0 / (1.0 + np.exp(-val))", "def logistic(v: float) -> float:\n v = clip(v, -50, 50) # Improve numerical stability.\n return 1 / (1 + math.exp(-v))" ]
[ "0.7719589", "0.77123725", "0.77123725", "0.76386464", "0.7574258", "0.7372678", "0.72950846", "0.7153347", "0.71502346", "0.71078914", "0.70195645", "0.6958456", "0.6947913", "0.68994933", "0.6897144", "0.68653464", "0.68525743", "0.68525743", "0.6830809", "0.6821966", "0.68061656", "0.6778728", "0.6717029", "0.6717029", "0.6692339", "0.6688548", "0.66883683", "0.667105", "0.6652227", "0.66491556" ]
0.85959244
0
Calculates the okapiTF value from the given parameters.
def okapiTF(self, tf, dlen, avgdlen): return tf / (tf + 0.5 + 1.5 * (dlen/avgdlen))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate(self) -> float:", "def calculate_tf(self, book_dict, term):\n term_frequency = 0\n try:\n term_frequency = (\n book_dict[\"SanitizedText\"][term] / book_dict[\"TotalNoOfTerms\"]\n )\n except KeyError:\n print(\"Key Error, Term doesnt exist\")\n return 0\n except ZeroDivisionError:\n print(\"tf division by zero!\")\n return 0\n return term_frequency", "def computeTF(self):\n for word in self.dictionary:\n self.dictionary[word].setTF(self.getTotalTerms())", "def results():\n \n to_predict_list = request.form.to_dict() \n to_predict_list = list(to_predict_list.values()) \n to_predict_list = list(map(float, to_predict_list)) \n result = ValuePredictor(to_predict_list) \n if int(result)== 1: \n prediction ='Run Martha, or you\\'re gonna get the sugar.'\n else: \n prediction ='Go ahead and have another donut Martha, you\\'re all good.' \n return render_template(\"results.html\",\n year=datetime.now().year,\n prediction = prediction\n )", "def calc_okapi_tf(self, query, query_no, avg_doc_length):\n okapi_tf_scores = {}\n f_okapi_tf = open(\"Results/okapi_tf_output.txt\",'a')\n query_array = []\n ic = client.IndicesClient(self.es)\n\n analyzed_result = ic.analyze(index=\"ap_dataset\",analyzer=\"my_english\",body=query)\n token_length = len(analyzed_result['tokens'])\n for i in range(token_length):\n query_array.append(str(analyzed_result['tokens'][i]['token']))\n\n query_body = {\"query\":\n {\"function_score\": {\"query\": {\"match\": {\"text\": query}},\n \"functions\": [\n {\"script_score\":\n {\"script\": \"getOkapiTF\", \"lang\": \"groovy\",\n \"params\": {\"query\": query_array, \"field\": \"text\",\n \"avgLength\": avg_doc_length}}}],\n \"boost_mode\": \"replace\"}}, \"fields\":[\"stream_id\"]}\n\n okapi_result = self.es.search(index=\"ap_dataset\", doc_type=\"document\", size=self.search_size,\n analyzer=\"my_english\", body=query_body)\n result_size = len(okapi_result['hits']['hits'])\n\n rank = 1\n for i in range(result_size):\n doc_id = str(okapi_result['hits']['hits'][i]['_id'])\n score = okapi_result['hits']['hits'][i]['_score']\n if score != 0:\n f_okapi_tf.write(query_no + \" Q0 \" + doc_id + \" \" + str(rank) + \" \" + str(score) + \" Exp\\n\")\n okapi_tf_scores[doc_id] = score\n rank += 1\n f_okapi_tf.close()\n return okapi_tf_scores", "def value(self) -> float:", "def calc_tf_idf(idf, tf):\r\n tfidf = {}\r\n for key, val in tf.items():\r\n tfidf[key] = val * idf[key]\r\n return tfidf", "def compute_value(self, *args, **kwargs):\n\n return None", "def handle_calculate(self, text_input):\n self.output_text = f'{float(self.get_valid_value(text_input)) * MILE_TO_KM_FACTOR:.3f}'", "def test_model(parameters):\n if parameters is None:\n return \"No Value\"\n else:\n return round(modelo.predict_proba([parameters])[0]*100, 3)", "def p_emb(request) -> float:\n return request.param", "def BatageljBren_calc(TP, FP, FN, TN):\n try:\n return (FP * FN) / (TP * TN)\n except Exception:\n return \"None\"", "def operation_result():\n\n input1 = request.form['Input1'] \n input2 = request.form['Input2']\n input3 = request.form['Input3']\n input4 = request.form['Input4']\n input5 = request.form['Input5']\n input6 = request.form['Input6']\n \n try:\n token_price_a_initial = float(input1)\n token_price_b_initial = float(input2)\n token_price_a_future = float(input3)\n token_price_b_future = float(input4)\n token_a_pool_weight = float(input5)\n token_b_pool_weight = float(input6)\n if token_a_pool_weight + token_b_pool_weight == 1:\n\n r1 = token_price_a_future/token_price_a_initial \n 
r2 = token_price_b_future/token_price_b_initial\n\n impermanent_loss = ((r1**(token_a_pool_weight))*(r2**(token_b_pool_weight))\n /(r1*token_a_pool_weight + r2*token_b_pool_weight) - 1)*-100\n\n return render_template(\n 'calculator.html',\n result=impermanent_loss,\n calculation_success=True\n )\n\n except:\n return render_template(\n 'calculator.html',\n calculation_success=False\n )", "def EvaluateFields(self, *float, **kwargs):\n ...", "def ft(t):\r\n ft = t ** (1.0 / 3.0) if t > 0.008856 else 7.787 * t + 4 / 29\r\n return ft", "def compute_tf(doc_info, freq_dict_all):\n tf_scores = []\n\n for temp_dict in freq_dict_all:\n id = temp_dict['doc_id']\n\n for k in temp_dict['freq_dict']:\n temp = {\n 'doc_id': id,\n 'TF_Score': temp_dict['freq_dict'][k] / doc_info[id - 1]['doc_length'],\n 'key': k\n }\n\n tf_scores.append(temp)\n\n return tf_scores", "def _model(self, t, theta, period, tmpid):\n template = self.templates[tmpid]\n phase = (t / period - theta[2]) % 1\n return theta[0] + theta[1] * template(phase)", "def gtf(self):\n\t #if tank is empty, conductance is 0\n\t if self.tx <= 0:\n\t return 0.\n\t\t#returns 0.5, as a function of TAI\n\t else:\n\t return 0.5", "def f_tf(self, t, x, y, z):\n raise NotImplementedError", "def KendallTau_calc(TP, FP, FN, TN):\n try:\n n = TP + FP + FN + TN\n return (2 * (TP + TN - FP - FN)) / (n * (n - 1))\n except Exception:\n return \"None\"", "def value(self) -> float:\n ...", "def value(self) -> float:\n ...", "def value(self) -> float:\n ...", "def _calc_ft(Tci, Thi, Tco, Tho, N_shells) -> 'ft':\n if (Tco - Tci)/Tco < 0.01 or (Thi-Tho)/Tho < 0.01:\n return 1\n try:\n return ht.F_LMTD_Fakheri(Thi, Tho, Tci, Tco,\n shells=N_shells)\n except ValueError:\n return 0.6 # Accounts for worst case scenario", "def t(self,k,cosTheta,pk,c):\n pk = c.pkInterp(k)\n f2term = (self.tf21(0,1,2, k,cosTheta,pk,c)+self.tf21(1,2,0, k,cosTheta,pk,c)+self.tf21(2,0,1, k,cosTheta,pk,c)+ \\\n self.tf21(1,2,3, k,cosTheta,pk,c)+self.tf21(2,3,1, k,cosTheta,pk,c)+self.tf21(3,1,2, k,cosTheta,pk,c)+ \\\n self.tf21(2,3,0, k,cosTheta,pk,c)+self.tf21(3,0,2, k,cosTheta,pk,c)+self.tf21(0,2,3, k,cosTheta,pk,c)+ \\\n self.tf21(3,0,1, k,cosTheta,pk,c)+self.tf21(0,1,3, k,cosTheta,pk,c)+self.tf21(1,3,0, k,cosTheta,pk,c)) * 4.\n\n f3term = (self.tf31(M.array([0,1,2]),k,cosTheta,pk) + self.tf31(M.array([1,2,3]),k,cosTheta,pk) + \\\n self.tf31(M.array([2,3,1]),k,cosTheta,pk) + self.tf31(M.array([3,1,2]),k,cosTheta,pk)) * 6.\n\n #print cosTheta,f2term, f3term, ft2term+f3term\n return f2term + f3term", "def applyTF(h,tf):\n \n htf=h.Clone('{0}_tf'.format(h.GetName()))\n\n for xbin in xrange(0,h.GetNbinsX()+2):\n val=h.GetBinContent(xbin)\n valUnc=h.GetBinError(xbin)\n tfVal=tf.GetBinContent(xbin)\n tfValUnc=tf.GetBinError(xbin)\n htf.SetBinContent(xbin,val*tfVal)\n htf.SetBinError(xbin,ROOT.TMath.Sqrt((val*tfValUnc)**2+(valUnc*tfVal)**2))\n\n return htf", "def getMyValue(self):\n valueBV = 0.0\n valueCR = 0.0\n valueAL = 0.0\n valueEC = 0.0\n valueIA = 0.0\n factorAL = globals.cityCRGen/globals.cityALGen\n factorEC = globals.cityCRGen/globals.cityECGen\n factorIA = globals.cityCRGen/globals.cityIAGen\n ratio = self.strength/100.0\n valueCR += self.myDesign.costCR*ratio\n valueAL += self.myDesign.costAL*ratio\n valueEC += self.myDesign.costEC*ratio\n valueIA += self.myDesign.costIA*ratio\n valueBV += (valueCR +\n valueAL*factorAL +\n valueEC*factorEC +\n valueIA*factorIA) / 1000.0\n return (valueBV, valueCR, valueAL, valueEC, valueIA)", "def tvalues(self):\n return self.params / self.bse", "def 
tvalues(self):\n return self.params / self.bse", "def __v(pk: float, pna: float, pcl: float, pca: float) -> float:\n ex_ion = pk * ex_k + pna * ex_na + pcl * in_cl + pca * ex_ca\n in_ion = pk * in_k + pna * in_na + pcl * ex_cl + pca * in_ca\n v = r * t / f * np.log(ex_ion/in_ion) * 1000\n return v" ]
[ "0.5624942", "0.5566399", "0.55482346", "0.5489444", "0.5478475", "0.53710085", "0.5316732", "0.52735984", "0.52552027", "0.5215894", "0.5206082", "0.5185447", "0.5171882", "0.5163169", "0.51528233", "0.5145025", "0.513191", "0.5121332", "0.5108843", "0.5080807", "0.5073275", "0.5073275", "0.5073275", "0.5071594", "0.50635016", "0.50590485", "0.5038452", "0.50185597", "0.50185597", "0.4995276" ]
0.58334565
0
Call MyClass.class_mock_two(), but mock MyMock.class_mock_one.
def test_mock_a_class_func():
    print()
    myclass = mymodule.MyClass()
    myclass.class_mock_one = Mock()
    myclass.class_mock_one.return_value = 2
    xx = myclass.class_mock_two()
    print(xx)
    myclass.class_mock_one.assert_called_with()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def provider_and_mock_two(monkeypatch, provider_two, mock_inner_provider):\n mock_make_provider = mock.Mock(return_value=mock_inner_provider)\n monkeypatch.setattr(provider_two, 'make_provider', mock_make_provider)\n return provider_two, mock_inner_provider", "def provider_and_mock_one(monkeypatch, provider_one, mock_inner_provider):\n mock_make_provider = mock.Mock(return_value=mock_inner_provider)\n monkeypatch.setattr(provider_one, 'make_provider', mock_make_provider)\n return provider_one, mock_inner_provider", "def testGettingFunctionOnceAndCallingMultipleTimes(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\th = x.g.h\n\t\th(3, 4)\n\t\tc.setReturn(5)\n\t\th(6, 7, 8)\n\t\tc.setReturn(9)\n\t\tc.replay()\n\t\th = x.g.h\n\t\tself.failUnless(h(3, 4) == 5)\n\t\tself.failUnless(h(6, 7, 8) == 9)", "def test__run_one_single(self):\n\n # Set up\n class OneResource(BaseResource):\n\n def process(self, message):\n pass\n\n class OtherResource(BaseResource):\n\n def process(self, message):\n pass\n\n api = Mock()\n api.endpoint = 'http://an_endpoint'\n OneResource.init(api, 'one_route')\n OtherResource.init(api, 'other_route')\n\n session = ObjectId('57b599f8ab1785652bb879a7')\n a_request = Mock(context={'session': session})\n one_resource = OneResource(a_request)\n other_resource = OtherResource(a_request)\n\n one_resource._get_runnable = Mock(return_value=other_resource)\n other_resource.run = Mock()\n\n # Actual call\n one_resource._run_one('other_resource', {}, 1, False)\n\n # Asserts\n one_resource._get_runnable.assert_called_once_with('other_resource')\n other_resource.run.assert_called_once_with({}, False, None)", "async def test_throttle_on_two_methods(self):\n\n class Tester:\n \"\"\"A tester class for throttling.\"\"\"\n\n @Throttle(seconds=3)\n async def test1(self):\n \"\"\"Test function for throttle.\"\"\"\n return True\n\n @Throttle(seconds=5)\n async def test2(self):\n \"\"\"Test function for throttle.\"\"\"\n return True\n\n tester = Tester()\n now = time.time()\n now_plus_4 = now + 4\n now_plus_6 = now + 6\n\n self.assertEqual(await tester.test1(), True)\n self.assertEqual(await tester.test2(), True)\n self.assertEqual(await tester.test1(), None)\n self.assertEqual(await tester.test2(), None)\n\n with mock.patch(\"time.time\", return_value=now_plus_4):\n self.assertEqual(await tester.test1(), True)\n self.assertEqual(await tester.test2(), None)\n\n with mock.patch(\"time.time\", return_value=now_plus_6):\n self.assertEqual(await tester.test1(), None)\n self.assertEqual(await tester.test2(), True)", "def test_called_two(self):\n self.test_attribute.is_down = mock.Mock(return_value=False)\n second_att, second_sla = self.make_att_sla(43)\n second_att.is_down = mock.Mock(return_value=False)\n self.run_mock_analyzer((self.test_attribute, second_att))\n eq_(self.obj.analyze_attribute.call_args_list[0][0],\n (self.test_attribute, ))\n eq_(self.obj.analyze_attribute.call_args_list[1][0], (second_att, ))", "def testSecondMapper(self):\n self.rpc_mapper2.build_request(\n self.handler, Request1).AndReturn(self.request)\n\n def build_response(handler, response):\n output = '%s %s %s' % (response.integer_field,\n response.string_field,\n response.enum_field)\n handler.response.headers['content-type'] = (\n 'application/x-www-form-urlencoded')\n handler.response.out.write(output)\n self.rpc_mapper2.build_response(\n self.handler, mox.IsA(Response1)).WithSideEffects(build_response)\n\n self.mox.ReplayAll()\n\n self.handler.request.headers['Content-Type'] = 
'application/json'\n self.handler.handle('GET', '/my_service', 'method1')\n\n self.VerifyResponse('200', 'OK', '1 a VAL1')\n\n self.mox.VerifyAll()", "def test_unit_second_request_wiki_method(monkeypatch):\n test = requester.Request(\"fake 200 valid query\")\n\n def mock_get(*args, **kwargs):\n return MockResponse(\"fake response\")\n\n def mock_get_wiki_text(self, *args, **kwargs):\n pass\n\n def mock_get_wiki_coordinates(self, *args, **kwargs):\n pass\n\n monkeypatch.setattr(\n \"app.requester.Request.get_wiki_coordinates\",\n mock_get_wiki_coordinates)\n monkeypatch.setattr(\n \"app.requester.Request.get_wiki_text\", mock_get_wiki_text)\n monkeypatch.setattr(\"app.requester.requests.get\", mock_get)\n\n result = test.second_request_wiki(\"10052634\")\n assert result['query']['search'][0]['title'] == \"La La Land (film)\"", "def test_same_method_name_different_class(self):\n self.apple.take_then_give_back(self.cherry)\n apple_take_give_back_cherry_key = get_function_cache_key('method', 'tests.Fruit.take_then_give_back',\n (self.apple, self.cherry), {})\n self.assertExpectedKeyInCache(apple_take_give_back_cherry_key)\n\n self.celery.take_then_give_back(self.cherry)\n celery_take_give_back_cherry_key = get_function_cache_key('method', 'tests.Vegetable.take_then_give_back',\n (self.celery, self.cherry), {})\n self.assertExpectedKeyInCache(celery_take_give_back_cherry_key)\n\n self.assertNotEqual(apple_take_give_back_cherry_key, celery_take_give_back_cherry_key)", "def test_magic_mock():\n\n class Empty:\n pass\n\n empty = Empty()\n empty.abc = MagicMock(return_value=\"abc\") # type: ignore\n\n assert empty.abc(\"hi\") == \"abc\" # type: ignore\n empty.abc.assert_called_with(\"hi\") # type: ignore", "def test_decorator_only_calls_function_twice_when_supplied_different_args(self, _, mock_get_grams_protein):\n # Set qualname since internal function uses it\n mock_get_grams_protein.__qualname__ = 'tests.Meat.get_grams_protein'\n decorated_mock_grams_protein = cached(timeout=5*60)(mock_get_grams_protein)\n decorated_mock_grams_protein(self.chicken)\n # Call the function with different args to see if function will be called again\n decorated_mock_grams_protein(self.steak)\n self.assertEqual(mock_get_grams_protein.call_count, 2)", "def __call__(_mock_self, *args, **kwargs):\n \"\"\"\n when _real_mock() is called, _qmock.mock_calls.append()\n (ie: _MockCallsProxy.append()) will get the fully-qualified\n top-level mock.call object and forward it to\n _qmock._pop_mock_call_queue(). it will then get forwarded again\n to _CallQueue._pop() to validate it against the current expected\n call and return/raise the corresponding result. 
if the result\n wasn't an exception, _qmock will then assign the result back to\n _qmock._last_mock_result so it can be returned from here.\n\n this isn't thread-safe, but neither is mock.Mock, so whatever.\n \"\"\"\n _mock_self._real_mock(*args, **kwargs)\n return _mock_self._qmock._last_mock_result", "def test_assert_mock_called_once_with(self):\n mock_obj = mock.MagicMock(return_value='Pos vale')\n\n mock_obj(1, 2, 3, kwarg='kwarg_value')\n\n mock_obj.assert_called_once_with(1, 2, 3, kwarg='kwarg_value')", "def mock(self, base_cls=None):\n if base_cls:\n return self.mocker.mock(base_cls)\n return self.mocker.mock()", "def test_no_next_token_2calls(self):\n\n response1 = {'meta': {'result_count': 500, 'next_token': 1}}\n response2 = {'meta': {'result_count': 10}}\n\n wk = os.path.dirname(os.path.abspath(__file__))\n f = os.path.join(wk, \"search_tweets.config\")\n thing = SearchTweets(self.db, f)\n with patch.object(thing, '_SearchTweets__twitter_n_results', new_callable=PropertyMock(return_value=520)):\n with patch.object(thing, '_SearchTweets__connect_to_endpoint') as mock_method:\n with patch.object(thing, '_SearchTweets__multi_user', new_callable=PropertyMock(return_value=False)):\n with patch.object(thing, '_SearchTweets__twitter_users',\n new_callable=PropertyMock(return_value=[])):\n with patch.object(thing, '_SearchTweets__twitter_keyword',\n new_callable=PropertyMock(return_value=\"Eurovision\")):\n with self.assertLogs('SEARCH', level='DEBUG') as cm:\n with patch.object(thing, '_SearchTweets__save'):\n mock_method.side_effect = [response1, response2]\n thing.search()\n\n self.assertEqual(mock_method.call_count, 2)\n self.assertTrue(\"DEBUG:SEARCH:THERE ARE NO OTHER PAGE AVAILABLE. ALL TWEETS REACHED\" in cm.output)\n self.assertTrue(\"DEBUG:SEARCH:NO NEXT TOKEN IN RESPONSE:INTERRUPTING\" in cm.output)", "def test_assert_mock_called_once(self):\n mock_obj = mock.MagicMock(return_value='Pos vale')\n\n mock_obj(1, 2, 3, kwarg='kwarg_value')\n\n mock_obj.assert_called_once()", "def testPost(self):\n self.handler.handle = self.mox.CreateMockAnything()\n self.handler.handle('POST', '/my_service', 'method1')\n self.handler.handle('POST', '/my_other_service', 'method2')\n\n self.mox.ReplayAll()\n\n self.handler.post('/my_service', 'method1')\n self.handler.post('/my_other_service', 'method2')\n\n self.mox.VerifyAll()", "def test__call__(self):\n mock = Mock()\n factory = Factory(mock)\n factory()\n mock.assert_called_once_with()", "def testGet(self):\n self.handler.handle = self.mox.CreateMockAnything()\n self.handler.handle('GET', '/my_service', 'method1')\n self.handler.handle('GET', '/my_other_service', 'method2')\n\n self.mox.ReplayAll()\n\n self.handler.get('/my_service', 'method1')\n self.handler.get('/my_other_service', 'method2')\n\n self.mox.VerifyAll()", "def test__run_one_single_list(self):\n\n # Set up\n class OneResource(BaseResource):\n\n def process(self, message):\n pass\n\n class OtherResource(BaseResource):\n\n def process(self, message):\n pass\n\n api = Mock()\n api.endpoint = 'http://an_endpoint'\n OneResource.init(api, 'one_route')\n OtherResource.init(api, 'other_route')\n\n session = ObjectId('57b599f8ab1785652bb879a7')\n a_request = Mock(context={'session': session})\n one_resource = OneResource(a_request)\n other_resource = OtherResource(a_request)\n\n one_resource._get_runnable = Mock(return_value=other_resource)\n other_resource.run = Mock()\n\n # Actual call\n one_resource._run_one('other_resource', [{}], 1, False)\n\n # Asserts\n 
one_resource._get_runnable.assert_called_once_with('other_resource')\n other_resource.run.assert_called_once_with({}, False, None)", "def test_model_manager_will_return_same_instance_when_instantiated_many_times(self):\n # arrange, act\n # instantiating the model manager class twice\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n\n # loading the MLModel objects from configuration\n first_model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n first_model_object = first_model_manager.get_model(qualified_name=\"qualified_name\")\n second_model_object = second_model_manager.get_model(qualified_name=\"qualified_name\")\n\n # assert\n self.assertTrue(str(first_model_manager) == str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))", "def test_telnetrmq_after_execute(self, mocked):\n agentconf={}\n telnetconf={\"host\":\"telnet.lan\"}\n rmqconf={\"host\":\"rmq.lan\"}\n agent=TelnetRmqAgent(agentconf, telnetconf, rmqconf)\n\n #Setup generic mock for others methods wich are not tested here\n ignoredmocks=Mock()\n agent.telnetclient=ignoredmocks\n agent.rmqclient=ignoredmocks\n \n\n instance = mocked.return_value \n agent.after_execute()\n mocked.assert_called_with(agent)\n mocked.assert_called_with(agent)", "def test_affinity__with_self(mock_class):\r\n\r\n mock_class.return_value = mocks.mock_myanimelist_endpoint()\r\n\r\n affinity, shared = \\\r\n malaffinity.calculate_affinity(\"DUMMY_USER\", \"DUMMY_USER\")\r\n\r\n assert affinity == 100.0\r\n assert shared == len(mocks.DUMMY_LIST)", "def start_mocking(self):\n self.mock = True\n self.mock_dynamodb = mock_dynamodb2()\n self.mock_dynamodb.start()\n self.mock_ses = mock_ses()\n self.mock_ses.start()", "def test_rackspace_uploader_upload_correct_purgin_first_file(self, mock, mock2):\r\n with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:\r\n mycf.upload_file.return_value=True\r\n mycf.get_object.side_effect = True\r\n u = RackspaceUploader()\r\n u.init_app(self.flask_app)\r\n file = FileStorage(filename='test.jpg')\r\n err_msg = \"Upload file should return True\"\r\n assert u.upload_file(file, container='user_3') is True, err_msg\r\n calls = [call.get_container('user_3'),\r\n call.get_container().get_object().delete(),\r\n call.get_container().get_object('test.jpg')]\r\n print mycf.mock_calls\r\n mycf.assert_has_calls(calls, any_order=True)", "def testFirstMapper(self):\n self.rpc_mapper1.build_request(\n self.handler, Request1).AndReturn(self.request)\n\n def build_response(handler, response):\n output = '%s %s %s' % (response.integer_field,\n response.string_field,\n response.enum_field)\n handler.response.headers['content-type'] = (\n 'application/x-www-form-urlencoded')\n handler.response.out.write(output)\n self.rpc_mapper1.build_response(\n self.handler, mox.IsA(Response1)).WithSideEffects(build_response)\n\n self.mox.ReplayAll()\n\n self.handler.handle('POST', '/my_service', 'method1')\n\n self.VerifyResponse('200', 'OK', '1 a VAL1')\n\n self.mox.VerifyAll()", "def expect_and_return(self, mock_call, return_val):\n self.mocker.result(return_val)", "def test__run_one_many(self, gevent_mock, pool_class_mock):\n\n # Set up\n class OneResource(BaseResource):\n\n def process(self, message):\n pass\n\n class OtherResource(BaseResource):\n\n def process(self, message):\n pass\n\n api = Mock()\n api.endpoint = 'http://an_endpoint'\n OneResource.init(api, 'one_route')\n OtherResource.init(api, 'other_route')\n\n session = 
ObjectId('57b599f8ab1785652bb879a7')\n a_request = Mock(context={'session': session})\n one_resource = OneResource(a_request)\n other_resource = OtherResource(a_request)\n\n one_resource._get_runnable = Mock(return_value=other_resource)\n other_resource.run = Mock()\n\n pool_mock = Mock()\n pool_mock.spawn.side_effect = ['g1', 'g2', 'g3']\n pool_class_mock.return_value = pool_mock\n\n # Actual call\n messages = [{'message': 1}, {'message': 2}, {'message': 3}]\n one_resource._run_one('other_resource', messages, 3, False)\n\n # Asserts\n expected_calls = [call('other_resource')] * 3\n self.assertEqual(expected_calls, one_resource._get_runnable.call_args_list)\n\n pool_class_mock.assert_called_once_with(3)\n\n expected_calls = [\n call(other_resource.run, {'message': 1}, False, None),\n call(other_resource.run, {'message': 2}, False, None),\n call(other_resource.run, {'message': 3}, False, None),\n ]\n self.assertEqual(expected_calls, pool_mock.spawn.call_args_list)\n\n gevent_mock.wait.assert_called_once_with(['g1', 'g2', 'g3'])\n\n # run method thas NOT been called directly\n self.assertEqual(0, other_resource.run.call_count)", "def test_other(monkeypatch, bot, bot_arg, update):\n\n def fake_get_lyrics(*args):\n return str(args)\n\n bot.log_result('chat_id', fake_log)\n monkeypatch.setattr(bot, 'get_lyrics', fake_get_lyrics)\n\n other(bot_arg, update)\n msg = bot_arg.msg_log[0]\n assert repr(fake_log.song) in msg\n assert fake_log.source.__name__ not in msg", "def testFunctionCallWithNoReturnValueSpecified(self):\n\t\tc = Controller()\n\t\tx = c.mock(KlassBeingMocked)\n\t\tx.g(3, 4)\n\t\tc.replay()\n\t\tx.g(3, 4)\n\t\tc.verify()" ]
[ "0.6849225", "0.6707006", "0.62774473", "0.5856203", "0.5846057", "0.5818575", "0.5705756", "0.57026553", "0.5469705", "0.54603136", "0.53983986", "0.53659284", "0.5362481", "0.53552854", "0.53475523", "0.52593946", "0.52591026", "0.52479494", "0.5157788", "0.5140922", "0.5090578", "0.5056911", "0.50564396", "0.5021899", "0.5017006", "0.4996753", "0.49764264", "0.4966976", "0.49574557", "0.4956635" ]
0.7694529
0
Loads default and custom services for use within CORE.
def _load_services(self) -> None:
    # load default services
    self.service_errors = ServiceManager.load_locals()
    # load custom services
    service_paths = self.config.get("custom_services_dir")
    logger.debug("custom service paths: %s", service_paths)
    if service_paths is not None:
        for service_path in service_paths.split(","):
            service_path = Path(service_path.strip())
            custom_service_errors = ServiceManager.add_services(service_path)
            self.service_errors.extend(custom_service_errors)
    # load default config services
    self.service_manager.load_locals()
    # load custom config services
    custom_dir = self.config.get("custom_config_services_dir")
    if custom_dir is not None:
        custom_dir = Path(custom_dir)
        self.service_manager.load(custom_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_services(service_store):\n service_store.register_service(GetDrugStoreService)\n service_store.register_service(FuelLevelService)\n service_store.register_service(SetFuelLevelService)\n service_store.register_service(GetRobotPosition)\n service_store.register_service(SetRobotPosition)", "def init_services(self):\n service_prefix = rospy.get_name() + \"/\"\n\n self._request_components_serv = rospy.Service(service_prefix +\n 'list_components',\n ListComponents,\n self.get_components)\n self._request_fields_serv = rospy.Service(service_prefix +\n 'list_fields',\n ListFields,\n self.get_fields)\n self._request_values_serv = rospy.Service(service_prefix +\n 'request_values',\n RequestValues,\n self.get_values)\n self._unsubscribe_values_serv = rospy.Service(service_prefix +\n 'unsubscribe_values',\n UnsubscribeValues,\n self.unsubscribe_values)", "def setupSERVICES():\n services = Services()\n services.rest = setupREST()\n\n return services", "def addServices(self):\r\n self.addHendrix()\r\n\r\n if not self.options.get('global_cache') and not self.options.get('nocache'):\r\n self.addLocalCacheService()\r\n\r\n if self.is_secure:\r\n self.addSSLService()\r\n\r\n self.catalogServers(self.hendrix)", "def before_all(context):\n set_services(context)\n context.api = {}\n context.repositories = {}\n\n for service, location in context.services.items():\n url = urlparse.urlparse(location)\n api = API(url.scheme + '://' + url.netloc, async=False)\n try:\n context.api[service] = getattr(api, url.path.split('/')[2])\n except:\n context.api[service] = getattr(api, service)", "def _add_services(self):\n this_service = {'name': 'swift-proxy'}\n other_services = [\n {'name': 'percona-cluster'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'swift-storage'}\n ]\n super(SwiftProxyBasicDeployment, self)._add_services(this_service,\n other_services)", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n taas_consts.TAAS, self)", "def _add_services(self):\n # Services and relations which are present merely to satisfy\n # required_interfaces and workload status are not inspected.\n # Fix me. 
Inspect those too.\n this_service = {'name': 'neutron-openvswitch'}\n other_services = [\n {'name': 'nova-compute'},\n {'name': 'nova-cloud-controller'},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'neutron-api'},\n self.get_percona_service_entry(),\n ]\n if self._get_openstack_release() >= self.bionic_train:\n other_services.append({'name': 'placement'})\n super(NeutronOVSBasicDeployment, self)._add_services(this_service,\n other_services)", "def start_services(torconfig):\n for service in config.custom['services']:\n # load service\n try:\n service_mod = imp.load_module(\n service, *imp.find_module(service, [config.services_dir]))\n except ImportError:\n return log.err('Cannot import service %s' % service)\n except Exception as e:\n traceback.print_tb(sys.exc_info()[2])\n return log.err('Error loading service %s -\\n %s' % (service, e))\n\n service = getattr(service_mod, 'ServiceDescriptor', None)\n if not service:\n log.err('Unable to find class Service in ', repr(service_mod))\n continue\n\n # create hidden service\n add_service(torconfig, service())", "def _add_services(self):\n this_service = {'name': 'keystone'}\n other_services = [\n {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'}, # satisfy wrkload stat\n {'name': 'cinder'},\n ]\n super(KeystoneBasicDeployment, self)._add_services(this_service,\n other_services)", "def getDefaultServices():\n return Service.getDefaultServices()", "def _get_services(self):\n from googleapiclient.discovery import build as discovery_build\n from oauth2client.client import (\n GoogleCredentials,\n ApplicationDefaultCredentialsError,\n )\n from google.cloud import storage\n\n # Credentials must be exported to environment\n try:\n creds = GoogleCredentials.get_application_default()\n except ApplicationDefaultCredentialsError as ex:\n log_verbose_traceback(ex)\n raise ex\n\n # Discovery clients for Google Cloud Storage and Life Sciences API\n self._storage_cli = discovery_build(\"storage\", \"v1\", credentials=creds)\n self._compute_cli = discovery_build(\"compute\", \"v1\", credentials=creds)\n self._api = discovery_build(\"lifesciences\", \"v2beta\", credentials=creds)\n self._bucket_service = storage.Client()", "def initService(self):", "def getServices(self):\n pass", "def services(**kwargs):\n pass", "def _load_drivers(self):\n self.drivers, self.default_provider = service_base.load_drivers(\n 'L2GW', self)", "def _configure_services(self):\n keystone_config = {\n 'admin-password': 'openstack',\n 'admin-token': 'ubuntutesting'\n }\n swift_proxy_config = {\n 'zone-assignment': 'manual',\n 'replicas': '1',\n 'swift-hash': 'fdfef9d4-8b06-11e2-8ac0-531c923c8fae'\n }\n swift_storage_config = {\n 'zone': '1',\n 'block-device': 'vdb',\n 'overwrite': 'true',\n 'ephemeral-unmount': '/mnt'\n }\n pxc_config = {\n 'innodb-buffer-pool-size': '256M',\n 'max-connections': 1000,\n }\n configs = {\n 'keystone': keystone_config,\n 'swift-proxy': swift_proxy_config,\n 'swift-storage': swift_storage_config,\n 'percona-cluster': pxc_config,\n }\n super(SwiftProxyBasicDeployment, self)._configure_services(configs)", "def _add_services(self):\n this_service = {'name': '{{ metadata.package }}'}\n other_services = [\n {'name': 'mysql',\n 'location': 'cs:percona-cluster',\n 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'manila'}\n ]\n super(ManilaPluginCharmDeployment, self)._add_services(\n this_service, other_services)", "def 
deploy_common_services():\n put('./minion/*', '/etc/systemd/system', use_sudo=True)\n sudo('source /etc/environment')\n sudo('/opt/bin/substitute_private_ipv4.sh /etc/systemd/system/flannel.service')\n sudo('/opt/bin/substitute_private_ipv4.sh /etc/systemd/system/kubelet.service')\n\n sudo('systemctl enable /etc/systemd/system/flannel.service')\n sudo('systemctl enable /etc/systemd/system/docker.service')\n sudo('systemctl enable /etc/systemd/system/kube-proxy.service')\n sudo('systemctl enable /etc/systemd/system/kubelet.service')\n\n sudo('systemctl daemon-reload')\n\n sudo('systemctl start flannel')\n sudo('systemctl start docker')\n sudo('systemctl start kube-proxy')\n sudo('systemctl start kubelet')", "def _register_services(self, pipeline):\n\n pipeline.register_service(self._aprs_service)", "def get_services(**options):\r\n return {}", "def _configure_services(self):\n keystone_config = {\n 'admin-password': 'openstack',\n 'admin-token': 'ubuntutesting',\n 'preferred-api-version': self.keystone_api_version,\n }\n\n if self.git:\n amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY')\n\n reqs_repo = 'git://github.com/openstack/requirements'\n keystone_repo = 'git://github.com/openstack/keystone'\n if self._get_openstack_release() == self.trusty_icehouse:\n reqs_repo = 'git://github.com/coreycb/requirements'\n keystone_repo = 'git://github.com/coreycb/keystone'\n\n branch = 'stable/' + self._get_openstack_release_string()\n\n openstack_origin_git = {\n 'repositories': [\n {'name': 'requirements',\n 'repository': reqs_repo,\n 'branch': branch},\n {'name': 'keystone',\n 'repository': keystone_repo,\n 'branch': branch},\n ],\n 'directory': '/mnt/openstack-git',\n 'http_proxy': amulet_http_proxy,\n 'https_proxy': amulet_http_proxy,\n }\n keystone_config['openstack-origin-git'] = \\\n yaml.dump(openstack_origin_git)\n\n pxc_config = {\n 'dataset-size': '25%',\n 'max-connections': 1000,\n 'root-password': 'ChangeMe123',\n 'sst-password': 'ChangeMe123',\n }\n cinder_config = {\n 'block-device': 'None',\n }\n configs = {\n 'keystone': keystone_config,\n 'percona-cluster': pxc_config,\n 'cinder': cinder_config,\n }\n super(KeystoneBasicDeployment, self)._configure_services(configs)", "def __init_services(self, base_url, repository):\n self.rest_services = {}\n self.rest_services[\"protocol\"] = base_url+\"/protocol\"\n self.rest_services[\"repositories\"] = base_url+\"/repositories\"\n self.rest_services[\"repository\"] = base_url+\"/repositories/{}\".format(repository)\n self.rest_services[\"statements\"] = self.rest_services[\"repository\"]+\"/statements\"\n self.rest_services[\"contexts\"] = self.rest_services[\"repository\"]+\"/contexts\"\n self.rest_services[\"size\"] = self.rest_services[\"repository\"]+\"/size\"\n self.rest_services[\"transaction\"] = self.rest_services[\"repository\"]+\"/transactions\"", "def RegisterService():\n hooks.RegisterHook(SERVICE_NAME, 'file-exists', hook_class=HookForExists)\n hooks.RegisterHook(SERVICE_NAME, 'file-write',\n hook_class=HookForWriteAndTouch)\n hooks.RegisterHook(SERVICE_NAME, 'file-touch',\n hook_class=HookForWriteAndTouch)\n hooks.RegisterHook(SERVICE_NAME, 'file-get', hook_class=HookForGet)\n hooks.RegisterHook(SERVICE_NAME, 'list-files', hook_class=HookForListFiles)\n hooks.RegisterHook(SERVICE_NAME, 'list-dir', hook_class=HookForListDir)", "def service(self) -> interface.BaseService:\n for protocol in DEFAULT_PRIORITIES:\n service = self._config.get_service(protocol)\n if service:\n return service\n\n raise RuntimeError(\"no service 
(bug)\")", "def test_service_import(self):\n bundles = [\n \"pelix.ipopo.core\",\n \"pelix.rsa.remoteserviceadmin\",\n \"pelix.rsa.topologymanagers.basic\",\n \"pelix.rsa.providers.distribution.py4j\",\n \"samples.rsa.helloconsumer\",\n ]\n\n with use_karaf():\n # Start the framework\n fw = create_framework(\n bundles,\n {\"ecf.py4j.javaport\": 25333, \"ecf.py4j.pythonport\": 25334},\n )\n\n try:\n fw.start()\n bc = fw.get_bundle_context()\n\n for _ in range(10):\n # Check if we find the Hello world service\n svc_ref = bc.get_service_reference(\n \"org.eclipse.ecf.examples.hello.IHello\",\n \"(service.imported=*)\",\n )\n if svc_ref is not None:\n # Found the service reference: service imported\n break\n\n time.sleep(.5)\n else:\n # Service not found after 5 seconds\n self.fail(\"Py4J service not found\")\n finally:\n # Clean up the framework\n fw.delete(True)", "async def load_plugins(self):\n for plug in os.listdir('plugins'):\n if plug.startswith('.'):\n continue\n if not os.path.isdir('plugins/%s' % plug) or not os.path.isfile('plugins/%s/hook.py' % plug):\n self.log.error('Problem locating the \"%s\" plugin. Ensure CALDERA was cloned recursively.' % plug)\n exit(0)\n plugin = Plugin(name=plug)\n if await plugin.load():\n await self.get_service('data_svc').store(plugin)\n if plugin.name in self.config['plugins']:\n plugin.enabled = True\n for plugin in self.config['plugins']:\n plug = await self._services.get('data_svc').locate('plugins', match=dict(name=plugin))\n [await p.enable(self.get_services()) for p in plug]\n self.log.debug('Enabling %s plugin' % plugin)\n\n templates = ['plugins/%s/templates' % p.name.lower()\n for p in await self.get_service('data_svc').locate('plugins')]\n templates.append('templates')\n aiohttp_jinja2.setup(self.application, loader=jinja2.FileSystemLoader(templates))", "def update_services():\n\n upload_supervisor_conf()\n upload_nginx_conf()\n upload_gunicorn_conf()", "def _configure_services(self):\n keystone_config = {\n 'admin-password': 'openstack',\n 'admin-token': 'ubuntutesting',\n }\n manila_config = {\n 'default-share-backend': 'generic',\n }\n manila_generic_config = {\n 'driver-handles-share-servers': False,\n }\n configs = {\n 'keystone': keystone_config,\n 'manila': manila_config,\n 'manila-generic': manila_generic_config,\n }\n super(ManilaPluginCharmDeployment, self)._configure_services(configs)", "def init(app):\n from sirepo import feature_config\n from sirepo import simulation_db\n\n if _uri_to_route:\n return\n global _app\n _app = app\n for n in _REQUIRED_MODULES + feature_config.cfg.api_modules:\n register_api_module(importlib.import_module('sirepo.' + n))\n _init_uris(app, simulation_db)" ]
[ "0.7005679", "0.6346517", "0.63041884", "0.6099403", "0.60553855", "0.59759414", "0.5793857", "0.5764348", "0.5748033", "0.57183653", "0.57032514", "0.5648803", "0.5618671", "0.5606773", "0.5566092", "0.5551873", "0.5527294", "0.5522216", "0.5508008", "0.549366", "0.5394617", "0.53870445", "0.5386913", "0.53324616", "0.5319193", "0.5316185", "0.5313064", "0.52745354", "0.52648264", "0.52634585" ]
0.76303107
0
Check if emane is installed and load models.
def _load_emane(self) -> None:
    # check for emane
    path = utils.which("emane", required=False)
    self.has_emane = path is not None
    if not self.has_emane:
        logger.info("emane is not installed, emane functionality disabled")
        return
    # get version
    emane_version = utils.cmd("emane --version")
    logger.info("using emane: %s", emane_version)
    emane_prefix = self.config.get("emane_prefix", DEFAULT_EMANE_PREFIX)
    emane_prefix = Path(emane_prefix)
    EmaneModelManager.load_locals(emane_prefix)
    # load custom models
    custom_path = self.config.get("emane_models_dir")
    if custom_path is not None:
        logger.info("loading custom emane models: %s", custom_path)
        custom_path = Path(custom_path)
        EmaneModelManager.load(custom_path, emane_prefix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadmodels(self):\n for emane_model in EMANE_MODELS:\n logger.info(\"loading emane model: (%s) %s - %s\",\n emane_model, emane_model.name, RegisterTlvs(emane_model.config_type))\n self._modelclsmap[emane_model.name] = emane_model\n self.session.add_config_object(emane_model.name, emane_model.config_type,\n emane_model.configure_emane)", "def try_models(self):\n result = os.system(\"python try_models.py\")\n return result == 0", "def setup(self):\n with self.session._objects_lock:\n for obj in self.session.objects.itervalues():\n if nodeutils.is_node(obj, NodeTypes.EMANE):\n self.addobj(obj)\n if len(self._objs) == 0:\n return EmaneManager.NOT_NEEDED\n if emane.VERSION == emane.EMANEUNK:\n raise ValueError, \"EMANE version not properly detected\"\n # control network bridge required for EMANE 0.9.2\n # - needs to be configured before checkdistributed() for distributed\n # - needs to exist when eventservice binds to it (initeventservice)\n if emane.VERSION > emane.EMANE091 and self.session.master:\n values = self.getconfig(None, \"emane\",\n self.emane_config.getdefaultvalues())[1]\n otadev = self.emane_config.valueof(\"otamanagerdevice\", values)\n netidx = self.session.get_control_net_index(otadev)\n if netidx < 0:\n msg = \"EMANE cannot be started. \" \\\n \"Invalid OTA device provided: %s. Check core.conf.\" % otadev\n logger.error(msg)\n return EmaneManager.NOT_READY\n\n ctrlnet = self.session.add_remove_control_net(net_index=netidx, remove=False, conf_required=False)\n self.distributedctrlnet(ctrlnet)\n eventdev = self.emane_config.valueof(\"eventservicedevice\", values)\n if eventdev != otadev:\n netidx = self.session.get_control_net_index(eventdev)\n if netidx < 0:\n msg = \"EMANE cannot be started.\" \\\n \"Invalid Event Service device provided: %s. 
Check core.conf.\" % eventdev\n logger.error(msg)\n return EmaneManager.NOT_READY\n\n ctrlnet = self.session.add_remove_control_net(net_index=netidx, remove=False, conf_required=False)\n self.distributedctrlnet(ctrlnet)\n\n if self.checkdistributed():\n # we are slave, but haven\"t received a platformid yet\n cfgval = self.getconfig(None, self.emane_config.name,\n self.emane_config.getdefaultvalues())[1]\n i = self.emane_config.getnames().index(\"platform_id_start\")\n if cfgval[i] == self.emane_config.getdefaultvalues()[i]:\n return EmaneManager.NOT_READY\n self.setnodemodels()\n return EmaneManager.SUCCESS", "def check_models_ready(self):\n if not self.models_ready:\n raise RuntimeError(\"Models aren't loaded yet.\")", "def setup_models(self):\n pass", "def load_models(appname):\n return import_module('.models', appname)", "def test_load_model_method(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # adding the model\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model_object = None\n # accessing the MLModelMock model object\n try:\n model_object = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n print_tb(e)\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(model_object is not None)", "def test_get_model_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def load(self):\n\n path = Models.modelPath(\"stackexchange\")\n\n if os.path.isfile(os.path.join(path, \"config\")):\n print(f\"Loading model from {path}\")\n embeddings = Embeddings()\n embeddings.load(path)\n else:\n print(\"ERROR: loading model: ensure model is installed\")\n print(\n \"ERROR: Pre-trained model can be installed by running python -m codequestion.download\"\n )\n raise FileNotFoundError(f\"Unable to load codequestion model from {path}\")\n\n return embeddings", "def loadmodels(): # type: () -> None\n\n global accsearch, unaccsearch, eulamodel\n\n accsearch = [row for row in helpers.accExamples if helpers.goodsize(row['Clause Text'])]\n accsearch = [addtoks(row) for row in accsearch]\n unaccsearch = [row for row in helpers.unaccExamples if helpers.goodsize(row['Clause Text'])]\n unaccsearch = [addtoks(row) for row in unaccsearch]\n modeldir = helpers.getmodelfolder()\n accargs = buildbertargs()\n accargs.output_dir = modeldir\n eulamodel = ClassificationModel('roberta', modeldir, args=accargs, weight=[2, 1], use_cuda=False)", "def test_load_model_method_with_wrong_class_path(self):\n # arrange\n # instantiating the model manager class\n model_manager = ModelManager()\n\n # act\n # adding the model\n exception_raised = False\n exception_message = None\n # accessing the MLModelMock model object\n try:\n model_manager.load_model(\"sdf.sdf.sdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"No module named 'sdf'\")", "def import_all_model_modules():\r\n import 
brokerage.model\r\n # ensure that these imports don't get auto-deleted! they have side effects.\r\n brokerage.model", "def load_model(self):\n pass", "def startup(self):\n self.reset()\n r = self.setup()\n if r != EmaneManager.SUCCESS:\n return r # NOT_NEEDED or NOT_READY\n if emane.VERSIONSTR == \"\":\n raise ValueError(\"EMANE version not properly detected\")\n nems = []\n with self._objslock:\n if emane.VERSION < emane.EMANE092:\n self.buildxml()\n self.initeventservice()\n self.starteventmonitor()\n if self.numnems() > 0:\n # TODO: check and return failure for these methods\n self.startdaemons()\n self.installnetifs()\n else:\n self.buildxml2()\n self.initeventservice()\n self.starteventmonitor()\n if self.numnems() > 0:\n self.startdaemons2()\n self.installnetifs(do_netns=False)\n for e in self._objs.itervalues():\n for netif in e.netifs():\n nems.append((netif.node.name, netif.name,\n e.getnemid(netif)))\n if nems:\n emane_nems_filename = os.path.join(self.session.session_dir,\n \"emane_nems\")\n try:\n with open(emane_nems_filename, \"w\") as f:\n for nodename, ifname, nemid in nems:\n f.write(\"%s %s %s\\n\" % (nodename, ifname, nemid))\n except IOError:\n logger.exception(\"Error writing EMANE NEMs file: %s\")\n\n return EmaneManager.SUCCESS", "def load_models(self, episode):\n\t\tself.actor.load_state_dict(torch.load('/home/abhinavds/Documents/Projects/ToyModel/ckpt/rl/Models_genus/' + str(episode) + '_actor.pt'))\n\t\tself.critic.load_state_dict(torch.load('/home/abhinavds/Documents/Projects/ToyModel/ckpt/rl/Models_genus/' + str(episode) + '_critic.pt'))\n\t\tself.genus.load_state_dict(torch.load('/home/abhinavds/Documents/Projects/ToyModel/ckpt/rl/Models_genus/' + str(episode) + '_genus.pt'))\n\t\tutils.hard_update(self.target_actor, self.actor)\n\t\tutils.hard_update(self.target_critic, self.critic)\n\t\tprint ('Models loaded succesfully')", "def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")", "def test_get_model_metadata_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name=\"asdf\")\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")", "def load_model(app: FastAPI) -> None:\n\n logging.info(\"Starting up the application\")\n model_path = DATA_MODEL_PATH\n\n if model_path.exists():\n model = FraudDetection(model_path)\n app.state.model = model\n logging.info(f\"Loaded model {model_path}\")\n else:\n app.state.model = FraudDetection()\n logging.warning(f\"No existing model found in {model_path}\")", "def load_app(self, app_name, can_postpone=False):\n self.handled[app_name] = None\n self.nesting_level += 1\n app_module = import_module(app_name)\n try:\n models = import_module('.models', app_name)\n except ImportError:\n self.nesting_level -= 1\n # If the app doesn't have a models module, we can just ignore the\n # ImportError and return no models for it.\n if not module_has_submodule(app_module, 'models'):\n return None\n # But if the app does have a models module, we need to figure out\n # whether to suppress or propagate the error. 
If can_postpone is\n # True then it may be that the package is still being imported by\n # Python and the models module isn't available yet. So we add the\n # app to the postponed list and we'll try it again after all the\n # recursion has finished (in populate). If can_postpone is False\n # then it's time to raise the ImportError.\n else:\n if can_postpone:\n self.postponed.append(app_name)\n return None\n else:\n raise\n\n self.nesting_level -= 1\n if models not in self.app_store:\n self.app_store[models] = len(self.app_store)\n self.app_labels[self._label_for(models)] = models\n return models", "def _load_model(self):\r\n filepath = f\"Models/{self.environment.name}/q_network\"\r\n # Check if model exists in default directory\r\n if path.exists(filepath):\r\n self.q_network = NetworkBuilder._load_model(filepath)\r\n self.target_network = NetworkBuilder._load_model(filepath)\r\n print(\"Models loaded\")\r\n return True\r\n else:\r\n print(f\"'{filepath}' not found\")\r\n return False", "def test_get_model_method(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"qualified_name\")\n except Exception as e:\n exception_raised = True\n\n # assert\n self.assertFalse(exception_raised)\n self.assertTrue(type(model) is MLModelMock)", "def load_trainer(self):\n super().load_trainer()\n\n logging.info(\"[Server #%d] Loading a pre-trained model.\", os.getpid())\n self.trainer.load_model()", "def __getAndInitializeJEM(self):\n\n if self.__getFromCvmfsOrLocal():\n self.logger.debug('loaded JEM from cvmfs')\n return self.__import()\n else:\n self.logger.debug('failed to laod JEM from cvmfs, trying jem page')\n\n if self.__getFromJEMpage():\n self.logger.debug('loaded JEM from JEM page')\n return self.__import()\n else:\n self.logger.debug(\"couldn't load JEM\")\n return False", "def load_model():\n logger.info('load_model called')\n return 1", "def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")", "def test_model_import(self, iris_astore):\n model = register_model(\n iris_astore, self.MODEL_NAME, self.PROJECT_NAME, force=True\n )\n\n assert self.MODEL_NAME == model.name", "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. Start training......')", "def init_models(self):\n from ron import Application\n from ron.models.basemodel import BaseModel\n if self.models == None or not Application().db:\n return\n models_namespace = self.__namespace + \".models\" # TODO: allow customize this\n try:\n models_package = import_module(models_namespace)\n except:\n models_package = None\n if models_package:\n models_modules = self._get_package_modules(models_package)\n for model_name in models_modules:\n imported_model = import_module('.' 
+ model_name, package=models_namespace)\n for i in dir(imported_model):\n attribute = getattr(imported_model, i)\n if inspect.isclass(attribute) and issubclass(attribute, BaseModel):\n self.models.append(attribute)\n Application().db().database.create_tables(self.models)", "def load_model(self):\n try:\n self.model = Word2Vec.load(self.config[\"model_path\"])\n self.model.init_sims(replace=True)\n except Exception as e:\n print(e)\n print(\"error in model loading!\")", "def _ensure_gazebo_startup(self):\n model_base_name = \"__start_up_box__\"\n rospy.wait_for_message(self.param.topics.gazebo_models, ModelStates)\n i = 0\n while True:\n # Get all models currently listed in Gazebo's models\n # that contain the model_base_name in their name.\n boxes = list(b for b in self.model_names if model_base_name in b)\n\n if len(boxes) != 0:\n for b in boxes:\n self._remove_model(b)\n return\n i += 1\n self._spawn_model(f\"\"\"<model name=\"{model_base_name}{i}\"></model>\"\"\")\n rospy.sleep(0.1)\n rospy.wait_for_message(self.param.topics.gazebo_models, ModelStates)" ]
[ "0.7130563", "0.69357973", "0.6056981", "0.60229075", "0.59975207", "0.59605366", "0.5903529", "0.5884921", "0.57850355", "0.5753735", "0.5745536", "0.5728512", "0.5696617", "0.56890875", "0.568479", "0.5596342", "0.5584993", "0.5554426", "0.55042964", "0.5497591", "0.5494007", "0.5491591", "0.5481477", "0.5449091", "0.544562", "0.5411983", "0.54089695", "0.5407197", "0.5397618", "0.53946996" ]
0.8328525
0
Shutdown all CORE session.
def shutdown(self) -> None: logger.info("shutting down all sessions") while self.sessions: _, session = self.sessions.popitem() session.shutdown()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shutdown(self):\n # shutdown all known sessions\n for session in self.sessions.values():\n session.shutdown()\n\n # if we are a daemon remove pid file\n if self.config[\"daemonize\"]:\n pid_file = self.config[\"pidfile\"]\n try:\n os.unlink(pid_file)\n except OSError:\n logger.exception(\"error daemon pid file: %s\", pid_file)\n\n # remove server from server list\n CoreServer.remove_server(self)", "def at_server_shutdown(self):\n for session in self.sessions.all():\n session.sessionhandler.disconnect(session)", "def close_all(cls):\n for sess in cls._session_registry.values():\n sess.close()", "def shutdown(self):\n logging.info(\"Shutdown\")\n self._sessionmanager.Shutdown()", "def close_all(cls) -> None:\n\n close_all_sessions()", "def shutdown(self):\n\t\tCORE.info('The session is shutting down. Sending UMC modules an EXIT request (%d processes)' % len(self.__processes))\n\t\tfor module_name, process in self.__processes.items():\n\t\t\tCORE.info('Ask module %s to shutdown gracefully' % (module_name,))\n\t\t\treq = Request('EXIT', arguments=[module_name, 'internal'])\n\t\t\tprocess.request(req)", "def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)", "def _shutdown(self):\n self.logger.debug(\"Unregistering feature modules.\")\n for feature in self._features.values():\n try:\n if hasattr(feature.handle, \"module_unregister\"):\n self.eventloop.run_until_complete(feature.handle.module_unregister())\n except Exception:\n self.logger.exception(f\"Exception occurred while unregistering feature module '{feature.name}'.\")\n self.logger.debug(\"Unregistering protocol modules.\")\n for protocol in self._protocols.values():\n try:\n if hasattr(protocol.handle, \"module_unregister\"):\n self.eventloop.run_until_complete(\n protocol.handle.module_unregister(protocol.contexts, self._shutdown_reason)\n )\n except Exception:\n self.logger.exception(f\"Exception occurred while unregistering protocol module '{protocol.name}'.\")\n self.eventloop.run_until_complete(self.database.close())\n if len(self._db_connections) > 0:\n self.logger.debug(\"Cleaning up unclosed database connections\")\n for module in list(self._db_connections):\n self.eventloop.run_until_complete(self.database_disconnect(module))", "def tear_down_all(self):\n self.dut.send_expect(\"quit\", \"# \")\n time.sleep(2)\n self.dut.kill_all()", "def shutdown(self):", "def shutdown_all(self, now=False):", "def close_all_sessions() -> None:\n\n for sess in _sessions.values():\n sess.close()", "def shutdown_allcomponents(self):\n for key,obj in reversed(self.components.get_tuplelist()):\n obj.shutdown()", "def shutdown(self):\n ...", "def shutdown(self):\n\n pass", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "def shutdown(self) -> None:", "def shutdown(self) -> None:", "def close(self):\n self._core.close()", "async def shutdown(self, ctx):\n await self.bot.session.close()\n await self.bot.logout()", "def quit(self):\n \n if 'driver' in self.__dict__:\n self.driver.quit()\n if 'session' in self.__dict__:\n self.session.close()\n if 'conn' in self.__dict__:\n self.conn.close()", "def shutdown(self):\n self.action('shutdown')", "def close(self):\n for session in self.sessions.copy().values():\n session.close(SessionCloseErrorCode.SESSION_DIED)", "def shutdown():\n\n cmd = dict()\n cmd[\"type_\"] = \"shutdown\"\n cmd[\"name_\"] = \"all\"\n\n ## In case of the shutdown there 
will be no returned message to\n ## check the success.\n s = comm.send_and_receive_socket(cmd)\n\n s.close()", "def preShutdown(self):\r\n for user in self._users.values():\r\n user.destroy()", "def shutdown(self):\n self.exit_app()", "def shutdown(self):\t\r\n\t\tself.is_running = False\r\n\t\tfor connection in self.established_connection_list:\r\n\t\t\tconnection.send('The server has been shutdown adruptly by the server owner.\\n')\r\n\t\t\tconnection.socket_send()", "def _shutdown(self):" ]
[ "0.7054555", "0.7012447", "0.6973338", "0.69510627", "0.6917507", "0.6871635", "0.6789869", "0.67295724", "0.67286533", "0.6678548", "0.66759485", "0.6605788", "0.6588758", "0.6574293", "0.654621", "0.6505152", "0.6505152", "0.6505152", "0.6468864", "0.6468864", "0.64258343", "0.6401179", "0.63965183", "0.6363012", "0.6357274", "0.6351957", "0.6332015", "0.6331201", "0.6326368", "0.62650025" ]
0.77668494
0
Sets the snapshot_date of this ZacksAnalystRatingSnapshot.
def snapshot_date(self, snapshot_date): self._snapshot_date = snapshot_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rating_date(self, rating_date):\n\n self._rating_date = rating_date", "def set_rdate(self, rdate):\n self.__rdate = rdate", "def revision_date(self, revision_date):\n\n self._revision_date = revision_date", "def rating_start_date(self, rating_start_date):\n\n self._rating_start_date = rating_start_date", "def set_date(self, date):\n self.date = date\n return", "def set_date(self, date):\n self.date = date", "def election_date(self, election_date):\n\n self._election_date = election_date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n self._date = date", "def _date(self, _date):\n\n self.__date = _date", "def _date(self, _date):\n\n self.__date = _date", "def _date(self, _date: datetime):\n if _date is None:\n raise ValueError(\"Invalid value for `_date`, must not be `None`\") # noqa: E501\n\n self.__date = _date", "def acquisition_date(self, acquisition_date):\n\n self._acquisition_date = acquisition_date", "def set_date(self, date):\n self.data['date'] = date", "def date(self, date):\n if date is None:\n raise ValueError(\n \"Invalid value for `date`, must not be `None`\"\n ) # noqa: E501\n\n self._date = date", "def balance_date(self, balance_date):\n\n self._balance_date = balance_date", "def coverage_start_date(self, coverage_start_date):\n\n self._coverage_start_date = coverage_start_date", "def coverage_start_date(self, coverage_start_date):\n\n self._coverage_start_date = coverage_start_date", "def authorization_date(self, authorization_date):\n\n self._authorization_date = authorization_date", "def receipt_date(self, receipt_date):\n\n self._receipt_date = receipt_date", "def receipt_date(self, receipt_date):\n\n self._receipt_date = receipt_date", "def snapshot_metadata(self, snapshot_metadata):\n if snapshot_metadata is None:\n raise ValueError(\"Invalid value for `snapshot_metadata`, must not be `None`\")\n\n self._snapshot_metadata = snapshot_metadata", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date", "def start_date(self, start_date):\n\n self._start_date = start_date" ]
[ "0.6246575", "0.5952784", "0.5949085", "0.58533394", "0.5798545", "0.57474214", "0.57247525", "0.5682116", "0.5682116", "0.5682116", "0.5682116", "0.5682116", "0.5641901", "0.55963737", "0.55963737", "0.55558777", "0.55346274", "0.54616004", "0.54373753", "0.5357031", "0.5351021", "0.5351021", "0.5346992", "0.53329664", "0.53329664", "0.52931523", "0.5244468", "0.5244468", "0.5244468", "0.5244468" ]
0.82132596
0
Sets the rating_date of this ZacksAnalystRatingSnapshot.
def rating_date(self, rating_date): self._rating_date = rating_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rating_start_date(self, rating_start_date):\n\n self._rating_start_date = rating_start_date", "def rating_end_date(self, rating_end_date):\n\n self._rating_end_date = rating_end_date", "def _update_rating_history(self, rating: float, date: Union[str, float]):\n self.rating_history.append((date, rating))", "def update_rating(self, new_rating: float, date: Union[str, float]):\n self.logger.info(f\"Updating rating for {self.id}: {self.rating:.3f} --> {new_rating:.3f}\")\n self.rating = new_rating\n self._update_rating_history(rating=new_rating, date=date)", "def set_rdate(self, rdate):\n self.__rdate = rdate", "def rating(self, rating):\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating > 5): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value less than or equal to `5`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n rating is not None and rating < 1): # noqa: E501\n raise ValueError(\"Invalid value for `rating`, must be a value greater than or equal to `1`\") # noqa: E501\n\n self._rating = rating", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def date(self, date):\n\n self._date = date", "def snapshot_date(self, snapshot_date):\n\n self._snapshot_date = snapshot_date", "def set_date(self, date):\n self.date = date\n return", "def set_date(self, date):\n self.date = date", "def rating_id(self, rating_id: int):\n\n self._rating_id = rating_id", "def date(self, date):\n self._date = date", "def _date(self, _date: datetime):\n if _date is None:\n raise ValueError(\"Invalid value for `_date`, must not be `None`\") # noqa: E501\n\n self.__date = _date", "def _date(self, _date):\n\n self.__date = _date", "def _date(self, _date):\n\n self.__date = _date", "def ratings(self, ratings):\n\n self._ratings = ratings", "def ratings(self, ratings):\n\n self._ratings = ratings", "def date(self, date):\n if date is None:\n raise ValueError(\n \"Invalid value for `date`, must not be `None`\"\n ) # noqa: E501\n\n self._date = date", "def update_date(self, update_date):\n\n self._update_date = update_date", "def update_date(self, update_date):\n\n self._update_date = update_date", "def set_exchange_rate_date(self, exchange_rate_date):\n self.set_value_into_input_field(self.exchange_rate_date_locator, exchange_rate_date)", "def set_date(self, date):\n self.data['date'] = date", "def get_rating_as_of_date(\n self,\n date: Union[str, float],\n default_rating: float = DEFAULT_INITIAL_RATING\n ) -> float:\n history_df = DataFrame(self.rating_history, columns=[\"date\", \"rating\"])\n\n # only select one entry per distinct date\n history_df[\"r\"] = history_df.groupby([\"date\"]).rank(method=\"first\", ascending=False)\n history_df = history_df[history_df[\"r\"] == 1]\n\n # get the rating for the latest date\n history_df = history_df[history_df[\"date\"] <= date].sort_values(\"date\", ascending=False)\n if history_df.shape[0] == 0:\n return default_rating\n else:\n return history_df.reset_index().loc[0, \"rating\"]", "def revision_date(self, revision_date):\n\n self._revision_date = revision_date", "def add_review_date(self, doc, reviewed):\n if len(doc.reviews) != 0:\n if not self.review_date_set:\n self.review_date_set = True\n date = utils.datetime_from_iso_format(reviewed)\n if date is not None:\n doc.reviews[-1].review_date = date\n return True\n else:\n raise 
SPDXValueError('Review::ReviewDate')\n else:\n raise CardinalityError('Review::ReviewDate')\n else:\n raise OrderError('Review::ReviewDate')", "def balance_date(self, balance_date):\n\n self._balance_date = balance_date" ]
[ "0.7667483", "0.6481708", "0.6421852", "0.6388244", "0.62596005", "0.6228435", "0.59492034", "0.59492034", "0.59492034", "0.59492034", "0.59492034", "0.59451556", "0.5936428", "0.5929696", "0.5898084", "0.5878672", "0.5782881", "0.5771359", "0.5771359", "0.5727382", "0.5727382", "0.5691145", "0.5677572", "0.5677572", "0.56542224", "0.5623758", "0.5596601", "0.5541087", "0.55315113", "0.5457952" ]
0.835676
0
Sets the mean of this ZacksAnalystRatingSnapshot.
def mean(self, mean): self._mean = mean
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_average(self, avg, num_samples):\n self._average = avg * num_samples\n self.num_samples = num_samples", "def mean(self, avg=True):\n if not self.fp_init:\n if not avg:\n return self._calc_mean(self.f, self.a, self.b, self.Z)\n else:\n return self._calc_mean(self.f_avg, self.a_avg, self.b_avg,\n self.Z_avg)\n return self._mean if not avg else self._mean_avg", "def mean_radius(self, mean_radius):\n\n self._mean_radius = mean_radius", "def averaging(self, value: int):\n self._averaging = value\n\n self.events.averaging()\n self._update_avg()\n\n self.refresh()", "def update_mean_user_rating(self):\n self.mean_user_rating = self.ratings.groupby(['user_id'])['rating'].mean().reset_index()", "def mean(self):\n return self._lift(\"mean\")", "def update_mean_movie_rating(self):\n self.mean_movie_rating = self.ratings.groupby(['movie_id'])['rating'].mean().reset_index()", "def mean(self):\n return self._mean", "def mean(self):\n return self._mean", "def mean(self):\n return self._summarize(lambda c: c.mean)", "def reset_mean(self,new_mean):\n self.mean = new_mean\n return", "def reset_mean(self,new_mean):\n self.mean = new_mean\n return", "def set_mean_face(mean_face):\n View.__mean_face = mean_face", "def get_mean(self):\n self.meanval = np.mean(self.adulist)", "def mean(self):\n return self.vmean", "def average_rating(self):\n reviews = self.gamereview_set.all()\n\n try:\n return mean([ review.rating for review in reviews ])\n\n except StatisticsError:\n return None", "def getMean(self):\n return self.mean", "def mean(self):\n return self._mean_func", "def mean(self):\n return self.sum / self.sum_weights", "def mean(self):\n return self.aggregate(np.mean)", "def average(self):\n return self.properties.get('average')", "def with_population_mean(self, mean):\n\t\tself.variables['population_mean'] = mean\n\t\treturn self", "def global_mean(self):\n if self._global_mean is None:\n self._global_mean = np.mean([r for (_, _, r) in\n self.all_ratings()])\n\n return self._global_mean", "def mean(self):\n\n return self._reduce_for_stat_function(F.mean, only_numeric=True)", "def mean_value(self):\n\n return self._system.mean()", "def sample_mean(self, x_dict={}):\n raise NotImplementedError()", "def update_rating_average(self, rating):\n self.num_ratings += 1\n self.rating_total += rating\n self.save(update_fields=[\"num_ratings\", \"rating_total\"])\n self.average_rating = int(round(self.rating_total/self.num_ratings))\n self.save(update_fields=[\"average_rating\"])\n return", "def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)", "def mean(self) -> float:\n return self._data.mean()", "def mean(self):\r\n return np.mean(self.data_array)" ]
[ "0.6930181", "0.67363703", "0.6655305", "0.66153383", "0.6466718", "0.646478", "0.6442922", "0.6403945", "0.6403945", "0.63922215", "0.6262132", "0.6262132", "0.6237978", "0.62258685", "0.6158235", "0.6155664", "0.6135507", "0.60126746", "0.598887", "0.5976859", "0.5922096", "0.59210557", "0.59006447", "0.5899764", "0.5878244", "0.5846228", "0.5830576", "0.5830168", "0.58048385", "0.5791693" ]
0.7557623
0
Sets the percentile of this ZacksAnalystRatingSnapshot.
def percentile(self, percentile): self._percentile = percentile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_rating(self, value):\n try:\n self._rating = float(value)\n except ValueError:\n pass", "def percentile(self, pct):\n return percentile(self.results, pct, interpolation='nearest')", "def surprise_percent(self, surprise_percent):\n\n self._surprise_percent = surprise_percent", "def set_percent(self, percent):\n self.percent = percent\n self.set_bars()", "def setPercent(*args):", "def setPercent(*args):", "def test_set_clip_precentile_update(self):\n data = np.array([1., 2., 3.])\n self.mode.set_clip_percentile(10, 90)\n result = self.mode.get_bounds(data)\n self.mode.set_clip_percentile(20, 80)\n result = self.mode.get_bounds(data)\n np.testing.assert_array_almost_equal(result,\n np.percentile(data, [20, 80]))", "def _adjust_screening_percentile(screening_percentile, mask_img, verbose=0):\n original_screening_percentile = screening_percentile\n # correct screening_percentile according to the volume of the data mask\n mask_volume = _get_mask_volume(mask_img)\n if mask_volume > 1.1 * MNI152_BRAIN_VOLUME:\n warnings.warn(\n \"Brain mask is bigger than the volume of a standard \"\n \"human brain. This object is probably not tuned to \"\n \"be used on such data.\",\n stacklevel=2,\n )\n elif mask_volume < 0.005 * MNI152_BRAIN_VOLUME:\n warnings.warn(\n \"Brain mask is smaller than .5% of the volume \"\n \"human brain. This object is probably not tuned to \"\n \"be used on such data.\",\n stacklevel=2,\n )\n\n if screening_percentile < 100.0:\n screening_percentile = screening_percentile * (\n MNI152_BRAIN_VOLUME / mask_volume\n )\n screening_percentile = min(screening_percentile, 100.0)\n # if screening_percentile is 100, we don't do anything\n\n if verbose > 1:\n print(\n f\"Mask volume = {mask_volume:g}mm^3 = {mask_volume / 1000.0:g}cm^3\"\n )\n print(\n \"Standard brain volume \"\n f\"= {MNI152_BRAIN_VOLUME:g}mm^3 \"\n f\"= {MNI152_BRAIN_VOLUME / 1.0e3:g}cm^3\"\n )\n print(\n f\"Original screening-percentile: {original_screening_percentile:g}\"\n )\n print(\n f\"Volume-corrected screening-percentile: {screening_percentile:g}\"\n )\n return screening_percentile", "def SetValue(self, percent):\n pass", "def test_set_clip_precentile_update_data(self):\n data = np.array([1., 2., 3.])\n self.mode.set_clip_percentile(10, 90)\n result = self.mode.get_bounds(data)\n data = data + 3\n result = self.mode.get_bounds(data)\n np.testing.assert_array_almost_equal(result,\n np.percentile(data, [10, 90]))", "def test_set_clip_precentile(self):\n data = np.array([[1., 2., 3.], [1., 2., 3.]])\n self.mode.set_clip_percentile(10, 90)\n result = self.mode.get_bounds(data)\n np.testing.assert_array_almost_equal(result,\n np.percentile(data, [10, 90]))", "def get_percentile(self, q):\n return None", "def get_percentile(self, q):\n return None", "def percentage(self, percentage):\n\n self._percentage = percentage", "def percentage(self, percentage):\n\n self._percentage = percentage", "def percentage(self, percentage):\n\n self._percentage = percentage", "def percentage(self, percentage):\n\n self._percentage = percentage", "def percentile_normalization(self,I,perc=99.):\n # first zero out negative values\n I =I - I.min()\n np.clip(I, 0, None, out=I)\n # then normalize the 99th percentile\n percI = np.percentile(I, perc)\n #np.clip (I,None,percI,out=I)\n if percI == 0:\n print('Cannot normalize based on percentile; as 99-th percentile is 0. 
Ignoring normalization')\n return I\n else:\n I = I / percI * perc/100.\n return I", "def set_limits_percent(self, percent=95):\n zmin = np.nanmin(self.pixels.get_array())\n zmax = np.nanmax(self.pixels.get_array())\n dz = zmax - zmin\n frac = percent / 100.0\n self.autoscale = False\n self.set_limits_minmax(zmin, zmax - (1.0 - frac) * dz)", "def estimate_median(self):\n return self.estimate_percentile(0.5)", "def percentile_normalization(data: np.ndarray, percentile: int = 1) -> np.ndarray:\n\n min_percentile = np.percentile(data, percentile)\n max_percentile = np.percentile(data, 100 - percentile)\n\n # limit maximum intensity of data by max_percentile\n data[data >= max_percentile] = max_percentile\n\n # limit minimum intensity of data by min_percentile\n data[data <= min_percentile] = min_percentile\n\n return data", "def save_loss_percentile(\n col1,\n sensor_name,\n percentile=99.5,\n file_path=\"./test_env_loss_percentiles/\",\n):\n\n file_name = sensor_name + \"_loss_percentile.pkl\"\n\n loss_percentile = np.percentile(col1, percentile)\n\n dump(loss_percentile, open(file_path + file_name, \"wb\"))\n\n return loss_percentile", "def percentile(self, percentile):\n assert(percentile >= 0 and percentile <= 100)\n assert(self.kind in [\"exponential\", \"linear\", \"enumerated\", \"boolean\"])\n\n fraction = percentile/100\n to_count = fraction*self.buckets.sum()\n percentile_bucket = 0\n\n for percentile_bucket in range(len(self.buckets)):\n freq = self.buckets.values[percentile_bucket]\n if to_count - freq <= 0:\n break\n to_count -= freq\n\n percentile_lower_boundary = self.buckets.index[percentile_bucket]\n percentile_frequency = self.buckets.values[percentile_bucket]\n\n if percentile_bucket == len(self.buckets) - 1 or percentile_frequency == 0:\n return percentile_lower_boundary\n\n width = self.buckets.index[percentile_bucket + 1] - self.buckets.index[percentile_bucket]\n return percentile_lower_boundary + width*to_count/percentile_frequency", "def median(self, name, **kwargs):\n data = self.get(name,**kwargs)\n return np.percentile(data,[50])", "def get_statistics_percentile(self,table,field):\n dict = {}\n for x in xrange(1,11):\n dict[x] = db.session.execute(\"select statistics_viewCount as percentile from meta order by percentile asc limit 1 OFFSET 19346*\"+str(x)+\"/10-1\").first().percentile", "def percent_pf(self, percent_pf):\n\n self._percent_pf = percent_pf", "def __init__(self, quantile):\n self.quantile = quantile\n if self.quantile < 0 or self.quantile > 1:\n verif.util.error(\"Quantile must be between 0 and 1\")", "def set_limits_percent(self, percent=95):\n low = np.nanmin(self.datasource.data[\"values\"])\n high = np.nanmax(self.datasource.data[\"values\"])\n\n frac = percent / 100.0\n self.set_limits_minmax(low, high - (1.0 - frac) * (high - low))", "def normalizeprctile(expdat,percent=80):\n\tparams=locals()\n\n\t# select the bacteria to use - don't want to include very low freq. bacteria\n\tnewexp=hs.filterminreads(expdat,1*len(expdat.samples))\n\n\tpercvals=np.percentile(newexp.data,percent,axis=0)\n#\tplt.figure()\n#\tplt.plot(percvals)\n\tpercvals=percvals/np.mean(percvals)\n\tnewexp=hs.copyexp(expdat)\n\tfor idx,samp in enumerate(expdat.samples):\n\t\tnewexp.data[:,idx]=newexp.data[:,idx]*percvals[idx]\n\tnewexp.filters.append(\"normalize percentile %f\" % percent)\n\ths.addcommand(newexp,\"normalizeprctile\",params=params,replaceparams={'expdat':expdat})\n\n\treturn newexp", "def setFitness(self, fitness):\n self._fitness = fitness" ]
[ "0.55393034", "0.551872", "0.54966956", "0.54480946", "0.5393786", "0.5393786", "0.5380648", "0.53383744", "0.5170568", "0.51268977", "0.510302", "0.50660586", "0.50660586", "0.5058735", "0.5058735", "0.5058735", "0.5058735", "0.5056522", "0.50451845", "0.5021903", "0.5003763", "0.49809188", "0.49791545", "0.49300605", "0.49089974", "0.49047673", "0.48841727", "0.48605254", "0.4811071", "0.48032057" ]
0.68931764
0
Sets the strong_buys of this ZacksAnalystRatingSnapshot.
def strong_buys(self, strong_buys): self._strong_buys = strong_buys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strong_sells(self, strong_sells):\n\n self._strong_sells = strong_sells", "def buys(self, buys):\n\n self._buys = buys", "def player_b_rating(self, player_b_rating):\n\n self._player_b_rating = player_b_rating", "def weightedBoldness(self):\n\n\t\treturn sum([blend.varietal.boldness * blend.portion / 100.0 for blend in self.blends])", "def setwealth(self, w):\n self.wealth = w", "def bid_low(self, bid_low):\n\n self._bid_low = bid_low", "def check_bollinger(self):\n upper, lower = self.bollinger_bands()\n if self.daily['Adj Close'][-1] > upper[-1]:\n self.debug += '\\nAbove upper bollinger: sells + 1'\n self.sells += 1\n elif self.daily['Adj Close'][-1] < lower[-1]:\n self.debug += '\\nBelow lower bollinger: buys + 1'\n self.buys += 1", "def wins(self, wins):\n\n self._wins = wins", "def wins(self, wins):\n\n self._wins = wins", "def strong(self, on, **kw):\n tag = 'strong'\n if on:\n return self._open(tag, allowed_attrs=[], **kw)\n return self._close(tag)", "def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()", "def buying_rate(self, buying_rate):\n\n self._buying_rate = buying_rate", "def event_house_bust(self) -> None:\n print(f\"The house's hand contains {min(self.house.hand.value)}, they're bust\")\n self.event_player_wins()", "def holds(self, holds):\n\n self._holds = holds", "def SetBold(self, bold):\r\n\r\n self._isBold = bold", "def bends(self, bends):\n\n self._bends = bends", "def bends(self, bends):\n\n self._bends = bends", "def sight_wards_bought_in_game(self, sight_wards_bought_in_game):\n\n self._sight_wards_bought_in_game = sight_wards_bought_in_game", "def implied_volatility(self, implied_volatility):\n\n self._implied_volatility = implied_volatility", "def affordable(self):\n\t\tfor shop in shopslist:\n\t\t\tfor bike in shop.bike_prices:\n\t\t\t\tif self.custacct >= shop.bike_prices[bike]:\n\t\t\t\t\tself.affordable_bikes.append(bike)\n\n\t\treturn self.affordable_bikes", "def affordable(self):\n\t\tfor shop in shopslist:\n\t\t\tfor bike in shop.bike_prices:\n\t\t\t\tif self.custacct >= shop.bike_prices[bike]:\n\t\t\t\t\tself.affordable_bikes.append(bike)\n\n\t\treturn self.affordable_bikes", "def set_weights(self, wts):\n\t\tself.wts = wts", "def set_gain_boost(self, enabled):\n if lib.is_SetGainBoost(self.hcam,2):\n lib.is_SetGainBoost(self.hcam,1 if enabled else 0)\n return self.get_gain_boost()", "def badass_buys_by_key(self, key):\n badbuys = []\n for badbuy in self.badassbuy:\n if badbuy.mindkey == key:\n badbuys.append(badbuy)\n return badbuys", "def sells(self, sells):\n\n self._sells = sells", "def player_b_rating_adjustment(self, player_b_rating_adjustment):\n\n self._player_b_rating_adjustment = player_b_rating_adjustment", "def update_water_collecting():\n if init.game_state.rain_water_uses > 0 or init.game_state.current_location[\"Key\"] in cs.water_locations:\n sc.sm.get_screen(\"game\").ids.water_collecting.disabled = False\n else:\n sc.sm.get_screen(\"game\").ids.water_collecting.disabled = True", "def bills(self, bills):\n\n self._bills = bills", "def binarize(self):\n # Loop through the ratings and binarize based on overall average rating\n rating_sum = np.sum(self.ratings)\n rating_count = np.count_nonzero(self.ratings)\n rating_avg = (1.0 * rating_sum) / rating_count\n\n def binary_transform(x, rating_avg):\n if x == 0.0:\n return 0.0\n elif x >= rating_avg:\n return 1.0\n else:\n return -1.0\n\n btransform = np.vectorize(binary_transform, otypes=[np.float])\n 
if self.is_turbo:\n self.ratings = btransform(self.ratings, rating_avg)", "def collectBigBlind(self):\n\t\tself.setBigBlindBetAmount()\n\t\tif self.noOfPlayers() == 2:\n\t\t\tplayer, seatNo = self.findNthPlayerFromSeat(self.curDealerSeatNo, 1)\n\t\telse:\n\t\t\tplayer, seatNo = self.findNthPlayerFromSeat(self.curDealerSeatNo, 2)\n\t\tself.collectMoney(player, self.bigBlind)\n\t\tself.setBigBlindBetAmount() # Need to do this again because even if blind cant be paid, \n\t\t\t\t\t\t\t\t\t# next player still has to pay full blind" ]
[ "0.7033873", "0.5824333", "0.51823777", "0.49280098", "0.48780355", "0.48700973", "0.48350552", "0.4807561", "0.4807561", "0.47917977", "0.47565135", "0.46617305", "0.46569103", "0.46393418", "0.46238536", "0.46224138", "0.46224138", "0.46091217", "0.46026292", "0.4574411", "0.4574411", "0.45646256", "0.45503813", "0.4485923", "0.44678414", "0.44547588", "0.44284695", "0.4406765", "0.4398909", "0.43884456" ]
0.8174262
0
Sets the buys of this ZacksAnalystRatingSnapshot.
def buys(self, buys): self._buys = buys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strong_buys(self, strong_buys):\n\n self._strong_buys = strong_buys", "def sells(self, sells):\n\n self._sells = sells", "def buying_rate(self, buying_rate):\n\n self._buying_rate = buying_rate", "def strong_sells(self, strong_sells):\n\n self._strong_sells = strong_sells", "def set_sell_amount_from_buy_amount(self, *args, **kwargs):\n self._sell_amount = self.get_sell_amount_from_buy_amount(*args, **kwargs)", "def wins(self, wins):\n\n self._wins = wins", "def wins(self, wins):\n\n self._wins = wins", "def buyer(self, buyer):\n\n self._buyer = buyer", "def add_buy(self, trade):\n trade = self._format_sql(trade, self.buy_table)\n self.buys[trade['id']] = trade", "def sales(self, sales):\n\n self._sales = sales", "def buying_options(self, buying_options):\n\n self._buying_options = buying_options", "def bills(self, bills):\n\n self._bills = bills", "def buy_shoppingitem(self, user_id, shoppinglist_id, item_id):\n item = self.get_shoppingitem(user_id, shoppinglist_id, item_id)\n if not item['bought']:\n item['bought'] = True", "def ratings(self, ratings):\n\n self._ratings = ratings", "def ratings(self, ratings):\n\n self._ratings = ratings", "def setBuyList(self, buyList):\n parsedBuyList = []\n for bought in buyList:\n if hasattr(bought, \"unitType\"):\n parsedBuyList.append(bought)\n elif isinstance(bought, dict) and u'unitType' in bought and u'territory' in bought:\n parsedBuyList.append(createBoughtUnitFromDict(bought, self.board.territories))\n else:\n raise Exception(\"Invalid buy list\", buyList)\n\n sumCost = self.costOfUnits(parsedBuyList)\n\n if sumCost <= self.board.currentCountry.money:\n self.board.buyList = parsedBuyList[:] # copy in buyList\n return True\n else:\n return False", "async def update(self, *args, **kwargs):\n if not self.__bought:\n random_stock = 1\n stock_price = self.priceindicator[random_stock].price\n if stock_price != 0:\n random_const = float(decimal.Decimal(random.randrange(-5,5))/100)\n stock_price = stock_price + stock_price*random_const\n stock_price = int(stock_price)\n await self.place_buy_order(random_stock, self.settings[\"stocks_per_company\"], stock_price, 1)\n log_message = \"StockBuyerBot(\" + self.name + \") bought \" + str(random_stock)\n print(log_message)\n else:\n log_message = \"StockBuyerBot(\" + self.name + \") bought nothing\"\n print(log_message)\n self.add_to_log(self.id, log_message)", "def _process_buy(self, base, close_arr, sample, pred_buy):\n for bt in range(len(self.btl)):\n if pred_buy >= self.btl[bt]:\n for st in range(len(self.stl)):\n if self.perf[bt, st, self.BUY_PRICE] == 0:\n # first buy of a possible sequence of multiple buys before sell\n self.perf[bt, st, self.BUY_PRICE] = close_arr[sample]\n self.perf[bt, st, self.BUY_IX] = sample", "def carbs(self, carbs: List[RecipeObjectNutrientsCalories]):\n\n self._carbs = carbs", "def _append_all_buys(self, buy, from_symbol, date):\n\n buy['symbol'] = from_symbol\n buy['date'] = tb.DateConvert(buy['date']).date\n self.all_buys = pd.concat([pd.DataFrame(buy, index=[0]),\n self.all_buys],\n ignore_index=True)\n\n if self.sqlm:\n self.sqlm.add_buy(buy)", "def buyOpenVal(self, buyOpenVal):\n\n self._buyOpenVal = buyOpenVal", "def affordable_bikes(self, customer):\n affordable_bikes = []\n for bike in self.inventory:\n price = bike.total_cost() + (self.retail_margin * bike.total_cost())\n if customer.fund >= price:\n paid = price\n customer_fund_balance = customer.fund - paid\n affordable_bikes.append(bike)\n purchase = affordable_bikes[-1]\n print \"{} can afford {} for 
${}\".format(\n customer.customer_name, bike.bicycle_name, paid)\n self.sold.append(purchase)\n return \"{} buys {} for ${} and her fund balance is ${}\\n\".format(\n customer.customer_name, purchase, paid, customer_fund_balance)", "def brands(self, brands):\n\n self._brands = brands", "def selling_rate(self, selling_rate):\n\n self._selling_rate = selling_rate", "def userSellWeaponObj(self, user : bbUser.bbUser, weapon : bbWeapon.bbWeapon):\n user.credits += weapon.getValue()\n self.weaponsStock.addItem(weapon)\n user.inactiveWeapons.removeItem(weapon)", "def userBuyShipObj(self, user : bbUser.bbUser, requestedShip : bbShip.bbShip):\n if self.userCanAffordItemObj(user, requestedShip):\n self.shipsStock.removeItem(requestedShip)\n user.credits -= requestedShip.getValue()\n user.inactiveShips.addItem(requestedShip)\n else:\n raise RuntimeError(\"user \" + str(user.id) + \" attempted to buy ship \" + requestedShip.name + \" but can't afford it: \" + str(user.credits) + \" < \" + str(requestedShip.getValue()))", "def buy_stock (self, ticker, buy_date, sell_date, amount):\n\n if self.__buy_stock_init__(ticker, buy_date, sell_date, amount) == False:\n return\n\n if self.__get_hist__() == False:\n return\n\n self.__calc_no_shares_to_buy__()\n self.__update_buy_amount__() \n self.__save_buy__()", "def ramp_down(self) -> None:\n for stock in self.stocks:\n if stock.are_any_shares_owned():\n self.cash_balance = stock.sell(-1, self.cash_balance, self.buy_budget)", "def bends(self, bends):\n\n self._bends = bends", "def bends(self, bends):\n\n self._bends = bends" ]
[ "0.5943578", "0.58260715", "0.5590503", "0.5314247", "0.5207249", "0.51555926", "0.51555926", "0.50987566", "0.49954265", "0.4991696", "0.49129245", "0.4755713", "0.47396746", "0.47301865", "0.47301865", "0.4660928", "0.4651183", "0.46375775", "0.4636185", "0.45578387", "0.4553743", "0.45370477", "0.45352295", "0.4531259", "0.45228496", "0.4510267", "0.45005322", "0.4484729", "0.44593948", "0.44593948" ]
0.74221236
0
Sets the holds of this ZacksAnalystRatingSnapshot.
def holds(self, holds): self._holds = holds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ratings(self, ratings):\n\n self._ratings = ratings", "def ratings(self, ratings):\n\n self._ratings = ratings", "def binarize(self):\n total = 0\n count = 0\n avg_rating = 0\n for movie_id, movie in enumerate(self.ratings):\n for user_id, rating in enumerate(movie):\n if rating != 0:\n self.ratings[movie_id,user_id] = 1 if rating > 2.5 else -1", "def wins(self, wins):\n\n self._wins = wins", "def wins(self, wins):\n\n self._wins = wins", "def set_attributes(self):\n for i, battery in enumerate(sorted(self.batteries.values(),\n key=operator.attrgetter(\"weight\"))):\n setattr(battery, \"cap\", self.caps[self.big_iterations][i])\n if self.caps[self.big_iterations][i] is 450:\n cost = 900\n elif self.caps[self.big_iterations][i] is 900:\n cost = 1350\n else:\n cost = 1800\n setattr(battery, \"cost\", cost)\n battery.capacity = self.caps[self.big_iterations][i]", "def holding(self, holding: float):\n if holding is None:\n raise ValueError(\"Invalid value for `holding`, must not be `None`\") # noqa: E501\n\n self._holding = holding", "def strong_buys(self, strong_buys):\n\n self._strong_buys = strong_buys", "def reviews(self, reviews: object):\n\n self._reviews = reviews", "def update_boy(self, hash, new_rate):\n image = self._db.boys.find_one({'_id': hash})\n total_average = self.average(image['rating'], new_rate, image['count'])\n\n self._db.boys.find_one_and_update(\n {'_id': hash}, {'$inc': {'count': 1},\n '$set': {'rating': total_average}},\n return_document=pymongo.ReturnDocument.AFTER)", "def payment_holds(self, payment_holds):\n\n self._payment_holds = payment_holds", "def strong_sells(self, strong_sells):\n\n self._strong_sells = strong_sells", "def rolls(self, rolls):\n self._rolls = rolls", "def set_ages(self):\n for i in self.individuals.values():\n i.set_age(i._age_line)", "def getRawRatings(self):\n\n try:\n judgeNotesLogger.info(\"getRawRatings: Retrieving Raw Ratings from '%s'\", self.notesFile)\n for rating in self.ratingsToSongs.keys():\n numOfSongsWithRating = len(self.ratingsToSongs[rating])\n self.ratingsRaw[rating] = numOfSongsWithRating\n\n except:\n judgeNotesLogger.warning(\"getRawRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))", "def _retention_rate(self):\n res = {}\n for ret_line in self.browse(self.ids):\n if ret_line.invoice_id:\n pass\n else:\n res[ret_line.id] = 0.0\n return res", "def pay_rolls(self, pay_rolls):\n\n self._pay_rolls = pay_rolls", "def aggregate_rating(self, aggregate_rating: object):\n\n self._aggregate_rating = aggregate_rating", "def setArmor(self, armor):\n self.av = armor", "def __eq__(self, other):\n if not isinstance(other, ZacksAnalystRatingSnapshot):\n return False\n\n return self.__dict__ == other.__dict__", "def take_snapshot(self):\r\n self.snapshot = self.gain, self.block, self.locked, self.bucket_num", "def get_ratings(self):\n return self.ratings", "def get_ratings(self):\n return self.ratings", "def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area", "def update_girl(self, hash, new_rate):\n image = self._db.girls.find_one({'_id': hash})\n total_average = self.average(image['rating'], new_rate, image['count'])\n\n self._db.girls.find_one_and_update(\n {'_id': hash}, {'$inc': {'count': 1},\n '$set': {'rating': total_average}},\n return_document=pymongo.ReturnDocument.AFTER)", "def sensitive(self, sensitive):\n\n self._sensitive = sensitive", "def rates(self, rates):\n\n self._rates = rates", "def rates(self, rates):\n\n self._rates = rates", "def setValues(\n 
self,\n checkSelfIntersection: Boolean = ON,\n autoCaching: Boolean = ON,\n maxCachedStates: int = 5,\n ):\n pass", "def set_stratum_data(self) -> None:\n if not self.stratum_factory:\n return\n\n stratum_stats = self.stratum_factory.get_stats()\n completed_jobs = 0\n blocks_found = 0\n estimated_hash_rate = 0.0\n for stats in stratum_stats:\n completed_jobs += stats.completed_jobs\n blocks_found += stats.blocks_found\n estimated_hash_rate = sum_weights(estimated_hash_rate, stats.estimated_hash_rate)\n\n self.completed_jobs = completed_jobs\n self.blocks_found = blocks_found\n self.estimated_hash_rate = estimated_hash_rate" ]
[ "0.5835982", "0.5835982", "0.4976238", "0.48544487", "0.48544487", "0.48132378", "0.4691935", "0.46543285", "0.46492696", "0.4644989", "0.46391696", "0.4630273", "0.46297568", "0.46284485", "0.4615285", "0.45675987", "0.45078152", "0.44719487", "0.44543308", "0.44518128", "0.4451031", "0.44374782", "0.44374782", "0.44345102", "0.4431111", "0.44293055", "0.4417168", "0.4417168", "0.44132808", "0.4410516" ]
0.6009718
0
Sets the sells of this ZacksAnalystRatingSnapshot.
def sells(self, sells): self._sells = sells
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strong_sells(self, strong_sells):\n\n self._strong_sells = strong_sells", "def sales(self, sales):\n\n self._sales = sales", "def selling_rate(self, selling_rate):\n\n self._selling_rate = selling_rate", "def set_sell_amount_from_buy_amount(self, *args, **kwargs):\n self._sell_amount = self.get_sell_amount_from_buy_amount(*args, **kwargs)", "def sell_stock (self, ticker, sell_date):\n \n self.__validate_sell__() \n self.__get_sell_share_price__(ticker, sell_date)\n self.__calc_profit_from_sales__() \n self.__update_sell_delta_amount__()\n self.__save_sell__()\n\n del self.invested[ticker]", "def add_sell(self, trade):\n trade = self._format_sql(trade, self.sell_table)\n self.sells[trade['id']] = trade", "def earnings(self, earnings):\n\n self._earnings = earnings", "def sellTradedVal(self, sellTradedVal):\n\n self._sellTradedVal = sellTradedVal", "def sell_all(self):\n\n from_symbol = self.symbol\n to_symbol = self.currency\n price = self.data[0].close\n amount = self.portfolio['buy_sell_amount'][self.currency]\n date = self.date\n\n if self.slippage:\n slip_factor = (self.data[-1].high - self.data[-1].close)*self.slippage\n price -= np.abs(slip_factor)\n\n self.trade_manager.sell_all(from_symbol, to_symbol, price, amount, date)", "def sell(self):\n self.status = \"sold\"\n return self", "def sellTrdAvg(self, sellTrdAvg):\n\n self._sellTrdAvg = sellTrdAvg", "def seller(self, seller):\n\n self._seller = seller", "def sell_max_amount(self, sell_max_amount):\n\n self._sell_max_amount = sell_max_amount", "def sellOpenVal(self, sellOpenVal):\n\n self._sellOpenVal = sellOpenVal", "def sell_currency(self, sell_currency):\n\n self._sell_currency = sell_currency", "def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n dataframe.loc[(qtpylib.crossed_below(dataframe['ema'], dataframe['ema2'])),'sell'] = 1\n\n return dataframe", "def sell_limit(self, market, quantity, rate):\n return self.api_query('Trade', {'type':'sell', 'pair': market, 'amount': quantity, 'rate':'%.8f'%rate})", "def count_sell(self):\n return Library.functions.count_sell(self._book)", "def offers(self, offers):\n\n self._offers = offers", "def buys(self, buys):\n\n self._buys = buys", "def pay_rolls(self, pay_rolls):\n\n self._pay_rolls = pay_rolls", "def rolls(self, rolls):\n self._rolls = rolls", "def ratings(self, ratings):\n\n self._ratings = ratings", "def ratings(self, ratings):\n\n self._ratings = ratings", "def losses(self, losses):\n\n self._losses = losses", "async def on_sell_all(self, payload):\n\n if self.current_page in self.source._to_sell:\n self.source._to_sell = set()\n else:\n self.source._to_sell = set(range(self.source.get_max_pages()))\n\n await self.show_page(self.current_page)", "def calculate_sale_return(self,\n supply: int,\n connector_balance: int,\n connector_weight: int,\n sell_amount: int) -> int:\n pass", "def populate_sell_trend(dataframe: DataFrame, metadata: dict) -> DataFrame:\n conditions = []\n\n conditions.append(\n ((dataframe['bull'] > 0) & (dataframe['rsi'] > params['bull-sell-rsi-value'])) |\n (~(dataframe['bull'] > 0) & (dataframe['rsi'] > params['bear-sell-rsi-value']))\n )\n\n conditions.append(dataframe['volume'] > 0)\n\n dataframe.loc[\n reduce(lambda x, y: x & y, conditions),\n 'sell'] = 1\n\n return dataframe", "def rental_offers(self, rental_offers):\n\n self._rental_offers = rental_offers", "def sell_stock(self, symbol):\n amount_to_sell = self.get_equity(symbol)\n chirp.order_sell_fractional_by_price(symbol, amount_to_sell)\n 
self.L.add_line('', symbol, 'SOLD', amount_to_sell)" ]
[ "0.66252005", "0.6244003", "0.60547477", "0.59269816", "0.5827568", "0.5623113", "0.55961716", "0.5472035", "0.5459563", "0.54290825", "0.5401822", "0.53335994", "0.53216684", "0.5297495", "0.52785707", "0.5262857", "0.523921", "0.5225706", "0.517438", "0.51605576", "0.51303476", "0.51299417", "0.5101053", "0.5101053", "0.5065657", "0.50569606", "0.49713394", "0.49082753", "0.4846667", "0.48182675" ]
0.79995453
0
Sets the strong_sells of this ZacksAnalystRatingSnapshot.
def strong_sells(self, strong_sells): self._strong_sells = strong_sells
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sells(self, sells):\n\n self._sells = sells", "def strong_buys(self, strong_buys):\n\n self._strong_buys = strong_buys", "def set_sell_amount_from_buy_amount(self, *args, **kwargs):\n self._sell_amount = self.get_sell_amount_from_buy_amount(*args, **kwargs)", "async def soft_sell(self, pair: str, detection_name: str, trigger_data: dict):\n\n params = core.Detector.get_detection_params(detection_name, {\n 'apply': None,\n 'ignore': None\n })\n\n remove_indexes = []\n\n for index, trade in enumerate(self.trades[pair]['open']):\n if not self._is_applied(trade, params) or self._is_ignored(trade, params):\n continue\n\n adjusted_value = self.market.adjusted_close_values[pair][-1]\n target_value = 0.0 if trade['rebuy'] else trade['soft_target']\n trade['soft_sells'].append(detection_name)\n\n if adjusted_value >= target_value:\n if trade['soft_sells'].count(detection_name) >= trade['soft_max']:\n coro = self._trade_methods['sell'](trade, 'SOFT SELL', None, detection_name, trigger_data)\n utils.async_task(coro, loop=common.loop)\n self.trades[pair]['closed'] = []\n remove_indexes.append(index)\n\n check_value = adjusted_value * (1.0 - trade['stop_check'])\n cutoff_value = adjusted_value * (1.0 - trade['stop_cutoff'])\n stop_value = adjusted_value * (1.0 - trade['stop_percent'])\n\n if check_value > trade['check_value']:\n trade['check_value'] = check_value\n\n if cutoff_value > trade['cutoff_value']:\n trade['cutoff_value'] = cutoff_value\n\n if stop_value > trade['stop_value']:\n if stop_value > trade['check_value']:\n trade['stop_value'] = trade['check_value']\n else:\n trade['stop_value'] = stop_value\n\n soft_factor = trade['sell_pushes'] + len(trade['soft_sells'])\n hard_factor = trade['sell_pushes'] + len(trade['hard_sells'])\n trade['soft_target'] *= (1.0 - config['trade_dynamic_sell_percent'] * soft_factor)\n trade['hard_target'] *= (1.0 - config['trade_dynamic_sell_percent'] * hard_factor)\n\n for index in reversed(remove_indexes):\n del self.trades[pair]['open'][index]\n if remove_indexes:\n await self._track_num_open_trades(pair)\n\n base, quote, _ = common.get_pair_elements(pair)\n if base == config['trade_base'] and quote in config['min_base_volumes']:\n await self.balancer.remit_soft_sell(quote, detection_name)\n\n self.save_attr('trades', max_depth=1, filter_items=[pair])\n self.save_attr('trade_stats', max_depth=2, filter_items=[pair], filter_keys=[self.time_prefix])", "def sell_stock (self, ticker, sell_date):\n \n self.__validate_sell__() \n self.__get_sell_share_price__(ticker, sell_date)\n self.__calc_profit_from_sales__() \n self.__update_sell_delta_amount__()\n self.__save_sell__()\n\n del self.invested[ticker]", "def populate_sell_trend(dataframe: DataFrame, metadata: dict) -> DataFrame:\n conditions = []\n\n conditions.append(\n ((dataframe['bull'] > 0) & (dataframe['rsi'] > params['bull-sell-rsi-value'])) |\n (~(dataframe['bull'] > 0) & (dataframe['rsi'] > params['bear-sell-rsi-value']))\n )\n\n conditions.append(dataframe['volume'] > 0)\n\n dataframe.loc[\n reduce(lambda x, y: x & y, conditions),\n 'sell'] = 1\n\n return dataframe", "async def hard_sell(self, pair: str, detection_name: str, trigger_data: dict):\n\n params = core.Detector.get_detection_params(detection_name, {\n 'apply': None,\n 'ignore': None\n })\n\n remove_indexes = []\n\n for index, trade in enumerate(self.trades[pair]['open']):\n if not self._is_applied(trade, params) or self._is_ignored(trade, params):\n continue\n\n adjusted_value = self.market.adjusted_close_values[pair][-1]\n target_value 
= 0.0 if trade['rebuy'] else trade['hard_target']\n trade['hard_sells'].append(detection_name)\n\n if adjusted_value >= target_value:\n coro = self._trade_methods['sell'](trade, 'HARD SELL', None, detection_name, trigger_data)\n utils.async_task(coro, loop=common.loop)\n self.trades[pair]['closed'] = []\n remove_indexes.append(index)\n\n check_value = adjusted_value * (1.0 - trade['stop_check'])\n cutoff_value = adjusted_value * (1.0 - trade['stop_cutoff'])\n stop_value = adjusted_value * (1.0 - trade['stop_percent'])\n\n if check_value > trade['check_value']:\n trade['check_value'] = check_value\n\n if cutoff_value > trade['cutoff_value']:\n trade['cutoff_value'] = cutoff_value\n\n if stop_value > trade['stop_value']:\n if stop_value > trade['check_value']:\n trade['stop_value'] = trade['check_value']\n else:\n trade['stop_value'] = stop_value\n\n hard_factor = trade['sell_pushes'] + len(trade['hard_sells'])\n trade['hard_target'] *= (1.0 - config['trade_dynamic_sell_percent'] * hard_factor)\n\n for index in reversed(remove_indexes):\n del self.trades[pair]['open'][index]\n\n if remove_indexes:\n await self._track_num_open_trades(pair)\n\n base, quote, _ = common.get_pair_elements(pair)\n if base == config['trade_base'] and quote in config['min_base_volumes']:\n await self.balancer.remit_hard_sell(quote, detection_name)\n\n self.save_attr('trades', max_depth=1, filter_items=[pair])\n self.save_attr('trade_stats', max_depth=2, filter_items=[pair], filter_keys=[self.time_prefix])", "def selling_rate(self, selling_rate):\n\n self._selling_rate = selling_rate", "def sellTrdAvg(self, sellTrdAvg):\n\n self._sellTrdAvg = sellTrdAvg", "def sell_all(self):\n\n from_symbol = self.symbol\n to_symbol = self.currency\n price = self.data[0].close\n amount = self.portfolio['buy_sell_amount'][self.currency]\n date = self.date\n\n if self.slippage:\n slip_factor = (self.data[-1].high - self.data[-1].close)*self.slippage\n price -= np.abs(slip_factor)\n\n self.trade_manager.sell_all(from_symbol, to_symbol, price, amount, date)", "def earnings(self, earnings):\n\n self._earnings = earnings", "def sellOpenVal(self, sellOpenVal):\n\n self._sellOpenVal = sellOpenVal", "def sell(self):\n self.status = \"sold\"\n return self", "async def on_sell_all(self, payload):\n\n if self.current_page in self.source._to_sell:\n self.source._to_sell = set()\n else:\n self.source._to_sell = set(range(self.source.get_max_pages()))\n\n await self.show_page(self.current_page)", "def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n dataframe.loc[(qtpylib.crossed_below(dataframe['ema'], dataframe['ema2'])),'sell'] = 1\n\n return dataframe", "def net_gains(self):\n if self.shares == 0:\n self.debug += '\\nNo shares owned.'\n else:\n price = self.daily['Adj Close'][-1]\n gains = self.shares * (price - self.avg_paid)\n percent = (price / self.avg_paid - 1) * 100\n gains = round(gains, 2)\n percent = round(percent, 2)\n if gains < 0:\n penalty = self.sells - int(round(self.sells / 3.0, 0))\n self.debug += '\\nNET LOSS: {}, {}%, AVOID SELLING! 
sells - {}'.format(gains, percent, penalty)\n self.sells -= penalty\n else:\n self.debug += '\\nNet gains: ${}, {}%'.format(gains, percent)", "def count_sell(self):\n return Library.functions.count_sell(self._book)", "def sales(self, sales):\n\n self._sales = sales", "def add_sell(self, trade):\n trade = self._format_sql(trade, self.sell_table)\n self.sells[trade['id']] = trade", "def pay_rolls(self, pay_rolls):\n\n self._pay_rolls = pay_rolls", "def check_bollinger(self):\n upper, lower = self.bollinger_bands()\n if self.daily['Adj Close'][-1] > upper[-1]:\n self.debug += '\\nAbove upper bollinger: sells + 1'\n self.sells += 1\n elif self.daily['Adj Close'][-1] < lower[-1]:\n self.debug += '\\nBelow lower bollinger: buys + 1'\n self.buys += 1", "def sell_max_amount(self, sell_max_amount):\n\n self._sell_max_amount = sell_max_amount", "def wins(self, wins):\n\n self._wins = wins", "def wins(self, wins):\n\n self._wins = wins", "def _set_spikes(self, listOfSpikes):\n self._spikes = listOfSpikes", "def sell_stock(self, symbol):\n amount_to_sell = self.get_equity(symbol)\n chirp.order_sell_fractional_by_price(symbol, amount_to_sell)\n self.L.add_line('', symbol, 'SOLD', amount_to_sell)", "def rolls(self, rolls):\n self._rolls = rolls", "def target_sell_price(self):\n return super(Player, self).target_sell_price", "def ReflectingSeller(Seller):\n increase_step = 0.01\n\n if Seller.has_sold == True:\n Seller.like_sell *= (1+increase_step)\n elif Seller.like_sell * (1-increase_step) <= Seller.min_value and Seller.has_sold == False:\n Seller.like_sell = Seller.min_value\n else: \n Seller.like_sell *= (1-increase_step)\n Seller.has_sold = False #return to normal state", "def buys(self, buys):\n\n self._buys = buys" ]
[ "0.6637629", "0.65615624", "0.54384863", "0.5224136", "0.5186626", "0.51016766", "0.5085686", "0.50319314", "0.49949828", "0.49781185", "0.4922862", "0.49039868", "0.4898402", "0.48969296", "0.4878801", "0.4862272", "0.48586756", "0.48254427", "0.47724262", "0.47236255", "0.46886495", "0.46714395", "0.4637264", "0.4637264", "0.46193653", "0.4614327", "0.46111774", "0.45852882", "0.4576167", "0.45552775" ]
0.86615497
0
Recursive function to return records from CMR using scroll. The first time The function is called, just send a dictionary of what your looking for. This function will call itself as many times as needed to collect all the records and return them in a list. When calling recursively, send the scroll id
def get_block_of_records(search, scroll_id=None): url = "https://cmr.uat.earthdata.nasa.gov/search/collections.umm_json" accept = "application/vnd.nasa.cmr.umm_results+json" body = search.copy() if scroll_id is None: # first time here, request a scroll id and clear out headers body.update({"scroll": "true", "page_size": "50"}) headers = {} else: # recursive call, set the scroll id and don't touch the body headers = {"CMR-Scroll-Id": str(scroll_id)} result = net.post(url, body, accept=accept, headers=headers) items = result.get("items", []) count = len(items) # will be zero if we are at the end resp_headers = result.get("http-headers", {}) scroll_id = resp_headers.get("CMR-Scroll-Id", "") results = [] if count > 0: # probably not done yet, try one more time. This time, send the scroll id results = get_block_of_records(search, scroll_id=scroll_id) results.extend(items) # add the recursive call records to this calls else: # no records came back, work is done, tell CMR it can clear scroll url_clear = "https://cmr.uat.earthdata.nasa.gov/search/clear-scroll" headers['Content-Type'] = 'application/json' data = '{"scroll_id": "' + str(scroll_id) + '"}' net.post(url_clear, data, accept=None, headers=headers) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scroll_for_sessions(self, callback):\n \n print('%s Scrolling for sessions in %r...' % (datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), self.index))\n \n response = ElasticConnection._es.search(\n index = self.index,\n scroll = '5m',\n body = {\n \"size\": 2500,\n \"_source\": {\n \"excludes\": [ \"requests.extended-information\", \"requests.flags\" ]\n },\n \"query\": self._query(),\n \"sort\": [\n {\n \"start-micros\": { \"order\": \"asc\" }\n }\n ]\n }\n )\n \n sid = response['_scroll_id']\n scroll_size = len(response['hits']['hits'])\n scroll_num = 1\n \n while scroll_size > 0:\n print('%s Scroll %r: Processing %r elements...' % (datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), scroll_num, scroll_size))\n \n callback([ d['_source'] for d in response['hits']['hits']])\n \n print('%s Scroll %r: Processing done. Retrieving next documents...' % (datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), scroll_num))\n \n response = ElasticConnection._es.scroll(scroll_id = sid, scroll = '5m')\n \n sid = response['_scroll_id']\n scroll_size = len(response['hits']['hits'])\n scroll_num += 1\n \n print('%s Reached the end of the scroll.' % datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))\n ElasticConnection._es.clear_scroll(scroll_id = sid)", "def _scrolling_request(self, path, method='GET', body=None, headers=None):\n assert 'pagination' in body\n paginated_view = body\n url = '{}{}'.format(self._url_base, path)\n headers = self._headers() if headers is None else headers\n\n scrolling = True\n while scrolling:\n response, content = super(DSBaseService, self)._request(url,\n method,\n body=str(paginated_view).replace(\"'\", '\"'),\n headers=headers)\n\n if int(response['status']) == 200:\n data = json.loads(content)\n offset = data['currentPage']['offset']\n size = data['currentPage']['size']\n total = data['total']\n if offset + size < total:\n paginated_view['pagination']['offset'] = offset + size\n else:\n scrolling = False\n yield data\n elif int(response['status']) == 429:\n # rate limited, wait before resuming scroll requests\n time.sleep(1)\n else:\n scrolling = False", "def scroll(*args):", "def scan(client, index, scroll='10m'):\n\n response = client.search(search_type='scan', scroll=scroll, index=index)\n\n print 'Got {0} total hits'.format(response['hits']['total'])\n\n scroll_id = response['_scroll_id']\n\n while 1:\n response = client.scroll(scroll_id, scroll=scroll)\n if not response['hits']['hits']:\n break\n for hit in response['hits']['hits']:\n yield hit\n scroll_id = response['_scroll_id']", "def dump_index(self, index, output=None):\n endpoint = \"/{}/_search\".format(index)\n url = self.base_url + endpoint\n params = {\"scroll\": \"1m\"}\n result_size = 10000\n data = {\"sort\": [\"_doc\"], \"size\": result_size}\n scroll_headers = {\"Content-Type\": \"application/json\"}\n scroll_headers.update(self.headers)\n\n # Initial request to _search endpoint\n r1 = requests.get(url, headers=scroll_headers, params=params, data=json.dumps(data), verify=False)\n r1.raise_for_status()\n r1_dict = r1.json()\n r1_ml: list = r1_dict[\"hits\"][\"hits\"]\n\n total_hits = r1_dict['hits']['total']\n # To find the number of time we have to scroll.\n # Divide total results by result_size\n # Round up to nearest integer\n # Subtract 1 because we already pulled the first ${result_size} results in the first request\n # Provide result or 0 if it is negative\n num_of_scrolls = max(int(math.ceil((total_hits / result_size))) - 1, 
0)\n if self.verbose:\n print(\"num of scrolls: {}\".format(num_of_scrolls))\n\n # Get _scroll_id\n # Scroll requests hit generic _search/scroll endpoint\n scroll_id = r1_dict[\"_scroll_id\"]\n scroll_endpoint = \"/_search/scroll\"\n scroll_url = self.base_url + scroll_endpoint\n data = {\"scroll\": \"1m\", \"scroll_id\": scroll_id}\n\n # Call scroll API til we have all results pushed into r1_dict\n for i in range(num_of_scrolls):\n if self.verbose:\n print(\"Current length of message list: {}\".format(len(r1_ml)))\n print(\"Calling scroll endpoint: {}/{}\".format(i + 1, num_of_scrolls))\n start = time.time()\n r_scroll = requests.get(scroll_url, headers=scroll_headers, data=json.dumps(data), verify=False)\n if self.verbose:\n end = time.time()\n print(\"Time taken for scroll request: {}\".format(end - start))\n r_scroll.raise_for_status()\n r_scroll_dict = r_scroll.json()\n if self.verbose:\n print(\"Extending r1_ml with new messages\")\n r1_ml.extend(r_scroll_dict[\"hits\"][\"hits\"])\n\n # print(\"Dict obj size: {}\".format(sys.getsizeof(json.dumps(r1_dict))))\n\n # If output is specified then output to file\n if output is not None:\n with open(output, 'w') as f:\n json.dump(r1_dict, f, indent=2)\n return r1_dict", "def __search_for_file_list(job_id: str, batch_id: str, status='NOT_STARTED') -> list:\n es = __connect_ES()\n query = { # TODO Would speed up by reduce the result fields\n \"size\": 10000,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\"match\": {\"job_id\": job_id}},\n {\"match\": {\"batch_id\": batch_id}},\n {\"match\": {\"status\": status}}\n ]\n }\n }\n }\n eprint(\">>> going to query: {}\".format(query))\n\n resp = es.search(\n index=elastic_search_index,\n body=query,\n scroll='9s'\n )\n\n # keep track of pass scroll _id\n old_scroll_id = resp['_scroll_id']\n all_hits = resp['hits']['hits']\n # use a 'while' iterator to loop over document 'hits'\n while len(resp['hits']['hits']):\n\n # make a request using the Scroll API\n resp = es.scroll(\n scroll_id=old_scroll_id,\n scroll='2s' # length of time to keep search context\n )\n\n # check if there's a new scroll ID\n if old_scroll_id != resp['_scroll_id']:\n eprint(\"NEW SCROLL ID:\", resp['_scroll_id'])\n\n # keep track of pass scroll _id\n old_scroll_id = resp['_scroll_id']\n\n # eprint the response results\n eprint(\"\\nresponse for index: \" + elastic_search_index)\n eprint(\"_scroll_id:\", resp['_scroll_id'])\n eprint('response[\"hits\"][\"total\"][\"value\"]:{}'.format(resp[\"hits\"][\"total\"]))\n\n # iterate over the document hits for each 'scroll'\n all_hits.extend(resp['hits']['hits'])\n eprint(\"DOC COUNT:\", len(all_hits))\n\n return all_hits", "def paginate(self, data=[], index=[], link=[], end_id=None, space=0):\n #etiquetas que no se dibujaran su unica funcion es servir de indice \n #para paginar resultados\n if end_id != None:\n self.set_index(index+[end_id]) \n else:\n self.set_index(index)\n \n #objs = []\n #for _id in index:\n # objs.append(self.find_label_by_id())\n obj = self.find_label_by_id(index[0]) #tomo el primer elemento como indice\n start = obj.y #posicion inicial Y\n delta = obj.size + space #salto de cada fila vertical\n #hasta donde dibujar, el objecto con el id end_id sirve de referencia\n #sino se dibuja hasta el final de pagina\n if end_id != None:\n end = self.find_label_by_id(end_id).y\n else:\n end = 0\n\n objs = {} #objectos que serviran como referencia\n for key in index:\n objs[key] = self.find_label_by_id(key)\n \n pos = start #inicializo la posicion de paginacion \n self.pages = 
[[]] #vacio todas las paginas y solo dejo una en blanco \n c = 0 #indice de pagina\n eid = random.randint(1,5000) #elemnto id\n\n dic = {}\n for lnk in link:\n # label -> id data\n dic[lnk[0]] = lnk[1]\n\n #comienzo paginado \n for row in data:\n for key in dic:\n #obj_id = key #identificador de obj de referencia\n col = dic[key] #identificador de columna\n lbl = copy.copy(objs[key])\n lbl.y = pos\n lbl.text = row[col]\n lbl.id = \"{0}_field_{1}\".format(lbl.id, eid)\n lbl.index = False #el elemento se mostrara\n self.pages[c].append(lbl)\n pos -= delta\n #si llego al limite de la pagina agrego una nueva\n if pos <= end:\n self.pages.append([])\n pos = start\n c += 1", "def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data", "def __navigate_scroll(self):\n try:\n _title = self.browser.title\n _body = self.browser.find_element_by_tag_name('body')\n\n i = 0\n while i < 3:\n _html = str(self.browser.page_source)\n _content = Content(_html, _title)\n _attrs = _content.last_divs\n\n scroll_items = []\n for _attr in _attrs:\n xpath_string = '//div'\n\n for k, v in _attr.items():\n if not v:\n xpath_string = xpath_string + \"[@\" + str(k) + \"]\"\n else:\n if isinstance(v, list):\n _vstring = [\"contains(@\" + str(k) + \", '\" + str(_v) + \"')\" for _v in v]\n vstring = \" and \".join(_vstring)\n\n xpath_string = xpath_string + \"[\" + vstring + \"]\"\n\n div = self.browser.find_elements_by_xpath(xpath_string)\n\n for d in div: scroll_items.append(d)\n\n if len(scroll_items) > 10:\n j = 0\n while j < 10:\n try:\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[j])\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n time.sleep(1)\n j += 1\n except Exception as e:\n print(e)\n j += 1\n continue\n \n else:\n for item in scroll_items:\n try:\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", item)\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n _body.send_keys(Keys.HOME)\n time.sleep(1)\n except Exception as e:\n print(e)\n continue\n\n self.browser.execute_script(\"arguments[0].scrollIntoView(true)\", scroll_items[0])\n new_html = str(self.driver.page_source)\n new_content = Content(new_html, _title)\n new_attrs = new_content.last_divs\n\n i += 1\n if new_attrs == _attrs:\n break\n else:\n continue\n\n return self.browser.page_source\n\n except:\n return None", "def paginated_call(self) -> global___Snippet.ClientCall:", "def search_all_records(self, data: dict, execution_context: dict):", "def get_requests( self ,search_id:int ,filter_on:str ,from_date:datetime ,upto_date:datetime ,from_page:int=1 ,upto_page:int=20 ,step=20 ,loopback:dict=None ) -> list((str,dict,str,dict)):\n reqs = []\n params = self.request_params\n params[ 'searchid' ] = search_id\n f = 0\n if self._internal_ids:\n f += 1\n params[ f'field{f}' ] = 'internalid'\n params[ f'operator{f}'] = 'anyof'\n params[ f'field{f}a' ] = self._internal_ids\n\n if filter_on:\n f += 1\n# params[ f'join{f}' ] = self._join_to\n params[ f'operator{f}'] = 'onorafter'\n params[ f'field{f}a' ] = from_date.strftime(\"%m/%d/%Y %I:%M %p\"), # Date format is NOT negotiable! 
Value is ib parent object.\n f += 1\n# params[ f'join{f}' ] = self._join_to\n params[ f'field{f}' ] = filter_on\n params[ f'operator{f}'] = 'before'\n params[ f'field{f}a' ] = upto_date.strftime(\"%m/%d/%Y %I:%M %p\"), # Date format is NOT negotiable! Value is ib parent object.\n\n for page_from in range( from_page ,upto_page ,step ):\n param = params.copy()\n param[ 'from_page'] = page_from\n param[ 'upto_page'] = page_from + step\n\n if loopback:\n ctxback = loopback.copy()\n else:\n ctxback = self.get_loopback() # NOTE: Does make a copy.\n ctxback['from_page'] = param['from_page']\n ctxback['upto_page'] = param['upto_page']\n ctxback['ordinal' ] = page_from // step\n\n reqs.append( (HTTP_GET ,self._request_url ,param ,None ,ctxback) )\n\n return reqs", "def __scroll(self, result, item, index=1, containerObject=None, relatedAreaEnd=None):\r\n defaultSideWidth=150\r\n counter=0\r\n initialDump = None\r\n\r\n itemCommented = self._getCommented(item) # commented/translated version for test step run\r\n\r\n if not self.isItemScrollable(item,containerObject=containerObject, relatedAreaEnd=relatedAreaEnd):\r\n if containerObject:\r\n self.phone.fail('Cannot scroll to item, item %s (related to %s) is not scrollable' % (self._getCommented(item),self._getCommented(containerObject)))\r\n else:\r\n self.phone.fail('Cannot scroll to item, item is not scrollable %s' %self._getCommented(item))\r\n\r\n maximumDuration = 240000\r\n startTime=time.time()\r\n previousScrollValue = 0\r\n scrollModStep = 0\r\n\r\n containerX, containerY, containerW, containerH = [int(c) for c in result[-1].getAttribute('container-area').split(\",\")]\r\n\r\n screenWidth = min([self.getScreenWidth(),containerX + containerW])\r\n screenHeight = min([self.getScreenHeight(),containerY + containerH])\r\n\r\n screenTop = max(0,containerY)\r\n\r\n while result[0]==self.phone.uiState.HIDDEN:\r\n initialDump = self.currentState.toxml('utf-8')\r\n\r\n # Check if item is outside of screen at right\r\n if result[1][0]>=screenWidth:\r\n yCoordinate = 20\r\n\r\n distance=result[1][0] #Distance from end of screen to coordinate\r\n\r\n #If y coordinates are bigger than screenwith then set them to 0\r\n if screenWidth-distance<0:\r\n x_move=0\r\n else:\r\n x_move=screenWidth-distance\r\n self.phone._touch.drawLine((screenWidth,yCoordinate),(x_move,yCoordinate))\r\n self.phone._run('Scrolling left \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # Checking if item is outside of screen at bottom of screen\r\n if result[1][1]>=screenHeight:\r\n scrollEndY=screenHeight-result[1][1] #Distance from end of screen to coordinate\r\n distanceToScroll = scrollEndY\r\n\r\n # increase scrollModStep if we haven't been able to scroll\r\n # NOTE: This is done due to possible brightness adjust bar in settings list\r\n if previousScrollValue == 0:\r\n previousScrollValue = scrollEndY\r\n elif previousScrollValue == scrollEndY:\r\n scrollModStep += 40\r\n else:\r\n previousScrollValue = 0\r\n scrollModStep = 0\r\n\r\n if scrollEndY<screenTop:\r\n scrollEndY=screenTop\r\n\r\n # -60 so that we won't grab the option list from the bottom of the screen\r\n # scrollModStep is used when for adjusting y coordinate\r\n 
self.phone._touch.drawLine((screenWidth-defaultSideWidth,(screenHeight-60) - scrollModStep),(screenWidth-defaultSideWidth,scrollEndY))\r\n self.phone._run('Scrolling down \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # Check if we can safely scroll several times in a row\r\n distanceToScrollMore = screenHeight-result[1][1]\r\n scrolledDistance = distanceToScroll-distanceToScrollMore\r\n if abs(scrolledDistance) > 100:\r\n sweepsRequired = int(distanceToScrollMore/scrolledDistance)\r\n sweeps = min(sweepsRequired-2, 10) # Max 10 sweeps in a row without any checks\r\n if sweeps > 0:\r\n for i in range(0,sweeps):\r\n self.phone._touch.drawLine((screenWidth-defaultSideWidth,(screenHeight-60) - scrollModStep),(screenWidth-defaultSideWidth,scrollEndY))\r\n self.phone._run('Scrolling down \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n\r\n # Checking if item is outside of screen at up of screen\r\n if result[1][1]<=max(screenTop,(self.phone.uiState.statusbarHeight*2)):# Item must be scrolled lower than status bar\r\n #distance=abs(result[1][1])+self.phone.uiState.statusbarHeight #Distance from top of the screen to coordinate which is now negative\r\n distance=abs(result[1][1]-max(screenTop,self.phone.uiState.statusbarHeight)) #Distance from top of the screen to coordinate which is now negative\r\n distance += ((screenHeight-screenTop)/2)\r\n distanceToScroll = distance\r\n\r\n # y_start must be min. 
20 pixels from screenTop to ensure that ntf-drawer is not opened\r\n y_start = max(screenTop,(self.phone.uiState.statusbarHeight*3), 20)\r\n\r\n # increase scrollModStep if we haven't been able to scroll\r\n # NOTE: This is done due to possible brightness adjust bar in settings list\r\n if previousScrollValue == 0:\r\n previousScrollValue = distance\r\n elif previousScrollValue == distance:\r\n scrollModStep += 40\r\n else:\r\n previousScrollValue = 0\r\n scrollModStep = 0\r\n\r\n if screenTop==0:\r\n y_move = distance+(self.phone.uiState.statusbarHeight*3)\r\n else:\r\n y_move = distance+screenTop\r\n\r\n if y_move>=screenHeight:\r\n y_move = screenHeight-1\r\n\r\n # scrollModStep is used when for adjusting y coordinate\r\n self.phone._touch.drawLine((screenWidth-defaultSideWidth,y_start + scrollModStep),(screenWidth-defaultSideWidth,y_move))\r\n self.phone._run('Scrolling up \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # Check if we can safely scroll several times in a row\r\n distanceToScrollMore=abs(result[1][1]-max(screenTop,self.phone.uiState.statusbarHeight))\r\n distanceToScrollMore += ((screenHeight-screenTop)/2)\r\n scrolledDistance = distanceToScroll-distanceToScrollMore\r\n if abs(scrolledDistance) > 100:\r\n sweepsRequired = int(distanceToScrollMore/scrolledDistance)\r\n sweeps = min(sweepsRequired-2, 10) # Max 10 sweeps in a row without any checks\r\n if sweeps > 0:\r\n for i in range(0,sweeps):\r\n self.phone._touch.drawLine((screenWidth-defaultSideWidth,y_start + scrollModStep),(screenWidth-defaultSideWidth,y_move))\r\n self.phone._run('Scrolling up \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # if phone UI has changed, let's not increase the counter\r\n if initialDump == self.currentState.toxml('utf-8'):\r\n counter=counter+1\r\n\r\n # give up when counter has maximum value or maximum time is up\r\n if counter == 10 or time.time() > startTime + maximumDuration/1000.0:\r\n self.phone.capture('Failed to scroll to item')\r\n #if initial dump and current dump are identical, phone UI is frozen -> fail testcase\r\n if initialDump == self.currentState.toxml('utf-8'):\r\n self.phone.comment('KBD_KEY_KEYLOCK_TOGGLE pressed to check if phone UI is freezed or not')\r\n self.phone._pressKey('KBD_KEY_KEYLOCK_TOGGLE')\r\n self.phone._run('Press KBD_KEY_KEYLOCK_TOGGLE')\r\n self.phone.delay(500, False)\r\n self.getCurrentState(refresh = True)\r\n #if initial and current dumps are identical after pressing KBD_KEY_BACK then UI is frozen\r\n if initialDump == self.currentState.toxml('utf-8'):\r\n errorString = 'Phone UI freeze detected, unable to scroll'\r\n self.phone.fail(errorString)\r\n\r\n if containerObject:\r\n self.phone.fail('Cannot scroll to item %s (related to %s)' % (self._getCommented(item), self._getCommented(containerObject)))\r\n else:\r\n self.phone.fail('Cannot scroll to item %s' %self._getCommented(item))\r\n\r\n return result", "def _get_pages(self,url,params,section):\n 
if self.verbose:\n print('Get Pages for {}'.format(url))\n print(params)\n page = 1\n maxPage = 1\n \n all_results = []\n this_batch = []\n while page <= maxPage: \n \n params['page']=page\n resp = self._get(url=url,params=params)\n maxPage = int(resp.headers.get('X-Total-Page-Count',0))\n try:\n results=resp.json()\n except:\n results=None\n if isinstance(results,(list,dict)):\n if 'errors' in results:\n print(results['errors'])\n return results\n \n this_batch = results[section]\n all_results.extend(this_batch)\n\n page+=1\n else:\n if self.verbose:\n print(\"PROBLEM\")\n return results\n\n return all_results", "def iterResponsePages(service, payload, verbose, slow_down):\n token = 0\n next_page = True\n data = {'reports': []}\n\n\n while next_page:\n if verbose:\n print(f'Fetching rows starting at position: {token}')\n if slow_down > 0:\n time.sleep(slow_down)\n \n data_tmp = service.reports().batchGet(body=payload).execute()\n token = data_tmp.get('reports')[0].get('nextPageToken')\n\n if token != None:\n payload.get('reportRequests')[0].update({'pageToken': token})\n else:\n next_page = False\n payload.get('reportRequests')[0].update({'pageToken': '0'})\n\n for report in data_tmp.get('reports'):\n data.get('reports').append(report)\n\n return data", "def quickSearch():\n calDB = db.TinyDB('../calDB.json')\n pars = db.Query()\n recList = calDB.search(pars.key.matches(\"wf\"))\n print len(recList)\n for idx in range(len(recList)):\n key = recList[idx]['key']\n vals = recList[idx]['vals']\n print key\n for ch in vals:\n\n print ch, vals[ch]\n return", "def get_records(self):\n url = f\"{self.baseurl}\" + \"?limit=\" + RECORDS_PER_PAGE\n\n while True:\n log.debug(\"Retrieving from OpenCity URL %s\", url)\n response = urlopen(url).read()\n json_content = json.loads(response)\n\n url = json_content[\"next\"]\n\n objects = json_content[\"items\"]\n for res in objects:\n lid = res[\"id\"]\n ltitle = res[\"title\"]\n log.info(f'Found id:{lid} \"{ltitle}\"')\n yield res\n\n if url is None:\n break", "def get_list(self, method=\"search\", **kwargs):\r\n\r\n lg = logging.getLogger(\"%s.%s\" % (self.ln, inspect.stack()[0][3]))\r\n lg.setLevel(self.log_level)\r\n\r\n args = inspect.getargvalues(inspect.currentframe())[3]\r\n lg.debug(\"\\n****** Args *****:\\n%s\",\r\n pp.pformat(args))\r\n\r\n es = kwargs.get(\"es\",self.es)\r\n doc_type = get2(kwargs, \"doc_type\", self.doc_type)\r\n fields = get2(kwargs, \"fields\")\r\n search_flds = kwargs.get(\"search_flds\")\r\n sort_dir = get2(kwargs,\"sort_dir\", \"asc\")\r\n sort_fields = get2(kwargs,\"sort_fields\", get2(kwargs, \"fields\", []))\r\n size = get2(kwargs,\"size\",10)\r\n term = get2(kwargs,\"term\",'').replace(\"/\",\"//\")\r\n filter_field = kwargs.get('filter_field')\r\n filter_value = kwargs.get('filter_value')\r\n highlight = kwargs.get('highlight',False)\r\n from_ = kwargs.get('from_')\r\n dsl = {}\r\n # set retutn to only return the fields specified or return the whole\r\n # document if not specified\r\n if fields is not None:\r\n dsl[\"_source\"] = fields\r\n else:\r\n fields = []\r\n # set query parameters based on the return method \"list\" or \"search\"\r\n if sort_dir != \"none\" and method == \"list\":\r\n dsl[\"sort\"] = []\r\n for fld in sort_fields:\r\n if fld is not None:\r\n dsl[\"sort\"].append({ fld: sort_dir })\r\n if method == \"search\":\r\n # query in elasticsearch breaks if the is a single open parenthesis\r\n # remove a single parenthesis from the search term\r\n if \"(\" in term and \")\" not in term:\r\n 
search_term = \"*%s*\" % term.replace(\"(\", \"\")\r\n elif term.startswith(\"<\") and term.endswith(\">\"):\r\n search_term = term\r\n else:\r\n search_term = \"*%s*\" % term\r\n if search_flds is not None and len(search_flds) > 0:\r\n fields_to_search = search_flds\r\n elif len(fields) > 0:\r\n fields_to_search = fields\r\n else:\r\n fields_to_search = []\r\n # dsl['query'] = {\r\n # \"bool\": {\r\n # \"should\": [\r\n # {\r\n # \"query_string\" : {\r\n # \"query\": search_term,\r\n # \"analyzer\": \"default\",\r\n # \"analyze_wildcard\": True\r\n # }\r\n # },\r\n # {\r\n # \"query_string\" : {\r\n # \"query\": search_term,\r\n # \"analyzer\": \"default\",\r\n # \"analyze_wildcard\": True,\r\n # \"fields\": fields_to_search,\r\n # \"boost\": 10\r\n # }\r\n # }\r\n # ]\r\n # }\r\n # }\r\n # dsl['query'] = {\r\n # \"query_string\" : {\r\n # \"query\": search_term,\r\n # \"analyzer\": \"default\",\r\n # \"analyze_wildcard\": True\r\n # }\r\n # }\r\n dsl['query'] = {\r\n \"query_string\" : {\r\n \"query\": search_term\r\n }\r\n }\r\n else:\r\n dsl['query'] = {'bool':{}}\r\n if filter_value:\r\n maps = mapping_ref(self.es_url)\r\n path = '%s/%s' % (self.es_index, doc_type)\r\n filter_types = make_list(maps[path].get(filter_field))\r\n fld_filterable = \\\r\n len(set(['keyword','lower']).intersection(set(filter_types)))\\\r\n > 0\r\n if fld_filterable:\r\n if filter_types[0] == 'text':\r\n filter_field = \"%s.%s\" % (filter_field, filter_types[1])\r\n else:\r\n return {'error':\r\n \"Field %s is not filterable. Use a field that has 'keyword' or 'lower' as a mapping\" % filter_field}\r\n dsl['query']['bool']['filter'] = {\r\n \"term\": { filter_field: filter_value }\r\n }\r\n # if highlight:\r\n # dsl['highlight'] = {\"fields\": {\"bf_contribution.rdf_type\":{}}}\r\n lg.info(\"\\n-------- size: %s\\ndsl:\\n%s\", size, json.dumps(dsl,indent=4))\r\n result = es.search(index=self.es_index,\r\n size=size,\r\n from_=from_,\r\n doc_type=doc_type,\r\n body=dsl)\r\n if kwargs.get(\"calc\"):\r\n result = self._calc_result(result, kwargs['calc'])\r\n lg.debug(pp.pformat(result))\r\n return result", "def getAllListPage():\n firstPage = city + '/line1'\n data = urlopen(firstPage).read().decode('gbk')\n urlList = getLineTypeList(data)\n urlList.append(firstPage)\n num = len(urlList)\n i = 0\n p = Pool(processes=4)\n pageData = p.map(readData, urlList)\n# manager = Manager()\n# pageData = manager.list()\n# while i < num:\n# procline = Process(target=readData, args=(urlList[i], pageData,))\n# procline.start()\n# procline.join()\n# i += 1\n return pageData", "def read_recs(self, tbl_name: str, area: str): \n found = []\n pks = set()\n #search in memtbl\n if tbl_name in self.memtbls:\n memtbl = self.memtbls[tbl_name]\n recs = memtbl.get_in_order_records()\n self._get_area_recs(recs, area, pks, found)\n\n #search in LRU\n if tbl_name not in self.page_table:\n #create LRU if necessary \n self.page_table[tbl_name] = [[], [], []] #L0, L1, L2\n LRU = self.page_table[tbl_name]\n else:\n LRU = self.page_table[tbl_name]\n self._level_read_recs(LRU[0], area, pks, found)\n self._level_read_recs(LRU[1], area, pks, found)\n self._level_read_recs(LRU[2], area, pks, found)\n \n #search in Storage\n res = self.storage.get_records(area, tbl_name, pks, found)\n if res == -1:\n return []\n \n bas, found = res\n \n #truncate the blocks returned from storage to fit the memory size\n bas[0] = bas[0][:self.LRU_size]\n bas[1] = bas[1][:self.LRU_size]\n bas[2] = bas[2][:self.LRU_size]\n #update the page table\n 
self._check_evicts(tbl_name, bas)\n \n\n return found", "def Acquisite_data(self, keyword=\"暴雨互助\", page=10, stop_if_repeat=True):\n\n self.data = np.load(self.cache_path, allow_pickle=True)[()]\n\n params = {\n 'containerid': '100103type=1&q=' + keyword,\n 'page_type': 'searchall',\n 'page': page\n }\n url = 'https://m.weibo.cn/api/container/getIndex?'\n response = requests.get(url, params=params).text\n id_ls = re.findall('\"id\":\"(.{16}?)\",', response, re.DOTALL)\n detail_url = ['https://m.weibo.cn/detail/' + i for i in id_ls]\n\n cnt = 0\n for i in detail_url:\n try:\n id = i[-16:]\n if id in self.data:\n if stop_if_repeat:\n break\n else:\n continue\n else:\n self.data[id] = dict()\n time.sleep(1)\n\n response = requests.get(i).text\n data = re.findall(\"var \\$render_data = \\[({.*})]\\[0]\", response, re.DOTALL)[0]\n data = json.loads(data)['status']\n\n created_at_time = data['created_at']\n log_text = data['text']\n log_text = re.sub('<.*?>', '', log_text)\n\n print(created_at_time, i, log_text)\n self.data[id]['time'] = created_at_time\n self.data[id]['link'] = i\n self.data[id]['post'] = log_text\n self.data[id]['valid'] = 1\n\n cnt += 1\n except Exception:\n print(\"weibo fetching error\")\n\n print(\"aquisite %d info\" % cnt)\n\n np.save(self.cache_path, self.data)", "def recur(self, tyme):\n super(GetDoer, self).recur(tyme)\n while len(self.queue) > 0:\n log.debug(f\"Processing `recur` for GetDoer; queue len={len(self.queue)}.\")\n callback, identifier, key = self.queue.popleft()\n result = LookupValues(identifier, key, callback)\n self.node.get(dht.InfoHash.get(key), get_cb=result.get_cb, done_cb=result.done_cb)", "def get_rest():\n\n size = int(request.form.get('size'))\n start = int(request.form.get('start'))\n\n try:\n data = database.mongodb[db_name].find({'parsed': True}, {'_id': 0})\\\n .limit(size)\\\n .skip(start)\n except Exception:\n return \"EOF\"\n resp = \"\"\n\n for idx, entry in enumerate(data):\n try:\n resp += '<tr>'\n\n resp += '<td class=\"col-md-1\">'+str(101+idx+start)+'</td>'\n\n resp += '<td class=\"col-md-2\"><a href=' + entry.get('url') + '>' \\\n + entry.get('url') + '</td>'\n resp += '<td class=\"col-md-3\">'\n resp += entry.get('title') \\\n if entry.get('title') is not None else 'AA'\n resp += '</td>'\n\n resp += '<td class=\"col-md-3\">'\n resp += entry.get('desc') \\\n if entry.get('desc') is not None else 'AA'\n resp += '</td>'\n\n resp += '<td class=\"col-md-3\">'\n resp += entry.get('keywords') \\\n if entry.get('keywords') is not None else 'AA'\n resp += '</td>'\n\n resp += '</tr>'\n except Exception as e:\n print(e)\n print(entry)\n continue\n print(\"DONE\")\n return resp", "def __getitem__(self, index: int) -> Optional[dict]:\n # if isinstance(index, slice) is True:\n\n # data_slice = index\n # start = data_slice.start\n # stop = data_slice.stop\n # step = data_slice.step\n\n # first_item_page = (start // 100) + 1\n # end_item_page = (stop // 100) + 1\n\n # all_data: List[dict] = []\n\n # for page_number in range(first_item_page, end_item_page+1):\n\n # # create url to query\n # params = {\"page\": items_page}\n # url = add_query_params(self.url, params)\n\n # data, _ = self.retrieve_data(url)\n\n # all_data += data\n\n # first_page_index = start % 100\n\n # needed_data = []\n # for index in range(start, stop, step):\n # needed_data.append(all_data[index])\n\n # return needed_data\n\n \n # get the page the item is on\n items_page = (index // 100) + 1\n\n # create url to query\n params = {\"page\": items_page}\n url = 
add_query_params(self.url, params)\n\n data, _, result = self.retrieve_data(url)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(\"Unable to get item from the api\")\n return None\n\n # get the position of data on the page\n page_index = index % 100\n\n try:\n return data[page_index]\n except KeyError as e:\n raise KeyError(\"Data does not exists for that index\") from e", "def _collect_results(self, request_method, request_args, request_kwargs={}, request_params={}):\n results = []\n cursor = None\n page_params = copy.copy(request_params)\n\n while True:\n if cursor:\n page_params['cursor'] = cursor\n response = request_method(\n *request_args,\n **request_kwargs,\n params=page_params\n )\n _raise_on_error(response)\n response_json = response.json()\n results.extend(response_json['results'])\n if response_json['next']:\n cursor = get_cursor_from_url(response_json['next'])\n else:\n return results", "def _retrieve_data(keyw, limit, page=1):\n # Max results per page is 100\n per_page = limit if limit < 100 else 100\n url = BASE_URL + QUALIFIERS % (keyw, per_page, page)\n\n req = requests.get(url)\n r_json = req.json()\n\n if limit > 100:\n r_json['items'].extend(_retrieve_data(keyw, limit - 100, page + 1).\n get('items', []))\n\n return r_json", "def _get_scroll(self, event):\n raise NotImplementedError", "def getRefreshList(self, startIndex=0, force=False):", "def get_data():\n log = common.LogFile('', LOGFILE)\n settings = load_settings()\n keywords = settings[\"keywords\"]\n api_key = settings[\"api_key\"]\n for keyword in keywords:\n print(\"[{}] : fetching data.\".format(keyword))\n filename = \"results_{0}.json\".format(keyword)\n results = {}\n hits_limit = 500\n start_at = 1\n counter = 0\n while True:\n url = create_url(keyword, hits_limit, start_at, api_key)\n records = get_records_from_url(url)\n total_results = get_total_hits(records)\n records = split_records(records)\n records_on_page = len(records)\n if records_on_page == 0:\n break\n else:\n for record in records:\n counter += 1\n id_no = extract_id_number(record)\n processed_dict = {'ID': id_no, 'problem': []}\n processed_record = parse_record(\n record, processed_dict, log)\n if id_no not in results:\n results[id_no] = processed_record\n if counter % 100 == 0:\n print(\"Processed {} out of {}\".format(\n counter, total_results))\n start_at += hits_limit\n time.sleep(THROTTLE)\n print(\"[{}] : fetched {} records to {}.\".format(\n keyword, len(results), filename))\n save_data(results, filename)", "def get_data(self):\n has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)" ]
[ "0.5582365", "0.55574197", "0.54616094", "0.52296954", "0.5199142", "0.51651984", "0.51044744", "0.50029343", "0.49887702", "0.49234888", "0.4922378", "0.48991418", "0.4898491", "0.48976377", "0.48586583", "0.48360264", "0.48242566", "0.48100665", "0.48091716", "0.4801426", "0.47867757", "0.4781822", "0.47811934", "0.47696593", "0.47655684", "0.47612157", "0.47598585", "0.47521943", "0.47461402", "0.47108853" ]
0.7180109
0
Keep references to classes that are about to be instrumented. Used to search for unpatched classes after the instrumentation has run so that they can be patched manually.
def _record_unpatched_classes(): # type: () -> Dict[str, type] installed_packages = _get_installed_modules() original_classes = {} for package, orig_path in CLASSES_TO_INSTRUMENT.items(): if package in installed_packages: try: original_cls = _import_by_path(orig_path) except (AttributeError, ImportError): logger.debug("[OTel] Failed to import %s", orig_path) continue original_classes[package] = original_cls return original_classes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _patch_remaining_classes(original_classes):\n # type: (Dict[str, type]) -> None\n # check which classes have actually been instrumented\n instrumented_classes = {}\n\n for package in list(original_classes.keys()):\n original_path = CLASSES_TO_INSTRUMENT[package]\n\n try:\n cls = _import_by_path(original_path)\n except (AttributeError, ImportError):\n logger.debug(\n \"[OTel] Failed to check if class has been instrumented: %s\",\n original_path,\n )\n del original_classes[package]\n continue\n\n if not cls.__module__.startswith(\"opentelemetry.\"):\n del original_classes[package]\n continue\n\n instrumented_classes[package] = cls\n\n if not instrumented_classes:\n return\n\n # replace occurrences of the original unpatched class in sys.modules\n for module_name, module in sys.modules.copy().items():\n if (\n module_name.startswith(\"sentry_sdk\")\n or module_name in sys.builtin_module_names\n ):\n continue\n\n for package, original_cls in original_classes.items():\n for var_name, var in vars(module).copy().items():\n if var == original_cls:\n logger.debug(\n \"[OTel] Additionally patching %s from %s\",\n original_cls,\n module_name,\n )\n\n setattr(module, var_name, instrumented_classes[package])", "def reset(self):\n super().reset()\n self._includes = set(self.class_helper._includes)", "def _forgetClassInstanceReferenceForTesting(cls):\n try:\n if hasattr(cls.cInstance, '_prepareToForgetSingleton'):\n # tell instance to release anything it might be holding onto.\n cls.cInstance._prepareToForgetSingleton()\n del cls.cInstance\n _removeSingleton(cls)\n except AttributeError:\n # run up the chain of base classes until we find the one that has the instance\n # and then delete it there\n for baseClass in cls.__bases__: \n if issubclass(baseClass, Singleton):\n baseClass._forgetClassInstanceReferenceForTesting()", "def _instrument_class(self, cls):\n newcls = type('InstrumentedClass', (cls, MapRedBase), {})\n return newcls", "def register_finders():\r\n\r\n # If the previous finder is set, then we've already monkeypatched, so skip.\r\n global __PREVIOUS_FINDER\r\n if __PREVIOUS_FINDER:\r\n return\r\n\r\n # save previous finder so that it can be restored\r\n previous_finder = _get_finder(zipimport.zipimporter)\r\n assert previous_finder, 'This appears to be using an incompatible setuptools.'\r\n\r\n # replace the zip finder with our own implementation of find_eggs_in_zip which uses the correct\r\n # metadata handler, in addition to find_wheels_in_zip\r\n pkg_resources.register_finder(\r\n zipimport.zipimporter, ChainedFinder.of(find_eggs_in_zip, find_wheels_in_zip))\r\n\r\n # append the wheel finder\r\n _add_finder(pkgutil.ImpImporter, find_wheels_on_path)\r\n\r\n if importlib_bootstrap is not None:\r\n _add_finder(importlib_bootstrap.FileFinder, find_wheels_on_path)\r\n\r\n __PREVIOUS_FINDER = previous_finder", "def resolve_base_classes(classes):\n for cl in classes.values():\n resolved = []\n for base in cl['bases']:\n if base in classes:\n resolved.append(base)\n cl['resolved_bases'] = resolved", "def _refresh_registry(cls) -> None:\n cls.objects_dict.clear()\n\n # Add new object instances to the registry.\n for name, clazz in inspect.getmembers(\n objects, predicate=inspect.isclass):\n if name == 'BaseObject':\n continue\n\n ancestor_names = [\n base_class.__name__ for base_class in inspect.getmro(clazz)]\n\n assert 'BaseObject' in ancestor_names\n cls.objects_dict[clazz.__name__] = clazz", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return 
self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _get_classifers(self):\n return self.__classifers", "def _fill_class_dicts():\n global _taxonomy_classes\n global _data_classes\n if not _taxonomy_classes:\n _taxonomy_classes = get_taxonomies()\n if not _data_classes:\n stack = []\n next_module = data\n while next_module is not None:\n stack += _inspect_module(next_module)\n if stack:\n next_module = stack.pop()\n else:\n next_module = None", "def get_class_refs(self):\n return list(self._get_class_refs().values())", "def monkeyclass(request):\n from _pytest.monkeypatch import MonkeyPatch\n mpatch = MonkeyPatch()\n yield mpatch\n mpatch.undo()", "def dump_class_ref_counts(referrer_depth=2, cutoff=500, rcutoff=1,\r\n ignore=('tuple', 'list', 'function', 'dict',\r\n 'builtin_function_or_method',\r\n 'wrapper_descriptor')):\r\n import gc\r\n __dump_class_ref_counts(gc, referrer_depth, cutoff, rcutoff, ignore)\r\n gc.collect()\r\n plog(\"NOTICE\", \"GC: Done.\")", "def class_exts(cls):\n return set()", "def inject_classes(cls, cutoff_class=None, module=None):\n module = cls.get_module(module)\n for inter_class in cls.find_classes(cutoff_class=cutoff_class):\n inter_class.inject(module)", "def update_treemakers():\n global treemakers\n treemakers = {}\n for module_filename in glob(os.path.join(hax.hax_dir + '/treemakers/*.py')):\n module_name = os.path.splitext(os.path.basename(module_filename))[0]\n if module_name.startswith('_'):\n continue\n\n # Import the module, after which we can do hax.treemakers.blah\n __import__('hax.treemakers.%s' % module_name, globals=globals())\n\n # Now get all the treemakers defined in the module\n for tm_name, tm in inspect.getmembers(getattr(hax.treemakers, module_name),\n lambda x: type(x) == type and issubclass(x, TreeMaker)):\n if tm_name == 'TreeMaker':\n # This one is the base class; we get it because we did from ... 
import TreeMaker at the top of the file\n continue\n if tm_name in treemakers:\n raise ValueError(\"Two treemakers named %s!\" % tm_name)\n treemakers[tm_name] = tm", "def addClassToPickleWhitelist(cls):\n unpickleWhitelist_.add(cls)", "def unregister_finders():\r\n\r\n global __PREVIOUS_FINDER\r\n if not __PREVIOUS_FINDER:\r\n return\r\n\r\n pkg_resources.register_finder(zipimport.zipimporter, __PREVIOUS_FINDER)\r\n _remove_finder(pkgutil.ImpImporter, find_wheels_on_path)\r\n\r\n if importlib_bootstrap is not None:\r\n _remove_finder(importlib_bootstrap.FileFinder, find_wheels_on_path)\r\n\r\n __PREVIOUS_FINDER = None", "def patch_all():\n to_patch = ['str'] if IS_PY3 else ['str', 'unicode']\n to_patch = [getattr(__builtin__, klass) for klass in to_patch]\n for klass in to_patch:\n for meth in methods_to_patch:\n curse(klass, meth, getattr(PatchClass, meth))", "def register_classes():\n DiffuseCompChain.register_class()\n CatalogCompChain.register_class()\n DiffuseAnalysisChain.register_class()", "def record_class_examined(self, cls):\n serialized = self.serialize_type(cls)\n if serialized is not None:\n self.classes_examined.add(serialized)", "def expand_classes_glob(classes, salt_data):\n all_classes = []\n expanded_classes = []\n saltclass_path = salt_data[\"path\"]\n\n for _class in classes:\n all_classes.extend(match_class_glob(_class, saltclass_path))\n\n for _class in all_classes:\n if _class not in expanded_classes:\n expanded_classes.append(_class)\n\n return expanded_classes", "def find_classes(cls, cutoff_class=None):\n cutoff_class = cutoff_class or Interface\n module = sys.modules[__name__]\n for ni, vi in inspect.getmembers(module, inspect.isclass):\n if issubclass(vi, cutoff_class) and vi is not cutoff_class:\n yield vi", "def reload_definitions():\n package_list = [\n # Reload minimum needs\n 'safe.definitions.minimum_needs',\n # Reload everything that depends on minimum_needs\n 'safe.definitions.fields',\n 'safe.definitions',\n\n # Reload min needs postprocessors\n 'safe.processors.minimum_needs_post_processors',\n # Reload everything that depends on postprocessors\n 'safe.processors',\n 'safe.impact_function.postprocessors',\n 'safe.impact_function',\n\n # Reload everything that depends on reporting\n 'safe.report.extractors.aggregate_postprocessors',\n 'safe.report.extractors.minimum_needs',\n 'safe.report'\n ]\n for p in package_list:\n reload(importlib.import_module(p))\n\n from safe.definitions import minimum_needs\n from safe import processors\n LOGGER.debug('Minimum Needs list:')\n for m in minimum_needs.minimum_needs_fields:\n LOGGER.debug(m)\n\n LOGGER.debug('Minimum Needs Processors list:')\n for m in processors.minimum_needs_post_processors:\n LOGGER.debug(m)", "def _refresh_cache():\n global _num_types, _num_funcs\n\n num_types = interrogate_number_of_global_types()\n num_funcs = interrogate_number_of_functions()\n\n if num_types != _num_types:\n for i in range(num_types):\n itype = interrogate_get_global_type(i)\n if interrogate_type_outer_class(itype):\n continue\n modname = interrogate_type_module_name(itype)\n _modules.add(modname)\n _store_type(modname, itype)\n\n _num_types = num_types\n\n if num_funcs != _num_funcs:\n for i in range(num_funcs):\n ifunc = interrogate_get_function(i)\n parent = interrogate_function_class(ifunc)\n if not parent:\n parent = interrogate_function_module_name(ifunc)\n _modules.add(parent)\n\n # Store it by both the original and mangled name.\n name = interrogate_function_name(ifunc)\n mangled_name1 = 
_translate_function_name(name, False)\n _func_cache[(parent, mangled_name1)] = ifunc\n if not name.startswith('~'):\n mangled_name2 = _translate_function_name(name, True)\n _func_cache[(parent, mangled_name2)] = ifunc\n\n _num_funcs = num_funcs", "def local_classes(self, classnames, typesets=frozenset(['cy', 'py'])):\n saved = {}\n for name in classnames:\n if 'c' in typesets and name in self.cython_ctypes:\n saved[name, 'c'] = _undot_class_name(name, self.cython_ctypes)\n if 'cy' in typesets and name in self.cython_cytypes:\n saved[name, 'cy'] = _undot_class_name(name, self.cython_cytypes)\n if 'py' in typesets and name in self.cython_pytypes:\n saved[name, 'py'] = _undot_class_name(name, self.cython_pytypes)\n self.clearmemo()\n yield\n for name in classnames:\n if 'c' in typesets and name in self.cython_ctypes:\n _redot_class_name(name, self.cython_ctypes, saved[name, 'c'])\n if 'cy' in typesets and name in self.cython_cytypes:\n _redot_class_name(name, self.cython_cytypes, saved[name, 'cy'])\n if 'py' in typesets and name in self.cython_pytypes:\n _redot_class_name(name, self.cython_pytypes, saved[name, 'py'])\n self.clearmemo()" ]
[ "0.76276684", "0.57317924", "0.5657213", "0.56442374", "0.5530063", "0.5519416", "0.55182266", "0.54908854", "0.54908854", "0.54908854", "0.54908854", "0.54908854", "0.54908854", "0.54071677", "0.540273", "0.5328092", "0.532508", "0.5324896", "0.53190255", "0.5307411", "0.52975565", "0.524014", "0.52386355", "0.51770693", "0.5172272", "0.5114553", "0.5111105", "0.5090796", "0.5071467", "0.5069664" ]
0.7603913
1
Best-effort attempt to patch any uninstrumented classes in sys.modules. This enables us to not care about the order of imports and sentry_sdk.init() in user code. If e.g. the Flask class had been imported before sentry_sdk was init()ed (and therefore before the OTel instrumentation ran), it would not be instrumented. This function goes over remaining uninstrumented occurrences of the class in sys.modules and replaces them with the instrumented class. Since this is looking for exact matches, it will not work in some scenarios (e.g. if someone is not using the specific class explicitly, but rather inheriting from it). In those cases it's still necessary to sentry_sdk.init() before importing anything that's supposed to be instrumented.
def _patch_remaining_classes(original_classes): # type: (Dict[str, type]) -> None # check which classes have actually been instrumented instrumented_classes = {} for package in list(original_classes.keys()): original_path = CLASSES_TO_INSTRUMENT[package] try: cls = _import_by_path(original_path) except (AttributeError, ImportError): logger.debug( "[OTel] Failed to check if class has been instrumented: %s", original_path, ) del original_classes[package] continue if not cls.__module__.startswith("opentelemetry."): del original_classes[package] continue instrumented_classes[package] = cls if not instrumented_classes: return # replace occurrences of the original unpatched class in sys.modules for module_name, module in sys.modules.copy().items(): if ( module_name.startswith("sentry_sdk") or module_name in sys.builtin_module_names ): continue for package, original_cls in original_classes.items(): for var_name, var in vars(module).copy().items(): if var == original_cls: logger.debug( "[OTel] Additionally patching %s from %s", original_cls, module_name, ) setattr(module, var_name, instrumented_classes[package])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _record_unpatched_classes():\n # type: () -> Dict[str, type]\n installed_packages = _get_installed_modules()\n\n original_classes = {}\n\n for package, orig_path in CLASSES_TO_INSTRUMENT.items():\n if package in installed_packages:\n try:\n original_cls = _import_by_path(orig_path)\n except (AttributeError, ImportError):\n logger.debug(\"[OTel] Failed to import %s\", orig_path)\n continue\n\n original_classes[package] = original_cls\n\n return original_classes", "def monkey_patch():\r\n # If CONF.monkey_patch is not True, this function do nothing.\r\n if not CONF.monkey_patch:\r\n return\r\n # Get list of modules and decorators\r\n for module_and_decorator in CONF.monkey_patch_modules:\r\n module, decorator_name = module_and_decorator.split(':')\r\n # import decorator function\r\n decorator = importutils.import_class(decorator_name)\r\n __import__(module)\r\n # Retrieve module information using pyclbr\r\n module_data = pyclbr.readmodule_ex(module)\r\n for key in module_data.keys():\r\n # set the decorator for the class methods\r\n if isinstance(module_data[key], pyclbr.Class):\r\n clz = importutils.import_class(\"%s.%s\" % (module, key))\r\n for method, func in inspect.getmembers(clz, inspect.ismethod):\r\n setattr(clz, method,\r\n decorator(\"%s.%s.%s\" % (module, key, method), func))\r\n # set the decorator for the function\r\n if isinstance(module_data[key], pyclbr.Function):\r\n func = importutils.import_class(\"%s.%s\" % (module, key))\r\n setattr(sys.modules[module], key,\r\n decorator(\"%s.%s\" % (module, key), func))", "def patch_sys_modules(\n modules=default_patches\n) -> Generator[None, None, None]:\n cache = {}\n for mod, name in modules:\n try:\n cache[(mod, name)] = getattr(mod, name)\n setattr(mod, name, raiser)\n except Exception as ex:\n logger.debug(f'Failed to replace module {name}, {ex}')\n\n try:\n yield\n finally:\n # replace the references\n for (mod, name), method in cache.items():\n setattr(mod, name, method)", "def monkeymodule():\n from _pytest.monkeypatch import MonkeyPatch\n\n mpatch = MonkeyPatch()\n yield mpatch\n mpatch.undo()", "def monkeyclass(request):\n from _pytest.monkeypatch import MonkeyPatch\n mpatch = MonkeyPatch()\n yield mpatch\n mpatch.undo()", "def patch_sys(cls):\n def patch_dict(old_value, new_value):\n old_value.clear()\n old_value.update(new_value)\n\n def patch_all(path, path_importer_cache, modules):\n sys.path[:] = path\n patch_dict(sys.path_importer_cache, path_importer_cache)\n patch_dict(sys.modules, modules)\n\n old_sys_path, old_sys_path_importer_cache, old_sys_modules = (\n sys.path[:], sys.path_importer_cache.copy(), sys.modules.copy())\n new_sys_path, new_sys_path_importer_cache, new_sys_modules = cls.minimum_sys()\n\n patch_all(new_sys_path, new_sys_path_importer_cache, new_sys_modules)\n yield", "def patch_all():\n to_patch = ['str'] if IS_PY3 else ['str', 'unicode']\n to_patch = [getattr(__builtin__, klass) for klass in to_patch]\n for klass in to_patch:\n for meth in methods_to_patch:\n curse(klass, meth, getattr(PatchClass, meth))", "def force_reimport(module):\n if \".\" in module:\n steps = module.split(\".\")\n else:\n steps = [module]\n\n for i in range(len(steps)):\n module = \".\".join(steps[0:i + 1])\n try:\n del sys.modules[module]\n except KeyError:\n pass", "def patch_sys(cls):\r\n def patch_dict(old_value, new_value):\r\n old_value.clear()\r\n old_value.update(new_value)\r\n\r\n def patch_all(path, path_importer_cache, modules):\r\n sys.path[:] = path\r\n patch_dict(sys.path_importer_cache, 
path_importer_cache)\r\n patch_dict(sys.modules, modules)\r\n\r\n old_sys_path, old_sys_path_importer_cache, old_sys_modules = (\r\n sys.path[:], sys.path_importer_cache.copy(), sys.modules.copy())\r\n new_sys_path, new_sys_path_importer_cache, new_sys_modules = cls.minimum_sys()\r\n\r\n patch_all(new_sys_path, new_sys_path_importer_cache, new_sys_modules)\r\n\r\n try:\r\n yield\r\n finally:\r\n patch_all(old_sys_path, old_sys_path_importer_cache, old_sys_modules)", "def wsgi_app(monkeypatch):\n class WSGIAppMock:\n \"\"\"Mock of a wsgi module.\"\"\"\n\n def application(self):\n \"\"\"Empty application method.\n\n Default method to be called when no specific callable\n is defined in the wsgi application identifier.\n\n It has an empty body because we are expecting to verify that\n the same method is return no the actual execution of it.\n \"\"\"\n\n def main(self):\n \"\"\"Empty custom method (callable) inside the mocked WSGI app.\n\n It has an empty body because we are expecting to verify that\n the same method is return no the actual execution of it.\n \"\"\"\n app = WSGIAppMock()\n # patch sys.modules, to include the an instance of WSGIAppMock\n # under a specific namespace\n monkeypatch.setitem(sys.modules, 'mypkg.wsgi', app)\n return app", "def inject_classes(cls, cutoff_class=None, module=None):\n module = cls.get_module(module)\n for inter_class in cls.find_classes(cutoff_class=cutoff_class):\n inter_class.inject(module)", "def lookup_class_with_patches(name):\n while name in PATH_PATCHES:\n name = PATH_PATCHES[name]\n return lookup_class(name)", "def test_bad_class(self):\n\n mock_entry_badclass = mock.create_autospec(EntryPoint)\n mock_entry_badclass.name = \"BadClass\"\n mock_entry_badclass.load = self.returnbadclass\n\n with pytest.warns(AstropyUserWarning, match=r\".*BadClass.*\"):\n populate_entry_points([mock_entry_badclass])", "def ifc_fallback_class(cls):\n\n if \"*\" in classes:\n raise ImportError(\"Already registered {oc} as fallback, cannot register {nc}\".format(\n oc=classes[\"*\"].__name__,\n nc=cls.__name__))\n classes[\"*\"] = cls\n return cls", "def add_custom_monkey_patching_module(self, module: any):\n self.monkey_patching_modules.append(module)\n return self", "def patch_class(self, mod, patches=None, **kwargs_patches):\n if not mod:\n raise ValueError(\"mod is empty\")\n\n if not patches: patches = {}\n patches.update(kwargs_patches) # combine both dicts\n\n def copy_dict(mod):\n d = {}\n for k, v in mod.__dict__.items():\n if k.startswith(\"__\"):\n d[k] = v\n else:\n if inspect.isroutine(v):\n d[k] = v\n\n elif inspect.isdatadescriptor(v):\n d[k] = v\n\n else:\n d[k] = copy.deepcopy(v)\n return d\n\n class_name = '{}Patched'.format(mod.__name__)\n\n # http://stackoverflow.com/questions/9541025/how-to-copy-a-python-class\n mod_patched = type(\n class_name,\n tuple([mod] + list(mod.__bases__)),\n #{k: copy.deepcopy(v) for k, v in mod.__dict__.items()}\n copy_dict(mod)\n )\n for name, patch in patches.items():\n # make sure we have a callable if we need a callable\n o = getattr(mod_patched, name, None)\n if inspect.isroutine(o) and not inspect.isroutine(patch):\n # so I ran into an issue with binding here, the value of patch was\n # being changed on each iteration and so when I then called it the final\n # value of patch was being returned instead of the value of patch\n # when the lambda was created. 
a partial was the only way I could\n # figure out how to make it work and return the correct value\n patch = functools.partial(lambda *a, **kw: kw[\"__patch\"], __patch=patch)\n\n setattr(mod_patched, name, patch)\n\n return mod_patched", "def test_ensureWhenNotImportedDontPrevent(self):\n modules = {}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\")\n self.assertEqual(modules, {})", "def patchy(target, source=None):\n if isinstance(target, str):\n target = resolve(target)\n if isinstance(source, str):\n source = resolve(source)\n if isinstance(target, ModuleType):\n return PatchModule(target, source)\n elif isinstance(target, type) and source:\n return PatchClass(target, source)", "def register_finders():\r\n\r\n # If the previous finder is set, then we've already monkeypatched, so skip.\r\n global __PREVIOUS_FINDER\r\n if __PREVIOUS_FINDER:\r\n return\r\n\r\n # save previous finder so that it can be restored\r\n previous_finder = _get_finder(zipimport.zipimporter)\r\n assert previous_finder, 'This appears to be using an incompatible setuptools.'\r\n\r\n # replace the zip finder with our own implementation of find_eggs_in_zip which uses the correct\r\n # metadata handler, in addition to find_wheels_in_zip\r\n pkg_resources.register_finder(\r\n zipimport.zipimporter, ChainedFinder.of(find_eggs_in_zip, find_wheels_in_zip))\r\n\r\n # append the wheel finder\r\n _add_finder(pkgutil.ImpImporter, find_wheels_on_path)\r\n\r\n if importlib_bootstrap is not None:\r\n _add_finder(importlib_bootstrap.FileFinder, find_wheels_on_path)\r\n\r\n __PREVIOUS_FINDER = previous_finder", "def _isolateImports(mf, f, *a, **kw):\n\n\n oldMetaPath = sys.meta_path\n oldPathHooks = sys.path_hooks\n _PEP302Mapper._oldSysModules = sys.modules.copy()\n oldImport = __builtin__.__import__\n #where is your god now?\n sys.path_hooks = []\n sys.modules.clear()\n sys.meta_path = [mf]\n __builtins__['__import__'] = mf.xocImport\n\n\n\n #stupid special case for the stdlib\n if mf.mapper.contains('warnings'):\n sys.modules['warnings'] = mf.mapper.lookup('warnings')\n\n try:\n return f(*a, **kw)\n finally:\n sys.meta_path = oldMetaPath\n sys.path_hooks = oldPathHooks\n sys.modules.clear()\n sys.modules.update(_PEP302Mapper._oldSysModules)\n __builtins__['__import__'] = oldImport", "def test_ensureWhenNotImported(self):\n modules = {}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\", preventImports=[\"m1\", \"m2\", \"m3\"])\n self.assertEqual(modules, {\"m1\": None, \"m2\": None, \"m3\": None})", "def _wrap_module_methods(cls):\n exclusions = ([f.name for f in dataclasses.fields(cls)] +\n ['__eq__', '__repr__', '__init__', '__hash__',\n '__post_init__'])\n for key in _get_local_method_names(cls, exclude=exclusions):\n method = getattr(cls, key)\n wrapped_method = wrap_method_once(method)\n if _use_named_call and key != 'setup':\n # We import named_call at runtime to avoid a circular import issue.\n from flax.linen.transforms import named_call # pylint: disable=g-import-not-at-top\n wrapped_method = named_call(wrapped_method)\n setattr(cls, key, wrapped_method)\n return cls", "def nuke_mocker(request):\n m = mock.patch.dict(\"sys.modules\", {\"nuke\": mock.Mock()})\n m.start()\n request.addfinalizer(m.stop)", "def minimum_sys_modules(cls, site_libs, modules=None):\n\n modules = modules or sys.modules\n new_modules = {}\n\n for module_name, module in modules.items():\n # builtins can stay\n if not hasattr(module, '__path__'):\n 
new_modules[module_name] = module\n continue\n\n # Unexpected objects, e.g. namespace packages, should just be dropped:\n if not isinstance(module.__path__, list):\n TRACER.log('Dropping %s' % (module_name,), V=3)\n continue\n\n # Pop off site-impacting __path__ elements in-place.\n for k in reversed(range(len(module.__path__))):\n if cls._tainted_path(module.__path__[k], site_libs):\n TRACER.log('Scrubbing %s.__path__: %s' % (module_name, module.__path__[k]), V=3)\n module.__path__.pop(k)\n\n # It still contains path elements not in site packages, so it can stay in sys.modules\n if module.__path__:\n new_modules[module_name] = module\n\n return new_modules", "def test_register_post_import_hook_reimport(self):\n test_hook = mock.MagicMock()\n register_post_import_hook('tests.utils.test_module', test_hook)\n import tests.utils.test_module\n reload_module(tests.utils.test_module)\n self.assertEqual(test_hook.call_count, 2)", "def testOverrideModule(self):\n\t\tc = Controller()\n\t\tc.override(os, 'getsid', classmethod(c.mock()))\n\t\tc.restore()", "def testOverrideModule(self):\n\t\tc = Controller()\n\t\tc.override(os, 'getsid', classmethod(c.mock()))\n\t\tc.restore()", "def cleanup_import(package_name):\n\n for k in list(sys.modules):\n if not isinstance(k, str):\n # Some things will actually do this =_=\n continue\n elif k.startswith('astropy_helpers.tests'):\n # Don't delete imported test modules or else the tests will break,\n # badly\n continue\n if k == package_name or k.startswith(package_name + '.'):\n del sys.modules[k]", "def test_compat_module_with_ginkgo():\n with patch_module('lms.djangoapps.grades.new.course_grade_factory',\n {'CourseGradeFactory': 'cgf'}):\n with patch_module('certificates.models', {'GeneratedCertificate': 'gc'}):\n with patch_module('courseware.models', {'StudentModule': 'sm'}):\n with patch_module('courseware.courses', {'get_course_by_id': 'gcbid'}):\n with patch_module('openedx.core.djangoapps.xmodule_django.models',\n {'CourseKeyField': 'ckf'}):\n import figures.compat\n reload(figures.compat)\n assert figures.compat.RELEASE_LINE == 'ginkgo'\n assert hasattr(figures.compat, 'CourseGradeFactory')\n assert figures.compat.CourseGradeFactory == 'cgf'\n assert hasattr(figures.compat, 'GeneratedCertificate')\n assert figures.compat.GeneratedCertificate == 'gc'\n assert hasattr(figures.compat, 'StudentModule')\n assert figures.compat.StudentModule == 'sm'\n assert hasattr(figures.compat, 'get_course_by_id')\n assert figures.compat.get_course_by_id == 'gcbid'\n assert hasattr(figures.compat, 'CourseKeyField')\n assert figures.compat.CourseKeyField == 'ckf'", "def apply(cls, fs: Any) -> None:\n logger.debug(\"Patching %s with %s.\", fs.__class__.__name__, cls.__name__)\n fs._patch = cls\n for method_name in cls.patch_methods:\n # if fs hasn't method, raise AttributeError.\n origin = getattr(fs, method_name)\n method = getattr(cls, method_name)\n bound_method = method.__get__(fs, fs.__class__)\n setattr(fs, method_name, bound_method)\n setattr(fs, \"_origin_\" + method_name, origin)" ]
[ "0.7268333", "0.6414055", "0.6173626", "0.5864623", "0.5856952", "0.579441", "0.574507", "0.5737741", "0.5699048", "0.5694578", "0.5617746", "0.553665", "0.54914683", "0.5490592", "0.5467694", "0.5413526", "0.5408118", "0.5367441", "0.53309304", "0.53181887", "0.5274783", "0.5246495", "0.5217995", "0.5210787", "0.5190423", "0.51860774", "0.51860774", "0.5182606", "0.5178531", "0.51706463" ]
0.79764235
0
Applies batch normalization to this layer. The batch normalization must be deleted from the dnn afterwards, and the layers that were connected to it must be connected to this layer instead.
def apply_batch_normalization(self, layer): if type(layer) is not BatchNormalization: raise ValueError('The `layer` must be neoml.Dnn.BatchNormalization.') self._internal.apply_batch_normalization(layer._internal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.compat.v1.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)", "def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,\n scale=True, training=training, fused=True)", "def batch_norm(self, inputs):\n x = inputs\n x = self.bn(x)\n return x", "def batch_norm(x: tf.Tensor) -> tf.Tensor:\n return slim.batch_norm(x, activation_fn=tf.nn.relu, scope='postnorm')", "def batch_norm(input_tensor):\n epsilon = 1e-3\n batch_mean, batch_var = tf.nn.moments(input_tensor, [0])\n input_tensor = tf.nn.batch_normalization(input_tensor, mean=batch_mean, variance=batch_var, offset=None,\n scale=None, variance_epsilon=epsilon)\n\n return input_tensor", "def batch_norm(\n input,\n running_mean,\n running_var,\n weight,\n bias,\n training=False,\n momentum=0.1,\n eps=1e-5,\n):\n return FunctionLib.apply(\n 'BatchNorm', input.device,\n [input, weight, bias, running_mean, running_var],\n axis=1, epsilon=eps, use_stats=int(not training),\n momentum=1.0 - momentum)", "def batch_norm(in_tensor, phase_train, name, reuse=None, data_format='NHWC', center=True, scale=True):\n axis = -1 if data_format == 'NHWC' else 1\n with tf.variable_scope(name):\n # return tf.contrib.layers.batch_norm(in_tensor, is_training=phase_train, scope=scope, reuse=reuse)\n return tf.layers.batch_normalization(in_tensor, axis=axis, center=center, scale=scale, training=phase_train,\n reuse=reuse, fused=True, momentum=0.99, epsilon=1e-1)", "def norm_layer( x, training, name):\n top = tf.layers.batch_normalization( x, \n axis=3, # channels last \n training=training,\n name=name )\n return top", "def test_batch_norm_fold(self):\n inputs = tf.keras.Input(shape=(32, 32, 3,))\n conv = tf.keras.layers.Conv2D(32, (3, 3))(inputs)\n bn = tf.keras.layers.BatchNormalization(fused=True)(conv, training=False)\n relu = tf.nn.relu(bn)\n model = tf.keras.Model(inputs=inputs, outputs=relu)\n\n np.random.seed(0)\n w_shape = model.layers[0].input.shape\n numpy_data = np.random.rand(1, w_shape[1], w_shape[2], w_shape[3]).astype(np.float32)\n\n baseline_output = model(numpy_data)\n\n _, model = fold_all_batch_norms(model)\n output_after_fold = model(numpy_data)\n\n assert np.allclose(baseline_output, output_after_fold, atol=1.e-4)", "def normalize(self, x, train=True):\n if train is not None:\n mean, variance = tf.nn.moments(x, [0,1,2])\n assign_mean = self.mean.assign(mean)\n assign_variance = self.variance.assign(variance)\n with tf.control_dependencies([assign_mean, assign_variance]):\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, self.beta, self.gamma,\n self.epsilon, self.scale_after_norm)\n else:\n mean = self.ewma_trainer.average(self.mean)\n variance = self.ewma_trainer.average(self.variance)\n local_beta = tf.identity(self.beta)\n local_gamma = tf.identity(self.gamma)\n return tf.nn.batch_norm_with_global_normalization(\n x, mean, variance, local_beta, local_gamma,\n 
self.epsilon, self.scale_after_norm)", "def normalize(self):\n self._data /= self.norm()", "def batch_norm(x, training, name):\n with tf.variable_scope(name):\n x = tf.cond(training, lambda: tf.contrib.layers.batch_norm(x, is_training=True, scope=name+'_batch_norm'),\n lambda: tf.contrib.layers.batch_norm(x, is_training=False, scope=name+'_batch_norm', reuse=True))\n return x", "def batch_normalization(x, phase_train, out_size):\r\n with tf.variable_scope('bn'):\r\n beta = tf.Variable(tf.constant(0.0, shape=[out_size]),\r\n name='beta', trainable=True)\r\n gamma = tf.Variable(tf.constant(1.0, shape=[out_size]),\r\n name='gamma', trainable=True)\r\n batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\r\n ema = tf.train.ExponentialMovingAverage(decay=0.5)\r\n\r\n def mean_var_with_update():\r\n ema_apply_op = ema.apply([batch_mean, batch_var])\r\n with tf.control_dependencies([ema_apply_op]):\r\n return tf.identity(batch_mean), tf.identity(batch_var)\r\n\r\n mean, var = tf.cond(phase_train,\r\n mean_var_with_update,\r\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\r\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\r\n return normed", "def batch_normalization(x, phase_train, out_size):\n\n\twith tf.variable_scope('bn'):\n\t\tbeta = tf.Variable(tf.constant(0.0, shape=[out_size]), name='beta', trainable=True)\n\t\tgamma = tf.Variable(tf.constant(1.0, shape=[out_size]), name='gamma', trainable=True)\n\t\tbatch_mean, batch_var = tf.nn.moments(x, [0], name='moments')\n\t\tema = tf.train.ExponentialMovingAverage(decay=0.5)\n\n\t\tdef mean_var_with_update():\n\t\t\tema_apply_op = ema.apply([batch_mean, batch_var])\n\t\t\twith tf.control_dependencies([ema_apply_op]):\n\t\t\t\treturn tf.identity(batch_mean), tf.identity(batch_var)\n\n\t\tmean, var = tf.cond(phase_train, mean_var_with_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))\n\t\tnormed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n\treturn normed", "def batch_normalization(input_var=None):\n\n # Hyperparameters\n hp = Hyperparameters()\n hp('batch_size', 30)\n hp('n_epochs', 1000)\n hp('learning_rate', 0.01)\n hp('l1_reg', 0.00)\n hp('l2_reg', 0.0001)\n hp('patience', 5000)\n\n # Create connected layers\n # Input layer\n l_in = InputLayer(input_shape=(hp.batch_size, 28 * 28), input_var=input_var, name='Input')\n # Batch Normalization\n l_bn1 = BatchNormalization(incoming=l_in, name='Batch Normalization 1')\n # Dense Layer\n l_hid1 = DenseLayer(incoming=l_bn1, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 1')\n # Batch Normalization\n l_bn2 = BatchNormalization(incoming=l_hid1, name='Batch Normalization 2')\n # Dense Layer\n l_hid2 = DenseLayer(incoming=l_bn2, n_units=500, W=glorot_uniform, l1=hp.l1_reg,\n l2=hp.l2_reg, activation=relu, name='Hidden layer 2')\n # Batch Normalization\n l_bn3 = BatchNormalization(incoming=l_hid2, name='Batch Normalization 3')\n # Logistic regression Layer\n l_out = LogisticRegression(incoming=l_bn3, n_class=10, l1=hp.l1_reg,\n l2=hp.l2_reg, name='Logistic regression')\n\n # Create network and add layers\n net = Network('mlp with batch normalization')\n net.add(l_in)\n net.add(l_bn1)\n net.add(l_hid1)\n net.add(l_bn2)\n net.add(l_hid2)\n net.add(l_bn3)\n net.add(l_out)\n\n return net, hp", "def __call__(self, x, is_training=True):\n return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon,\n center=True, scale=True, 
is_training=is_training, scope=self.name)", "def batch_normal(x, is_train, name, activation_fn=None):\n with tf.name_scope(name), tf.variable_scope(name):\n outputs = tf.contrib.layers.batch_norm(x,\n decay=0.999,\n scale=True,\n activation_fn=activation_fn,\n is_training=is_train)\n return outputs", "def sync_batch_norm(\n input,\n running_mean,\n running_var,\n weight,\n bias,\n training=False,\n momentum=0.1,\n eps=1e-5,\n process_group=None,\n):\n if process_group is None:\n kwargs = locals()\n kwargs.pop('process_group')\n return batch_norm(**kwargs)\n return FunctionLib.apply(\n 'SyncBatchNorm', input.device,\n [input, weight, bias, running_mean, running_var],\n axis=1, epsilon=eps, use_stats=int(not training),\n momentum=1.0 - momentum, **process_group.arguments)", "def test_cnn_starts_with_batchnorm(self):\n model = modelgen.generate_CNN_model((None, 20, 3), 2, [32, 32], 100)\n assert str(type(model.layers[0])) \\\n == \"<class 'keras.layers.normalization.BatchNormalization'>\", \\\n 'Wrong layer type.'", "def BatchNormalization(inputs, data_format):\n return tf.layers.BatchNormalization(axis=1 if data_format == 'channels_first' else 3,\n momentum=_BATCH_NORM_DECAY,\n epsilon=_BATCH_NORM_EPSILON,\n scale=True)(inputs)", "def BatchNorm(inputs, axis=None, training=None, momentum=0.9, epsilon=1e-5,\n center=True, scale=True,\n beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n virtual_batch_size=None,\n internal_update=False):\n # parse shapes\n shape = inputs.get_shape().as_list()\n ndims = len(shape)\n\n assert axis is not None\n\n # parse training/ctx\n ctx = get_current_tower_context()\n if training is None:\n training = ctx.is_training\n training = bool(training)\n TF_version = get_tf_version_number()\n if not training and ctx.is_training:\n assert TF_version >= 1.4, \\\n \"Fine tuning a BatchNorm model with fixed statistics is only \" \\\n \"supported after https://github.com/tensorflow/tensorflow/pull/12580 \"\n if ctx.is_main_training_tower: # only warn in first tower\n logger.warn(\"[BatchNorm] Using moving_mean/moving_variance in training.\")\n # Using moving_mean/moving_variance in training, which means we\n # loaded a pre-trained BN and only fine-tuning the affine part.\n\n coll_bk = backup_collection([tf.GraphKeys.UPDATE_OPS])\n with rename_get_variable(\n {'moving_mean': 'mean/EMA',\n 'moving_variance': 'variance/EMA'}):\n if TF_version >= 1.5:\n layer = tf.layers.BatchNormalization(\n axis=axis,\n momentum=momentum, epsilon=epsilon,\n center=center, scale=scale,\n beta_initializer=beta_initializer,\n gamma_initializer=gamma_initializer,\n virtual_batch_size=virtual_batch_size,\n fused=True,\n _reuse=tf.get_variable_scope().reuse\n )\n else:\n assert virtual_batch_size is None, \"Feature not supported in this version of TF!\"\n layer = tf.layers.BatchNormalization(\n axis=axis,\n momentum=momentum, epsilon=epsilon,\n center=center, scale=scale,\n beta_initializer=beta_initializer,\n gamma_initializer=gamma_initializer,\n fused=True,\n _reuse=tf.get_variable_scope().reuse\n )\n xn = layer.apply(inputs, training=training, scope=tf.get_variable_scope())\n\n # maintain EMA only on one GPU is OK, even in replicated mode.\n # because training time doesn't use EMA\n if ctx.is_main_training_tower:\n for v in layer.non_trainable_variables:\n add_model_variable(v)\n if not ctx.is_main_training_tower or internal_update:\n restore_collection(coll_bk)\n\n if training and internal_update:\n assert layer.updates\n with 
tf.control_dependencies(layer.updates):\n ret = tf.identity(xn, name='output')\n else:\n ret = tf.identity(xn, name='output')\n\n vh = ret.variables = VariableHolder(\n moving_mean=layer.moving_mean,\n mean=layer.moving_mean, # for backward-compatibility\n moving_variance=layer.moving_variance,\n variance=layer.moving_variance) # for backward-compatibility\n if scale:\n vh.gamma = layer.gamma\n if center:\n vh.beta = layer.beta\n return ret", "def Batchnorm(name, axes, inputs, is_training=None, stats_iter=None, update_moving_stats=True, fused=True, labels=None, n_labels=None):\n if axes != [0,2,3]:\n raise Exception('unsupported')\n batch_mean, batch_var = tf.nn.moments(inputs, axes, keep_dims=True)\n shape = batch_mean.get_shape().as_list() # shape is [1,n,1,1]\n offset_m = lib.param(name+'.offset', np.zeros([n_labels,shape[1]], dtype='float32'))\n scale_m = lib.param(name+'.scale', np.ones([n_labels,shape[1]], dtype='float32'))\n offset = tf.nn.embedding_lookup(offset_m, labels)\n # offset = tf.Print(offset,['offset',offset])\n scale = tf.nn.embedding_lookup(scale_m, labels)\n # scale = tf.Print(scale,['scale',scale])\n\n moving_mean = lib.param(name + '.moving_mean', np.zeros(batch_mean.get_shape(), dtype='float32'), trainable=False)\n moving_variance = lib.param(name + '.moving_variance', np.ones(batch_var.get_shape(), dtype='float32'),trainable=False)\n\n def _batch_norm_training():\n return tf.nn.batch_normalization(inputs, batch_mean, batch_var, offset[:,:,None,None], scale[:,:,None,None], 1e-5)\n\n def _batch_norm_inference():\n # Version which blends in the current item's statistics\n mean = moving_mean[None, :, None, None]\n var = moving_variance[None, :, None, None]\n '''\n batch_size = tf.cast(tf.shape(inputs)[0], 'float32')\n mean, var = tf.nn.moments(inputs, [2,3], keep_dims=True)\n mean = ((1./batch_size)*mean) + (((batch_size-1.)/batch_size)*moving_mean)[None,:,None,None]\n var = ((1./batch_size)*var) + (((batch_size-1.)/batch_size)*moving_variance)[None,:,None,None]\n '''\n return tf.nn.batch_normalization(inputs, mean, var, offset[:,:,None,None], scale[:,:,None,None],\n 1e-5), mean, var\n\n if is_training is None:\n outputs = _batch_norm_training()\n else:\n if is_training:\n outputs = _batch_norm_training()\n else:\n outputs = _batch_norm_inference()\n\n if update_moving_stats:\n no_updates = lambda: outputs\n\n def _force_updates():\n \"\"\"Internal function forces updates moving_vars if is_training.\"\"\"\n float_stats_iter = tf.cast(stats_iter, tf.float32)\n update_moving_mean = tf.assign(moving_mean,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_mean) + (\n (1 / (float_stats_iter + 1)) * batch_mean))\n update_moving_variance = tf.assign(moving_variance,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_variance) + (\n (1 / (float_stats_iter + 1)) * batch_var))\n with tf.control_dependencies([update_moving_mean, update_moving_variance]):\n return tf.identity(outputs)\n\n if is_training:\n outputs = _force_updates()\n else:\n outputs = no_updates()\n\n return outputs", "def normalize_layer(tensor, name, norm_use='bn'):\n if norm_use == \"gn\":\n x = GroupNorm(name=name + 'gn', groups=32)(tensor)\n elif norm_use == \"bn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'bn', epsilon=1.001e-5)(tensor)\n elif norm_use == \"rbn\":\n x = tf.keras.layers.BatchNormalization(axis=-1, name=name + 'rbn', epsilon=1.001e-5, renorm=True)(tensor)\n elif norm_use == \"in\":\n x = InstanceNormalization(axis=-1, name=name + 'in')(tensor)\n else:\n x = 
tensor\n return x", "def normalize_batch(batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n # normalize using imagenet mean and std\n batch = batch.clone()\n mean = torch.tensor(mean).view(-1, 1, 1)\n std = torch.tensor(std).view(-1, 1, 1)\n # if your image data is scaled to scale 0-255, uncomment the line below\n # batch.div_(255.0)\n return (batch - mean) / std", "def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)", "def _compute_normalization(self, normalize=True):\n if normalize:\n if self._img_norm is None:\n if np.sum(self._data) == 0:\n self._img_norm = 1\n else:\n self._img_norm = self._compute_raw_image_norm()\n\n if self._img_norm != 0.0 and np.isfinite(self._img_norm):\n self._data /= (self._img_norm * self._normalization_correction)\n self._normalization_status = 0\n else:\n self._normalization_status = 1\n self._img_norm = 1\n warnings.warn('Overflow encountered while computing '\n 'normalization constant. Normalization '\n 'constant will be set to 1.', NonNormalizable)\n else:\n self._normalization_status = 2", "def convert_batch_norm(g, op, block):\n\n ipt_name = op.input(\"X\")[0]\n scale_name = op.input(\"Scale\")[0]\n bias_name = op.input(\"Bias\")[0]\n mean_name = op.input(\"Mean\")[0]\n variance_name = op.input(\"Variance\")[0]\n epsilon = op.attr(\"epsilon\")\n out = _op.nn.batch_norm(\n g.get_node(ipt_name),\n g.get_node(scale_name),\n g.get_node(bias_name),\n g.get_node(mean_name),\n g.get_node(variance_name),\n epsilon=epsilon,\n )\n g.add_node(op.output(\"Y\")[0], out[0])", "def batch_norm(x, train, init, act=None, name=None, eps=1e-5, decay=0.9):\n\n return tf.contrib.layers.batch_norm(x,\n decay=decay,\n epsilon=eps,\n scale=True,\n param_initializers=init,\n is_training=train,\n scope=name,\n activation_fn=act,\n updates_collections=None)", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)" ]
[ "0.71378446", "0.7028209", "0.699852", "0.6959648", "0.6896941", "0.68904907", "0.68822706", "0.66368127", "0.66204464", "0.65987813", "0.659512", "0.65843093", "0.6543911", "0.65430003", "0.65364105", "0.65344566", "0.65333307", "0.65062654", "0.6484016", "0.6480447", "0.645781", "0.6446731", "0.6426032", "0.64177656", "0.64112175", "0.63802844", "0.6372297", "0.6366454", "0.63581145", "0.6353004" ]
0.7985232
0
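The `apply_batch_normalization` record above relies on the batch norm layer later being removable: that is safe because its trained statistics can be folded into the weights and free term of the layer it was applied to. The sketch below is a generic numpy illustration of that folding, not the NeoML internals; every name in it is invented for the example.

```python
# Hypothetical illustration of folding a trained batch norm into the preceding
# fully connected layer, which is what makes it safe to delete the BN afterwards.
import numpy as np

def fold_batch_norm(weights, free_term, gamma, beta, mean, var, eps=1e-5):
    """Return fused (weights, free_term) so the layer alone reproduces layer -> BN."""
    scale = gamma / np.sqrt(var + eps)          # per-output-channel factor
    fused_weights = weights * scale[:, None]    # scale each output row
    fused_free_term = (free_term - mean) * scale + beta
    return fused_weights, fused_free_term

# Tiny check on random data: BN(Wx + b) == W'x + b'
rng = np.random.default_rng(0)
W, b = rng.normal(size=(4, 3)), rng.normal(size=4)
gamma, beta = rng.normal(size=4), rng.normal(size=4)
mean, var = rng.normal(size=4), rng.uniform(0.5, 2.0, size=4)
x = rng.normal(size=3)
bn_out = gamma * ((W @ x + b) - mean) / np.sqrt(var + 1e-5) + beta
W2, b2 = fold_batch_norm(W, b, gamma, beta, mean, var)
assert np.allclose(bn_out, W2 @ x + b2)
```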
Gets the free term vector of element_count length.
def free_term(self): return Blob.Blob(self._internal.get_free_term())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zero_free_term(self):\n return self._internal.get_zero_free_term()", "def f_vector(self):\n try:\n return self._f_vector\n except AttributeError:\n self._f_vector = vector(ZZ,[len(x) for x in self.face_lattice().level_sets()])\n return self._f_vector", "def get_free(self):\r\n\t\treturn len(self.free_objects)", "def get_Term_frees(self, arg, free, non_free):\n args_vars = arg.variables() # get term's variables\n if args_vars != set(): # the set is not empty\n for var in args_vars:\n if var not in non_free and is_variable(var): # if it wasnt refrenced and is a var add it\n free.add(var)", "def __len__(self):\n return _libsbml.ListWrapperCVTerm___len__(self)", "def __len__(self):\n return self._vector.degree()", "def freq_vector(cls, n, fs):\n return freq_vector(n, fs, sided='single')", "def empty(self):\n return _uhd_swig.uhd_size_vector_t_empty(self)", "def __len__(self):\n return len(self._terms)", "def free(self):\n return self.i_free().j_free()", "def get_vector_span(self, i, j):\n from sage.rings.infinity import Infinity\n from sage.matrix.constructor import matrix\n data = self[i,j]\n if not data:\n return None\n elif len(data) == 1:\n return FreeModule(ZZ, self._nvars).submodule([])\n else:\n return matrix([x-data[0] for x in data]).row_space()", "def getSize(self):\n return _libsbml.ListWrapperCVTerm_getSize(self)", "def get_word_vector(doc_id, corpus):\n inv_index = vsm_retrieval.get_inverted_index(corpus)\n word_vec = np.zeros(len(inv_index))\n for count_vec, word in enumerate(inv_index):\n word_vec[count_vec] = inv_index[word].get(doc_id, {'frequency': 0})['frequency']\n return word_vec", "def dense_vector (n, init_val=0.0):\n return [init_val] * n", "def length(x):\r\n return Feature(x, \"length\")", "def vector(self):\n return self.__vector", "def _get_vector_size(self):\n if len(self):\n return len(self.values()[0])\n else:\n return 0", "def take_vec(self):\n vec = aux.vec(self.numbers)\n\n return vec", "def get_free_dofs(self) -> list[int]:\n\n return flatten([node.get_free_dofs() for node in self.nodes])", "def vector(self):\n return self.q[1:4]", "def iterator(self):\n return _uhd_swig.uhd_size_vector_t_iterator(self)", "def termFreq(self):\n if self._tf is None:\n nwords = len(self)\n self._tf = []\n for t in self.freqDist:\n self._tf.append( ( t[0], t[1] / nwords ) )\n return self._tf", "def __call__(self):\n return self._vector", "def GravityVector(self):\n if self.Cid() == 0:\n return self.N\n ## TODO: shouldn't be scaled by the ???\n p = self.cid_ref.transform_vector_to_global(self.N)\n return self.scale * p", "def counit(self, element):\n return element.coefficient([])", "def create_feature_vector(ix, term_dict, bow):\n\n\ttfv = list()\n\t# get corpus length (n. 
of docs)\n\tnum_docs = ix.num_docs\n\tfor idx, tf in bow:\n\t\t# get term from dict index\n\t\tterm = ix[idx]\n\t\t# filter out terms not contained in self.term_dict\n\t\tif term not in term_dict:\n\t\t\tcontinue\n\t\t# filter out terms w/ length gt 20\n\t\tif len(term) > 20:\n\t\t\tcontinue\n\t\t# filter out non-alphabetical terms\n\t\tif not term.isalpha():\n\t\t\tcontinue\n\t\t# get document frequency \n\t\tdf = ix.dfs[idx]\n\t\t# compute ratio between df and num_docs\n\t\tratio = df / num_docs\n\t\tif ratio > 0.1: # skip term - requires tuning: check if it's okay to keep it as is\n\t\t\tcontinue\n\t\t# append term w/ tf to tfv\n\t\ttfv.append((term, tf))\n\treturn tfv", "def generate_vector(text, tf=None):\n if not _trained:\n print(\"Make sure to train parameterizer first\")\n exit(1)\n if tf is None:\n tf = term_frequency.generate_vector(text)\n vector = []\n for i in range(len(tf)):\n vector.append(tf[i] * _idfs[i])\n return vector", "def get_free_indices(program, program_len):\n used = get_used_indices(program)\n total = set(range(program_len + len(program.input_types)))\n return total - used", "def unit_vector(self,vector):\n\t\tunit_vector_query=0;\n\t\tfor word in vector:\n\t\t\tunit_vector_query += vector[word]*vector[word];\n\t\tunit_vector_query = math.sqrt(unit_vector_query);\n\t\treturn unit_vector_query", "def vector(self):\n \n v_list = Householder.triangle_operation(self)[1]\n \n return(v_list)" ]
[ "0.6391553", "0.6213692", "0.58426434", "0.5717408", "0.5716961", "0.5586635", "0.55293155", "0.5515443", "0.54636407", "0.54122466", "0.5402079", "0.5396786", "0.5375249", "0.53743243", "0.536684", "0.536628", "0.5319798", "0.52847767", "0.52813905", "0.52696437", "0.52508456", "0.5246025", "0.5244521", "0.5231832", "0.52213246", "0.5193071", "0.5177292", "0.51671016", "0.5163238", "0.5156031" ]
0.6329039
1
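For the `free_term` record above: the free term is simply the layer's bias vector, one entry per output element, which is why it has `element_count` length. A minimal numpy illustration (the names below are invented for the example, not the library's API):

```python
# Illustrative only: in a fully connected layer with `element_count` outputs,
# the free term is the bias added after the weight multiplication.
import numpy as np

element_count, input_size = 4, 3
weights = np.zeros((element_count, input_size))
free_term = np.arange(element_count, dtype=float)   # one value per output element

x = np.ones(input_size)
y = weights @ x + free_term                          # output = W x + free term
assert y.shape == (element_count,)
assert np.allclose(y, free_term)                     # zero weights -> output is just the bias
```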
Get the model's last response
def get_model_api_last_response(self): return self._last_response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_model_api_last_response_id(self):\n return self._last_response_id", "def last(self):\n data = self._http_get(\"last\")\n return data.json()", "def get_latest_model():\n return get_models()[-1]", "def get_last_result(self):\n return self.last_result", "def test_get_last_response(self):\r\n response_dict = self.combinedoe.get_last_response(0)\r\n self.assertEqual(response_dict['type'], \"selfassessment\")\r\n self.assertEqual(response_dict['max_score'], self.max_score)\r\n self.assertEqual(response_dict['state'], CombinedOpenEndedV1Module.INITIAL)", "def getLast(self):\r\n return self._data[-1]", "def get_last_response_ajax(self, data):\r\n return self.get_last_response(self.current_task_number)", "def lastsave(self):\n self.connect()\n self._write('LASTSAVE\\r\\n')\n return self._get_numeric_response()", "def fetch_last_model_file(self):\n try:\n filename = self.model_files[-1]\n return self.make_path(filename)\n except IndexError:\n return None", "def getLastData(self) -> ghidra.program.model.listing.Data:\n ...", "def last_percept(self):\n return self.percept", "def get_last(self):\n return self.get_block(len(self.chain)-1)", "def get_last_update(self):\n return self.ticker.all().order_by('-created').first()", "def get_last(self, count):", "def at_last(self):\n return self._collection.at_last()", "def _get_last_json_result_string(self):\n return self.last_json_result", "def last_update(self):\r\n request = http.Request('GET', '/metadata/last_update.json')\r\n return request, parsers.parse_json", "def get_my_last_event(self):\r\n return self._handler.get_my_last_event()", "def last_hit(self):\n return self._last_hit", "def getLast(self):\n return self.dataBuffer[len(self.dataBuffer) - 1]", "def last(self):\n if self.ordered:\n queryset = self.reverse()\n else:\n self._check_ordering_first_last_queryset_aggregation(method=\"last\")\n queryset = self.order_by(\"-pk\")\n for obj in queryset[:1]:\n return obj", "def get_last_saved_estimation(self):\n return None", "def result(self):\n # most pythonic way to get last in last is -1\n return self.history[-1]", "def get_last_solution(self):\n return self.last_result", "def get_response(self):\n return self.__response", "def _last_request():\n return httpretty.httpretty.last_request", "def last(self):\n return Null", "def last(self):\n return self.last and self.last.value or None", "def last_update(self): # TOFIX model the job and return an object instead of dictionary\n return self._data.get('summary_fields', {}).get('last_update')", "def last_attempt(self) -> 'outputs.AttemptResponse':\n return pulumi.get(self, \"last_attempt\")" ]
[ "0.73073435", "0.6988277", "0.67950827", "0.67702544", "0.65754414", "0.6431394", "0.6355076", "0.6326154", "0.63088065", "0.62795436", "0.624241", "0.62157893", "0.61976624", "0.61938447", "0.6164053", "0.61567557", "0.6153006", "0.6150398", "0.61497515", "0.61475986", "0.61388236", "0.6133648", "0.6127", "0.6110502", "0.60950786", "0.60917175", "0.60568553", "0.6044905", "0.60322183", "0.6000704" ]
0.8733788
0
Get the ID of the model's last response
def get_model_api_last_response_id(self): return self._last_response_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_model_api_last_response(self):\n return self._last_response", "def getLastObjectId(self):\n return self.objId", "def latest_id(self):\n return self.checkpoints[-1]", "def _get_id(self):\n return self.id", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def GetCommandId(self):\r\n \r\n return self._last_id", "def get_current_id(self) -> int:\n try:\n return self.cursor.execute(f\"SELECT MAX(id) FROM {table_locations};\").fetchone()\n except Exception as e:\n msg = f'We faced some problems with the getting last id value. Mistake: {e}'\n self.proceed_error(msg)\n return -1", "def get_current_id(self):\n\n id = self.ids[-1]\n\n if id is None:\n raise KeyError()\n\n return id", "def getID(self) -> int:\n ...", "def id(self):\n return self.__pairs[-1][1]", "def GetCommandId(self):\r\n\r\n return self._last_id", "def getID():", "def __get_last_id(cls):\n db = database.db_connection()\n cursor = db.cursor()\n sql_query = \"SELECT max(id_user) FROM user\"\n cursor.execute(sql_query)\n row = cursor.fetchone()\n cursor.close()\n return int(row[0])", "def getId(self):\n # XXX-Aurel : this must be based on the GID definition\n # As GID in TioSafe case is unique, it must be used to get\n # the last ID of an inserted object (usefull for cases where\n # transactionnal operation is not provided like with prestashop)\n #raise ValueError, self.last_id\n return LastIdBrain.getId(self)", "def last_id(self):\n rows = self.db.query(\"\"\"\n SELECT LAST_INSERT_ID() AS id\n \"\"\")\n for row in rows:\n return row['id']", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id", "def get_id(self):\n return self.id" ]
[ "0.75364536", "0.69300216", "0.6603426", "0.6570031", "0.6540917", "0.6540917", "0.6540917", "0.6540917", "0.6533437", "0.6508722", "0.65032995", "0.64846146", "0.64710206", "0.64190453", "0.6388252", "0.6376812", "0.6371169", "0.63536155", "0.6344545", "0.6344545", "0.6344545", "0.6344545", "0.6344545", "0.6344545", "0.6344545", "0.6344545", "0.6344545", "0.6344545", "0.6344545", "0.6344545" ]
0.8848147
0
Pad an image up to the target size.
def pad_image(img, target_size): rows_missing = target_size[0] - img.shape[2] cols_missing = target_size[1] - img.shape[3] padded_img = np.pad(img, ((0, 0), (0, 0), (0, rows_missing), (0, cols_missing)), 'constant') return padded_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resize_and_pad(img, target_size): \n max_shape = np.max(img.shape)\n max_index = np.argmax(img.shape)\n if img.shape[1] < target_size and img.shape[2] < target_size: \n #interpolate\n factor = float(target_size)/max_shape\n scaled_img = misc.imresize(img, (max(1,int(img.shape[1]*factor)), max(1,int(img.shape[2]*factor))), interp=\"nearest\")\n else:\n #downsample maximum dimension to target size \n factor = max_shape/float(target_size)\n scaled_img = misc.imresize(img, (max(1,int(img.shape[1]/factor)), max(1,int(img.shape[2]/factor))), interp=\"nearest\") \n\n #fill up \n padded_image = np.stack([np.lib.pad(scaled_img[:,:,0], \n ((0,target_size-scaled_img.shape[0]),\n (0,target_size-scaled_img.shape[1])), \n 'constant', constant_values=(255)),\n np.lib.pad(scaled_img[:,:,1], \n ((0,target_size-scaled_img.shape[0]),\n (0,target_size-scaled_img.shape[1])), \n 'constant', constant_values=(255)),\n np.lib.pad(scaled_img[:,:,2], \n ((0,target_size-scaled_img.shape[0]),\n (0,target_size-scaled_img.shape[1])), \n 'constant', constant_values=(255))])\n \n \n return padded_image", "def pad_image(img, output_path, pad_size=[8,8,8,8], buckets=None):\n top, left, bottom, right = pad_size\n old_im = Image.open(img)\n old_size = (old_im.size[0] + left + right, old_im.size[1] + top + bottom)\n new_size = get_new_size(old_size, buckets)\n new_im = Image.new(\"RGB\", new_size, (255,255,255))\n new_im.paste(old_im, (left, top))\n new_im.save(output_path)", "def _pad_image(self, img: ndarray, pad_width: int = 10) -> ndarray:\n self.padded_img = np.zeros(\n (img.shape[0] + pad_width*2, img.shape[1]+pad_width*2))\n self.padded_img[pad_width:-pad_width, pad_width:-pad_width] = img\n return self.padded_img", "def pad(img: torch.Tensor, new_size: Union[int, Tuple[int, int]]) ->torch.Tensor:\n new_size = to_tuple(new_size)\n old_size = img.shape[-2:]\n pad_size = (torch.tensor(new_size) - torch.tensor(old_size)) / 2\n padding = torch.cat((torch.floor(pad_size), torch.ceil(pad_size)))\n padding[padding < 0] = 0\n padding = [int(x) for x in padding]\n return F.pad(img, padding=padding, padding_mode='edge')", "def pad_to(image,w,h):\n iw,ih = image.shape\n wd = int(w-iw)\n assert wd>=0\n w0 = wd/2\n w1 = wd-w0\n hd = int(h-ih)\n assert hd>=0\n h0 = hd/2\n h1 = hd-h0\n result = zeros((w,h))\n result[w0:w0+iw,h0:h0+ih] = image\n return result", "def padding(image, padded_size):\n image_row, image_col = image.shape #asigna alto y ancho de la imagen \n\n padded_image = np.zeros((image_row + padded_size*2, image_col + padded_size*2)) #matriz de imagen con padding en zeros\n print(\"Padded image zeros:\")\n print(padded_image)\n\n padded_image[padded_size:padded_size + image_row, padded_size:padded_size + image_col] = image #matriz de imagen con padding\n print(\"Padded image:\")\n print(padded_image)\n\n \n return padded_image", "def pad(self, *args, **kwargs):\n return _image.image_pad(self, *args, **kwargs)", "def reshape_and_pad(image: np.ndarray, target_size: Union[List[int], Tuple[int]]):\r\n # input shapes\r\n input_shape = image.shape[:2]\r\n # calculate the width and height\r\n w_ratio = float(target_size[1]) / input_shape[1]\r\n h_ratio = float(target_size[0]) / input_shape[0]\r\n # take the smaller ratio to ensure the whole image fits in the new shape\r\n ratio = min(w_ratio, h_ratio)\r\n # calculate the new size\r\n new_size = tuple([int(x * ratio) for x in input_shape])\r\n\r\n # resize the image\r\n scaled_image = cv2.resize(image, (new_size[1], new_size[0]))\r\n\r\n # width and height differences\r\n 
delta_w = target_size[1] - new_size[1]\r\n delta_h = target_size[0] - new_size[0]\r\n\r\n # image position within the new image\r\n top, bottom = delta_h // 2, delta_h - (delta_h // 2)\r\n left, right = delta_w // 2, delta_w - (delta_w // 2)\r\n\r\n # padding color\r\n padding_color = [0, 0, 0]\r\n new_image = cv2.copyMakeBorder(scaled_image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=padding_color)\r\n\r\n return new_image", "def pad(self, src):\n if(self.pre_pad):\n dst = src.new(\n src.size(0),\n src.size(1),\n src.size(2),\n src.size(3),\n 2\n ).zero_()\n dst.narrow(dst.ndimension()-1, 0, 1).copy_(\n torch.unsqueeze(src, 4)\n )\n else:\n padded = self.padding_module.updateOutput(src)\n dst = src.new(\n padded.size(0),\n padded.size(1),\n padded.size(2),\n padded.size(3),\n 2\n ).zero_()\n dst.narrow(4, 0, 1).copy_(\n torch.unsqueeze(padded, 4)\n )\n return dst", "def pad_to_square(image, min_size, **pad_kwargs):\n\n h, w = image.shape[:2]\n\n if h >= min_size and w >= min_size:\n return image\n\n top = bottom = left = right = 0\n\n if h < min_size:\n top = (min_size - h) // 2\n bottom = min_size - h - top\n if w < min_size:\n left = (min_size - w) // 2\n right = min_size - w - left\n\n return np.pad(image,\n ((top, bottom),\n (left, right),\n (0, 0)), **pad_kwargs)", "def pad(img, pad_size=32):\n\n if pad_size == 0:\n return img\n\n height, width = img.shape[:2]\n\n if height % pad_size == 0:\n y_min_pad = 0\n y_max_pad = 0\n else:\n y_pad = pad_size - height % pad_size\n y_min_pad = int(y_pad / 2)\n y_max_pad = y_pad - y_min_pad\n\n if width % pad_size == 0:\n x_min_pad = 0\n x_max_pad = 0\n else:\n x_pad = pad_size - width % pad_size\n x_min_pad = int(x_pad / 2)\n x_max_pad = x_pad - x_min_pad\n\n img = cv2.copyMakeBorder(img, y_min_pad, y_max_pad, x_min_pad, x_max_pad, cv2.BORDER_REFLECT_101)\n\n return img, (x_min_pad, y_min_pad, x_max_pad, y_max_pad)", "def padding(img, n):\n img = np.pad(img, [(n, n), (n, n)], mode='constant', constant_values=0)\n\n return img", "def run_padding(self):\n\n image_padded, mask, self.pad_to_right, self.pad_to_bottom = gen_padded_image_and_mask (os.path.join('utils_dfn/temp', self.file_name_with_ext),\n self.new_height, self.new_width)\n cv2.imwrite(os.path.join('utils_dfn/img', self.file_name + '_padded_resized.png'), image_padded)\n cv2.imwrite(os.path.join('utils_dfn/mask', self.file_name + '_mask.png'), mask)", "def _pad_img(self, results):\n pad_val = self.pad_val.get('img', 0)\n for key in results.get('img_fields', ['img']):\n if self.pad_to_square:\n max_size = max(results[key].shape[:2])\n self.size = (max_size, max_size)\n if self.size is not None:\n padded_img = general_ocr.impad(\n results[key], shape=self.size, pad_val=pad_val)\n elif self.size_divisor is not None:\n padded_img = general_ocr.impad_to_multiple(\n results[key], self.size_divisor, pad_val=pad_val)\n results[key] = padded_img\n results['pad_shape'] = padded_img.shape\n results['pad_fixed_size'] = self.size\n results['pad_size_divisor'] = self.size_divisor", "def crop_or_pad(img: torch.Tensor, new_size: Union[int, Tuple[int, int]]):\n new_size = to_tuple(new_size)\n if list(new_size) == list(img.shape[-2:]):\n return img\n img = pad(img, new_size)\n img = crop(img, new_size)\n return img", "def pad_img_to(img, target_hw, location='upper-left', mode='constant'):\n if len(img.shape) == 3:\n pad = [None, None, (0, 0)]\n else:\n pad = [None, None]\n\n if location == 'upper-left':\n for i in range(2):\n pad[i] = (0, target_hw[i] - img.shape[i])\n\n elif location == 
'center':\n for i in range(2):\n excess = target_hw[i] - img.shape[i]\n x1 = math.ceil(excess / 2)\n x2 = excess - x1\n pad[i] = (x1, x2)\n\n else:\n raise ValueError('{} is not a valid location argument'.format(location))\n\n return np.pad(img, pad, mode=mode)", "def padImage(image, padList):\r\n\r\n #pad along far x:<---->\r\n padFarX= np.zeros((image.shape[0], image.shape[1], padList[0]))\r\n image= np.concatenate((image, padFarX), axis=2)\r\n\r\n #pad along far y\r\n padFarY= np.zeros((image.shape[0], padList[1], image.shape[2]))\r\n image= np.concatenate((image, padFarY), axis=1)\r\n\r\n #pad along far z\r\n padFarZ= np.zeros((padList[2], image.shape[1], image.shape[2]))\r\n image= np.concatenate((image, padFarZ), axis=0)\r\n\r\n #pad along close x, adjust center\r\n padCloseX= np.zeros((image.shape[0], image.shape[1], padList[3]))\r\n image= np.concatenate((padCloseX, image), axis=2)\r\n\r\n #pad along close y adjust center\r\n padCloseY= np.zeros((image.shape[0], padList[4], image.shape[2]))\r\n image= np.concatenate((padCloseY, image), axis=1)\r\n\r\n #pad along close z, adjust center\r\n padCloseZ= np.zeros((padList[5], image.shape[1], image.shape[2]))\r\n image= np.concatenate((padCloseZ, image), axis=0)\r\n\r\n\r\n #print \"PADDED IMAGE SHAPE: \" + str(image.shape)\r\n return image", "def pad_img(image, label):\n paddings = [[2,2],[2,2],[0,0]]\n return tf.pad(image, paddings, mode=\"CONSTANT\", constant_values=0.0), label", "def insert_padding(img, pad_h, pad_w):\n global frame_height, frame_width\n padding_3_dims = ((pad_h, pad_h), (pad_w, pad_w), (0, 0))\n # apply padding in the above dimensions with values 0\n padded_img = numpy.pad(img, padding_3_dims, 'constant', constant_values=0)\n return padded_img", "def expand_rect_padding(img_path, padding_x, padding_top, padding_bottom, out_path):\n pil_image_frame = Image.open(img_path)\n im_width, im_height = pil_image_frame.size \n \n n_width = im_width + 2 * padding_x\n n_height = im_height + padding_top + padding_bottom\n \n old_size = (im_width, im_height)\n new_size = (n_width, n_height)\n new_im = Image.new(\"RGB\", new_size, \"white\") \n new_im.paste(pil_image_frame, ((new_size[0]-old_size[0])/2, padding_top)) # insert image into center of new canvas with vertical shift = padding_top \n\n new_im.save(out_path, \"JPEG\")", "def zero_pad_and_crop_img(img, amount=4):\n padded_img = np.zeros((img.shape[0] + amount * 2, img.shape[1] + amount * 2,\n img.shape[2]))\n padded_img[amount:img.shape[0] + amount, amount: img.shape[1] + amount, :] = img\n top = np.random.randint(low=0, high=2 * amount)\n left = np.random.randint(low=0, high=2 * amount)\n new_img = padded_img[top:top + img.shape[0], left:left + img.shape[1], :]\n return new_img", "def add_padding(im, pad):\n\n return np.pad(im, pad_width=((pad, pad), (pad, pad), (0, 0)), mode='symmetric')", "def pad_image(\n self,\n image: np.ndarray,\n pad_size: Dict[str, int],\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n output_height, output_width = pad_size[\"height\"], pad_size[\"width\"]\n input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n\n pad_width = output_width - input_width\n pad_height = output_height - input_height\n\n padded_image = pad(\n image,\n ((0, pad_height), (0, pad_width)),\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )\n return padded_image", "def padding(src, min_size):\n # pad 
before put into convolutional layer\n src_dim = src.dim()\n if src_dim[0][1] >= min_size:\n return src\n pad_size = min_size - src_dim[0][1]\n channels = src_dim[0][2] if len(src_dim[0]) >= 3 else 1\n if pad_size == 1:\n return dy.concatenate([src, dy.zeroes((src_dim[0][0], 1, channels))], d=1)\n else:\n left_border = int(pad_size) / 2\n right_border = (int(pad_size)+1) / 2\n return dy.concatenate([dy.zeroes((src_dim[0][0], left_border, channels)), src, dy.zeroes((src_dim[0][0], right_border, channels))], d=1) # do concatenate along cols", "def image_pad(image, pad_width=None, axis=0, mode='symmetric'):\n hei, wid = image.shape[0], image.shape[1]\n\n if pad_width is None:\n th = hei // 10\n tw = wid // 10\n pad_width = ((th, th), (tw, tw), (0, 0))\n if axis == 0:\n if type(pad_width[0]) == tuple:\n pad_width = (pad_width[0], (0, 0), (0, 0))\n else:\n pad_width = (pad_width, (0, 0), (0, 0))\n if axis == 1:\n if type(pad_width[0]) == tuple:\n pad_width = ((0, 0), pad_width[1], (0, 0))\n else:\n pad_width = ((0, 0), pad_width, (0, 0))\n if len(image.shape) == 3:\n newimage = np.pad(image, pad_width, mode)\n elif len(image.shape) == 2:\n newimage = np.squeeze(np.pad(image[:, :, np.newaxis], pad_width, mode))\n\n return cv2.resize(newimage, (wid, hei), interpolation=cv2.INTER_NEAREST)", "def pad_to(tensor: torch.Tensor, target_length: int, mode: str = 'constant', value: float = 0):\n return F.pad(tensor, (0, target_length - tensor.shape[-1]), mode=mode, value=value)", "def pad(image_array, final_dims_in_pixels, zero_fill_mode=False):\n\n dims = len(final_dims_in_pixels)\n original_dims_in_pixels = [image_array.shape[d]\n for d in range(len(image_array.shape))]\n\n # test if input and output dimensions match\n if dims != len(original_dims_in_pixels):\n raise ValueError(\"Dimensions of the input (\" + str(len(image_array.shape)) +\n \") do not match those of output (\" + str(len(final_dims_in_pixels)) + \")\")\n\n # test if desired final image is larger than original\n if any(final_dims_in_pixels[d] < original_dims_in_pixels[d] for d in range(dims)):\n raise ValueError(\n \"Final dimensions are smaller than original. 
Did you mean to `crop`?\")\n\n padded_image_array = np.zeros(final_dims_in_pixels)\n new_first_image_pixel = [0 for i in range(dims)]\n new_last_image_pixel = [0 for i in range(dims)]\n\n for dim in range(dims):\n new_first_image_pixel[dim] = int(math.floor(\n (final_dims_in_pixels[dim] - original_dims_in_pixels[dim]) / 2))\n new_last_image_pixel[dim] = new_first_image_pixel[dim] + \\\n original_dims_in_pixels[dim]\n\n # for 2D:\n if dims == 2:\n padded_image_array[new_first_image_pixel[0]: new_last_image_pixel[0],\n new_first_image_pixel[1]: new_last_image_pixel[1]] = image_array\n elif dims == 3:\n padded_image_array[new_first_image_pixel[0]: new_last_image_pixel[0],\n new_first_image_pixel[1]: new_last_image_pixel[1],\n new_first_image_pixel[2]: new_last_image_pixel[2]] = image_array\n if zero_fill_mode:\n padded_image_array = padded_image_array*0.\n\n return(padded_image_array)", "def pil_pad_image(img, v_pad_before, v_pad_after, h_pad_before, h_pad_after, cval=None):\n # type: (PImage.Image, int, int, int, int, tuple) -> PImage.Image\n\n width = img.width + h_pad_before + h_pad_after\n height = img.height + v_pad_before + v_pad_after\n mode = img.mode\n\n if width == img.width and height == img.height:\n return img\n\n # Make sure the cval is in the correct format if None default to black\n if cval is not None:\n if isinstance(cval, float):\n cval = int(round(cval))\n elif isinstance(cval, int):\n cval = cval\n else:\n cval = np.round(cval).astype(dtype=np.int32)\n cval = tuple(cval)\n else:\n cval = 0\n\n try:\n padded_img = PImage.new(mode=mode, size=(width, height), color=cval)\n padded_img.paste(img, box=(h_pad_before, v_pad_before))\n except TypeError as e:\n print 'ERROR: Could not create new PIL image PImage.new(mode={}, size={}, color={}), error: {}'.format(mode, (width, height), cval, e.message)\n raise e\n\n return padded_img", "def image_pad(image, pixel_loc_x, pixel_loc_y):\r\n input_size = np.shape(image)\r\n padded_image = np.zeros((input_size[0]+200, input_size[1]+200, 1))\r\n if np.size(input_size) == 2:\r\n padded_image[:, :, 0] = skut.pad(image[:, :], 100, mode='constant', constant_values=float(0))\r\n else:\r\n for i in range(input_size[2]):\r\n if i == 0:\r\n padded_image[:, :, 0] = skut.pad(image[:, :, i], 100, mode='constant', constant_values=float(0))\r\n else:\r\n padded_dim = np.zeros((input_size[0]+200, input_size[1]+200, 1))\r\n padded_dim[:, :, 0] = skut.pad(image[:, :, i], 100, mode='constant', constant_values=float(0))\r\n padded_image = np.append(padded_image, padded_dim, axis=2)\r\n pixel_loc_x = pixel_loc_x + 100\r\n pixel_loc_y = pixel_loc_y + 100\r\n return padded_image, pixel_loc_x, pixel_loc_y", "def pad_images(_input_image_paths : list[str], _output_image_dir : str, \\\n _pad_colour : tuple[int,int,int]) -> None:\n for image in _input_image_paths:\n with Image.open(image) as image_object:\n\n #Rotate the image based on the EXIF data's orientation tag.\n #Ensures that images taller than they are wide are kept as such when padding\n image_object = PIL.ImageOps.exif_transpose(image_object)\n\n old_x,old_y = image_object.size\n bigger_dimension = max(old_x,old_y)\n\n #Figure out how much extra should be added to each of the four sides\n x_additive = y_additive = 0\n if old_x > old_y:\n y_additive = (old_x - old_y)//2\n\n elif old_y > old_x:\n x_additive = (old_y - old_x)//2\n\n #Create a new, larger image with the requested padding colour,\n # and then paste the original image overtop in the correct position\n new_canvas = Image.new(\"RGB\", 
(bigger_dimension,bigger_dimension), _pad_colour)\n new_canvas.paste(image_object, (x_additive, y_additive))\n new_canvas.save(_output_image_dir + os.path.basename(image))" ]
[ "0.75301945", "0.7414062", "0.73439056", "0.72765875", "0.7182036", "0.7022596", "0.6945628", "0.6880894", "0.6790839", "0.6768397", "0.6740291", "0.6721193", "0.66918796", "0.6651381", "0.65719867", "0.6508936", "0.6486176", "0.64239997", "0.64179003", "0.637561", "0.6374147", "0.63447636", "0.63198537", "0.62972635", "0.62915725", "0.6290412", "0.62793", "0.6225504", "0.6203826", "0.62031466" ]
0.83163095
0
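The `pad_image` record above pads an NCHW batch only at the bottom and right with zeros. A quick runnable check of that behaviour, reusing the function as shown in the record (the 713/768 sizes are arbitrary example values):

```python
# Usage sketch for the pad_image helper shown in this record (assumes the
# NCHW layout implied by indexing shape[2] and shape[3] in its body).
import numpy as np

def pad_image(img, target_size):
    rows_missing = target_size[0] - img.shape[2]
    cols_missing = target_size[1] - img.shape[3]
    return np.pad(img, ((0, 0), (0, 0), (0, rows_missing), (0, cols_missing)), 'constant')

batch = np.ones((1, 3, 713, 713), dtype=np.float32)   # one RGB image, 713x713
padded = pad_image(batch, (768, 768))
assert padded.shape == (1, 3, 768, 768)
assert padded[..., 713:, :].sum() == 0                # extra rows are zero-filled
```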
Calculate the confusion matrix from the given ground-truth labels and predictions
def get_confusion_matrix(gt_label, pred_label, class_num): index = (gt_label * class_num + pred_label).astype('int32') label_count = np.bincount(index) confusion_matrix = np.zeros((class_num, class_num)) for i_label in range(class_num): for i_pred_label in range(class_num): cur_index = i_label * class_num + i_pred_label if cur_index < len(label_count): confusion_matrix[i_label, i_pred_label] = label_count[cur_index] return confusion_matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_confusion_matrix(label, pred, num_class, ignore=255):\n\toutput = pred.cpu().numpy().transpose(0, 2, 3, 1)\n\t#mask = label.cpu().numpy().transpose(0, 2, 3, 1)\n\tseg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)\n\t#seg_gt = np.asarray(np.argmax(mask, axis=3), dtype=np.int)\n\tseg_gt = label.cpu().numpy()\n\n\tignore_index = seg_gt != ignore\n\tseg_gt = seg_gt[ignore_index]\n\tseg_pred = seg_pred[ignore_index]\n\n\tindex = (seg_gt * num_class + seg_pred).astype('int32')\n\tlabel_count = np.bincount(index)\n\tconfusion_matrix = np.zeros((num_class, num_class))\n\n\tfor i_label in range(num_class):\n\t\tfor i_pred in range(num_class):\n\t\t\tcur_index = i_label * num_class + i_pred\n\t\t\tif cur_index < len(label_count):\n\t\t\t\tconfusion_matrix[i_label,\n\t\t\t\t\t\t\t\t i_pred] = label_count[cur_index]\n\treturn confusion_matrix", "def get_confusion_matrix(gt_label, pred_label, class_num, ignore_label): #seg_gt, seg_pred, args.num_classes\n\n pred_label = pred_label.flatten()\n if torch.is_tensor(gt_label) == True:\n gt_label = gt_label.cpu().detach().numpy()\n\n gt_label = gt_label.flatten()\n\n valid_flag = gt_label != ignore_label\n valid_inds = np.where(valid_flag)[0]\n\n pred_label = pred_label[valid_flag]\n gt_label = gt_label[valid_flag]\n\n index = (gt_label * class_num + pred_label).astype('int32') #gt_label(array([0, 1]), array([316446, 12684])) pred_label (array([0, 1], dtype=uint8), array([ 77728, 251402]))\n\n label_count = np.bincount(index)\n\n confusion_matrix = np.zeros((class_num, class_num))\n\n for i_label in range(class_num):\n for i_pred_label in range(class_num):\n cur_index = i_label * class_num + i_pred_label\n if cur_index < len(label_count):\n confusion_matrix[i_label, i_pred_label] = label_count[cur_index]\n\n return confusion_matrix", "def get_confusion_matrix(gt_label, pred_label, class_num):\n index = (gt_label * class_num + pred_label).astype('int32')\n label_count = np.bincount(index)\n confusion_matrix = np.zeros((class_num, class_num))\n\n for i_label in range(class_num):\n for i_pred_label in range(class_num):\n cur_index = i_label * class_num + i_pred_label\n if cur_index < len(label_count):\n confusion_matrix[i_label, i_pred_label] = label_count[cur_index]\n\n return confusion_matrix", "def confusion_matrix(self,predictions,labels):\n TP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == True))\n FP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == False))\n FN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == True))\n TN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == False))\n\n return np.array([[TP,FP],[FN,TN]])", "def confusion_matrix_(y_true, y_pred, labels=None):\r\n tp = 0\r\n tn = 0\r\n fp = 0\r\n fn = 0\r\n if labels == None:\r\n values = list(set(y_true))\r\n else:\r\n values = labels\r\n if (len(values)) != 2:\r\n return None\r\n for i, elem in enumerate(y_true):\r\n if y_pred[i] == values[1] and y_true[i] == y_pred[i]:\r\n tp += 1\r\n elif y_pred[i] == values[1] and y_true[i] != y_pred[i]:\r\n fp += 1\r\n elif y_pred[i] == values[0] and y_true[i] == y_pred[i]:\r\n tn += 1\r\n elif y_pred[i] == values[0] and y_true[i] != y_pred[i]:\r\n fn += 1\r\n matrix = np.array([[tp, fp], [fn, tn]])\r\n return matrix", "def confusion_matrix(gt, pred) -> np.ndarray:\n \n # Number of classes inferred from gt. 
Assuming classes are enumerated 0 ..\n n_classes = gt.max() + 1\n cm = np.zeros((n_classes, n_classes), dtype=np.uint32)\n \n # Fill matrix\n for gt_class in range(n_classes):\n for pred_class in range(n_classes):\n cm[pred_class, gt_class] = ((pred == pred_class) & (gt == gt_class)).sum()\n \n return cm", "def confusion_matrix(pred, target, num_classes=21):\n mat = np.zeros((num_classes, num_classes))\n for c in range(num_classes):\n mask = target == c\n if mask.any():\n vec, _ = np.histogram(pred[mask],\n bins=np.arange(num_classes+1))\n mat[c, :] += vec\n\n return mat", "def confusion_matrix(y_true, y_pred, labels):\n\n #Define variables\n matrix = []\n #Creates matrix dimensions\n for i in range(len(labels)):\n matrix.append([])\n for j in range(len(labels)):\n matrix[i].append(0)\n\n for i in range(len(y_true)):\n trueIndex = -1\n predIndex = -1\n #Get indexes of true and predicted values\n for j, label in enumerate(labels):\n if(label == y_true[i]):\n trueIndex = j\n if(label == y_pred[i]):\n predIndex = j\n matrix[trueIndex][predIndex] = matrix[trueIndex][predIndex] + 1\n\n return matrix", "def getConfusionMatrix(pred, real):\n # print pd.crosstab(pred, real) \n \n total = float(real.shape[0])\n \n tp = 0 # true positive\n tn = 0 # true negitive\n fp = 0 # false positive\n fn = 0 # false negitive\n for predicted, actual in zip(pred, real):\n if predicted == actual:\n if predicted == 1:\n tp += 1\n else:\n tn += 1\n else:\n if predicted == 1:\n fp += 1\n else:\n fn += 1\n \n\n print \"(tp, tn, fp, fn):\" , tp, tn, fp, fn\n print \"accuracy is :\", (tp+tn)/total", "def get_confusion_matrix(y_true, y_pred):\r\n\r\n ## 3 classes\r\n TP1, TP2, TP3, FP1, FP2, FP3, TN1, TN2, TN3, FN1, FN2, FN3 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 0 and y_pred[i] == 0:\r\n TN1 += 1\r\n elif y_true[i] == 0 and y_pred[i] != 0:\r\n FP1 += 1\r\n elif y_true[i] != 0 and y_pred[i] == 0:\r\n FN1 += 1\r\n elif y_true[i] != 0 and y_pred[i] != 0:\r\n TP1 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 1 and y_pred[i] == 1:\r\n TN2 += 1\r\n elif y_true[i] == 1 and y_pred[i] != 1:\r\n FP2 += 1\r\n elif y_true[i] != 1 and y_pred[i] == 1:\r\n FN2 += 1\r\n elif y_true[i] != 1 and y_pred[i] != 1:\r\n TP2 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 2 and y_pred[i] == 2:\r\n TN3 += 1\r\n elif y_true[i] == 2 and y_pred[i] != 2:\r\n FP3 += 1\r\n elif y_true[i] != 2 and y_pred[i] == 2:\r\n FN3 += 1\r\n elif y_true[i] != 2 and y_pred[i] != 2:\r\n TP3 += 1\r\n\r\n conf_matrix1 = [\r\n [TP1, FP1],\r\n [FN1, TN1]\r\n ]\r\n conf_matrix2 = [\r\n [TP2, FP2],\r\n [FN2, TN2]\r\n ]\r\n conf_matrix3 = [\r\n [TP3, FP3],\r\n [FN3, TN3]\r\n ]\r\n\r\n return conf_matrix1, conf_matrix2, conf_matrix3", "def confusion_matrix(y_true, y_pred, labels):\r\n matrix = []\r\n\r\n for i, yt in enumerate(labels):\r\n matrix.append([])\r\n for _, yp in enumerate(labels):\r\n matrix[i].append(0)\r\n\r\n for t, p in zip(y_true, y_pred):\r\n t_num = labels.index(t)\r\n p_num = labels.index(p)\r\n matrix[t_num][p_num] += 1\r\n\r\n return matrix", "def get_confusion_matrix(true_label, predictions, num_index):\n class_matrix = np.zeros(shape=(num_index, num_index))\n false_group = [[] for _ in range(num_index)]\n for idx, true, pred in zip(range(len(predictions)),true_label, predictions):\n class_matrix[true][pred] += 1\n if true != pred:\n false_group[true].append(idx)\n return class_matrix, false_group", "def get_confusion_matrix(labels_true: np.ndarray, 
labels_pred: np.ndarray) -> sparse.csr_matrix:\n check_vector_format(labels_true, labels_pred)\n mask = (labels_true >= 0) & (labels_pred >= 0)\n if np.sum(mask):\n n_labels = max(max(labels_true), max(labels_pred)) + 1\n row = labels_true[mask]\n col = labels_pred[mask]\n data = np.ones(np.sum(mask), dtype=int)\n return sparse.csr_matrix((data, (row, col)), shape=(n_labels, n_labels))\n else:\n raise ValueError('No sample with both true non-negative label and predicted non-negative label.')", "def compute_confuse_matrix(y_targetlabel_list_single, y_logit_array_single, label_dict, name='default'):\n #1.get target label and predict label\n # y_target_labels=get_target_label_short(y_targetlabel_list_single) #e.g. y_targetlabel_list[0]=[2,12,88]\n y_target_labels = y_targetlabel_list_single\n\n # y_predict_labels=[i for i in range(len(y_logit_array_single)) if y_logit_array_single[i]>=0.50] #TODO 0.5PW e.g.[2,12,13,10]\n # y_predict_labels= y_logit_array_single.index(min(y_logit_array_single))\n\n flag = max(y_logit_array_single)\n y_predict_labels = []\n for i in range(len(y_logit_array_single)):\n if abs(y_logit_array_single[i] - flag) < 0.1:\n y_predict_labels.append(i)\n\n a = list(set(y_target_labels))\n b = list(set(y_predict_labels))\n acc = operator.eq(a,b)\n\n #if len(y_predict_labels)<1: y_predict_labels=[np.argmax(y_logit_array_single)] #TODO ADD 2018.05.29\n if random.choice([x for x in range(random_number)]) ==1:\n print(name+\".y_target_labels:\",y_target_labels,\";y_predict_labels:\",y_predict_labels) #debug purpose\n\n #2.count number of TP,FP,FN for each class\n y_labels_unique=[]\n y_labels_unique.extend(y_target_labels)\n y_labels_unique.extend(y_predict_labels)\n y_labels_unique=list(set(y_labels_unique))\n for i,label in enumerate(y_labels_unique): #e.g. 
label=2\n TP, FP, FN = label_dict[label]\n if label in y_predict_labels and label in y_target_labels:#predict=1,truth=1 (TP)\n TP=TP+1\n elif label in y_predict_labels and label not in y_target_labels:#predict=1,truth=0(FP)\n FP=FP+1\n elif label not in y_predict_labels and label in y_target_labels:#predict=0,truth=1(FN)\n FN=FN+1\n label_dict[label] = (TP, FP, FN)\n return label_dict, acc", "def confusion_matrix(classifier_output, true_labels):\n\n # TODO: finish this.\n true_pos = 0.0\n true_neg = 0.0\n false_neg = 0.0\n false_pos = 0.0\n for elem1,elem2 in zip(classifier_output, true_labels):\n if(elem1==elem2) and (elem1==1):\n true_pos += 1\n elif(elem1==elem2) and (elem2!=1):\n true_neg += 1\n elif(elem1 != 1):\n false_neg +=1\n else:\n false_pos +=1\n conf_matrix = np.array([[true_pos, false_neg],[false_pos, true_neg]])\n return conf_matrix", "def confusion_matrix(preds, labels, conf_matrix):\n for p, t in zip(preds, labels):\n conf_matrix[t, p] += 1\n return conf_matrix", "def _prep_confusion_matrix(self, y_test, y_pred, labels):\n\n # Calculate confusion matrix and flatten it to a simple array\n if len(y_test.shape) == 1:\n confusion_array = metrics.confusion_matrix(y_test, y_pred).ravel()\n\n # Structure into a DataFrame suitable for Qlik\n result = []\n i = 0\n for t in labels:\n for p in labels:\n result.append([str(t), str(p), confusion_array[i]])\n i = i + 1\n self.model.confusion_matrix = pd.DataFrame(result, columns=[\"true_label\", \"pred_label\", \"count\"])\n self.model.confusion_matrix.insert(0, \"model_name\", self.model.name)\n # Handle confusion matrix format for multi-label classification\n else:\n confusion_array = metrics.multilabel_confusion_matrix(y_test, y_pred)\n result = pd.DataFrame(confusion_array.reshape(-1, 4), columns=[\"true_negative\", \"false_positive\", \"false_negative\", \"true_positive\"])\n self.model.confusion_matrix = pd.DataFrame(np.arange(len(confusion_array)), columns=[\"step\"])\n self.model.confusion_matrix = pd.concat([self.model.confusion_matrix, result], axis=1)\n self.model.confusion_matrix.insert(0, \"model_name\", self.model.name)", "def calc_ac_score(labels_true, labels_pred):\n nclass = len(np.unique(labels_true))\n labels_size = len(labels_true)\n mat = labels_size * np.ones((nclass, nclass))\n \n idx = 0\n \n for i in range(labels_size):\n mat[labels_pred[i], labels_true[i]] -= 1.0\n \n munkres = Munkres()\n mapping = munkres.compute(mat)\n \n ac = 0.0\n\n for i in range(labels_size):\n val = mapping[labels_pred[i]][1]\n if val == labels_true[i]:\n ac += 1.0\n\n ac = ac / labels_size \n \n return ac", "def create_confusion_matrix(labels, logits):\n L = labels.shape[1]\n result = np.zeros((L, L))\n result = np.matmul(labels.T, logits)\n\n return result", "def compute_confuse_matrix_batch(y_logits_array, y_targetlabel_list, label_dict, name='default'):\n #print('*****************y_logits_array********************')\n #print(y_logits_array[0])\n #print('*****************y_targetlabel_list********************')\n #print(y_targetlabel_list[0])\n acc = 0\n for i, y_logits_array_single in enumerate(y_logits_array):\n label_dict, m = compute_confuse_matrix(y_targetlabel_list[i], y_logits_array_single, label_dict, name=name)\n if m == True:\n acc = acc+1\n print('##################### acc ######################')\n print(acc*1.0/len(y_logits_array ))\n\n return label_dict", "def get_confmatrix(self,y_pred,y_test):", "def confusion_matrix(df):\n rows, true_counts = np.unique(df[\"label\"].values, return_counts=True)\n cols, 
predicted_counts = np.unique(df[\"label\"].values, return_counts=True)\n\n matrix = np.ndarray(shape=(len(rows), len(cols)), dtype=float)\n for ri, row in enumerate(rows):\n for ci, col in enumerate(cols):\n matrix[ri][ci] = len(df[(df.label == row) & (df.classification == col)])\n\n return matrix, rows, cols", "def custom_confusion_matrix(predictions, targets):\n tp, fp, fn, tn = [], [], [], []\n\n for pred, targ in zip(predictions, targets):\n for shift_pred, shift_targ in zip(pred, targ):\n if shift_pred == 1 and shift_targ == 1: # True positive\n tp.append(1)\n elif shift_pred == 1 and shift_targ == 0: # False positive\n fp.append(1)\n elif shift_pred == 0 and shift_targ == 1: # False negative\n fn.append(1)\n elif shift_pred == 0 and shift_targ == 0: # True negative:\n tn.append(1)\n\n tp_count = len(tp)\n fp_count = len(fp)\n fn_count = len(fn)\n tn_count = len(tn)\n\n conf_matrix = np.array([\n [tp_count, fp_count],\n [fn_count, tn_count]\n ])\n\n return conf_matrix", "def _confusion_matrix(self, actual, classify, p_sum, reject):\r\n if self.preds is None:\r\n self._predict(classify)\r\n x_actu = pd.Series(actual, name='Actual')\r\n if reject:\r\n y_pred = pd.Series(self.preds[:, 0, 0], name='Predicted')\r\n else:\r\n y_pred = pd.Series(self.preds[:, 0], name='Predicted')\r\n if len(pd.Series(pd.unique(y_pred)).dropna()) == len(np.unique(actual)): # Check if the number of different\r\n # target in y pred is the same than is actual\r\n return pd.crosstab(x_actu, y_pred, margins=p_sum, dropna=False)\r\n else:\r\n df = pd.crosstab(x_actu, y_pred, margins=p_sum, dropna=False)\r\n mask = np.in1d(np.unique(actual), np.unique(y_pred)) # Add the missing targets to y_pred\r\n if p_sum:\r\n column_z = [0] * (len(np.unique(actual)) + 1)\r\n else:\r\n column_z = [0] * len(np.unique(actual))\r\n for idx in np.where(~mask)[0]:\r\n df.insert(loc=int(idx), column=self.targets[idx], value=column_z) # Add a zero column in the matrix\r\n return df", "def confusion_matrix(y_true, y_pred, table_show=True):\n\tFIRST_CLASS = 1\n\tSECOND_CLASS = 0\n\n\tzipped = np.array(list(zip(y_true, y_pred)))\n\ttp, fn, fp, tn = 0, 0, 0, 0\n\n\tfor y_true, y_pred in zipped:\n\t\tif y_true == y_pred and y_true == FIRST_CLASS:\n\t\t\ttp += 1\n\t\telif y_true == y_pred and y_true == SECOND_CLASS:\n\t\t\ttn += 1\n\t\telif y_true != y_pred and y_true == SECOND_CLASS:\n\t\t\tfp += 1\n\t\telse:\n\t\t\tfn += 1\n\n\tif table_show:\n\t\treturn np.array([tp, fn, fp, tn]).reshape([2,2])\n\n\treturn tp, fn, fp, tn", "def compute_confusion_matrix(model, lb, all_images, true_labels):\n\n # # load the model and label binarizer\n # print(\"[INFO] loading network and label binarizer...\")\n # model = load_model(model)\n # lb = pickle.loads(open(label_bin, \"rb\").read())\n\n lab2i = {label: j for j, label in enumerate(lb.classes_)}\n print(f\"Lab2i {lab2i}\")\n\n # make a prediction on the image\n preds = model.predict(all_images)\n print(f\"Shape preds {preds.shape}\")\n # print(f'Preds {preds}')\n\n all_best_i = preds.argmax(axis=1)\n print(f\"Shape all_best_i {all_best_i.shape}\")\n\n confusion = np.zeros((len(lb.classes_), len(lb.classes_)), dtype=np.uint16)\n\n for j, pro in enumerate(preds):\n # i = pro.argmax(axis=1)\n i = pro.argmax(axis=0)\n predicted_label = lb.classes_[i]\n correct = \"TRUE\" if true_labels[j] == predicted_label else \"FALSE\"\n print(\n f\"True: {true_labels[j]}\\tPredicted {predicted_label} with {pro[i]*100:.4f}%\\t{correct}\"\n )\n\n confusion[lab2i[predicted_label], lab2i[true_labels[j]]] += 1\n\n # 
print(f'Confusion matrix\\n{confusion}')\n return confusion, lb.classes_", "def confusion_matrix(expected, predicted):\n\n retval = numpy.zeros((10,10), dtype=float)\n\n for k in range(10):\n pred_k = predicted[expected==k] # predictions that are supposed to be 'k'\n retval[:,k] = numpy.array([len(pred_k[pred_k==p]) for p in range(10)])\n retval[:,k] /= len(pred_k)\n\n return retval", "def _multiclass_confusion_matrix_update(preds: Tensor, target: Tensor, num_classes: int) ->Tensor:\n unique_mapping = target * num_classes + preds\n bins = _bincount(unique_mapping, minlength=num_classes ** 2)\n return bins.reshape(num_classes, num_classes)", "def get_confusion_matrix(scores, labels):\n C = scores.size(-1)\n y_pred = scores.detach().cpu().numpy().reshape(-1, C) # (N, C)\n y_pred = np.argmax(y_pred, axis=1) # (N,)\n\n y_true = labels.detach().cpu().numpy().reshape(-1,)\n\n y = np.bincount(C * y_true + y_pred, minlength=C * C)\n\n if len(y) < C * C:\n y = np.concatenate([y, np.zeros((C * C - len(y)), dtype=np.long)])\n else:\n if len(y) > C * C:\n warnings.warn(\n \"Prediction has fewer classes than ground truth. This may affect accuracy.\"\n )\n y = y[-(C * C):] # last c*c elements.\n\n y = y.reshape(C, C)\n\n return y", "def confusion_matrix(self, y_true=None, y_pred=None, labels=None, normalize=None, **kwargs):\n y_true, y_pred, binary, representor, decimal = self.get_processed_data(y_true, y_pred, decimal=None)\n matrix, imap, imap_count = cu.calculate_confusion_matrix(y_true, y_pred, labels, normalize)\n return matrix, imap, imap_count" ]
[ "0.7771614", "0.75971806", "0.7573982", "0.7315963", "0.730737", "0.7280311", "0.71592414", "0.71516883", "0.71338004", "0.7126695", "0.7106781", "0.6955897", "0.6951744", "0.69315773", "0.69232666", "0.6904852", "0.6879925", "0.6878973", "0.6876468", "0.6837224", "0.6835314", "0.6815069", "0.67771167", "0.6748935", "0.67432773", "0.67248315", "0.6717838", "0.6704325", "0.66650087", "0.6644909" ]
0.7601832
1
Try to create a track, with a bad source_id. Returns a 404 response with detail message.
def test_create_with_bad_id(self): # Count the number of records before the save existing_records_count = Track.objects.all().count() post_data = {'source_type': 'spotify', 'source_id': 00} resp = self.api_client.post('/api/metadata/tracks/', data=post_data) data = json.loads(resp.content) new_records_count = Track.objects.all().count() # Ensure the request filed with a 404, and an error message is returned self.assertEqual(resp.status_code, 404) self.assertEqual(existing_records_count, new_records_count) self.assertEqual(data['detail'], u'The record could not be found.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_with_bad_backend(self):\n # Count the number of records before the save\n post_data = {\n 'source_type': 'test',\n 'source_id': '4bCOAuhvjsxbVBM5MM8oik',\n }\n resp = self.api_client.post('/api/metadata/tracks/', data=post_data)\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'The record could not be found.')", "def test_log_track_invalid_source(self):\n with self.assertRaises(Exception):\n self.app.log_track(self.track_obj('silence.mp3'), source='foo')\n self.assertEqual(self.get_track_count(), 0)", "def test_retrieve_with_bad_id(self):\n resp = self.api_client.get('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'Not found')", "def test_insert_invalid_source(self):\n track = Track(artist='Artist', title='Title')\n with self.assertRaises(Exception):\n pk = track.insert(self.app.db,\n self.app.curs,\n 'foobar',\n datetime.datetime.now())", "def test_create_task_invalid_task_id_error(self):\n task_id = \"unk\"\n rv = TEST_CLIENT.post(\n TASK_ROUTE,\n json={\n \"copyFrom\": task_id,\n },\n )\n result = rv.json()\n\n expected = {\n \"message\": \"source task does not exist\",\n \"code\": \"InvalidTaskId\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def handleStatus_404(self):\n log.err('HTTP Error 404')", "def test_404(self):\n response = self.make_call(origin='Milano Lambrate', destination='Milano Cadorna')\n self.assert400(response)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.htsv.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_report_id_not_found(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n \"/_synapse/admin/v1/event_reports/123\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n self.assertEqual(\"Event report not found\", channel.json_body[\"error\"])", "def check_upload_source_log_exists(upload_id: int) -> Response:\n try:\n workspace: Workspace = database.retrieve(upload_id)\n except IOError as ioe:\n logger.error(\"%s: SourceLogExistCheck: There was a problem connecting\"\n \" to database: %s\", upload_id, ioe)\n raise InternalServerError(messages.UPLOAD_DB_CONNECT_ERROR)\n except database.WorkspaceNotFound as nf:\n logger.info(\"%s: Workspace not found: '%s'\", upload_id, nf)\n raise NotFound(messages.UPLOAD_NOT_FOUND) from nf\n\n logger.info(\"%s: Test for source log.\", upload_id)\n headers = {\n 'ETag': workspace.log.checksum,\n 'Content-Length': workspace.log.size_bytes,\n 'Last-Modified': workspace.log.last_modified,\n 'ARXIV-OWNER': workspace.owner_user_id\n }\n return {}, status.OK, headers", "def testBadTrackType(self):\n\n self.assertRaises(\n ValueError,\n tools._trackInfo,\n 'Track ID 5: telepathic (junk for the rest'\n )", "def video_no_found(error):\n return {'message': 'video does not exist'}, 404", "def test_load_missing_file(self):\n # Technically there's a race condition here, but... 
I'm not\n # particularly fussed about it.\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n track = Track.from_filename(filename)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_view_with_invalid_pk(self):\n response = self.client.get(self.get_url(self.study.pk + 1))\n self.assertEqual(response.status_code, 404)", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)", "def test_bad_pk(self):\n self.url_kwargs[self.pk_url_kwarg] = 1234\n response = self._get()\n self.assertEquals(response.status_code, 404)", "def test_delete_with_bad_id(self):\n resp = self.api_client.delete('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'The record could not be found.')", "def test_create_episode_missing_study(self):\n _, session_id = self.init_session()\n episode = sample_episode(study_id='missing', session_id=session_id)\n with self.assertRaises(ValueError):\n self.storage.create_episode(episode)", "def test_nonexisting_event(self):\n response = self.client.get(\"/events/1\")\n self.assertEqual(response.status_code, 404)", "def test_make_request_error(self):\n response = Helper.make_request(self.url_404)\n self.assertEqual(response.status_code, 404)", "def test_get_stream_bad_status(req):\n req.get(ENTREZ_URL, text=u'Nope!', status_code=404)\n params = dict(id='FAKE')\n with pytest.raises(InvalidIdError):\n core.get_stream(ENTREZ_URL, params)", "def test_household_bad_create(self):\n tester = app.test_client(self)\n response = tester.post('/household/', data = {}, follow_redirects=True)\n self.assertEqual(response.status_code, 400)", "def test_id_missing(self) -> None:\n with pytest.raises(NotFound):\n ObservationType.from_id(-1)", "async def test_txn_get_with_bad_id(self):\n self.stream.preset_response(self.status.NO_RESOURCE)\n response = await self.get_assert_status('/transactions/bad', 404)\n\n self.assert_has_valid_error(response, 72)", "def test_getting_one_question_with_invalid_questionId(self):\n response = self.get_one_question_with_invalid_questionId()\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def _taco_test_post_param_new_404(self):\n body = '{ \"id\": 500, \"name\": \"item5\", \"content\": \"qwerwqer5\" }'\n env = self.get_env('POST', '/item/5', body=body)\n result = webapi_start(env, lambda status, response_headers: self.assertEqual(status, '404'))[0]" ]
[ "0.7134957", "0.65041846", "0.6113626", "0.5729739", "0.55509627", "0.5464614", "0.5420374", "0.5404015", "0.54009336", "0.5392408", "0.5372617", "0.53524536", "0.53095245", "0.53046906", "0.53046906", "0.53046906", "0.53046906", "0.5304292", "0.5304292", "0.5304292", "0.5272363", "0.52649075", "0.5212095", "0.52097183", "0.52065885", "0.5199609", "0.5179015", "0.516782", "0.51117057", "0.5108497" ]
0.7263801
0
Remove a track from the database Returns a successful response, with a detail message.
def test_delete(self): # Count the number of records before the save existing_records_count = Track.objects.all().count() resp = self.api_client.delete('/api/metadata/tracks/2/') data = json.loads(resp.content) new_records_count = Track.objects.all().count() # Ensure request was successful, and the record is removed from the database. # Should return with a success message. self.assertEqual(resp.status_code, 200) self.assertEqual(existing_records_count-1, new_records_count) self.assertEqual(data['detail'], 'Track successfully removed')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_music():\n track_id = request.vars.track_id\n if track_id is None:\n raise HTTP(500)\n db(db.track_data.track_id == track_id).delete()\n return \"ok\"", "def Delete(self):\n\n self.db.ExecuteSql('delete from tracks where id=%d;'\n % self.persistant['id'])\n self.db.ExecuteSql('commit;')", "def remove_track(self, track_uri):\n if track_uri in self.trackToPlay:\n del self.trackToPlay[track_uri]\n else:\n raise KeyError('Track not in tracklist')", "def track_del(self,posicion):\n self.tracks.pop(posicion)", "def delete(self):\r\n delete_tracks(self.project, [self])", "def delete(self, show_id):\r\n song = Shows.query.filter_by(ShowID=show_id).first_or_404()\r\n db.session.delete(song)\r\n db.session.commit()\r\n return make_response(\"\", 204)", "def remove(self):\n instance = self.get_object() \n instance.delete() \n return self.response(status='Successfully Delete')", "def deleteid(self, track_id):\n yield from self.command('deleteid {}'.format(track_id))\n return True", "def test_delete_with_bad_id(self):\n resp = self.api_client.delete('/api/metadata/tracks/100000/')\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'The record could not be found.')", "def remove(request, music_id: int) -> HttpResponseRedirect:\n music_item = get_object_or_404(Music, id=music_id)\n request.user.profile.playlist.remove(music_item)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))", "def current_user_saved_tracks_delete(self, tracks=None, **kwargs):\n track_list = []\n if tracks is not None:\n track_list = map(self._get_track_id, tracks)\n return self._delete(API.MY_TRACKS.value, ids=\",\".join(track_list), **kwargs)", "def remove_record():\n # could use .../record/<name> in URL or as in this case as an argument .../record?name=bob\n if 'name' not in request.args:\n return \"need a name to delete a record!\", 400\n with RECORD_LOCK:\n if len([r for r in RECORDS if r.get('name') == request.args.get('name')]) == 0:\n return \"no such record found!\", 409\n RECORDS[:] = [r for r in RECORDS if r.get( # copy all but name matches\n 'name') != request.args.get('name')]\n return \"OK\"", "def remove(self):\n self._switch.odlclient._request(self._path, method=\"delete\")", "def delete_song(song):\n logging.debug('{CRUD_operations} BEGIN function delete_song()')\n logging.debug('{CRUD_operations} Data received: song: %s', song)\n song.is_deleted = True\n logging.debug('{CRUD_operations} END function delete_song()')", "def delete(uid: int):\n\n if not (genre := Genre.query.get(uid)):\n raise NotFoundError\n try:\n db.session.delete(genre)\n db.session.commit()\n except Exception:\n raise BadRequestError\n return \"\", 204", "def test_api_can_delete_music(self):\n music = Music.objects.get()\n response = self.client.delete(\n reverse('details', kwargs={'pk': music.id}),\n format = \"json\",\n follow = True\n )\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)", "def delete_record(self):\r\n try:\r\n db.session.delete(self)\r\n db.session.commit()\r\n return {\"error\": False}\r\n except exc.SQLAlchemyError as e: # pragma: no cover\r\n # print(e)\r\n # print(sys.exc_info())\r\n db.session.rollback()\r\n return {\"error\": True}\r\n finally:\r\n db.session.close()", "def delete(self, player_id):\n current_player = DBPlayer.query.get(player_id)\n if not current_player:\n return get_response(404, 'Not exists.')\n try:\n 
db.session.delete(current_player)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n return get_response(400, \"{e}\".format(e=str(e)))\n return get_response(200, 'done!')", "def remove_song_from_pl(self, song, pl):\n to_send = self.db.remove_song_from_pl(song, pl)\n if not to_send:\n to_send = SUCCESS\n self.send_message(to_send)", "def user_playlist_remove_specific_occurrences_of_tracks(\n self, playlist_id, tracks, snapshot_id=None, **kwargs\n ):\n _id = self._get_playlist_id(playlist_id)\n ftracks = []\n for tr in tracks:\n ftracks.append(\n {\"uri\": self._get_uri(\"track\", tr[\"uri\"]), \"positions\": tr[\"positions\"]}\n )\n payload = {\"tracks\": ftracks}\n if snapshot_id:\n payload[\"snapshot_id\"] = snapshot_id\n # pylint: disable=no-member\n return self._delete(\n API.PLAYLIST_TRACKS.value.format(playlist_id=_id),\n payload=payload,\n **kwargs,\n )", "def StopTrack(self):\n handler = self.get_command_object(\"StopTrack\")\n handler()", "def remove_song(self, song):\n # code omitted\n self.playlist.remove(song)", "def remove_measurement():\n meas_id = request.args.get('id', type=int)\n if meas_id is not None:\n db.session.query(Measurement).filter(Measurement.id == meas_id).delete()\n db.session.commit()\n\n return redirect('/measurements')", "def test_handle_remove(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n test_user.github_username = \"githubuser\"\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n other_user = User(\"anotheruser\")\n other_user.github_id = \"githubID\"\n other_user.github_username = \"myuser\"\n self.db.retrieve.side_effect = [test_user, other_user,\n test_user, other_user]\n self.db.query.return_value = [team]\n team_attach = [team.get_attachment()]\n with self.app.app_context():\n self.testcommand.handle(\"team add brs ID\", user)\n resp, code = self.testcommand.handle(\"team remove brs ID\", user)\n expect = {'attachments': team_attach,\n 'text': 'Removed ' 'User from brs'}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.store.assert_called_with(team)\n self.gh.remove_team_member.assert_called_once_with(\"myuser\",\n \"githubid\")", "def delete(self, id):\n result = self._collection.remove({'_id': ObjectId(str(id))})\n # result is {u'n': 1, u'ok': 1} if deleted\n # TODO (cc) use constants for return codes and messages\n if result['ok'] == 1 and result['n'] == 1:\n return {'result': 'SUCCESS', 'msg': \"Delete was successful\", 'id': id}\n else:\n # TODO(cc) handle object not found error\n return {'result': 'FAILED', 'msg': 'Record not found in DB', 'id': id}", "def delete_song(id):\n # check if the song exists, if not return a 404 with a helpful message\n song = session.query(models.Song).get(id)\n if not song:\n message = \"Could not find song with id {}\".format(id)\n data = json.dumps({\"message\": message})\n return Response(data, 404, mimetype=\"application/json\")\n \n session.delete(song)\n session.commit\n \n message = \"deleted song {}\".format(id)\n data = json.dumps({\"message\": message})\n return Response(data, 404, mimetype=\"application/json\")", "def delete_artist(artist_id):\n\n print('DELETING ARTIST NUMBER: ' + artist_id)\n err = False\n\n try:\n print('DELETING ARTIST NUMBER: ' + artist_id)\n result = Artist.query.filter(Artist.id==artist_id)\n result = result[0]\n\n db.session.delete(result)\n db.session.commit()\n\n\n db.session.commit()\n except:\n err = True\n db.session.rollback()\n finally:\n 
db.session.close()\n if err:\n flash(\"An error occurred. Artist could not be deleted.\")\n else:\n flash(\"Artist successfully deleted.\")\n \n return redirect(url_for('index'))", "def delete_song(self):\r\n song_id = tuple(input(\"Give the melody id to be deleted:\\t\"))\r\n sql = \"SELECT file_title, form FROM songs WHERE id = %s\" # Check existence of song with given ID\r\n self.cursor.execute(sql, song_id)\r\n result = self.cursor.fetchall()\r\n if len(result) > 0:\r\n path = self.p_storage + \"/\" + result[0][0] + \".\" + result[0][\r\n 1] # Find path of song by appending the name and format to the storage directory path\r\n os.remove(path) # Remove song from directory\r\n sql = \"DELETE FROM songs WHERE id = %s\" # Delete song from database\r\n self.cursor.execute(sql, song_id)\r\n self.cnx.commit()\r\n print(self.cursor.rowcount, \"record(s) deleted\")\r\n else:\r\n print(\"Give a valid id...\")", "def delete_song(_id):\r\n Song.query.filter_by(id=_id).delete()\r\n # filter song by id and delete\r\n db.session.commit() # commiting the new change to our database\r", "def delete_record(uuid):\n\n collection[uuid].delete()\n return redirect('/')" ]
[ "0.6953594", "0.6746213", "0.65705466", "0.63468224", "0.6179682", "0.6166458", "0.6075665", "0.60428214", "0.59959084", "0.5920831", "0.5845369", "0.58197093", "0.5800224", "0.5790172", "0.5773988", "0.5727949", "0.57239854", "0.5679066", "0.56446403", "0.5642042", "0.56376606", "0.5635415", "0.5620827", "0.56142664", "0.56003404", "0.5544256", "0.5516684", "0.54950553", "0.547557", "0.54661137" ]
0.6781372
1
Load (selected) image(s) from zip archive file
def load_zip_archive(archive_file, file_list=None, suffix_list=['jpeg', 'jpg', 'png'], as_float=False): if type(file_list) in [str]: file_list = [file_list] if type(suffix_list) in [str]: suffix_list = [suffix_list] if suffix_list and type(suffix_list) not in [set]: suffix_list = set(suffix_list) imgs = [] with ZipFile(archive_file, 'r') as zfa: for f_info in zfa.infolist(): if f_info.is_dir(): continue if suffix_list: i = f_info.filename.rfind('.') suffix = f_info.filename[i+1:].lower() if i >= 0 else '' if suffix not in suffix_list: continue if file_list: use_f = False for fl in file_list: if re.match(fl, f_info.filename): use_f = True break if not use_f: continue imgs.append((load_image(zfa.open(f_info)), f_info.filename)) return imgs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_zipped_image(zipfilename):\n\n # Read each image and append in a list\n img = []\n filenames = []\n with ZipFile(zipfilename) as archive:\n for entry in archive.infolist():\n with archive.open(entry) as file:\n tmp = Image.open(file)\n img.append(np.array(tmp))\n filenames.append(file.name)\n\n # Return the read images\n return img, filenames", "def load(self, image_zip, df):\n\n self.img = imutils.load_rgb_from_zip(image_zip, self.image_id)\n # TODO: FOR TESTING ONLY!!!!\n # self.img = 200 * np.ones(shape=self.img.shape, dtype=np.uint8)\n # self.img[:300,:300,:] = 100 * np.ones(3, dtype=np.uint8)\n # logging.info(\"DEBUG ON: \".format())\n\n # print(self.img)\n # print(self.img.shape)\n\n self.encoding = 'RGB'\n logging.info(\"Loaded {}, size {} \".format(self.image_id, self.img.shape))\n\n # TODO: (Actually just a note: the .copy() will suppress the SettingWithCopyWarning!\n self.records = df.loc[df.index == self.image_id, :].copy()\n assert isinstance(self.records, pd.DataFrame)\n\n self.records['ship_id'] = self.records.apply(lambda row: hash(row['EncodedPixels']), axis=1)\n self.records.set_index('ship_id', inplace=True)\n self.records.drop(['HasShip', 'Duplicated', 'Unique'], axis=1, inplace=True)\n\n logging.info(\"{} records selected for {}\".format(len(self.records), self.image_id))", "def extract_data(archive: ZipFile, dir_name: str) -> Data:\n with archive.open(f\"{dir_name}/caption.txt\", \"r\") as f:\n captions = f.readlines()\n data = []\n for line in captions:\n tmp = line.decode().strip().split()\n img_name = tmp[0]\n formula = tmp[1:]\n with archive.open(f\"{dir_name}/{img_name}.bmp\", \"r\") as f:\n # move image to memory immediately, avoid lazy loading, which will lead to None pointer error in loading\n img = Image.open(f).copy()\n data.append((img_name, img, formula))\n\n print(f\"Extract data from: {dir_name}, with data size: {len(data)}\")\n\n return data", "def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images", "def load_images(self, files, sub_dir):\n\n for f in files:\n self.images.append(Image(f, sub_dir))", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def fromZip(self, zip_location,extract_location):\n zip_file = zipfile.ZipFile(zip_location,'r')\n zip_file.extractall(extract_location)", "def load_zip_codes():\n logger.info('Loading zip code data')\n read_zips()", "def loadImages(files, targets):\n images = []\n for file in files:\n targets.append(file)\n images.append(snd.imread(file))\n return images, targets", "def load_images(pool, entries):\n start = time.perf_counter()\n images = pool.map(ski.io.imread, [x.path for x in entries])\n logger.info(\"Loaded %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def load(self, dirname):\n loaded_filenames = set()\n ini_filename = os.path.join(dirname, \"xpresser.ini\")\n if os.path.exists(ini_filename):\n config = ConfigParser.ConfigParser()\n config.read(ini_filename)\n for section_name in config.sections():\n if section_name.startswith(\"image \"):\n image_name = section_name.split(None, 1)[1]\n try:\n image_filename = config.get(section_name, \"filename\")\n except ConfigParser.NoOptionError:\n raise ImageDirError(\"Image %s missing filename option\"\n % image_name)\n image_filename = os.path.join(dirname, image_filename)\n if not 
os.path.exists(image_filename):\n raise ImageDirError(\"Image %s file not found: %s\" %\n (image_name, image_filename))\n try:\n image_similarity = config.getfloat(section_name,\n \"similarity\")\n except ConfigParser.NoOptionError:\n image_similarity = None\n except ValueError:\n value = config.get(section_name, \"similarity\")\n raise ImageDirError(\"Image %s has bad similarity: %s\"\n % (image_name, value))\n \n try:\n value = config.get(section_name, \"focus_delta\")\n match = CLICK_POSITION_RE.match(value)\n if not match:\n raise ImageDirError(\"Image %s has invalid click \"\n \"position: %s\" %\n (image_name, value))\n image_focus_delta = (int(match.group(\"x\")),\n int(match.group(\"y\")))\n except ConfigParser.NoOptionError:\n image_focus_delta = None\n image = Image(name=image_name,\n filename=image_filename,\n similarity=image_similarity,\n focus_delta=image_focus_delta)\n self._images[image_name] = image\n loaded_filenames.add(image_filename)\n\n # Load any other images implicitly with the default arguments.\n for basename in os.listdir(dirname):\n filename = os.path.join(dirname, basename)\n if filename not in loaded_filenames:\n ftype, fencoding = mimetypes.guess_type(filename)\n if ftype and ftype.startswith(\"image/\"):\n image_name = os.path.splitext(basename)[0]\n self._images[image_name] = Image(name=image_name,\n filename=filename)", "def download_multiple(select_files, savepath, id, ext):\n with zipfile.ZipFile(savepath + '/processed_images.zip', mode='w') as zf:\n\n for file in select_files:\n pro_img, _, _, _, _ = get_image_pair(file, id)\n output = io.BytesIO()\n pro_img.save(output, format=ext)\n filename = file + '.' + ext\n zf.writestr(filename, output.getvalue())", "def unzip_oxygen_files(zip_file):\n name_main_content = None\n name_left_menu = None\n list_img_files_to_save = list()\n\n files_unzipped = ZipFile(zip_file)\n for file_unzipped_name in files_unzipped.namelist():\n if not file_unzipped_name.startswith('__MACOSX'):\n if file_unzipped_name.endswith(\".jpeg\"):\n list_img_files_to_save.append(file_unzipped_name)\n elif file_unzipped_name.endswith(\".indexList.html\"):\n name_left_menu = file_unzipped_name\n elif file_unzipped_name.endswith(\"_xsd.html\"):\n name_main_content = file_unzipped_name\n\n return files_unzipped, name_left_menu, name_main_content, list_img_files_to_save", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def _unpack_archive(self):\n with zipfile.ZipFile(self._archive_full_path, 'r') as zip_ref:\n zip_ref.extractall(self._storage_path)\n\n _logger.debug('Archive has been unpacked.')", "def get_frame(self, indices: List[int]) -> List[np.ndarray]:\n if isinstance(indices, int):\n indices = [indices]\n img_list = []\n if self.frame_zip_fid is None:\n self._check_available(self.zip_path)\n self.frame_zip_fid = zipfile.ZipFile(self.zip_path, 'r')\n\n for idx in indices:\n file_name = self.frame_fmt.format(int(idx) + 1)\n img = 
self.load_image_from_zip(self.frame_zip_fid, file_name, cv2.IMREAD_COLOR)\n img_list.append(img)\n return img_list", "def load_image_series(filenames):\n\n # Cycle through each file\n ims = []\n all_filenames = []\n for file in filenames:\n\n # Get file extension\n _, ext = os.path.splitext(file)\n\n # If file is a zip file read with load_zipped_image\n if ext == '.zip':\n im, filename = load_zipped_image(file)\n ims.append(im)\n all_filenames.append(filename)\n\n else:\n ims.append(load_image(file))\n all_filenames.append(file)\n\n # Convert lists of lists into lists\n ims = flatten(ims)\n all_filenames = flatten([all_filenames])\n\n return ims, all_filenames", "def getROIobjects(zip_path, class_id, height=None, width=None): # image_path=None,\n if zip_path is None:\n zip_path = splitext(image_path)[0] + '.zip'\n \n # Must have a .zip file\n assert zip_path.lower().endswith('.zip'), \"Must be a .ZIP file\"\n \n# if image_path is not None:\n# img = openTifImage(image_path)\n \n# if len(img.shape) is 2:\n# shape = img.shape\n# elif len(img.shape) is 3:\n# shape = (img.shape[1],img.shape[2])\n# else:\n# shape = (height, width)\n \n shape = (height,width)\n img = np.zeros(shape, dtype=np.int32)\n \n # Read ROIs with read_roi\n roi_dict = read_roi.read_roi_zip(zip_path)\n rois = list(roi_dict.items())\n masks = []\n class_ids = []\n if len(rois) > 0:\n # Only continue if we have some ROIs\n for n, roi in enumerate(rois):\n #get x y arrays\n x = roi[1]['x']\n y = roi[1]['y']\n #create array of points for polygon path\n points = np.vstack((x, y)).T\n points = points.reshape((-1,1,2))\n mask = cv2.fillPoly(img.copy(),[points],True,1)\n masks.append(mask)\n class_ids.append(class_id)\n # convert lists to ndarrays\n masks = np.asarray(masks) \n masks = np.rollaxis(masks,0,3) # roll axis to return correct shape\n class_ids = np.asanyarray(class_ids)\n \n return masks, class_ids", "def _extract_images(self, filename):\n log.info('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' %\n (magic, filename))\n num_images = self._read32(bytestream)\n rows = self._read32(bytestream)\n cols = self._read32(bytestream)\n buf = bytestream.read(rows * cols * num_images)\n data = np.frombuffer(buf, dtype=np.uint8)\n data = data.reshape(num_images, rows, cols, 1)\n return data", "def loadFromFolder(self):\n stackData = tifffile.imread(self.path + '/*.tif')\n numChannels = stackData.shape[1] # assuming [slices][channels][x][y]\n numSlices = stackData.shape[0] # assuming [slices][channels][x][y]\n self._numChannels = numChannels\n self.header.header['numImages'] = numSlices\n print('loadFromFolder() stackData:', stackData.shape)\n for channel in range(numChannels):\n self._stackList[channel] = stackData[:, channel, :, :]\n self._makeMax(channel)", "def extract(archive_path, images_dir, test_zip=False):\n log(\"TRACE\", \"Attempting to extracted files from {}\".format(archive_path))\n with zipfile.ZipFile(archive_path) as images_zip:\n # Check that the Zip file is valid, in which case `testzip()` returns\n # None. 
If it's bad, that function will return a list of bad files\n try:\n if test_zip and images_zip.testzip():\n log(\"ERROR\", \"Could not extract the following invalid Zip file:\"\n \" {}\".format(archive_path))\n return []\n except OSError:\n log(\"ERROR\", \"Could not extract the following invalid Zip file:\"\n \" {}\".format(archive_path))\n return []\n images_zip.extractall(images_dir)\n archive_namelist = images_zip.namelist()\n log(\"TRACE\", \"Extracted files: {}\".format(archive_namelist))\n return archive_namelist", "def extractall(self, *args, **kwargs):\n self.zipfile.extractall(*args, **kwargs)", "def load_file_in_archive(req_path, archive, archive_ext, zip_path, name, ext):\n BASE_DIR = ROOT\n full_path = u\"%s/%s.%s\" % (req_path, archive, archive_ext)\n zip_path = u\"%s/%s.%s\" % (zip_path, name, ext)\n full_real_path = get_real_path(BASE_DIR, full_path)\n logger.info(\"%s, %s\", full_real_path, [to_hex(c) for c in full_real_path])\n try:\n zip_path = unquote(zip_path).encode('utf-8')\n except Exception as e:\n logger.info(\"Failed to encode: %s\", zip_path)\n\n if ext == 'thm' or archive_ext not in archive_exts:\n logger.info(\"Unsupported file\")\n return ('', 204)\n\n if os.path.exists(full_real_path.encode('utf-8')):\n logger.info(\"File doesn't exist: %s\", full_real_path)\n return ('', 204)\n\n #Only zip files are supported <path>/file.zip/1/01.jpg\n ## Render single file\n with zipfile.ZipFile(full_real_path) as zf:\n for name in zf.namelist():\n encoded_name = name.decode('euc-kr').encode('utf-8')\n logger.info(\"%s(%s), %s(%s), %s, %s\", encoded_name, type(encoded_name), zip_path, type(zip_path),\n [to_hex(c) for c in name], [to_hex(c) for c in zip_path])\n if encoded_name == zip_path:\n with zf.open(name) as f:\n bytesIO = BytesIO()\n bytesIO.write(f.read())\n bytesIO.seek(0)\n return flask.send_file(bytesIO, attachment_filename=os.path.basename(zip_path), as_attachment=True)\n logger.error(\"No file Name: %s\", zip_path)\n return ('', 204)", "def load_from_images(self):\n logging.debug(\"load_from_images called\")\n return True", "def load_mnist (images_fn_gz, labels_fn_gz, digits=None, path=None, asbytes=False, selection=None, return_labels=True, return_indices=False):\n\n # We can skip the labels file only if digits aren't specified and labels aren't asked for\n if return_labels or digits is not None:\n flbl = gzip.open (labels_fn_gz, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n labels_raw = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = gzip.open(images_fn_gz, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n images_raw = pyarray(\"B\", fimg.read())\n fimg.close()\n\n if digits:\n indices = [k for k in range(size) if labels_raw[k] in digits]\n else:\n indices = range(size)\n\n if selection:\n indices = indices[selection] \n N = len(indices)\n\n images = zeros((N, rows, cols), dtype=uint8)\n\n if return_labels:\n labels = zeros((N), dtype=int8)\n for i, index in enumerate(indices):\n images[i] = array(images_raw[ indices[i]*rows*cols : (indices[i]+1)*rows*cols ]).reshape((rows, cols))\n if return_labels:\n labels[i] = labels_raw[indices[i]]\n\n if not asbytes:\n images = images.astype(float)/255.0\n\n ret = (images,)\n if return_labels:\n ret += (labels,)\n if return_indices:\n ret += (indices,)\n if len(ret) == 1:\n return ret[0] # Don't return a tuple of one\n else:\n return ret", "def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n 
img = pygame.image.load(dir + '/' + file)\n self.images.append(img)", "def get_img_array(myzipfile, imgid, shape=(299,299)):\n img_arr = np.zeros(shape=(512, 512, 3), dtype=np.float32)\n img_green = Image.open(myzipfile.open(f'{imgid}_green.png'))\n img_blue = Image.open(myzipfile.open(f'{imgid}_blue.png'))\n img_red = Image.open(myzipfile.open(f'{imgid}_red.png'))\n img_yellow = Image.open(myzipfile.open(f'{imgid}_yellow.png'))\n img_arr[:,:,0] = np.divide(np.array(img_green), 255)\n img_arr[:,:,1] = np.divide(np.array(img_blue), 255)/2 + np.divide(np.array(img_yellow), 255)/2\n img_arr[:,:,2] = np.divide(np.array(img_red), 255)/2 + np.divide(np.array(img_red), 255)/2\n img_arr = cv2.resize(img_arr, shape)\n return img_arr", "def load_image(self, image_id):\n \n # load image infos\n \n info = self.image_info[image_id]\n patch_path = info['path']\n width = info['width']\n height = info['height']\n impath = os.path.join(patch_path,\"images\")\n file_list = os.listdir(impath) \n channels = info['channels']\n \n image = []\n \n # stack channels to be loaded.\n \n for channel in channels:\n \n if channel == \"none\":\n channel_image = skimage.img_as_ubyte(np.zeros( (height,width) ) )\n \n else:\n channel_image_name = [x for x in file_list if channel in x][0] \n channel_image_path = os.path.join(impath, channel_image_name)\n channel_image = skimage.io.imread(channel_image_path)\n channel_image = skimage.img_as_ubyte(channel_image)\n image.append(channel_image)\n \n image = np.stack(image, axis=2)\n \n return image", "def _get_data(path):\n archive = np.load(path)\n images = archive['faceData']\n return images", "def run_zip_analysis(filepath, ID, method):\n with zipfile.ZipFile(filepath[0]) as zf:\n for entry in zf.namelist():\n if not entry.startswith(\"__\"): # Get rid hidden files in zip\n with zf.open(entry) as file:\n data = file.read()\n fh = io.BytesIO(data)\n Image.open(fh)\n\n filename, extension = get_file_name(file.name)\n\n # Save raw image to database\n msg = client.upload_file(ID, filename,\n extension, fh.getvalue())\n err, msg = check_msg(msg)\n if err is False: # if no error in uploading image\n # Request to process image\n client.process_image(ID, filename, method)\n return msg" ]
[ "0.76349974", "0.6311234", "0.6153032", "0.59051687", "0.5845452", "0.5826613", "0.5734629", "0.5730039", "0.5701617", "0.5680505", "0.5641235", "0.56268257", "0.5621598", "0.56099635", "0.55637586", "0.5561061", "0.55251", "0.5472375", "0.54687905", "0.5451084", "0.5445658", "0.5429179", "0.541356", "0.5402294", "0.53982985", "0.5397163", "0.5394863", "0.53741765", "0.5373714", "0.5371165" ]
0.67017627
1
Create a Meter from dict object.
def from_dict(cls, d: Dict, method: SerializationType = SerializationType.Status) -> "Meter": name = d["name"] min_value = d["min_value"] max_value = d["max_value"] meter = Meter( name=name, min_value=min_value, max_value=max_value, ) if method == SerializationType.Status: value = d["value"] meter.value = value return meter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_dict(cls, d):\n return loadd(d, cls)", "def from_dict(cls, d):\n return cls(**d)", "def from_dict(cls, data):\n return cls(**data)", "def fromdict(cls,datadict):\n return cls(fmetric=datadict.get('fmetric'),\n fhost=datadict.get('fhost'),\n fvalue=datadict.get('fvalue'),\n ftime=datadict.get('ftime'),\n funit=datadict.get('funit'),\n finfo=datadict.get('finfo'))", "def from_dict(self, d):\n return Grid(**d)", "def from_dict(\n cls, d: typing.Mapping[str, typing.Union[str, float]]\n ) -> \"VLEPoint\":\n\n composition = Composition(p=d[\"composition\"], type=d[\"composition_type\"])\n return cls(\n composition=composition,\n pressures=(d[\"first_component_pressure\"], d[\"second_component_pressure\"]),\n temperature=d[\"temperature\"],\n )", "def from_dict(cls, dct):\n return cls(**dct)", "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n height = awsecommerceservice.models.decimal_with_units.DecimalWithUnits.from_dictionary(dictionary.get('Height')) if dictionary.get('Height') else None\n length = awsecommerceservice.models.decimal_with_units.DecimalWithUnits.from_dictionary(dictionary.get('Length')) if dictionary.get('Length') else None\n weight = awsecommerceservice.models.decimal_with_units.DecimalWithUnits.from_dictionary(dictionary.get('Weight')) if dictionary.get('Weight') else None\n width = awsecommerceservice.models.decimal_with_units.DecimalWithUnits.from_dictionary(dictionary.get('Width')) if dictionary.get('Width') else None\n\n # Return an object of this model\n return cls(height,\n length,\n weight,\n width)", "def _from_dict(cls, d):\n confidence = d.get(\"confidence\", None)\n constant = d.get(\"constant\", False)\n tags = d.get(\"tags\", None)\n return cls(\n d[\"name\"],\n d[\"value\"],\n confidence=confidence,\n constant=constant,\n tags=tags,\n )", "def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)", "def from_dict(cls, dct):\n pass", "def from_dict(cls, data: Dict[str, any]):\n return cls(**data)", "def from_dict(dct):\n if any(k not in dct for k in ['measurement', 'tags', 'fields']):\n return dct\n return Point(\n dct['measurement'],\n dct['tags'],\n dct['fields'],\n dct.get('time'),\n )", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)" ]
[ "0.6572882", "0.6530952", "0.6431576", "0.63849896", "0.6335211", "0.630731", "0.6291888", "0.6274567", "0.62618166", "0.62144405", "0.6184855", "0.6163284", "0.61541444", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396" ]
0.80224687
0
Check that all valence terms have been assigned.
def _check_all_valence_terms_assigned( handler, assigned_terms, topology, valence_terms, ): if len(assigned_terms) == len(valence_terms): return # Convert the valence term to a valence dictionary to make sure # the order of atom indices doesn't matter for comparison. valence_terms_dict = assigned_terms.__class__() for atoms in valence_terms: atom_indices = (topology.atom_index(a) for a in atoms) valence_terms_dict[atom_indices] = atoms # Check that both valence dictionaries have the same keys (i.e. terms). assigned_terms_set = set(assigned_terms.keys()) valence_terms_set = set(valence_terms_dict.keys()) unassigned_terms = valence_terms_set.difference(assigned_terms_set) not_found_terms = assigned_terms_set.difference(valence_terms_set) # Raise an error if there are unassigned terms. err_msg = "" if len(unassigned_terms) > 0: unassigned_atom_tuples = [] unassigned_str = "" for unassigned_tuple in unassigned_terms: unassigned_str += "\n- Topology indices " + str(unassigned_tuple) unassigned_str += ": names and elements " unassigned_atoms = [] # Pull and add additional helpful info on missing terms for atom_idx in unassigned_tuple: atom = topology.atom(atom_idx) unassigned_atoms.append(atom) unassigned_str += f"({atom.name} {atom.symbol}), " unassigned_atom_tuples.append(tuple(unassigned_atoms)) err_msg += ( "{parameter_handler} was not able to find parameters for the following valence terms:\n" "{unassigned_str}" ).format( parameter_handler=handler.__class__.__name__, unassigned_str=unassigned_str, ) if len(not_found_terms) > 0: if err_msg != "": err_msg += "\n" not_found_str = "\n- ".join([str(x) for x in not_found_terms]) err_msg += ( "{parameter_handler} assigned terms that were not found in the topology:\n" "- {not_found_str}" ).format( parameter_handler=handler.__class__.__name__, not_found_str=not_found_str, ) if err_msg: err_msg += "\n" if isinstance(handler, BondHandler): exception_class = UnassignedBondError elif isinstance(handler, AngleHandler): exception_class = UnassignedAngleError elif isinstance(handler, (ProperTorsionHandler, ImproperTorsionHandler)): exception_class = UnassignedTorsionError else: raise RuntimeError( f"Could not find an exception class for handler {handler}", ) exception = exception_class(err_msg) exception.unassigned_topology_atom_tuples = unassigned_atom_tuples exception.handler_class = handler.__class__ raise exception
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check(self):\r\n self.check_probabilities()\r\n self.check_sum()", "def _var_check(self):\n missing = set()\n for v in self.variables:\n if getattr(self, v) is None:\n missing.add(v)\n self.missing = missing", "def _check_assigned(self):\n\n if self.values is None and self.lazy:\n raise ValueError(\"This instance has not been assigned any data.\")", "def __checkNrVars(self):\n variables = set()\n for q in self.__quantifierList:\n for var in q.getVariableNames():\n variables.add(\"%s\" % var)\n for c in self.__clauseList:\n for var in c.getVariableNames():\n variables.add(\"%s\" % var)\n \n return len(variables)", "def check_all(self):\n return self.check_rs() + self.check_hgvs()", "def assignment_complete(self, assignment):\n # for each variable in the crossword\n for variable in self.crossword.variables:\n # if the variable is not assigned a value\n if variable not in assignment:\n # the crossword is not complete\n return False\n return True", "def test_accept_all_terms_required(api, account, given_terms):\n api.terms.get_required_terms.return_value = given_terms\n api.terms.accept_terms.reset_mock()\n account.accept_all_terms()\n api.terms.get_required_terms.assert_called()\n api.terms.get_all_terms.assert_not_called()\n\n call_count = custom_st.count_terms(given_terms)\n assert api.terms.accept_terms.call_count == call_count", "def all(self):\n for v in self.sects.values():\n if not np.all(v):\n return False\n if self.is_full():\n return True\n else:\n return np.all(self.defval)", "def checkValName(self):\n valLength = len(self.val)\n if valLength == 0:\n try:\n valsLength = len(self.val)\n if valsLength == 0:\n self.val = self.vals\n except Exception:\n print \"No value set\"", "def __check_all_config_values_set(cfg: __Config):\n unset_values = [key for key, val in cfg.__dict__.items() if val is None]\n if len(unset_values) > 0:\n print(f\"WARNING: The following config variables have not been set: {unset_values}\")", "def check_all_constraints(csp) :\n\n for constraint in csp.get_all_constraints():\n assigned1 = csp.get_assigned_value(constraint.var1)\n assigned2 = csp.get_assigned_value(constraint.var2)\n check = constraint.check(assigned1,assigned2)\n if check==False and assigned1!=None and assigned2!=None:\n return False \n return True", "def test_accept_all_terms_optional(api, account, given_terms):\n api.terms.get_all_terms.return_value = given_terms\n api.terms.accept_terms.reset_mock()\n account.accept_all_terms(optional=True)\n api.terms.get_required_terms.assert_not_called()\n api.terms.get_all_terms.assert_called()\n\n call_count = custom_st.count_terms(given_terms)\n assert api.terms.accept_terms.call_count == call_count", "def solved(self):\n if not self.all_variables_assigned():\n return False\n for constraint in self.constraints:\n if not constraint.satisfied(*[self.var_dict[name] for name in constraint.var_names]):\n return False\n return True", "def test_verify_all_dependencies(self):\n\n for condition in self.all_references():\n result = self.verify_dependencies(condition)\n\n if result:\n self.ill_defined[condition] = result\n else:\n self.well_defined.add(condition)\n\n return self.ill_defined", "def has_evaluations(self):\n for _ in self.evaluations:\n return True\n return False", "def guarantee_initialized_variables(self):\n\n global_vars = tf.global_variables()\n is_not_initialized = self.sess.run([tf.is_variable_initialized(var) for var in global_vars])\n not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]\n\n for x in 
['[#] Initialized: ' + str(i.name) for i in\n not_initialized_vars]:\n print(x)\n\n if len(not_initialized_vars):\n self.sess.run(tf.variables_initializer(not_initialized_vars))\n return True\n else:\n return False", "def check_all_constraints(csp) :\n constraints=csp.get_all_constraints()\n for constraint in constraints:\n var1 = constraint.var1\n var2 = constraint.var2\n val1=csp.get_assigned_value(var1)\n val2=csp.get_assigned_value(var2)\n if val1!=None and val2!=None:\n if not constraint.check(val1,val2):\n return False\n return True", "def is_valid(self):\n sum_prob_per_var = {}\n for rule in self.rules:\n var, prob = rule.variable, rule.probability\n if prob < 0:\n return False\n sum_prob_per_var[var] = sum_prob_per_var.get(var, 0) + prob\n return all(sum_prob == 1.0 for sum_prob in sum_prob_per_var.values())", "def check_all(c):", "def check_prerequisites(self):\n self.courses_not_completed = self.prerequisite_set - set(self.user_courses.keys())", "def evals(self):\n\t\tpass", "def check_emission_factors(cls, values):\n for v in values.values():\n if isinstance(v, list):\n assert len(v) > 0, \"Emission factors must not be an empty list\"\n return values", "def _check_determinancy(self, values, errors, combo):\n val, err = self.used_vars(values, errors, combo)\n n, m = len(val), len(self.get_equations(combo))\n\n if n != m:\n if m > n:\n s = '>'\n t = 'remove'\n v = err\n else:\n s = '<'\n t = 'add'\n v = val\n\n a = abs(n - m)\n\n raise ValueError('Indeterminant system:: Number of equations ({}) '\n '{} number of unknowns ({}). To correct, {} ({}) errors in {} '\n 'or adjust the input equations.'.format(m, s, n, t, a, v))", "def _check_if_satisfiable(self):\n # Search for a satisfying assignment\n all_variables = self.all_variables()\n\n # Try to find some assignment of the constrained vars\n counter = count()\n next_count = next(counter)\n queue = [(0, 0, next_count, {})]\n\n while queue:\n num_attempts, _, _, assignments = hq.heappop(queue)\n num_attempts += 1\n # Full assignment?\n # keep out of loop for empty constraint edge case\n if len(assignments) == len(all_variables):\n return True\n for v in sorted(all_variables - set(assignments.keys())):\n if isinstance(v, DiscreteVariable):\n possible_assignments = self.get_possible_assignments(v)\n else:\n possible_assignments = [v.sample() \\\n for _ in range(10*(1+num_attempts))]\n for assignment in possible_assignments:\n new_assignments = assignments.copy()\n new_assignments[v] = assignment\n # Constraint violated\n if not self.check(new_assignments):\n continue\n # Finish early\n if len(new_assignments) == len(all_variables):\n return True\n next_count = next(counter)\n hq.heappush(queue, (num_attempts, -len(new_assignments),\n -next_count, new_assignments))\n\n if next_count > gc.max_satisfy_tries:\n import ipdb; ipdb.set_trace()\n break\n\n return False", "def check_solved(self, values):\n if values == None: #Forward_checking determines that values state is invalid -> set false, check if false here.\n return False\n\n for box in values.keys():\n if len(values[box]) != 1:\n return False\n return True", "def _check_inputs(self):\n\n self._check_resident_prefs()\n self._check_hospital_prefs()", "def unused_evals(self):\n\t\treturn self.Evals - self.nFES", "def complete(self):\n return all((constraint.satisfied() for constraint in self.constraints))", "def _check_initialized(self):\n check_is_fitted(self, 'estimators_')", "def _check_mandatory(self):\n for subtoken in self.subtokens:\n if subtoken.mandatory != 0:\n self.mandatory 
= np.random.uniform()\n return\n self.mandatory = 0" ]
[ "0.6390863", "0.6386517", "0.6137061", "0.61295885", "0.61179125", "0.60907435", "0.60848904", "0.5932524", "0.5888458", "0.58481896", "0.5843493", "0.57511234", "0.5699283", "0.569033", "0.56810856", "0.56619775", "0.5629971", "0.561073", "0.5604263", "0.55888325", "0.55798185", "0.5576629", "0.5573932", "0.55599195", "0.55544776", "0.5552126", "0.5549668", "0.55243784", "0.5517663", "0.5502451" ]
0.7500235
0
Return a subset of `supported_parameters` that are meant to be included in potentials.
def potential_parameters(cls): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def supported_parameters(cls):\n raise NotImplementedError()", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def supported_parameters(cls):\n return [\"smirks\", \"id\", \"k\", \"periodicity\", \"phase\", \"idivf\"]", "def supported_parameters(cls):\n return [\"smirks\", \"id\", \"k\", \"periodicity\", \"phase\", \"idivf\", \"k_bondorder\"]", "def potential_parameters(cls):\n return [\"k\", \"length\"]", "def parameters(self):\n ps = super().parameters()\n exclude = set(self.estimator.parameters())\n ps = (p for p in ps if not p in exclude)\n return ps", "def potential_parameters(cls):\n return [\"length\", \"distance\"]", "def supported_parameters(cls):\n return [\"smirks\", \"id\", \"k\", \"length\", \"k_bondorder\", \"length_bondorder\"]", "def supported_parameters(cls):\n return [\"smirks\", \"id\", \"length\", \"distance\"]", "def get_required_parameters(self) -> list:\n results = []\n if self.no_params or self.params_optional:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if not parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def potential_parameters(cls):\n return [\"k\", \"angle\"]", "def get_resource_params():\n return Parameter.list()", "def supported_parameters(cls):\n return [\"smirks\", \"id\", \"k\", \"angle\"]", "def extract_parameters(self) -> Dict[str, Set[str]]:\n regex = \"\\{([A-Za-z0-9_]+)\\}\"\n reserved_parameters = [\n \"output\",\n \"input\",\n \"output_vec\",\n \"input_vec\",\n \"df\",\n \"vec_open\",\n \"vec_close\",\n ]\n parameters = {}\n for scope in self.scopes:\n parameters[scope] = set(\n [\n x\n for x in re.findall(regex, self.call)\n if x not in reserved_parameters\n ]\n )\n return parameters", "def _check_parameters_support(self, parameters=()):\n for parameter in parameters:\n assert parameter in self._supported, \"Estimation %s is not implemented yet\" % parameter", "def get_parameters_with_expert_knowledge(self) -> List[str]:\n return sorted(list(set([p for p, _ in self.expert_knowledge])))", "def supported_qparams(self):\n import re\n return re.findall(r\"\\$\\$\\{(\\w+)\\}\", self.QTEMPLATE)", "def get_resource_params(self):\n return Parameter.list()", "def get_resource_params(self):\n return Parameter.list()", "def get_optional_parameters(self) -> list:\n results = []\n if self.no_params or self.params_required:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def get_resource_params(self):\n return SBE37Parameter.list()", "def _required_parameters(self) -> RequiredParameters:\n return RequiredParameters([])", "def _required_parameters(self) -> RequiredParameters:\n return RequiredParameters([])", "def parameter_combinations(cls, raw=False):\r\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\r\n 1.0, 1.5, 2.0],\r\n 'n_neighbors': [3, 5, 7]}\r\n return cls.generate_parameter_combinations(parameter_combinations, raw)", "def f_supports(self, data):\n return type(data) in pypetconstants.PARAMETER_SUPPORTED_DATA", "def parameter_combinations(cls, raw=False):\r\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\r\n 1.0, 1.5, 2.0],\r\n 
'n_neighbors': [5, 7, 9],\r\n 't': [0.3, 0.5, 0.8]}\r\n return cls.generate_parameter_combinations(parameter_combinations, raw)", "def parameters_present(options, **kwargs):\n synonyms = options['synonyms']\n call_graph = options['call_graph']\n\n result = _Result()\n\n for node, edges in call_graph:\n for edge in edges:\n call_config = edge.settings\n\n for req_param, req_value in kwargs.items():\n found = False\n\n for param, value in call_config.items():\n if synonyms.are_synonyms(param, req_param):\n found = True\n break\n\n if not found:\n result.add(node, edge, req_param)\n\n return result", "def get_non_essential_params(cls, candidate: str):\n return cls.get_params(candidate, gen_empty_fingerprint(), list(), False, False)", "def parameters(self, requires_grad_only=True):\n filter_cond = lambda param: param.requires_grad if requires_grad_only else True\n return (param for param in super().parameters() if filter_cond(param))" ]
[ "0.61632377", "0.60749733", "0.60749733", "0.6045904", "0.5958558", "0.59476894", "0.5931817", "0.5903901", "0.5861894", "0.58335835", "0.5776701", "0.5754737", "0.56521624", "0.5636197", "0.56098294", "0.5578262", "0.55694485", "0.55178815", "0.5493202", "0.5493202", "0.54491764", "0.5433263", "0.5374233", "0.5374233", "0.53720874", "0.5365478", "0.5333575", "0.53314114", "0.5330214", "0.53186554" ]
0.63488674
0
Creates a RFXtrxDSMR asyncio protocol.
def create_rfxtrx_dsmr_protocol(dsmr_version, telegram_callback, loop=None, **kwargs):
    protocol = _create_dsmr_protocol(dsmr_version, telegram_callback,
                                     RFXtrxDSMRProtocol, loop, **kwargs)
    return protocol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_rfxtrx_tcp_dsmr_reader(host, port, dsmr_version,\n telegram_callback, loop=None,\n keep_alive_interval=None):\n if not loop:\n loop = asyncio.get_event_loop()\n protocol, _ = create_rfxtrx_dsmr_protocol(\n dsmr_version, telegram_callback, loop=loop,\n keep_alive_interval=keep_alive_interval)\n conn = loop.create_connection(protocol, host, port)\n return conn", "async def rfxtrx_dsmr_connection_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=RFXtrxDSMRProtocol)\n\n async def connection_factory(*args, **kwargs):\n \"\"\"Return mocked out Asyncio classes.\"\"\"\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n with patch(\n \"homeassistant.components.dsmr.sensor.create_rfxtrx_dsmr_reader\",\n connection_factory,\n ), patch(\n \"homeassistant.components.dsmr.sensor.create_rfxtrx_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "async def rfxtrx_dsmr_connection_send_validate_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=RFXtrxDSMRProtocol)\n\n protocol.telegram = {\n EQUIPMENT_IDENTIFIER: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject([{\"value\": \"123456789\", \"unit\": \"\"}]),\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n\n async def connection_factory(*args, **kwargs):\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n async def wait_closed():\n if isinstance(connection_factory.call_args_list[0][0][2], str):\n # TCP\n telegram_callback = connection_factory.call_args_list[0][0][3]\n else:\n # Serial\n telegram_callback = connection_factory.call_args_list[0][0][2]\n\n telegram_callback(protocol.telegram)\n\n protocol.wait_closed = wait_closed\n\n with patch(\n \"homeassistant.components.dsmr.config_flow.create_rfxtrx_dsmr_reader\",\n connection_factory,\n ), patch(\n \"homeassistant.components.dsmr.config_flow.create_rfxtrx_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "def create_rfxtrx_dsmr_reader(port, dsmr_version, telegram_callback, loop=None):\n protocol, serial_settings = create_rfxtrx_dsmr_protocol(\n dsmr_version, telegram_callback, loop=None)\n serial_settings['url'] = port\n\n conn = create_serial_connection(loop, protocol, **serial_settings)\n return conn", "async def main():\r\n\r\n await rvr.wake()\r\n\r\n # Give RVR time to wake up\r\n await asyncio.sleep(2)\r\n\r\n if debug: print(\"Starting imu handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.imu,\r\n handler=imu_handler\r\n )\r\n if debug: print(\"Starting color handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.color_detection,\r\n handler=color_detected_handler\r\n )\r\n if debug: print(\"Starting accelerometer handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.accelerometer,\r\n handler=accelerometer_handler\r\n )\r\n if debug: print(\"Starting ambient light handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.ambient_light,\r\n handler=ambient_light_handler\r\n )\r\n if debug: print(\"Starting encoder handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.encoders,\r\n handler=encoder_handler\r\n )\r\n if 
debug: print(\"Starting sensor control\")\r\n\r\n #await rvr.sensor_control.start(interval=250)\r\n await rvr.sensor_control.start(interval=1000)\r\n\r\n if debug: print(\"Ros listener spinning up\")\r\n #await spin_ros()\r\n if debug: print(\"spin complete\")\r\n\r\n\r\n\r\n # The asyncio loop will run forever to allow infinite streaming.\r", "def run(self, event_loop):\n protocol_factory = lambda: self.PROTOCOL_CLS(self)\n coro = event_loop.create_connection(\n protocol_factory, host=self.host, port=self.port)\n event_loop.run_until_complete(coro)", "def asyncinit(cls):\r\n __new__ = cls.__new__\r\n\r\n async def init(obj, *arg, **kwarg):\r\n await obj.__init__(*arg, **kwarg)\r\n return obj\r\n\r\n def new(cls, *arg, **kwarg):\r\n obj = __new__(cls, *arg, **kwarg)\r\n coro = init(obj, *arg, **kwarg)\r\n return coro\r\n\r\n cls.__new__ = new\r\n return cls", "def buildProtocol(self, addr):\r\n p = RCERobotProtocol(self._connection)\r\n p.factory = self\r\n return p", "async def dsmr_connection_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=DSMRProtocol)\n\n async def connection_factory(*args, **kwargs):\n \"\"\"Return mocked out Asyncio classes.\"\"\"\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n with patch(\n \"homeassistant.components.dsmr.sensor.create_dsmr_reader\", connection_factory\n ), patch(\n \"homeassistant.components.dsmr.sensor.create_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "async def connection_factory(*args, **kwargs):\n return (transport, protocol)", "async def connection_factory(*args, **kwargs):\n return (transport, protocol)", "def run(self, event_loop):\n protocol_factory = lambda: self.PROTOCOL_CLS(self)\n coro = event_loop.create_server(protocol_factory, port=self.port)\n event_loop.run_until_complete(coro)", "async def dsmr_connection_send_validate_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=DSMRProtocol)\n\n protocol.telegram = {\n EQUIPMENT_IDENTIFIER: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject([{\"value\": \"123456789\", \"unit\": \"\"}]),\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n\n async def connection_factory(*args, **kwargs):\n \"\"\"Return mocked out Asyncio classes.\"\"\"\n if args[1] == \"5L\":\n protocol.telegram = {\n LUXEMBOURG_EQUIPMENT_IDENTIFIER: CosemObject(\n [{\"value\": \"12345678\", \"unit\": \"\"}]\n ),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject(\n [{\"value\": \"123456789\", \"unit\": \"\"}]\n ),\n }\n if args[1] == \"5S\":\n protocol.telegram = {\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n if args[1] == \"Q3D\":\n protocol.telegram = {\n Q3D_EQUIPMENT_IDENTIFIER: CosemObject(\n [{\"value\": \"12345678\", \"unit\": \"\"}]\n ),\n }\n\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n async def wait_closed():\n if isinstance(connection_factory.call_args_list[0][0][2], str):\n # TCP\n telegram_callback = connection_factory.call_args_list[0][0][3]\n else:\n # Serial\n telegram_callback = connection_factory.call_args_list[0][0][2]\n\n telegram_callback(protocol.telegram)\n\n protocol.wait_closed = wait_closed\n\n with patch(\n \"homeassistant.components.dsmr.config_flow.create_dsmr_reader\",\n connection_factory,\n ), patch(\n 
\"homeassistant.components.dsmr.config_flow.create_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "def __init__(self, shell=TerminalShell, stream=TelnetStream,\n encoding='utf-8', log=logging, force_binary=False,\n waiter_connected=None, waiter_closed=None):\n self.log = log\n self.force_binary = force_binary\n self._shell_factory = shell\n self._stream_factory = stream\n self._default_encoding = encoding\n self._loop = asyncio.get_event_loop()\n\n #: session environment as S.env['key'], defaults empty string value\n self._env = collections.defaultdict(str, **self.default_env)\n\n #: toggled when transport is shutting down\n self._closing = False\n\n #: datetime of last byte received\n self._last_received = None\n\n #: datetime of connection made\n self._connected = None\n\n #: future result stores value of gethostbyaddr(sever_ip)\n self._server_host = asyncio.Future()\n\n #: server_fqdn is result of socket.getfqdn() of server_host\n self._server_fqdn = asyncio.Future()\n\n #: values for properties ``server_ip`` and ``server_port``\n self._server_ip = None\n self._server_port = None\n\n #: waiter is a Future that completes when connection is closed.\n if waiter_closed is None:\n waiter_closed = asyncio.Future()\n self.waiter_closed = waiter_closed\n\n if waiter_connected is None:\n waiter_connected = asyncio.Future()\n self.waiter_connected = waiter_connected", "def buildProtocol(self, addr):\r\n p = RobotWebSocketProtocol(self._realm)\r\n p.factory = self\r\n return p", "def create_event() -> abc.Event:\n return get_asynclib().Event()", "def buildProtocol(self, addr):\n\treactor.callLater(1, self.timesync)\n return KinectServer(self)", "def __init__(self, loop: AbstractEventLoop, response: bytes) -> None:\n super().__init__(loop, response, ResponseMessageType.CREATE_SCHEDULE)", "def async_io_factory(host=\"127.0.0.1\", port=Defaults.TLSPort, sslctx=None,\n server_hostname=None, framer=None, source_address=None,\n timeout=None, **kwargs):\n import asyncio\n from pymodbus.client.asynchronous.async_io import init_tls_client\n loop = kwargs.get(\"loop\") or asyncio.new_event_loop()\n proto_cls = kwargs.get(\"proto_cls\", None)\n if not loop.is_running():\n asyncio.set_event_loop(loop)\n cor = init_tls_client(proto_cls, loop, host, port, sslctx, server_hostname,\n framer)\n client = loop.run_until_complete(asyncio.gather(cor))[0]\n else:\n cor = init_tls_client(proto_cls, loop, host, port, sslctx, server_hostname,\n framer)\n future = asyncio.run_coroutine_threadsafe(cor, loop=loop)\n client = future.result()\n\n return loop, client", "def create(\n transport_handler: Callable[[bytes], Awaitable[bytes]],\n lcd: Dict[str, Any],\n) -> \"MessageProcessingModel[V2CEncodingResult, Any]\":\n return V2CMPM(transport_handler, lcd)", "def __init__(self, *, _initialized_with_create=False):\n assert _initialized_with_create, \"Please use DHTProtocol.create coroutine to spawn new protocol instances\"\n super().__init__()", "def __call__(self):\n \n p = self.protocol()\n p.factory = self\n return p", "def _build_protocol(self):\n self._protocol = SBE19Protocol(Prompt, NEWLINE, self._driver_event)", "async def init(self) -> None:", "async def init(self) -> None:", "async def start_aio(self):\n\n # pick the desired transport and then setup read and write to\n # point to the correct method for the transport\n\n # check if user specified a socket transport\n if self.ip_address:\n self.socket = PymataSocket(self.ip_address, self.ip_port, self.loop)\n await 
self.socket.start()\n # set the read and write handles\n self.read = self.socket.read\n self.write = self.socket.write\n for i in range(0, len(self.ip_handshake)):\n await self.read()\n\n else:\n try:\n self.serial_port = PymataSerial(self.com_port, 57600,\n self.sleep_tune,\n self.log_output)\n\n # set the read and write handles\n self.read = self.serial_port.read\n self.write = self.serial_port.write\n\n except serial.SerialException:\n if self.log_output:\n log_string = 'Cannot instantiate serial interface: ' + \\\n self.com_port\n logging.exception(log_string)\n else:\n print(\n 'Cannot instantiate serial interface: ' + self.com_port)\n sys.exit(0)\n\n # wait for arduino to go through a reset cycle if need be\n time.sleep(self.arduino_wait)\n\n # register the get_command method with the event loop\n self.loop = asyncio.get_event_loop()\n self.the_task = self.loop.create_task(self._command_dispatcher())\n\n # get arduino firmware version and print it\n firmware_version = await self.get_firmware_version()\n if not firmware_version:\n if self.log_output:\n log_string = '*** Firmware Version retrieval timed out. ***'\n\n logging.exception(log_string)\n log_string = '\\nDo you have Arduino connectivity and do you ' \\\n 'have a Firmata sketch uploaded to the board?'\n logging.exception(log_string)\n\n else:\n print('*** Firmware Version retrieval timed out. ***')\n print('\\nDo you have Arduino connectivity and do you have a '\n 'Firmata sketch uploaded to the board?')\n try:\n loop = self.loop\n for t in asyncio.Task.all_tasks(loop):\n t.cancel()\n loop.run_until_complete(asyncio.sleep(.1))\n loop.stop()\n loop.close()\n sys.exit(0)\n except RuntimeError:\n self.the_task.cancel()\n time.sleep(1)\n # this suppresses the Event Loop Is Running message,\n # which may be a bug in python 3.4.3\n sys.exit(0)\n except TypeError:\n sys.exit(0)\n if self.log_output:\n log_string = \"\\nArduino Firmware ID: \" + firmware_version\n logging.exception(log_string)\n else:\n print(\"\\nArduino Firmware ID: \" + firmware_version)\n\n # get an analog pin map\n asyncio.ensure_future(self.get_analog_map())\n\n # try to get an analog report. if it comes back as none - shutdown\n # report = await self.get_analog_map()\n report = await self.get_analog_map()\n if not report:\n if self.log_output:\n log_string = '*** Analog map retrieval timed out. ***'\n\n logging.exception(log_string)\n log_string = '\\nDo you have Arduino connectivity and do you ' \\\n 'have a Firmata sketch uploaded to the board?'\n logging.exception(log_string)\n\n else:\n print('*** Analog map retrieval timed out. ***')\n print('\\nDo you have Arduino connectivity and do you have a '\n 'Firmata sketch uploaded to the board?')\n try:\n loop = self.loop\n for t in asyncio.Task.all_tasks(loop):\n t.cancel()\n loop.run_until_complete(asyncio.sleep(.1))\n loop.stop()\n loop.close()\n sys.exit(0)\n except RuntimeError:\n self.the_task.cancel()\n time.sleep(1)\n # this suppresses the Event Loop Is Running message,\n # which may be a bug in python 3.4.3\n sys.exit(0)\n except TypeError:\n sys.exit(0)\n\n # custom assemble the pin lists\n for pin in report:\n digital_data = PinData()\n self.digital_pins.append(digital_data)\n if pin != Constants.IGNORE:\n analog_data = PinData()\n self.analog_pins.append(analog_data)\n\n if self.log_output:\n log_string = 'Auto-discovery complete. 
Found ' + \\\n str(len(self.digital_pins)) + ' Digital Pins and ' + \\\n str(len(self.analog_pins)) + ' Analog Pins'\n logging.info(log_string)\n else:\n print('{} {} {} {} {}'.format('Auto-discovery complete. Found',\n len(self.digital_pins),\n 'Digital Pins and',\n len(self.analog_pins),\n 'Analog Pins\\n\\n'))", "async def generate(self, prompt: str):\n result_handle = await self.handle.generate.remote(prompt)\n return await result_handle", "def __new__(\n cls, platform: str, variant: Optional[str] = None, **kwargs: Dict[Any, Any]\n ) -> \"AsyncScrapli\":\n LOG.debug(\"Scrapli factory initialized\")\n\n if kwargs.get(\"transport\", \"system\") not in ASYNCIO_TRANSPORTS:\n raise ScrapliException(\"Use `Scrapli` if using a synchronous transport!\")\n\n if not isinstance(platform, str):\n raise ScrapliException(f\"Argument `platform` must be `str` got `{type(platform)}`\")\n\n final_driver, additional_kwargs = _get_driver(\n platform=platform, variant=variant, _async=True\n )\n\n # at this point will need to merge the additional kwargs in (for community drivers),\n # ensure that kwargs passed by user supersede the ones coming from community platform\n if additional_kwargs:\n final_kwargs = {**additional_kwargs, **kwargs}\n else:\n final_kwargs = kwargs\n\n # mypy was displeased about NetworkDriver not being callable, fix later probably :)\n final_conn: \"AsyncScrapli\" = final_driver(**final_kwargs) # type: ignore\n return final_conn", "def create(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_Create'))", "async def connect(self):\n\n self.rtm = await api_call('rtm.start') # Start the connection\n assert self.rtm['ok'], self.rtm['error']\n\n with aiohttp.ClientSession() as client:\n async with client.ws_connect(self.rtm[\"url\"]) as ws:\n async for msg in ws:\n assert msg.tp == aiohttp.MsgType.text\n message = json.loads(msg.data)\n asyncio.ensure_future(self.process(message))" ]
[ "0.59781486", "0.5941775", "0.5809846", "0.57470286", "0.5497206", "0.5259741", "0.5149718", "0.51136464", "0.50592905", "0.49969345", "0.49969345", "0.49862215", "0.49718344", "0.49435478", "0.49141228", "0.48773542", "0.48727253", "0.4815847", "0.4808103", "0.4757261", "0.47526616", "0.47313622", "0.4695995", "0.46715543", "0.46715543", "0.4666971", "0.46357018", "0.4623885", "0.46151152", "0.460713" ]
0.6974144
0
Creates a DSMR asyncio protocol coroutine using a RFXtrx serial port.
def create_rfxtrx_dsmr_reader(port, dsmr_version, telegram_callback, loop=None):
    protocol, serial_settings = create_rfxtrx_dsmr_protocol(
        dsmr_version, telegram_callback, loop=None)
    serial_settings['url'] = port

    conn = create_serial_connection(loop, protocol, **serial_settings)
    return conn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_rfxtrx_tcp_dsmr_reader(host, port, dsmr_version,\n telegram_callback, loop=None,\n keep_alive_interval=None):\n if not loop:\n loop = asyncio.get_event_loop()\n protocol, _ = create_rfxtrx_dsmr_protocol(\n dsmr_version, telegram_callback, loop=loop,\n keep_alive_interval=keep_alive_interval)\n conn = loop.create_connection(protocol, host, port)\n return conn", "def create_rfxtrx_dsmr_protocol(dsmr_version, telegram_callback, loop=None, **kwargs):\n protocol = _create_dsmr_protocol(dsmr_version, telegram_callback,\n RFXtrxDSMRProtocol, loop, **kwargs)\n return protocol", "async def main():\r\n\r\n await rvr.wake()\r\n\r\n # Give RVR time to wake up\r\n await asyncio.sleep(2)\r\n\r\n if debug: print(\"Starting imu handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.imu,\r\n handler=imu_handler\r\n )\r\n if debug: print(\"Starting color handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.color_detection,\r\n handler=color_detected_handler\r\n )\r\n if debug: print(\"Starting accelerometer handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.accelerometer,\r\n handler=accelerometer_handler\r\n )\r\n if debug: print(\"Starting ambient light handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.ambient_light,\r\n handler=ambient_light_handler\r\n )\r\n if debug: print(\"Starting encoder handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.encoders,\r\n handler=encoder_handler\r\n )\r\n if debug: print(\"Starting sensor control\")\r\n\r\n #await rvr.sensor_control.start(interval=250)\r\n await rvr.sensor_control.start(interval=1000)\r\n\r\n if debug: print(\"Ros listener spinning up\")\r\n #await spin_ros()\r\n if debug: print(\"spin complete\")\r\n\r\n\r\n\r\n # The asyncio loop will run forever to allow infinite streaming.\r", "async def rfxtrx_dsmr_connection_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=RFXtrxDSMRProtocol)\n\n async def connection_factory(*args, **kwargs):\n \"\"\"Return mocked out Asyncio classes.\"\"\"\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n with patch(\n \"homeassistant.components.dsmr.sensor.create_rfxtrx_dsmr_reader\",\n connection_factory,\n ), patch(\n \"homeassistant.components.dsmr.sensor.create_rfxtrx_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "async def start_aio(self):\n\n # pick the desired transport and then setup read and write to\n # point to the correct method for the transport\n\n # check if user specified a socket transport\n if self.ip_address:\n self.socket = PymataSocket(self.ip_address, self.ip_port, self.loop)\n await self.socket.start()\n # set the read and write handles\n self.read = self.socket.read\n self.write = self.socket.write\n for i in range(0, len(self.ip_handshake)):\n await self.read()\n\n else:\n try:\n self.serial_port = PymataSerial(self.com_port, 57600,\n self.sleep_tune,\n self.log_output)\n\n # set the read and write handles\n self.read = self.serial_port.read\n self.write = self.serial_port.write\n\n except serial.SerialException:\n if self.log_output:\n log_string = 'Cannot instantiate serial interface: ' + \\\n self.com_port\n logging.exception(log_string)\n else:\n print(\n 'Cannot instantiate serial interface: ' + self.com_port)\n 
sys.exit(0)\n\n # wait for arduino to go through a reset cycle if need be\n time.sleep(self.arduino_wait)\n\n # register the get_command method with the event loop\n self.loop = asyncio.get_event_loop()\n self.the_task = self.loop.create_task(self._command_dispatcher())\n\n # get arduino firmware version and print it\n firmware_version = await self.get_firmware_version()\n if not firmware_version:\n if self.log_output:\n log_string = '*** Firmware Version retrieval timed out. ***'\n\n logging.exception(log_string)\n log_string = '\\nDo you have Arduino connectivity and do you ' \\\n 'have a Firmata sketch uploaded to the board?'\n logging.exception(log_string)\n\n else:\n print('*** Firmware Version retrieval timed out. ***')\n print('\\nDo you have Arduino connectivity and do you have a '\n 'Firmata sketch uploaded to the board?')\n try:\n loop = self.loop\n for t in asyncio.Task.all_tasks(loop):\n t.cancel()\n loop.run_until_complete(asyncio.sleep(.1))\n loop.stop()\n loop.close()\n sys.exit(0)\n except RuntimeError:\n self.the_task.cancel()\n time.sleep(1)\n # this suppresses the Event Loop Is Running message,\n # which may be a bug in python 3.4.3\n sys.exit(0)\n except TypeError:\n sys.exit(0)\n if self.log_output:\n log_string = \"\\nArduino Firmware ID: \" + firmware_version\n logging.exception(log_string)\n else:\n print(\"\\nArduino Firmware ID: \" + firmware_version)\n\n # get an analog pin map\n asyncio.ensure_future(self.get_analog_map())\n\n # try to get an analog report. if it comes back as none - shutdown\n # report = await self.get_analog_map()\n report = await self.get_analog_map()\n if not report:\n if self.log_output:\n log_string = '*** Analog map retrieval timed out. ***'\n\n logging.exception(log_string)\n log_string = '\\nDo you have Arduino connectivity and do you ' \\\n 'have a Firmata sketch uploaded to the board?'\n logging.exception(log_string)\n\n else:\n print('*** Analog map retrieval timed out. ***')\n print('\\nDo you have Arduino connectivity and do you have a '\n 'Firmata sketch uploaded to the board?')\n try:\n loop = self.loop\n for t in asyncio.Task.all_tasks(loop):\n t.cancel()\n loop.run_until_complete(asyncio.sleep(.1))\n loop.stop()\n loop.close()\n sys.exit(0)\n except RuntimeError:\n self.the_task.cancel()\n time.sleep(1)\n # this suppresses the Event Loop Is Running message,\n # which may be a bug in python 3.4.3\n sys.exit(0)\n except TypeError:\n sys.exit(0)\n\n # custom assemble the pin lists\n for pin in report:\n digital_data = PinData()\n self.digital_pins.append(digital_data)\n if pin != Constants.IGNORE:\n analog_data = PinData()\n self.analog_pins.append(analog_data)\n\n if self.log_output:\n log_string = 'Auto-discovery complete. Found ' + \\\n str(len(self.digital_pins)) + ' Digital Pins and ' + \\\n str(len(self.analog_pins)) + ' Analog Pins'\n logging.info(log_string)\n else:\n print('{} {} {} {} {}'.format('Auto-discovery complete. 
Found',\n len(self.digital_pins),\n 'Digital Pins and',\n len(self.analog_pins),\n 'Analog Pins\\n\\n'))", "async def assign_serial_port(self, serial_manager):\n #logstring(\"you are now in assign_serial_port\")\n if serial_manager is None:\n return\n\n if serial_manager.CurrentPort is None:\n return\n\n self._clear_stored_response_dictionaries()\n\n self.serial_manager = serial_manager\n self.serial_port = serial_manager.CurrentPort\n #to remove, only used for testing right now\n self.serial_port_name = serial_manager.CurrentPort.com_port\n\n # set the read and write handles\n self.read = self.serial_manager.CurrentPort.read\n self.write = self.serial_manager.CurrentPort.write\n\n self._valid_target_exists = True\n\n\n #keeps a direct reference to the pins\n self._digital_pins_directly = []\n self._analog_pins_directly = []\n\n #and ports\n self._ports_directly = []\n\n # wait for arduino to go through a reset cycle if need be\n logstring(\"Waiting for 2 seconds...\")\n time.sleep(self.arduino_wait)\n #logstring(\"time is up!\")\n #await asyncio.sleep(self.arduino_wait)\n\n # register the get_command method with the event loop\n self.loop = asyncio.get_event_loop()\n\n logstring(\"Setting up Firmata on port {}\".format(self.serial_port.com_port))\n\n # get arduino firmware version and print it\n logstring(\"Checking Firmware version\")\n firmware_version = await self.get_firmware_version()\n logstring(\"Finished checking Firmware version\")\n if not firmware_version:\n logerr('*** Firmware Version retrieval timed out. ***')\n logerr('Firmata not found')\n try:\n # attempt to autoload firmata here, if fails again, mark the port as error\n self.disconnect_port_due_to_error()\n return\n except RuntimeError:\n self.disconnect_port_due_to_error()\n return\n except TypeError:\n self.disconnect_port_due_to_error()\n return\n logstring(\"\\nFirmware ID: \" + firmware_version)\n logstring(\"On port {}\".format(self.serial_port_name))\n # get an analog pin map\n\n # try to get an analog report. if it comes back as none - shutdown\n # report = await self.get_analog_map()\n logstring(\"Fetching analog mapping\")\n analogreport = await self.get_analog_map()\n #logstring(\"got analog map\")\n if not analogreport:\n logerr('*** Analog map retrieval timed out. ***')\n logerr('Analog Pin Mapping not found')\n try:\n # attempt to autoload firmata here, if fails again, mark the port as error\n self.disconnect_port_due_to_error()\n return\n except RuntimeError:\n self.disconnect_port_due_to_error()\n return\n except TypeError:\n self.disconnect_port_due_to_error()\n return\n\n capabilityreport = await self.get_capability_report()\n if not capabilityreport:\n logerr('*** Capability Report retrieval timed out. 
***')\n logerr('Capability Report not found')\n try:\n # attempt to autoload firmata here, if fails again, mark the port as error\n self.disconnect_port_due_to_error()\n return\n except RuntimeError:\n self.disconnect_port_due_to_error()\n return\n except TypeError:\n self.disconnect_port_due_to_error()\n return\n # custom assemble the pin lists\n pininfo = iter(capabilityreport)\n\n\n self._nested_objects = []\n\n\n for i, analogpinmapping in enumerate(analogreport):\n #set up the data structure that captures data that comes from Firmata\n digital_data = PinData()\n self.digital_pins.append(digital_data)\n HasAnalog = False\n analog_data = PinData()\n self.analog_pins.append(analog_data)\n if analogpinmapping != Constants.IGNORE:\n self.analog_pins_analog_numbering.append(analog_data)\n HasAnalog = True\n #set up the data structure that captures data to be sent to Firmata\n port_num = math.floor(i/8)\n pin_num_within_port = i%8\n HasInput = False\n HasOutput = False\n HasPullup = False\n HasAnalog2 = False\n AnalogResolution = 0\n AnalogPinNum = 127\n HasPWM = False\n PWMResolution = 0\n HasI2C = False\n try:\n nextbyte = next(pininfo)\n while nextbyte != 127: #127 signals the end of the information for a pin\n resolutionbyte = next(pininfo)\n if nextbyte == Constants.INPUT:\n HasInput = True\n if nextbyte == Constants.OUTPUT:\n HasOutput = True\n if nextbyte == Constants.PULLUP:\n HasPullup = True\n if nextbyte == Constants.ANALOG:\n HasAnalog2 = True\n AnalogResolution = resolutionbyte\n AnalogPinNum = analogpinmapping\n if nextbyte == Constants.PWM:\n HasPWM = True,\n PWMResolution=14\n if nextbyte == Constants.SERVO:\n pass\n #nothing to do. we treat it like an OUTPUT\n #resolution is fixed...may do something with this\n #in the future if there are issues with some platform?\n if nextbyte == Constants.I2C:\n HasI2C = True\n nextbyte = next(pininfo)\n except StopIteration:\n pass\n\n if HasAnalog2 != HasAnalog:\n #this really shouldn't happen, but might as well catch it anyway\n raise Exception(\"The Analog Pin Map disagrees with the Capabilty Report as to whether pin {} is an analog pin\".format(i))\n\n #this sets the pin number 0-7 within each port\n if pin_num_within_port == 0: #Yay, new port, create it:\n current_port = Port(\"Port {}\".format(port_num),\n port_num)\n self._nested_objects.append(current_port)\n self._ports_directly.append(current_port)\n\n newpin = Pin(ID = \"Pin {} of Port {} hasanalog = {}\".format(pin_num_within_port,\n port_num, HasAnalog),\n PinNum = i, HasInput=HasInput,\n HasPullup=HasPullup, HasOutput=HasOutput,\n HasAnalog=HasAnalog, AnalogPinNum=analogpinmapping,\n AnalogResolution=AnalogResolution, HasPWM=HasPWM,\n PWMResolution=PWMResolution, HasI2C=HasI2C)\n current_port.pins.append(newpin)\n self._digital_pins_directly.append(newpin)\n logstring(\"Appending a new pin {} len {}\".format(newpin._ID, len(self._digital_pins_directly)))\n if HasAnalog:\n self._analog_pins_directly.append(newpin)\n\n\n\n logstring('Auto-discovery complete. 
Found ' + \\\n str(len(self.digital_pins)) + ' Digital Pins and ' + \\\n str(len(self.analog_pins_analog_numbering)) + ' Analog Pins')\n\n self._numpins = len(self.digital_pins)\n self._numports = math.ceil(self._numpins/8)\n\n\n self.KeepAlive = KeepAlive(\"Keep Alive\")\n self._nested_objects.append(self.KeepAlive)\n\n self.Tone = Tone(\"Tone\", self._numpins)\n self._nested_objects.append(self.Tone)\n\n #self.EncoderConfig = EncoderConfig(\"Encoder Config\", self._numpins)\n #self._nested_objects.append(self.EncoderConfig)", "def run(self, event_loop):\n protocol_factory = lambda: self.PROTOCOL_CLS(self)\n coro = event_loop.create_connection(\n protocol_factory, host=self.host, port=self.port)\n event_loop.run_until_complete(coro)", "def __init__(self, port, baud_rate):\n self.rfid_serial_port = serial.Serial(port, baud_rate)", "async def rfxtrx_dsmr_connection_send_validate_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=RFXtrxDSMRProtocol)\n\n protocol.telegram = {\n EQUIPMENT_IDENTIFIER: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject([{\"value\": \"123456789\", \"unit\": \"\"}]),\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n\n async def connection_factory(*args, **kwargs):\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n async def wait_closed():\n if isinstance(connection_factory.call_args_list[0][0][2], str):\n # TCP\n telegram_callback = connection_factory.call_args_list[0][0][3]\n else:\n # Serial\n telegram_callback = connection_factory.call_args_list[0][0][2]\n\n telegram_callback(protocol.telegram)\n\n protocol.wait_closed = wait_closed\n\n with patch(\n \"homeassistant.components.dsmr.config_flow.create_rfxtrx_dsmr_reader\",\n connection_factory,\n ), patch(\n \"homeassistant.components.dsmr.config_flow.create_rfxtrx_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "def __init__(self, com_port, rts_cts=True, baud_rate=None, *args, **kwargs):\n self.connection = serial.Serial()\n self.connection.port = com_port\n if baud_rate is None:\n baud_rate = Erika.DEFAULT_BAUD_RATE\n self.connection.baudrate = baud_rate\n self.connection.rtscts = rts_cts\n self.steps_per_char = 12", "async def dsmr_connection_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=DSMRProtocol)\n\n async def connection_factory(*args, **kwargs):\n \"\"\"Return mocked out Asyncio classes.\"\"\"\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n with patch(\n \"homeassistant.components.dsmr.sensor.create_dsmr_reader\", connection_factory\n ), patch(\n \"homeassistant.components.dsmr.sensor.create_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "def __init__(self, port='/dev/ttyAMA0', baudrate=115200, status_callback=None):\n\n self.respose_regex = re.compile(\n '^\\\n\\\\$(?P<status>(OK)|(NK))( (?P<args>.*?))?(:(?P<seq>[0123456789ABCDEF]{2}))?\\\\^(?P<csum>[0123456789ABCDEF]{2})\\\n\\r$',\n re.IGNORECASE\n )\n\n self.new_status = None\n self.sync = False\n self.callback = status_callback\n self.s = serial.Serial(port=port, baudrate=baudrate, timeout=STANDARD_SERIAL_TIMEOUT)\n\n # thread loop related stuff\n self.sync_thread = None\n self.stop_thread = None\n self.write_allowed = None\n self.newline_available = None\n\n # The first call may be wrong 
because other characters may have been\n # written on the serial port before initializing this class\n # That's why there is this \"try\" and this second \"echo\"\n try:\n self.echo(False)\n except EvseError:\n self.echo(False)", "def start(self):\n\n # check if user specified a socket transport\n if self.ip_address:\n self.socket = PymataSocket(self.ip_address, self.ip_port, self.loop)\n self.loop.run_until_complete((self.socket.start()))\n # set the read and write handles\n self.read = self.socket.read\n self.write = self.socket.write\n for i in range(0, len(self.ip_handshake)):\n self.loop.run_until_complete((self.read()))\n else:\n try:\n self.serial_port = PymataSerial(self.com_port, 57600,\n self.sleep_tune,\n self.log_output)\n # set the read and write handles\n self.read = self.serial_port.read\n self.write = self.serial_port.write\n except serial.SerialException:\n if self.log_output:\n log_string = 'Cannot instantiate serial interface: ' \\\n + self.com_port\n logging.exception(log_string)\n else:\n print(\n 'Cannot instantiate serial interface: ' + self.com_port)\n sys.exit(0)\n\n # wait for arduino to go through a reset cycle if need be\n time.sleep(self.arduino_wait)\n\n # register the get_command method with the event loop\n # self.loop = asyncio.get_event_loop()\n self.the_task = self.loop.create_task(self._command_dispatcher())\n\n # get arduino firmware version and print it\n asyncio.ensure_future(self.get_firmware_version())\n\n firmware_version = self.loop.run_until_complete(self.get_firmware_version())\n if self.log_output:\n log_string = \"\\nArduino Firmware ID: \" + firmware_version\n logging.exception(log_string)\n else:\n print(\"\\nArduino Firmware ID: \" + firmware_version)\n\n # get an analog pin map\n asyncio.ensure_future(self.get_analog_map())\n\n # try to get an analog report. if it comes back as none - shutdown\n report = self.loop.run_until_complete(self.get_analog_map())\n if not report:\n if self.log_output:\n log_string = '*** Analog map retrieval timed out. ***'\n\n logging.exception(log_string)\n log_string = '\\nDo you have Arduino connectivity and do you ' \\\n 'have a Firmata sketch uploaded to the board?'\n logging.exception(log_string)\n\n else:\n print('*** Analog map retrieval timed out. ***')\n print('\\nDo you have Arduino connectivity and do you have a '\n 'Firmata sketch uploaded to the board?')\n try:\n loop = self.loop\n for t in asyncio.Task.all_tasks(loop):\n t.cancel()\n loop.run_until_complete(asyncio.sleep(.1))\n loop.close()\n loop.stop()\n sys.exit(0)\n except RuntimeError:\n # this suppresses the Event Loop Is Running message, which may\n # be a bug in python 3\n sys.exit(0)\n except TypeError:\n sys.exit(0)\n\n # custom assemble the pin lists\n for pin in report:\n digital_data = PinData()\n self.digital_pins.append(digital_data)\n if pin != Constants.IGNORE:\n analog_data = PinData()\n self.analog_pins.append(analog_data)\n\n if self.log_output:\n log_string = 'Auto-discovery complete. Found ' + \\\n str(len(self.digital_pins)) + ' Digital Pins and ' + \\\n str(len(self.analog_pins)) + ' Analog Pins'\n logging.info(log_string)\n else:\n print('{} {} {} {} {}'.format('Auto-discovery complete. 
Found',\n len(self.digital_pins),\n 'Digital Pins and',\n len(self.analog_pins),\n 'Analog Pins\\n\\n'))", "def run(self):\n self.read_from_serial()", "def __init__(self, serial_port='/dev/ttyACM0', baud_rate=9600,\n read_timeout=5):\n self.conn = serial.Serial(serial_port, baud_rate)\n self.conn.timeout = read_timeout # Timeout for readline()", "def connect(self):\n self.arduino = Serial(self.port, self.baud_rate, timeout=self.timeout)", "def kXR_attn_asyncrd(self, streamid=None, status=None, dlen=None, actnum=None,\n port=None, host=None, token=None):\n response_struct = get_struct('ServerResponseHeader') + \\\n get_struct('ServerResponseBody_Attn_asyncrd')\n if not host: host = ''\n else: host += (token if token else '')\n params = \\\n {'streamid': streamid if streamid else 0,\n 'status' : status if status else get_responseid('kXR_attn'),\n 'dlen' : dlen if dlen else len(host),\n 'actnum' : actnum if actnum else get_attncode('kXR_asyncrd'),\n 'port' : port if port else 0,\n 'host' : host}\n return self.mh.build_message(response_struct, params)", "def _createMaster(self):\n port = self.from_config['port']\n baudrate = self.from_config['baudrate']\n master_port = Serial(port, baudrate=baudrate)\n master = modbus_rtu.RtuMaster(master_port)\n master.set_verbose(True)\n master.set_timeout(10)#BRFIX\n assert master._serial.timeout == 10\n return master", "def open_serial(self):\n self.port = serial.Serial(\n self.device,\n baudrate=SERIAL_BAUD,\n timeout=5.0,\n bytesize=serial.EIGHTBITS,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n xonxoff=False,\n rtscts=False,\n dsrdtr=False)\n\t\n self.port.flushInput()\n self.port.flushOutput()", "def run():\n o = CliOptions()\n try:\n o.parseOptions()\n except usage.UsageError, errortext:\n logging.error('%s %s' % (sys.argv[0], errortext))\n logging.info('Try %s --help for usage details' % sys.argv[0])\n raise SystemExit, 1\n\n baudrate = o.opts['baudrate']\n port = o.opts['port']\n logging.debug('About to open port %s' % port)\n s = SerialPort(Echo(), port, reactor, baudrate=baudrate)\n reactor.run()", "async def client(host, port, loop):\n try:\n reader, writer = await asyncio.open_connection(host, port, loop=loop)\n # send want game\n writer.write(b\"\\0\\0\")\n card_msg = await reader.readexactly(27)\n #print(card_msg)\n myscore = 0\n for card in card_msg[1:]:\n #print(card)\n writer.write(bytes([Command.PLAYCARD.value, card]))\n result = await reader.readexactly(2)\n if result[1] == Result.WIN.value:\n myscore += 1\n elif result[1] == Result.LOSE.value:\n myscore -= 1\n if myscore > 0:\n result = \"won\"\n elif myscore < 0:\n result = \"lost\"\n else:\n result = \"drew\"\n logging.debug(\"Game complete, I %s\", result)\n writer.close()\n return 1\n except ConnectionResetError:\n logging.error(\"ConnectionResetError\")\n return 0\n except asyncio.streams.IncompleteReadError:\n logging.error(\"asyncio.streams.IncompleteReadError\")\n return 0\n except OSError:\n logging.error(\"OSError\")\n return 0", "def run(self, event_loop):\n protocol_factory = lambda: self.PROTOCOL_CLS(self)\n coro = event_loop.create_server(protocol_factory, port=self.port)\n event_loop.run_until_complete(coro)", "def connect(self):\n self.x10g_rdma = RdmaUDP(self.local_ip, self.local_port,\n self.rdma_ip, self.rdma_port,\n 9000, 1, self.debug,\n unique_cmd_no)\n self.x10g_rdma.setDebug(self.debug)\n self.x10g_rdma.ack = False # True\n return self.x10g_rdma.error_OK", "def connect(self, mach) -> channel.Channel:\n self.console_uart = self.servo_get_tty()\n 
return mach.open_channel(\"picocom\", \"-q\", \"-b\", \"115200\",\n self.console_uart)", "def run(self) -> None:\n loop = switch_to_uvloop()\n\n with ThreadPoolExecutor(max_workers=1) as pipe_awaiter:\n async def _run():\n node = await DHTNode.create(\n initial_peers=list(self.initial_peers), listen_on=self.listen_on, parallel_rpc=self.parallel_rpc,\n num_workers=self.max_workers or 1, record_validator=self._record_validator,\n **self.kwargs)\n if node.port is not None:\n self._port.value = node.port\n self.ready.set()\n\n while True:\n method, args, kwargs = await loop.run_in_executor(pipe_awaiter, self._pipe.recv)\n asyncio.create_task(getattr(self, method)(node, *args, **kwargs))\n\n coro = _run()\n loop.run_until_complete(coro)", "def async_io_factory(host=\"127.0.0.1\", port=Defaults.TLSPort, sslctx=None,\n server_hostname=None, framer=None, source_address=None,\n timeout=None, **kwargs):\n import asyncio\n from pymodbus.client.asynchronous.async_io import init_tls_client\n loop = kwargs.get(\"loop\") or asyncio.new_event_loop()\n proto_cls = kwargs.get(\"proto_cls\", None)\n if not loop.is_running():\n asyncio.set_event_loop(loop)\n cor = init_tls_client(proto_cls, loop, host, port, sslctx, server_hostname,\n framer)\n client = loop.run_until_complete(asyncio.gather(cor))[0]\n else:\n cor = init_tls_client(proto_cls, loop, host, port, sslctx, server_hostname,\n framer)\n future = asyncio.run_coroutine_threadsafe(cor, loop=loop)\n client = future.result()\n\n return loop, client", "def __init__(self,port):\n\n \n self.instrument = serial.Serial(port, baudrate=115200, timeout= 0.002)\n print(\"Connected to power supply.\")\n self.async_query_buffer = [] #create an empty lis\n self.async_reply_buffer = []\n #self.identify()", "def create_serial_obj(portPath, baud_rate, tout):\n return serial.Serial(portPath, baud_rate, timeout = tout)", "def __init__(self, port='/dev/ttyUSB0', debug=False, dst=0x50):\n # Motor parameters\n self.__pos = -1\n self.__status = 0xFFFFFFFF\n self.__param_lock = threading.Lock()\n #\n self.__chan = 0x01 # Controller only has one channel, number 1.\n self.__debug = debug\n self.__src = 0x01 # 0x01 = PC Controller\n self.__dst = dst\n self.__ser = serial.Serial(port, 115200, timeout=0, rtscts=True)\n if not self.__ser.is_open:\n self.__ser.open()\n self.__thread = threading.Thread(target=self.__recv)\n self.__thread.setDaemon(True)\n self.__thread.start()", "def Start(self):\n self.CallClient(standard.ReadBuffer, next_state=\"WrongProcess\")" ]
[ "0.65169317", "0.61707854", "0.6049925", "0.59440464", "0.5872075", "0.55788165", "0.55407685", "0.55030054", "0.54878443", "0.54046285", "0.535371", "0.53511745", "0.5315994", "0.53064007", "0.52906173", "0.52837414", "0.5231402", "0.52189577", "0.5208599", "0.5160057", "0.5148855", "0.514542", "0.51368105", "0.5134158", "0.51046723", "0.5093312", "0.5079985", "0.50513476", "0.50432926", "0.5030448" ]
0.70299643
0
Creates a DSMR asyncio protocol coroutine using a RFXtrx TCP connection.
def create_rfxtrx_tcp_dsmr_reader(host, port, dsmr_version,
                                  telegram_callback, loop=None,
                                  keep_alive_interval=None):
    if not loop:
        loop = asyncio.get_event_loop()
    protocol, _ = create_rfxtrx_dsmr_protocol(
        dsmr_version, telegram_callback, loop=loop,
        keep_alive_interval=keep_alive_interval)
    conn = loop.create_connection(protocol, host, port)
    return conn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_rfxtrx_dsmr_reader(port, dsmr_version, telegram_callback, loop=None):\n protocol, serial_settings = create_rfxtrx_dsmr_protocol(\n dsmr_version, telegram_callback, loop=None)\n serial_settings['url'] = port\n\n conn = create_serial_connection(loop, protocol, **serial_settings)\n return conn", "async def rfxtrx_dsmr_connection_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=RFXtrxDSMRProtocol)\n\n async def connection_factory(*args, **kwargs):\n \"\"\"Return mocked out Asyncio classes.\"\"\"\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n with patch(\n \"homeassistant.components.dsmr.sensor.create_rfxtrx_dsmr_reader\",\n connection_factory,\n ), patch(\n \"homeassistant.components.dsmr.sensor.create_rfxtrx_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "def run(self, event_loop):\n protocol_factory = lambda: self.PROTOCOL_CLS(self)\n coro = event_loop.create_connection(\n protocol_factory, host=self.host, port=self.port)\n event_loop.run_until_complete(coro)", "def create_rfxtrx_dsmr_protocol(dsmr_version, telegram_callback, loop=None, **kwargs):\n protocol = _create_dsmr_protocol(dsmr_version, telegram_callback,\n RFXtrxDSMRProtocol, loop, **kwargs)\n return protocol", "async def form_input_tcp_connection_test(req, resp):\n logging.basicConfig(level=logging.DEBUG)\n tcp_endpoint = req.params['tcp-endpoint']\n tcp_port = req.params['tcp-port']\n loop = asyncio.get_running_loop()\n\n try:\n reader, writer = await asyncio.open_connection(host=tcp_endpoint, port=tcp_port)\n connection_info = f'Connection created to {tcp_endpoint} on port {tcp_port}' \n d = data.DinghyData(redis_host,\n domain_response_code=None,\n domain_response_time_ms=None,\n request_url=f'{tcp_endpoint}:{tcp_port}'\n )\n d.save_ping()\n resp.content = api.template(\n 'ping_response_tcp_conn.html',\n request=tcp_endpoint,\n port=tcp_port,\n connection_results = connection_info\n )\n except (asyncio.TimeoutError, ConnectionRefusedError):\n print(\"Network port not responding\")\n connection_info = f'Failed to connect to {tcp_endpoint} on port {tcp_port}' \n resp.status_code = api.status_codes.HTTP_402\n resp.content = api.template(\n 'ping_response_tcp_conn.html',\n request=tcp_endpoint,\n port=tcp_port,\n connection_results = connection_info\n )", "async def dsmr_connection_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=DSMRProtocol)\n\n async def connection_factory(*args, **kwargs):\n \"\"\"Return mocked out Asyncio classes.\"\"\"\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n with patch(\n \"homeassistant.components.dsmr.sensor.create_dsmr_reader\", connection_factory\n ), patch(\n \"homeassistant.components.dsmr.sensor.create_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "async def main():\r\n\r\n await rvr.wake()\r\n\r\n # Give RVR time to wake up\r\n await asyncio.sleep(2)\r\n\r\n if debug: print(\"Starting imu handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.imu,\r\n handler=imu_handler\r\n )\r\n if debug: print(\"Starting color handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.color_detection,\r\n handler=color_detected_handler\r\n )\r\n if debug: print(\"Starting accelerometer handler\")\r\n await 
rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.accelerometer,\r\n handler=accelerometer_handler\r\n )\r\n if debug: print(\"Starting ambient light handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.ambient_light,\r\n handler=ambient_light_handler\r\n )\r\n if debug: print(\"Starting encoder handler\")\r\n await rvr.sensor_control.add_sensor_data_handler(\r\n service=RvrStreamingServices.encoders,\r\n handler=encoder_handler\r\n )\r\n if debug: print(\"Starting sensor control\")\r\n\r\n #await rvr.sensor_control.start(interval=250)\r\n await rvr.sensor_control.start(interval=1000)\r\n\r\n if debug: print(\"Ros listener spinning up\")\r\n #await spin_ros()\r\n if debug: print(\"spin complete\")\r\n\r\n\r\n\r\n # The asyncio loop will run forever to allow infinite streaming.\r", "async def _connect(self):\n if not self._reader:\n self._reader = asyncio.create_task(self._read())", "async def rfxtrx_dsmr_connection_send_validate_fixture(hass):\n\n transport = MagicMock(spec=asyncio.Transport)\n protocol = MagicMock(spec=RFXtrxDSMRProtocol)\n\n protocol.telegram = {\n EQUIPMENT_IDENTIFIER: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n EQUIPMENT_IDENTIFIER_GAS: CosemObject([{\"value\": \"123456789\", \"unit\": \"\"}]),\n P1_MESSAGE_TIMESTAMP: CosemObject([{\"value\": \"12345678\", \"unit\": \"\"}]),\n }\n\n async def connection_factory(*args, **kwargs):\n return (transport, protocol)\n\n connection_factory = MagicMock(wraps=connection_factory)\n\n async def wait_closed():\n if isinstance(connection_factory.call_args_list[0][0][2], str):\n # TCP\n telegram_callback = connection_factory.call_args_list[0][0][3]\n else:\n # Serial\n telegram_callback = connection_factory.call_args_list[0][0][2]\n\n telegram_callback(protocol.telegram)\n\n protocol.wait_closed = wait_closed\n\n with patch(\n \"homeassistant.components.dsmr.config_flow.create_rfxtrx_dsmr_reader\",\n connection_factory,\n ), patch(\n \"homeassistant.components.dsmr.config_flow.create_rfxtrx_tcp_dsmr_reader\",\n connection_factory,\n ):\n yield (connection_factory, transport, protocol)", "async def _mk_http_connection(self) -> ClientSession:\n if self._ssl_context is not None:\n connector = TCPConnector(ssl=self._ssl_context)\n base_url = f'https://{self._netloc}/'\n else:\n connector = TCPConnector()\n base_url = f'http://{self._netloc}/'\n\n return ClientSession(base_url, connector=connector, timeout=ClientTimeout(self._socket_timeout))", "def async_io_factory(host=\"127.0.0.1\", port=Defaults.TLSPort, sslctx=None,\n server_hostname=None, framer=None, source_address=None,\n timeout=None, **kwargs):\n import asyncio\n from pymodbus.client.asynchronous.async_io import init_tls_client\n loop = kwargs.get(\"loop\") or asyncio.new_event_loop()\n proto_cls = kwargs.get(\"proto_cls\", None)\n if not loop.is_running():\n asyncio.set_event_loop(loop)\n cor = init_tls_client(proto_cls, loop, host, port, sslctx, server_hostname,\n framer)\n client = loop.run_until_complete(asyncio.gather(cor))[0]\n else:\n cor = init_tls_client(proto_cls, loop, host, port, sslctx, server_hostname,\n framer)\n future = asyncio.run_coroutine_threadsafe(cor, loop=loop)\n client = future.result()\n\n return loop, client", "def run(self) -> None:\n loop = switch_to_uvloop()\n\n with ThreadPoolExecutor(max_workers=1) as pipe_awaiter:\n async def _run():\n node = await DHTNode.create(\n initial_peers=list(self.initial_peers), listen_on=self.listen_on, 
parallel_rpc=self.parallel_rpc,\n num_workers=self.max_workers or 1, record_validator=self._record_validator,\n **self.kwargs)\n if node.port is not None:\n self._port.value = node.port\n self.ready.set()\n\n while True:\n method, args, kwargs = await loop.run_in_executor(pipe_awaiter, self._pipe.recv)\n asyncio.create_task(getattr(self, method)(node, *args, **kwargs))\n\n coro = _run()\n loop.run_until_complete(coro)", "async def connect(addr: Address,\n **kwargs\n ) -> 'Connection':\n reader, writer = await asyncio.open_connection(addr.host, addr.port,\n **kwargs)\n return Connection(reader, writer)", "async def connect(self) -> None:\n buffer = bytes()\n with trio.socket.socket() as client_sock:\n self.socket = client_sock\n self.address = await self.socket.resolve_remote_address((self.host, self.port))\n await client_sock.connect(self.address)\n async with trio.open_nursery() as nursery:\n nursery.spawn(self.connection_made)\n while True:\n if not self.socket._sock._closed:\n data = await client_sock.recv(self.bufsize)\n if not data:\n break\n buffer += data\n pts = buffer.split(b\"\\n\")\n buffer = pts.pop()\n for el in pts:\n nursery.spawn(self.data_received, el)\n else:\n break\n nursery.spawn(self.connection_lost)", "async def tickle_tcp(self, co):\n\n # SPIVY 'technically' just uses HTTP over a TCP socket and\n # does not have an 'Accept-Encoding' header.\n url = f\"{co.protocol}://{co.ip}:{co.port}/{self.uri_string}\"\n http_data = f\"POST {url} HTTP/1.1\\r\\n\" \\\n f\"Cookie: id={self.cookie_id}\\r\\n\" \\\n f\"Content-Length: {str(len(self.payload))}\\r\\n\" \\\n f\"\\r\\n\"\n\n http_request = http_data.encode('utf-8') + self.payload\n\n try:\n async with timeout(self.timeout):\n reader, writer = await asyncio.open_connection(co.ip, co.port)\n writer.write(http_request)\n await writer.drain()\n\n response = await reader.read(0x200)\n writer.close()\n await writer.wait_closed()\n\n if await self.eval_response(response):\n co.success = True\n\n except asyncio.TimeoutError:\n log.debug('Failure: TimeoutError: %s:%s:%s' %\n (co.protocol, co.ip, co.port))\n\n except ConnectionRefusedError:\n log.debug('Failure: ConnectionRefusedError: %s:%s:%s' %\n (co.protocol, co.ip, co.port))\n\n except Exception as e:\n log.debug('General failure: %s: %s:%s:%s' %\n (str(e), co.protocol, co.ip, co.port))\n\n return co", "async def async_connect_socket(streamer_obj: class_definition_and_manipulation.StreamerObj) -> None:\r\n reader, writer = await asyncio.open_connection(encryption_key.cfg_host,\r\n int(encryption_key.cfg_port))\r\n\r\n writer.write(f'CAP REQ :twitch.tv/membership twitch.tv/tags twitch.tv/commands\\r\\n'.encode('utf-8'))\r\n print(f\"Connecting to socket for {streamer_obj.name}\")\r\n\r\n writer.write(\"PASS {}\\r\\n\".format(encryption_key.decrypted_pass).encode('utf-8')) # password\r\n writer.write(\"NICK #zerg3rrbot\\r\\n\".encode('utf-8')) # bot name\r\n writer.write(f\"JOIN #{streamer_obj.name}\\r\\n\".encode('utf-8'))\r\n\r\n await writer.drain()\r\n streamer_obj.stream_socket_writer = writer\r\n streamer_obj.stream_socket_reader = reader", "def run(self, event_loop):\n protocol_factory = lambda: self.PROTOCOL_CLS(self)\n coro = event_loop.create_server(protocol_factory, port=self.port)\n event_loop.run_until_complete(coro)", "async def connect(self):\n\n self.rtm = await api_call('rtm.start') # Start the connection\n assert self.rtm['ok'], self.rtm['error']\n\n with aiohttp.ClientSession() as client:\n async with client.ws_connect(self.rtm[\"url\"]) as ws:\n async for msg 
in ws:\n assert msg.tp == aiohttp.MsgType.text\n message = json.loads(msg.data)\n asyncio.ensure_future(self.process(message))", "def __init__(self, shell=TerminalShell, stream=TelnetStream,\n encoding='utf-8', log=logging, force_binary=False,\n waiter_connected=None, waiter_closed=None):\n self.log = log\n self.force_binary = force_binary\n self._shell_factory = shell\n self._stream_factory = stream\n self._default_encoding = encoding\n self._loop = asyncio.get_event_loop()\n\n #: session environment as S.env['key'], defaults empty string value\n self._env = collections.defaultdict(str, **self.default_env)\n\n #: toggled when transport is shutting down\n self._closing = False\n\n #: datetime of last byte received\n self._last_received = None\n\n #: datetime of connection made\n self._connected = None\n\n #: future result stores value of gethostbyaddr(sever_ip)\n self._server_host = asyncio.Future()\n\n #: server_fqdn is result of socket.getfqdn() of server_host\n self._server_fqdn = asyncio.Future()\n\n #: values for properties ``server_ip`` and ``server_port``\n self._server_ip = None\n self._server_port = None\n\n #: waiter is a Future that completes when connection is closed.\n if waiter_closed is None:\n waiter_closed = asyncio.Future()\n self.waiter_closed = waiter_closed\n\n if waiter_connected is None:\n waiter_connected = asyncio.Future()\n self.waiter_connected = waiter_connected", "async def start_aio(self):\n\n # pick the desired transport and then setup read and write to\n # point to the correct method for the transport\n\n # check if user specified a socket transport\n if self.ip_address:\n self.socket = PymataSocket(self.ip_address, self.ip_port, self.loop)\n await self.socket.start()\n # set the read and write handles\n self.read = self.socket.read\n self.write = self.socket.write\n for i in range(0, len(self.ip_handshake)):\n await self.read()\n\n else:\n try:\n self.serial_port = PymataSerial(self.com_port, 57600,\n self.sleep_tune,\n self.log_output)\n\n # set the read and write handles\n self.read = self.serial_port.read\n self.write = self.serial_port.write\n\n except serial.SerialException:\n if self.log_output:\n log_string = 'Cannot instantiate serial interface: ' + \\\n self.com_port\n logging.exception(log_string)\n else:\n print(\n 'Cannot instantiate serial interface: ' + self.com_port)\n sys.exit(0)\n\n # wait for arduino to go through a reset cycle if need be\n time.sleep(self.arduino_wait)\n\n # register the get_command method with the event loop\n self.loop = asyncio.get_event_loop()\n self.the_task = self.loop.create_task(self._command_dispatcher())\n\n # get arduino firmware version and print it\n firmware_version = await self.get_firmware_version()\n if not firmware_version:\n if self.log_output:\n log_string = '*** Firmware Version retrieval timed out. ***'\n\n logging.exception(log_string)\n log_string = '\\nDo you have Arduino connectivity and do you ' \\\n 'have a Firmata sketch uploaded to the board?'\n logging.exception(log_string)\n\n else:\n print('*** Firmware Version retrieval timed out. 
***')\n print('\\nDo you have Arduino connectivity and do you have a '\n 'Firmata sketch uploaded to the board?')\n try:\n loop = self.loop\n for t in asyncio.Task.all_tasks(loop):\n t.cancel()\n loop.run_until_complete(asyncio.sleep(.1))\n loop.stop()\n loop.close()\n sys.exit(0)\n except RuntimeError:\n self.the_task.cancel()\n time.sleep(1)\n # this suppresses the Event Loop Is Running message,\n # which may be a bug in python 3.4.3\n sys.exit(0)\n except TypeError:\n sys.exit(0)\n if self.log_output:\n log_string = \"\\nArduino Firmware ID: \" + firmware_version\n logging.exception(log_string)\n else:\n print(\"\\nArduino Firmware ID: \" + firmware_version)\n\n # get an analog pin map\n asyncio.ensure_future(self.get_analog_map())\n\n # try to get an analog report. if it comes back as none - shutdown\n # report = await self.get_analog_map()\n report = await self.get_analog_map()\n if not report:\n if self.log_output:\n log_string = '*** Analog map retrieval timed out. ***'\n\n logging.exception(log_string)\n log_string = '\\nDo you have Arduino connectivity and do you ' \\\n 'have a Firmata sketch uploaded to the board?'\n logging.exception(log_string)\n\n else:\n print('*** Analog map retrieval timed out. ***')\n print('\\nDo you have Arduino connectivity and do you have a '\n 'Firmata sketch uploaded to the board?')\n try:\n loop = self.loop\n for t in asyncio.Task.all_tasks(loop):\n t.cancel()\n loop.run_until_complete(asyncio.sleep(.1))\n loop.stop()\n loop.close()\n sys.exit(0)\n except RuntimeError:\n self.the_task.cancel()\n time.sleep(1)\n # this suppresses the Event Loop Is Running message,\n # which may be a bug in python 3.4.3\n sys.exit(0)\n except TypeError:\n sys.exit(0)\n\n # custom assemble the pin lists\n for pin in report:\n digital_data = PinData()\n self.digital_pins.append(digital_data)\n if pin != Constants.IGNORE:\n analog_data = PinData()\n self.analog_pins.append(analog_data)\n\n if self.log_output:\n log_string = 'Auto-discovery complete. Found ' + \\\n str(len(self.digital_pins)) + ' Digital Pins and ' + \\\n str(len(self.analog_pins)) + ' Analog Pins'\n logging.info(log_string)\n else:\n print('{} {} {} {} {}'.format('Auto-discovery complete. 
Found',\n len(self.digital_pins),\n 'Digital Pins and',\n len(self.analog_pins),\n 'Analog Pins\\n\\n'))", "async def connection_factory(*args, **kwargs):\n return (transport, protocol)", "async def connection_factory(*args, **kwargs):\n return (transport, protocol)", "async def main():\n client = Client(url='opc.tcp://localhost:4840/freeopcua/server/')\n async with client:\n idx = await client.get_namespace_index(uri=\"http://examples.freeopcua.github.io\")\n var = await client.nodes.objects.get_child([f\"{idx}:MyObject\", f\"{idx}:MyVariable\"])\n handler = SubscriptionHandler()\n # We create a Client Subscription.\n subscription = await client.create_subscription(500, handler)\n nodes = [\n var,\n client.get_node(ua.ObjectIds.Server_ServerStatus_CurrentTime),\n ]\n # We subscribe to data changes for two nodes (variables).\n await subscription.subscribe_data_change(nodes)\n # We let the subscription run for ten seconds\n await asyncio.sleep(10)\n # We delete the subscription (this un-subscribes from the data changes of the two variables).\n # This is optional since closing the connection will also delete all subscriptions.\n await subscription.delete()\n # After one second we exit the Client context manager - this will close the connection.\n await asyncio.sleep(1)", "async def connect(self):\n self.client = await asyncio_redis.Connection.create(\n host=self.host,\n port=self.port,\n db=self.database,\n auto_reconnect=self.reconnect,\n password=self.password,\n )", "async def client(host, port, loop):\n try:\n reader, writer = await asyncio.open_connection(host, port, loop=loop)\n # send want game\n writer.write(b\"\\0\\0\")\n card_msg = await reader.readexactly(27)\n #print(card_msg)\n myscore = 0\n for card in card_msg[1:]:\n #print(card)\n writer.write(bytes([Command.PLAYCARD.value, card]))\n result = await reader.readexactly(2)\n if result[1] == Result.WIN.value:\n myscore += 1\n elif result[1] == Result.LOSE.value:\n myscore -= 1\n if myscore > 0:\n result = \"won\"\n elif myscore < 0:\n result = \"lost\"\n else:\n result = \"drew\"\n logging.debug(\"Game complete, I %s\", result)\n writer.close()\n return 1\n except ConnectionResetError:\n logging.error(\"ConnectionResetError\")\n return 0\n except asyncio.streams.IncompleteReadError:\n logging.error(\"asyncio.streams.IncompleteReadError\")\n return 0\n except OSError:\n logging.error(\"OSError\")\n return 0", "def run_coroutine(self, coro: Callable[[DHT, DHTNode], Awaitable[ReturnType]],\n return_future: bool = False) -> Union[ReturnType, MPFuture[ReturnType]]:\n future, _future = MPFuture.make_pair()\n self.pipe.send(('_run_coroutine', [], dict(coro=coro, future=_future)))\n return future if return_future else future.result()", "async def connectTelnet(self):\n\n # Display info message\n log.info(\"connectTelnet\")\n\n try:\n\n # Prepare connection with Telnet\n conn = asyncio.open_connection(self.ip, self.port)\n\n except Exception as error:\n\n # Preparation to the connection failed\n\n # Display error message\n log.error(f\"connectTelnet: preparation to the connection failed: '{error}'\")\n\n # Exception propagation\n raise\n\n # Display info message\n log.info(\"connectTelnet: preparation to the connection success\")\n\n try:\n\n # Connection with Telnet\n self._reader, self._writer = await asyncio.wait_for(\n conn, timeout=self.timeout\n )\n\n except asyncio.TimeoutError:\n\n # Time out during connection\n\n # Display error message\n log.error(\"connectTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n # 
Display info message\n log.info(\"connectTelnet: connection success\")\n\n # Get prompt for the login\n prompt = self._telnet_connect_login\n\n # Get prompt for the password\n prompt_password = self._telnet_connect_password\n\n # By default a login is expected\n use_login = True\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n # Read the telnet information and first prompt (for login but a password prompt can be found for IOS for instance)\n while True:\n\n # Display info message\n log.info(f\"connectTelnet: read data for prompt\")\n\n # Read returned prompt\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=self.timeout\n )\n\n # Display info message\n log.info(f\"connectTelnet: byte_data: {byte_data}\")\n\n # Temporary convertion in string. This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"connectTelnet: output: {output}\")\n\n # Prompt for the username found?\n if prompt in output:\n\n # Yes\n\n # Leave the loop\n break\n\n # Prompt for the password found?\n elif prompt_password in output:\n\n # Yes\n\n # That means only password is required\n use_login = False\n\n # Leave the loop\n break\n\n # Display info message\n log.info(f\"connectTelnet: login prompt: '{output}'\")\n\n # Login to use?\n if use_login:\n\n # Yes\n\n # Display info message\n log.info(\"connectTelnet: sending login\")\n\n try:\n\n # Send login\n await self.send_command(self.username, prompt_password)\n\n # Display info message\n log.info(\"connectTelnet: login sent\")\n\n except Exception:\n\n # Problem with the login\n\n # Propagate the exception\n raise\n\n # Display info message\n log.info(\"connectTelnet: sending password\")\n\n try:\n # Send password\n output = await self.telnet_send_command_with_unexpected_pattern(\n self.password,\n self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt,\n )\n\n except Exception:\n\n # Problem with the password\n\n # Propagate the exception\n raise\n\n # Display info message\n log.info(\"connectTelnet: password sent\")\n\n # Find prompt\n self.prompt = self.find_prompt(str(output))\n\n # Display info message\n log.info(f\"connectTelnet: prompt found: '{self.prompt}'\")\n\n # Password enable?\n if self.enable_mode:\n\n # Yes\n\n # Display info message\n log.info(\"connectTelnet: enable mode to be activated\")\n\n try:\n\n # Send enable command\n await self.send_command(self.cmd_enable, prompt_password)\n\n # Display info message\n log.info(\"connectTelnet: enable command sent\")\n\n # Display info message\n log.info(\"connectTelnet: sending enable password\")\n\n # Send enable password\n await self.telnet_send_command_with_unexpected_pattern(\n self.enable_password,\n self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt,\n )\n\n # Display info message\n log.info(\"connectTelnet: enable password sent\")\n\n except Exception:\n\n # Problem with the enable password\n\n # Display info message\n log.info(\"connectTelnet: enable password failure\")\n\n # Propagate the exception\n raise\n\n # Disable paging command available?\n if self.cmd_disable_paging:\n\n # Yes\n\n # Disable paging\n await self.disable_paging()", "async def _open_connection(self) -> None:\n self.logger.info(\n f\"Connecting to gpsd at {self.connection_args['host']}\" +\n (f\":{self.connection_args['port']}\"\n if self.connection_args['port'] else ''))\n self.reader, self.writer = await 
asyncio.wait_for(\n asyncio.open_connection(**self.connection_args),\n self.connection_timeout)\n # Set socket options\n sock = self.writer.get_extra_info('socket')\n if sock is not None:\n if 'SO_KEEPALIVE' in self.alive_opts:\n sock.setsockopt(socket.SOL_SOCKET,\n socket.SO_KEEPALIVE,\n self.alive_opts['SO_KEEPALIVE'])\n if hasattr(\n sock,\n 'TCP_KEEPIDLE') and 'TCP_KEEPIDLE' in self.alive_opts:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPIDLE, # pylint: disable=E1101\n self.alive_opts['TCP_KEEPIDLE'])\n if hasattr(\n sock,\n 'TCP_KEEPINTVL') and 'TCP_KEEPINTVL' in self.alive_opts:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPINTVL, # pylint: disable=E1101\n self.alive_opts['TCP_KEEPINTVL'])\n if hasattr(\n sock,\n 'TCP_KEEPCNT') and 'TCP_KEEPCNT' in self.alive_opts:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPCNT,\n self.alive_opts['TCP_KEEPCNT'])", "async def connect(self):\n raise NotImplementedError", "async def new_coro():\n try:\n await coro\n except asyncio.CancelledError:\n pass" ]
[ "0.65162337", "0.62807643", "0.6247426", "0.5982679", "0.5889128", "0.5806483", "0.57464033", "0.5730266", "0.566644", "0.5604687", "0.5599714", "0.5582977", "0.5576975", "0.55476266", "0.5544629", "0.5533422", "0.5531221", "0.54850537", "0.54660755", "0.5438965", "0.5341547", "0.5341547", "0.53381765", "0.53092325", "0.5295972", "0.5272448", "0.5254683", "0.5253075", "0.5230893", "0.51898193" ]
0.72272205
0
takes two images tilted with respect to one another and tries to find overlap: img1 (as numpy array); img2 (as numpy array); tiltdiff (in degrees): negative, img1 is more compressed (tilted); positive, img2 is more compressed (tilted); picks1: list of particle picks for image 1
def getTiltedCoordinates(img1, img2, tiltdiff, picks1=[], angsearch=True, inittiltaxis=-7.2, msg=True): t0 = time.time() #shrink images bin = 2 binned1 = apImage.binImg(img1, bin) binned2 = apImage.binImg(img2, bin) #apImage.arrayToJpeg(binned1, "binned1.jpg") #apImage.arrayToJpeg(binned2, "binned2.jpg") filt1 = apImage.highPassFilter(binned1, apix=1.0, radius=20.0, localbin=4/bin) filt2 = apImage.highPassFilter(binned2, apix=1.0, radius=20.0, localbin=4/bin) #apImage.arrayToJpeg(filt1, "filt1.jpg") #apImage.arrayToJpeg(filt2, "filt2.jpg") if angsearch is True: bestsnr = 0 bestangle = None ### rough refine #for angle in [-6, -4, -2,]: # sys.stderr.write(".") # shift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, angle, bin, msg=False) # if snr > bestsnr: # bestsnr = snr # bestangle = angle bestangle = inittiltaxis if msg is True: apDisplay.printMsg("Best tilt axis angle= %.1f; SNR=%.2f"%(bestangle,bestsnr)) ### finer refine for angle in [bestangle-1, bestangle-0.5, bestangle+0.5, bestangle+1]: if msg is True: sys.stderr.write(".") shift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, angle, bin, msg=False) if snr > bestsnr: bestsnr = snr bestangle = angle if msg is True: apDisplay.printMsg("Best tilt axis angle= %.1f; SNR=%.2f"%(bestangle,bestsnr)) ### really fine refine for angle in [bestangle-0.2, bestangle-0.1, bestangle+0.1, bestangle+0.2]: if msg is True: sys.stderr.write(".") shift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, angle, bin, msg=False) if snr > bestsnr: bestsnr = snr bestangle = angle if msg is True: apDisplay.printMsg("Best tilt axis angle= %.1f; SNR=%.2f"%(bestangle,bestsnr)) shift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, bestangle, bin, msg=msg) if msg is True: apDisplay.printMsg("Best tilt axis angle= %.1f; SNR=%.2f"%(bestangle,bestsnr)) else: bestangle = 0.0 shift, xfactor, snr = getTiltedRotateShift(img1, img2, tiltdiff, bestangle, bin) if msg and min(abs(shift)) < min(img1.shape)/16.0: apDisplay.printWarning("Overlap was too close to the edge and possibly wrong.") ### case 1: find tilted center of first image center = numpy.asarray(img1.shape)/2.0 newpoint = translatePoint(center, center, shift, bestangle, xfactor) #print "newpoint=", newpoint halfsh = (center + newpoint)/2.0 origin = halfsh ### case 2: using a list of picks if len(picks1) > 1: #get center most pick dmin = origin[0]/2.0 for pick in picks1: da = numpy.hypot(pick[0]-halfsh[0], pick[1]-halfsh[1]) if da < dmin: dmin = da origin = pick # origin is pick from image 1 # newpart is pick from image 2 newpart = translatePoint(origin, center, shift, bestangle, xfactor) newpart2 = numpy.array([(origin[0]*xfactor-shift[0])*xfactor, origin[1]-shift[1]]) if msg is True: apDisplay.printMsg("origin=(%d,%d); newpart=(%.1f,%.1f); newpart2=(%.1f,%.1f)" %(origin[0],origin[1], newpart[0],newpart[1], newpart2[0],newpart2[1],)) apDisplay.printMsg("completed in "+apDisplay.timeString(time.time()-t0)) return origin, newpart, snr, bestangle ### check to make sure points are not off the edge while newpart[0] < 10: newpart += numpy.asarray((20,0)) origin += numpy.asarray((20,0)) while newpart[1] < 10: newpart += numpy.asarray((0,20)) origin += numpy.asarray((0,20)) while newpart[0] > img1.shape[0]-10: newpart -= numpy.asarray((20,0)) origin -= numpy.asarray((20,0)) while newpart[1] > img1.shape[1]-10: newpart -= numpy.asarray((0,20)) origin -= numpy.asarray((0,20)) return origin, newpart
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTiltedRotateShift(img1, img2, tiltdiff, angle=0, bin=1, msg=True):\n\n\t### untilt images by stretching and compressing\n\t# choose angle s/t compressFactor = 1/stretchFactor\n\t# this only works if one image is untilted (RCT) of both images are opposite tilt (OTR)\n\t#halftilt = abs(tiltdiff)/2.0\n\thalftiltrad = math.acos(math.sqrt(math.cos(abs(tiltdiff)/180.0*math.pi)))\n\t# go from zero tilt to half tilt\n\tcompressFactor = math.cos(halftiltrad)\n\t# go from max tilt to half tilt\n\tstretchFactor = math.cos(halftiltrad) / math.cos(abs(tiltdiff)/180.0*math.pi)\n\tif tiltdiff > 0:\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"compress image 1\")\n\t\tuntilt1 = transformImage(img1, compressFactor, angle)\n\t\tuntilt2 = transformImage(img2, stretchFactor, angle)\n\t\txfactor = compressFactor\n\telse:\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"stretch image 1\")\n\t\tuntilt1 = transformImage(img1, stretchFactor, angle)\n\t\tuntilt2 = transformImage(img2, compressFactor, angle)\n\t\txfactor = stretchFactor\n\n\t### filtering was done earlier\n\tfilt1 = untilt1\n\tfilt2 = untilt2\n\n\tif filt1.shape != filt2.shape:\n\t\tnewshape = ( max(filt1.shape[0],filt2.shape[0]), max(filt1.shape[1],filt2.shape[1]) )\n\t\tapDisplay.printMsg(\"Resizing images to: \"+str(newshape))\n\t\tfilt1 = apImage.frame_constant(filt1, newshape, filt1.mean())\n\t\tfilt2 = apImage.frame_constant(filt2, newshape, filt2.mean())\n\n\t### cross-correlate\n\tcc = correlator.cross_correlate(filt1, filt2, pad=True)\n\trad = min(cc.shape)/20.0\n\tcc = apImage.highPassFilter(cc, radius=rad)\n\tcc = apImage.normRange(cc)\n\tcc = blackEdges(cc)\n\tcc = apImage.normRange(cc)\n\tcc = blackEdges(cc)\n\tcc = apImage.normRange(cc)\n\tcc = apImage.lowPassFilter(cc, radius=10.0)\n\n\t#find peak\n\tpeakdict = peakfinder.findSubpixelPeak(cc, lpf=0)\n\t#import pprint\n\t#pprint.pprint(peak)\n\tpixpeak = peakdict['subpixel peak']\n\tif msg is True:\n\t\tapDisplay.printMsg(\"Pixel peak: \"+str(pixpeak))\n\t\tapImage.arrayToJpegPlusPeak(cc, \"guess-cross-ang\"+str(abs(angle))+\".jpg\", pixpeak)\n\n\trawpeak = numpy.array([pixpeak[1], pixpeak[0]]) #swap coord\n\tshift = numpy.asarray(correlator.wrap_coord(rawpeak, cc.shape))*bin\n\n\tif msg is True:\n\t\tapDisplay.printMsg(\"Found xy-shift btw two images\"\n\t\t\t+\";\\n\\t SNR= \"+str(round(peakdict['snr'],2))\n\t\t\t+\";\\n\\t halftilt= \"+str(round(halftiltrad*180/math.pi, 3))\n\t\t\t+\";\\n\\t compressFactor= \"+str(round(compressFactor, 3))\n\t\t\t+\";\\n\\t stretchFactor= \"+str(round(stretchFactor, 3))\n\t\t\t+\";\\n\\t xFactor= \"+str(round(xfactor, 3))\n\t\t\t+\";\\n\\t rawpeak= \"+str(numpy.around(rawpeak*bin, 1))\n\t\t\t+\";\\n\\t shift= \"+str(numpy.around(shift, 1))\n\t\t)\n\n\treturn shift, xfactor, peakdict['snr']", "def stitch(KPS1, KPS2, H1, H2, match): #---- stich image to previous one\r\n #--- projection image1 from plane to cylindrical ---\r\n total = np.minimum(match.shape[0],100); # total pairing number\r\n bin1 = match[0:total,0].astype(int); # feature no at image 1\r\n R1 = KPS1.keyz[bin1, 0]; # keypoint Y at image 1\r\n C1 = KPS1.keyz[bin1, 1]; # keypoint X at image 1\r\n V1, U1 = pano_tools.project_p2c_points(R1, C1, H1);\r\n #--- image 2 ---\r\n bin2 = match[0:total,1].astype(int); # feature no at image 2\r\n R2 = KPS2.keyz[bin2, 0]; # keypoint Y at image 2\r\n C2 = KPS2.keyz[bin2, 1]; # keypoint X at image 2\r\n Rc2 = H2[0]/2; Rp2= R2 - Rc2; \r\n Cc2 = H2[1]/2; Cp2= C2 - Cc2;\r\n #--- --- \r\n # {phi1,S1,TU1,TV1} = M*M matrix: which is derived by 
chosen 2 pairs \r\n # {phi0,S0,TU0,TV0} = scalar: which is initial guess by removing outlier\r\n # \r\n phi1,S1,TU1,TV1= pano_tools.derive_p2c_formula(U1,V1,Cp2,Rp2);\r\n seq,phi0,S0,TU0,TV0 = pano_tools.remove_ill_matched_pair(phi1,S1,TU1,TV1); \r\n #--- linear regression [not necessary] ---\r\n # U1X = U1[seq]; C2X = C2[seq]; V1X = V1[seq]; R2X = R2[seq]; \r\n # phi0,S0,TU0,TV0,Err= pano_tools.linear_regression(V1X,U1X,R2X,C2X, phi0,S0,TU0,TV0,H2)\r\n H2[3]= phi0; H2[4]= S0; H2[5]= TV0; H2[6]= TU0;", "def problem2():\n \n pts_array, feats_array = p2.load_pts_features('data/pts_feats.npz')\n\n # points and features for image1 and image2\n pts1, pts2 = pts_array\n fts1, fts2 = feats_array\n\n # Loading images\n img1 = Image.open('data/img1.png')\n img2 = Image.open('data/img2.png')\n\n im1 = np.array(img1)\n im2 = np.array(img2)\n\n plt.figure(1)\n plt.subplot(1, 2, 1)\n plt.imshow(im1)\n plt.plot(pts1[:, 0], pts1[:, 1], 'ro', markersize=1.3)\n plt.subplot(1, 2, 2)\n plt.imshow(im2)\n plt.plot(pts2[:, 0], pts2[:, 1], 'ro', markersize=1.3)\n\n # display algined image\n H, ix1, ix2 = p2.final_homography(pts1, pts2, feats_array[0],\n feats_array[1])\n\n pts1 = pts1[ix1]\n pts2 = pts2[ix2]\n\n plt.figure(2)\n plt.subplot(1, 3, 1).set_title('Image 1')\n plt.imshow(im1)\n plt.plot(pts1[:, 0],\n pts1[:, 1],\n 'ro',\n markersize=2.3,\n markerfacecolor='none')\n plt.subplot(1, 3, 2).set_title('Image 2')\n plt.imshow(im2)\n plt.plot(pts2[:, 0],\n pts2[:, 1],\n 'ro',\n markersize=2.3,\n markerfacecolor='none')\n plt.subplot(1, 3, 3).set_title('Algined image 1')\n\n H_inv = np.linalg.inv(H)\n H_inv /= H_inv[2, 2]\n im3 = img1.transform(size=(im1.shape[1], im1.shape[0]),\n method=Image.PERSPECTIVE,\n data=H_inv.ravel(),\n resample=Image.BICUBIC)\n\n plt.show()", "def match(image1,image2,threshold,useRansac=False,t_orientation=30,t_scale=0.5):\r\n im1, keypoints1, descriptors1 = ReadKeys(image1)\r\n im2, keypoints2, descriptors2 = ReadKeys(image2)\r\n #\r\n # REPLACE THIS CODE WITH YOUR SOLUTION (ASSIGNMENT 5, QUESTION 3)\r\n #\r\n # Generate five random matches (for testing purposes)\r\n # matched_pairs = []\r\n # num = 5\r\n # for i in range(num):\r\n # matched_pairs.append([keypoints1[i],keypoints2[i]])\r\n # return DisplayMatches(im1, im2, matched_pairs)\r\n\r\n # END OF SECTION OF CODE TO REPLACE\r\n #\r\n\r\n #q3\r\n matched_pairs = []\r\n between_angles = np.arccos(np.dot(descriptors1, np.transpose(descriptors2)))\r\n for i, row in enumerate(between_angles):\r\n \tratio = sorted(row)[0] / sorted(row)[1]\r\n \tif ratio <= threshold:\r\n\t \tmatched_pairs.append([keypoints1[i], keypoints2[np.where(row == sorted(row)[0])[0][0]]])\r\n # print(matched_pairs)\r\n if useRansac is False:\r\n return DisplayMatches(im1, im2, matched_pairs)\r\n\t# \r\n\r\n #q4\r\n repetition = 10\r\n subsets = [[]] * repetition\r\n for i in range(repetition):\r\n r = random.randint(0, len(matched_pairs))\r\n for match in matched_pairs:\r\n ds1, ds2 = matched_pairs[r][1][2]/matched_pairs[r][0][2], match[1][2]/match[0][2]\r\n do1, do2 = (matched_pairs[r][1][3]-matched_pairs[r][0][3]), (match[1][3]-match[0][3])\r\n if abs(ds2 - ds1) <= t_scale * ds1 and abs(do2 - do1) % (2 * math.pi) <= t_orientation:\r\n subsets[i].append(match)\r\n\r\n max_i, max_len = 0, subsets[0]\r\n for i in range(10):\r\n l = len(subsets[i])\r\n if l > max_len:\r\n max_len = l\r\n max_i = i\r\n\r\n im3 = DisplayMatches(im1, im2, subsets[max_i])\r\n return im3", "def registration(im1, im2, num = 10, opt = 'py', outputPath = 'None'):\n\n # determin which 
one is the right side of the breast\n b_size = 5\n n_row, n_col = im1.shape\n side = 0\n if np.sum(im1[0:b_size,0:b_size]) < np.sum(im1[0:b_size,n_col-b_size:n_col]):\n side = 1 \n\n # flip the right side image\n if side == 1:\n im1 = np.fliplr(im1)\n else:\n im2 = np.fliplr(im2) \n\n # find edges of both images\n edge1 = findEdge(im1)\n edge2 = findEdge(im2)\n\n # tune edges of both side\n edge1 = tuneEdge(edge1,im1.shape)\n edge2 = tuneEdge(edge2,im2.shape)\n\n # samping from both side\n points1 = contour_sampling(edge1, num)\n points2 = contour_sampling(edge2, num)\n\n # for debugging .........................\n sam_im1 = np.zeros(im1.shape,np.float32)\n for point in points1:\n sam_im1[point[0],point[1]] = 1\n\n sam_im2 = np.zeros(im2.shape,np.float32)\n for point in points2:\n sam_im2[point[0],point[1]] = 1\n \n selem = disk(15)\n dilated1 = ndimage.convolve(sam_im1, selem, mode='constant', cval=0)\n dilated2 = ndimage.convolve(sam_im2, selem, mode='constant', cval=0)\n\n points1 = np.asarray(points1)\n points2 = np.asarray(points2)\n \n # Thin Plate Spline interpolation\n dst = np.zeros(im1.shape)\n # im1 as source\n if opt == 'py': \n tps = TPSpline.TPSpline()\n tps.setCorrespondences(points1, points2)\n dst = tps.warpImage(im1)\n return dst\n\n if opt == 'c':\n print \"Please run the interpolation with C++ exe file!\"\n print \"./TPSpline /home/yanbin/Tomosynthesis/libs/TPSpline/test/ps.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/pd.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/5016_test.tif /home/yanbin/Tomosynthesis/libs/TPSpline/test/dst.tif\"\n np.savetxt(outputPath + 'ps.txt', points1, '%d', delimiter=' ') # X is an array\n np.savetxt(outputPath + 'pd.txt', points2, '%d', delimiter=' ') # X is an array\n tiffLib.imsave(outputPath + 'im1.tif',im1)\n return None", "def find_components(image,deltaPix,lens_rad_arcsec = 6.0,lens_rad_ratio = None,\n center_x = None,center_y = None, gal_rad_ratio = 0.1,\n min_size_arcsec=0.7,thresh=0.5, many_sources = True,\n show_locations=False, title = None):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n \n# im2[im2 < im2.min() + 10.*thresh] = 0.\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n LoG = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.) 
\n \n# LoG = - gaussian_laplace(deepcopy(im2), sigma = 2., mode='constant', cval=0.)\n \n filtered = deepcopy(LoG)\n \n# print(LoG.min(),LoG.max(),np.abs(LoG.min()) + thresh )\n \n# print(type(filtered))\n \n #background mean and std of filtered image \n corners = np.zeros([4,5,5])\n corners[0] = LoG[0:5,0:5]\n corners[1] = LoG[-5:,0:5]\n corners[2] = LoG[0:5,-5:]\n corners[3] = LoG[-5:,-5:]\n means = []\n stds = []\n for c in corners:\n mn,med,s = sigma_clipped_stats(c,sigma=3.0)\n means.append(mn)\n stds.append(s)\n \n stds=np.array(stds)\n means = np.array(means)\n means_std = np.std(means)\n# means_good = means[(means >= means.mean() - 1.0 * means_std) & (means <= means.mean() + 1.0 * means_std)]\n means_good = means[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)]\n mean_bg = np.mean(means_good)\n std_bg = np.mean(stds[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)])\n# print('LoG means: {}, Log means std: {}, Log means good: {}, LoG avg mean: {}'.format(means,means_std,means_good,mean_bg))\n# print('min: {}, max: {}, cut: {}'.format(LoG.min(),LoG.max(),mean_bg + thresh))\n# print(LoG.min(),LoG.max(),filtered.min() + thresh)\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n# filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n# filtered[filtered < mean_bg + thresh] = 0.\n filtered[filtered < mean_bg + 6.*std_bg] = 0. #set pixels below the mean + 6x threshold to 0\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0) #All bright pixels\n max_idx_2d_large = peak_local_max(filtered, min_distance=1) #peaks with min size of 1 pixel\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2. 
#center of image\n \n if (center_x == None) & (center_y == None):\n new_center_x, new_center_y = im_center_x,im_center_y\n else:\n new_center_x, new_center_y = center_x,center_y #new \"center\" = location of lens galaxy\n \n \n #distance of each detected peak from center\n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2) \n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n #Contaminant light is only bright pixels further from center than lens_rad\n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n if many_sources:\n x_lens, y_lens = deepcopy(x_list_small), deepcopy(y_list_small)\n else:\n x_lens, y_lens = deepcopy(x_list_large), deepcopy(y_list_large)\n \n# x_lens, y_lens = x_list_small[R_small <= lens_rad], y_list_small[R_small <= lens_rad]\n \n if (len(x_lens) == 0) & (len(y_lens) == 0):\n x_lens = [0,15]\n y_lens = [0,15]\n \n sources = QTable([x_lens, y_lens],names={'x_local_peak','y_local_peak'}) #make table of all detected objects\n# print(x_list_large)\n# print(y_list_large)\n# print(sources)\n \n # show maxima on image for debug\n \n if show_locations:\n# fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n \n f, axes = plt.subplots(1, 5, figsize=(20,5), sharex=False, sharey=False)\n# plt.figure(figsize = (8,8))\n# plt.subplot(1,2,1)\n \n axes[0].imshow(image, origin='lower', norm=SymLogNorm(5))\n axes[0].set_title('Image')\n axes[0].set_axis_off()\n \n \n axes[1].imshow(LoG, origin='lower', norm=SymLogNorm(5))\n axes[1].set_title('LoG Filtered Image')\n axes[1].set_axis_off()\n\n# plt.subplot(1,2,2)\n axes[2].imshow(filtered, origin='lower', norm=SymLogNorm(5))\n axes[2].set_title('Final Filtered Image')\n axes[2].set_axis_off()\n \n axes[3].imshow(image, origin='lower', norm=SymLogNorm(5))\n for i in range(len(x_lens)):\n axes[3].scatter([x_lens[i]], [y_lens[i]], c='red', s=60, marker='+')\n \n for i in range(len(x_list_large)):\n axes[3].scatter([x_list_large[i]], [y_list_large[i]], c='black', s=100, marker='x')\n axes[3].set_title('Detected Objects')\n axes[3].set_axis_off()\n \n axes[4].imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n axes[4].scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n \n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n axes[4].scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n# plt.gcf().gca().add_artist(draw_lens_circle)\n# plt.gcf().gca().add_artist(draw_gal_circle)\n axes[4].add_patch(draw_lens_circle)\n# axes[4].add_patch(draw_gal_circle)\n \n axes[4].set_title('Pixels to Mask: \\n r = {:.3f}'.format(lens_rad_arcsec))\n axes[4].text(1, 1, \"detected components\", color='red')\n axes[4].set_axis_off()\n \n if title != None:\n f.suptitle(title, fontsize = 15)\n# plt.show()\n \n \n return (x_sats, y_sats), (new_center_x, new_center_y), sources", "def viz2(img1, interest_points1, img2, interest_points2, matches, PATCH_SIZE, threshold, min_sigma, max_sigma, num_sigma):\n \n\n\tfig = plt.figure(figsize=(10,5))\n\tax1 = fig.add_subplot(121)\n\tax2 = fig.add_subplot(122)\n\n #adding the two 
images to axes \n\tax1.imshow(img1, cmap='gray')\n\tax2.imshow(img2, cmap='gray')\n\n\tpositionimg1 = ax1.get_position()\n\tnew_pos = [positionimg1.x0+0.09, positionimg1.y0+0.025, \\\n\t\tpositionimg1.width / 1.1, positionimg1.height / 1.1] \n\tax1.set_position(new_pos)\n\n\tx1 = [a[1] for a in interest_points1] #blob detection x axis\n\ty1 = [a[0] for a in interest_points1] #blob detection y axis\n\ts1 = [a[2] for a in interest_points1] #blob detected at sigma \n \n\tx2 = [a[1] for a in interest_points2] #blob detection x axis\n\ty2 = [a[0] for a in interest_points2] #blob detection y axis\n\ts2 = [a[2] for a in interest_points2] #blob detected at sigma \n \n\tdifferences = [a[2] for a in matches]\n\n\n\tweighted_differences = normalize(differences)\n\n #iterating through the input list of matches\n\tfor coordinates, difference in zip(matches, weighted_differences):\n\t\tcord_a = (coordinates[0][1], coordinates[0][0]) #extracting coordinates for interest point in img1\n\t\tcord_b = (coordinates[1][1], coordinates[1][0]) #extracting coordinates for interest point in img2\n\t\tif difference <=0.33:\n\t\t\tcolor = \"green\"\n\t\telif difference > 0.33 and difference <= 0.66:\n\t\t\tcolor = \"yellow\"\n\t\telse:\n\t\t\tcolor = \"red\"\n\n\t#defining the path from cord_a to cord_b\n\t\tcon = ConnectionPatch(xyA=cord_a, xyB=cord_b, coordsA=\"data\", coordsB=\"data\",\n\t\t\t\t\t\t\t axesA=ax2, axesB=ax1, color=color) #arrowstyle='->')\n\t#adding line to axes2 \n\t\tax2.add_artist(con)\n\n #showing the image // can be changed to saving the image locally \n\tfor x, y, s in zip(x1, y1, s1):\n\t\tax1.scatter(x, y, alpha=1, facecolors='none', edgecolors='r', s=s**2) #plotting the input interest points for img1\n\tfor x, y, s in zip(x2, y2, s2):\n\t\tax2.scatter(x, y, alpha=1, facecolors='none', edgecolors='r', s=s**2) #plotting the input interest points for img2\n\tax1.axis('off')\n\tax2.axis('off')\n\ttitle = 'Patch Size=' + str(PATCH_SIZE) + ', Threshold=' + str(threshold) + ', min sigma=' + \\\n\tstr(min_sigma) + ', max sigma=' + str(max_sigma) + ', num sigma=' + str(num_sigma)\n\tplt.title(title, x=+0.1)\n\t#plt.show()\n\tplt.savefig(title+'.png')\n\n\n\treturn", "def match3(img1, img2, coordinates1, coordinates2, PATCH_SIZE, threshold=0.7):\n\n\t#creating patches for all points from img1 and img2\n\tcoord1_patches = [make_patch(coordinate, PATCH_SIZE, img1) for coordinate in coordinates1]\n\tcoord2_patches = [make_patch(coordinate, PATCH_SIZE, img2) for coordinate in coordinates2]\n\n\t# creating a matrix with dissimilarity measures for all pairs\n\tall_matches = np.zeros((len(coordinates1), len(coordinates2)))\n\n\tfor (x, y), _ in np.ndenumerate(all_matches):\n\t\tall_matches[x,y] = count_difference(coord1_patches[x], coord2_patches[y])\n\n\t#looking for best left-to-right and right-to-left matches\n\tmatches = []\n\t#left-to-right\n\tfor i, coord1 in enumerate(coordinates1):\n\t\tbest_ltr_match = np.argmin(all_matches[i, :]) #best left-to-right match for coord1\n\t\tbest_rtl_match = np.argmin(all_matches[:, best_ltr_match]) #best match for a best match\n\t\tif (i == best_rtl_match): #hurray, there is a super match\n\n\t\t\tmatches.append([coord1, coordinates2[best_ltr_match], all_matches[i, best_ltr_match]])\n\t\n\treturn matches", "def cli(fig1, fig2, out):\n click.echo('\\n' + '.' 
* 50)\n\n # open first image\n image1 = Image.open(fig1)\n\n # open second image\n image2 = Image.open(fig2)\n\n # retrieve the image dimensions.\n width, height = image1.size\n width2, height2 = image2.size\n\n if [width, height] != [width2, height2]:\n print(\"Image dimensions do not match! The Two inputs must have equal dimensions\")\n exit(1)\n else:\n print(\"Fig1 dimensions: \", image1.size)\n print(\"Fig2 dimensions: \", image2.size)\n # Create a new image object.\n merged = Image.new('RGB', image1.size)\n\n for i in range(0, width):\n for j in range(0, height):\n ima1 = list(image1.getpixel((i, j)))\n ima2 = list(image2.getpixel((i, j)))\n if ima1 == ima2:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] == [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] != [0, 0, 0]:\n r, g, b, a = ima2\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] == [0, 0, 0]:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and ima2 == [255, 255, 255, 255]:\n r, g, b, a = ima1\n elif [ima2[0], ima2[1], ima2[2]] != [0, 0, 0] and ima1 == [255, 255, 255, 255]:\n r, g, b, a = ima2\n else:\n # print ima1,ima2\n r = (ima1[0] + ima2[0]) // 2\n g = (ima1[1] + ima2[1]) // 2\n b = (ima1[2] + ima2[2]) // 2\n a = 255\n # print [r,g,b,a]\n\n merged.putpixel((i, j), (r, g, b, a))\n merged.save(out)\n click.echo('\\n' + '.' * 50)", "def retrieve_overlap(img1, img2, lbl1=1, lbl2=1):\n xlen, ylen, zlen = img1.GetSize()\n\n # Make sure that our images are equal in size to prevent weird invisible bugs\n xlen2, ylen2, zlen2 = img2.GetSize()\n assert xlen == xlen2 and ylen == ylen2 and zlen == zlen2\n\n # Copy our image as to not alter the original data\n new_image = img1[:, :, :]\n for z in xrange(zlen):\n for y in xrange(ylen):\n for x in xrange(xlen):\n # Set any bit with overlap to 1, else set it to 0\n overlap = img1.GetPixel(x, y, z) == lbl1 and img2.GetPixel(x, y, z) == lbl2\n if overlap:\n new_image.SetPixel(x, y, z, 1)\n else:\n new_image.SetPixel(x, y, z, 0)\n return new_image", "def pyrBlend(img_1: np.ndarray, img_2: np.ndarray, mask: np.ndarray, levels: int) -> (np.ndarray, np.ndarray):\r\n img_1_lap = laplaceianReduce(img_1, levels)\r\n img_2_lap = laplaceianReduce(img_2, levels)\r\n mask_gauss = gaussianPyr(mask)\r\n\r\n merge = (img_1_lap[levels - 1] * mask_gauss[levels - 1]) + ((1 - mask_gauss[levels - 1]) * img_2_lap[levels - 1])\r\n gaussian = gaussianKer(5)\r\n for i in range(levels - 2, -1, -1):\r\n merge = gaussExpand(merge, gaussian)\r\n merge = merge + (img_1_lap[i] * mask_gauss[i]) + ((1 - mask_gauss[i]) * img_2_lap[i])\r\n\r\n img_1 = cropPic(img_1, levels)\r\n img_2 = cropPic(img_2, levels)\r\n naive = (img_1 * mask_gauss[0]) + ((1 - mask_gauss[0]) * img_2)\r\n\r\n\r\n return naive, merge", "def vimage(cat1, cat2, dmax, psize, fwhm):\n\n NHALF = int(dmax/psize)\n NSIDE = 2*NHALF+1\n mshift = (NHALF+0.5)*psize\n img = np.zeros((NSIDE,NSIDE))\n x2s, y2s = cat2[:,0], cat2[:,1]\n for x1, y1 in cat1:\n ok = (x2s > x1-mshift) & (x2s < x1+mshift) & \\\n (y2s > y1-mshift) & (y2s < y1+mshift)\n for x2, y2 in cat2[ok]:\n ix = NHALF+int(round((x2-x1)/psize))\n iy = NHALF+int(round((y2-y1)/psize))\n img[iy,ix] += 1\n\n # smooth image\n img = gaussian_filter(img,fwhm/psize/2.3548,mode='constant')\n\n # identify maximum pixel\n ind = np.arange(NSIDE)\n ix, iy = np.meshgrid(ind, ind)\n peak = img == img.max()\n #if len(ix[peak]) > 1:\n # raise Exception(\"Found more than one maximum pixel\")\n\n # now have first approximation to the shift\n ixp = ix[peak][0]\n iyp = 
iy[peak][0]\n xp = psize*(ixp-NHALF)\n yp = psize*(iyp-NHALF)\n if ixp == 0 or ixp == NSIDE-1 or iyp == 0 or iyp == NSIDE-1:\n # max pixel at edge of array. Just return pixel position\n # as \"refined\" position\n xr = xp\n yr = yp\n\n else:\n # Make a quadratic approx to refine the peak position.\n # Estimate first and second partial derivatives from\n # 3x3 pixels centred on peak\n fx = (img[iyp,ixp+1] - img[iyp,ixp-1])/2.\n fy = (img[iyp+1,ixp] - img[iyp-1,ixp])/2.\n fxx = img[iyp,ixp-1] + img[iyp,ixp+1] - 2*img[iyp,ixp]\n fyy = img[iyp-1,ixp] + img[iyp+1,ixp] - 2*img[iyp,ixp]\n fxy = (img[iyp+1,ixp+1] + img[iyp-1,ixp-1] -\n img[iyp+1,ixp-1] - img[iyp-1,ixp+1])/4.\n b = np.array((fx,fy)).T\n A = np.array(((fxx,fxy),(fxy,fyy)))\n x = solve(A,b)\n xr = xp - psize*x[0]\n yr = yp - psize*x[1]\n return (img, xp,yp,xr,yr)", "def transform_images(img1,img2):", "def find_matching_points(img1, img2, max_pix_movement=50, normalize=True, show=False):\n\n # Initiate ORB detector\n orb = cv2.ORB_create()\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(img1, None)\n kp2, des2 = orb.detectAndCompute(img2, None)\n\n # create BFMatcher object\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # Match descriptors.\n matches = bf.match(des1,des2)\n # Sort them in the order of their distance.\n matches = sorted(matches, key = lambda x:x.distance)\n # Draw first 10 matches.\n if show:\n img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:500], None,flags=2)\n plt.imshow(img3),plt.show()\n # Get the matching keypoints for each of the images\n\n list_kp1 = []\n list_kp2 = []\n for mat in matches:\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n list_kp1.append(kp1[img1_idx].pt)\n list_kp2.append(kp2[img2_idx].pt)\n\n n_kp1, n_kp2 = np.float32(list_kp1), np.float32(list_kp2)\n n_kp1 /= np.asarray([img1.shape[1], img1.shape[0]], np.float32)\n n_kp2 /= np.asarray([img2.shape[1], img2.shape[0]], np.float32)\n n_kp1 = n_kp1 * 2. - 1.\n n_kp2 = n_kp2 * 2. 
- 1.\n\n return np.int32(list_kp1), np.int32(list_kp2), n_kp1, n_kp2", "def blending_example1():\n pic_desert = read_image(relpath(\"./externals/pic_desert.jpg\"), 2)\n pic_pool = read_image(relpath(\"./externals/pic_swim.jpg\"), 2)\n mask = read_image(relpath(\"./externals/mask_desert.jpg\"), 1)\n # making the mask binary (normalizing 2 original values)\n mask = strech_helper(mask).astype(np.bool)\n print(pic_desert.shape[2])\n [R1, G1, B1] = np.dsplit(pic_desert, pic_desert.shape[2])\n [R2, G2, B2] = np.dsplit(pic_pool, pic_pool.shape[2])\n R1 = np.reshape(R1, (512,1024))\n R2 = np.reshape(R2, (512,1024))\n G1 = np.reshape(G1, (512,1024))\n G2 = np.reshape(G2, (512,1024))\n B1 = np.reshape(B1, (512,1024))\n B2 = np.reshape(B2, (512,1024))\n\n blend1 = pyramid_blending(R2, R1, mask, 3, 3, 3)\n blend2 = pyramid_blending(G2, G1, mask, 3, 3, 3)\n blend3 = pyramid_blending(B2, B1, mask, 3, 3, 3)\n\n blend1 = np.reshape(blend1, (blend1.shape[0], blend1.shape[1], 1))\n blend2 = np.reshape(blend2, (blend2.shape[0], blend3.shape[1], 1))\n blend3 = np.reshape(blend3, (blend3.shape[0], blend3.shape[1], 1))\n\n new_pic = np.concatenate((blend1, blend2, blend3), axis=2)\n # plotting the images\n fig = plt.figure()\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n ax1.imshow(pic_desert)\n ax2.imshow(pic_pool)\n ax3.imshow(mask, cmap='gray')\n ax4.imshow(new_pic)\n plt.show()\n\n return pic_desert, pic_pool, mask, new_pic", "def pair_images():\n # TODO: maybe implement some way to skip frames if queue is too long\n queue_a = xy_imgs\n queue_b = z_imgs\n if len(queue_a) == 0 or len(queue_b) == 0:\n return\n a_prev = None\n b_prev = None\n a = queue_a[0]\n b = queue_b[0]\n if a.ts < b.ts:\n while a.ts < b.ts:\n a_prev = queue_a.popleft()\n if len(queue_a) == 0:\n if b.within_threshold(a_prev):\n yield process_images(a_prev, b)\n return\n a = queue_a[0]\n closest_a = b.closest_to(a, a_prev)\n if closest_a is not None:\n yield process_images(closest_a, b)\n else:\n while b.ts < a.ts:\n b_prev = queue_b.popleft()\n if len(queue_b) == 0:\n if a.within_threshold(b_prev):\n yield process_images(a, b_prev)\n return\n b = queue_b[0]\n closest_b = a.closest_to(b, b_prev)\n if closest_b is not None:\n yield process_images(a, closest_b)", "def draw_matches(img1, kp1, img2, kp2, matches, inliers, ignore_indexes, filter_by_dist=True, color=None):\n # We're drawing them side by side. Get dimensions accordingly.\n # Handle both color and grayscale images.\n\n\n\n img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2RGB)\n img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2RGB)\n if len(img1.shape) == 3:\n new_shape = (img1.shape[0] + img2.shape[0], max(img1.shape[1], img2.shape[1]), img1.shape[2])\n elif len(img1.shape) == 2:\n new_shape = (img1.shape[0] + img2.shape[0], max(img1.shape[1], img2.shape[1]))\n new_img = np.zeros(new_shape, type(img1.flat[0])) \n # Place images onto the new image.\n new_img[0:img1.shape[0],0:img1.shape[1]] = img1\n new_img[img1.shape[0]:img1.shape[0]+img2.shape[0],0:img1.shape[1]] = img2\n \n # Draw lines between matches. 
Make sure to offset kp coords in second image appropriately.\n r = 1\n thickness = 1\n if color:\n c = color\n\n # print(new_img.shape)\n distances = []\n for m in matches:\n distances.append(m.distance)\n \n dist_threshold = min(distances) * 2\n # print(dist_threshold)\n \n for i, m in enumerate(matches):\n if inliers:\n if not i in inliers:\n continue\n if ignore_indexes:\n if i in ignore_indexes:\n continue\n if filter_by_dist:\n if m.distance > 50:\n continue\n \n # Generate random color for RGB/BGR and grayscale images as needed.\n if not color: \n c = tuple(np.random.randint(0,256,3)) if len(img1.shape) == 3 else np.random.randint(0,256)\n c = ( int (c [ 0 ]), int (c [ 1 ]), int (c [ 2 ])) \n \n # So the keypoint locs are stored as a tuple of floats. cv2.line(), like most other things,\n # wants locs as a tuple of ints.\n\n try:\n end1 = tuple(np.round(kp1[m.queryIdx].pt).astype(int))\n end2 = tuple(np.round(kp2[m.trainIdx].pt).astype(int) + np.array([ 0, img1.shape[0]]))\n cv2.line(new_img, end1, end2, c, thickness)\n cv2.circle(new_img, end1, r, c, thickness)\n cv2.circle(new_img, end2, r, c, thickness)\n except:\n continue\n \n\n return new_img", "def coarse_to_fine(im1, im2, pyramid1, pyramid2, n_iter=3):\n assert im1.shape == im2.shape\n \n u = np.zeros_like(im1)\n v = np.zeros_like(im1)\n\n #\n # Your code here\n #\n\n # Code inspired by:\n # https://www.youtube.com/watch?v=FhlbUHhNpD4\n\n # descending indices of the gaussian pyramid \n # (from small to big image)\n K = len(pyramid1)\n levels = np.arange(K - 1, -1, -1) # [K - 1, K - 2, ..., 0]\n\n #######################\n # intial estimation #\n #######################\n\n ##################################################\n # print('Level [{}]'.format(K - 1))\n\n # get coarsest images from the gaussian pyramid\n # at level (k - 1)\n im1_k = pyramid1[-1].copy()\n im2_k = pyramid2[-1].copy()\n\n # iterative LK-Algorithm (refine the motion)\n uk, vk = iter_LK(im1_k, im2_k, n_iter)\n\n # expand image to new resolution\n uk_exp = expand(uk) * 2\n vk_exp = expand(vk) * 2\n ###################################################\n\n\n ##############################\n # coarse to fine iteritons #\n ##############################\n\n # iterate over [ K-2, K-3, ..., 0 ] -> all scales except the smallest\n for k in levels[1:]: \n # print('Level [{}]'.format(k))\n\n # get images of current scale from the pyramid\n im1_k = pyramid1[k]\n im2_k = pyramid2[k]\n\n # warp im1 image from pyramid1\n im1_k_warp = warp(im1_k, uk_exp, vk_exp)\n\n # apply LK for iterative refinement on current resolution\n uk, vk = iter_LK(im1_k_warp, im2_k, n_iter)\n\n # add current motion fields (uk, vk) from iter_LK to \n # previous expanded motion fields (uk_exp, vk_exp)\n uk = uk + uk_exp\n vk = vk + vk_exp\n\n # expand the motion fields (uk, vk) to the next higher\n # scale in the gaussian pyramid (to use in next iteration)\n if k > 0: # all other level\n uk_exp = expand(uk) * 2\n vk_exp = expand(vk) * 2\n else: # last level 0 (no expansion needed -> original scale)\n uk_exp = uk\n vk_exp = vk\n\n # print('[', k, ']: uk_exp = ', uk_exp.shape, ' | vk_exp = ', vk_exp.shape)\n\n # print('[FINAL]: u = ', uk_exp.shape, ' | v = ', vk_exp.shape)\n\n # set the final parameter \n # (LK at original resolution)\n u = uk_exp\n v = vk_exp\n\n assert u.shape == im1.shape and \\\n v.shape == im1.shape\n return u, v", "def comparekp (left, right, kp1, kp2):\n subplot (121)\n arx = array ([kp1.pt[0]])\n ary = array ([kp1.pt[1]])\n hold(True)\n imshow(left)\n scatter (arx, ary)\n\n 
subplot (122)\n arx = array ([kp2.pt[0]])\n ary = array ([kp2.pt[1]])\n hold(True)\n imshow(right)\n scatter (arx, ary)\n\n show()", "def stitch_multiple_images(imgs, desc_func=simple_descriptor, patch_size=5):\n # Detect keypoints in each image\n keypoints = [] # keypoints[i] corresponds to imgs[i]\n for img in imgs:\n kypnts = corner_peaks(harris_corners(img, window_size=3),\n threshold_rel=0.05,\n exclude_border=8)\n keypoints.append(kypnts)\n # Describe keypoints\n descriptors = [] # descriptors[i] corresponds to keypoints[i]\n for i, kypnts in enumerate(keypoints):\n desc = describe_keypoints(imgs[i], kypnts,\n desc_func=desc_func,\n patch_size=patch_size)\n descriptors.append(desc)\n # Match keypoints in neighboring images\n matches = [] # matches[i] corresponds to matches between\n # descriptors[i] and descriptors[i+1]\n for i in range(len(imgs)-1):\n mtchs = match_descriptors(descriptors[i], descriptors[i+1], 0.7)\n matches.append(mtchs)\n\n ### YOUR CODE HERE\n raise NotImplementedError() # Delete this line\n ### END YOUR CODE\n\n return panorama", "def get_motion(frame1k, frame2k, frame_count):\n frame1 = frame1k.copy()\n frame2 = frame2k.copy()\n\n global limb_coords, init_coords, num_blocks\n cv2.imwrite(\"thisImageAnalyse.png\", frame2)\n block_size = 3\n block_rad = int(block_size/2)\n\n def get_SSD():\n \"\"\" applies SSD formula to search area\n :return SSD value\"\"\"\n dist = 0\n # traversal of pixels in potential Bi+1 block\n # compare corresponding pixel positions with source block in f1 and neighbour block in f2\n y1 = center_y1 - block_rad # start pos.\n for y2 in range(center_y2 - block_rad, (center_y2 - block_rad + block_size)):\n x1 = center_x1 - block_rad # start pos\n for x2 in range(center_x2 - block_rad, (center_x2 - block_rad + block_size)):\n try:\n # displacement formula for RGB channels of each pixel in block\n dist = dist + (frame1[y1][x1][0] - frame2[y2][x2][0])**2 + (frame1[y1][x1][1] - frame2[y2][x2][1])**2 + (frame1[y1][x1][2] - frame2[y2][x2][2])**2\n except RuntimeWarning:\n pass\n x1 += 1\n y1 += 1\n return math.sqrt(dist)\n\n # for each body part\n b = 0\n while b < 5:\n avg_x = 0.0\n avg_y = 0.0\n new_x = 0.0\n new_y = 0.0\n a = 0\n # for each block on body part (9 total)\n while a < num_blocks:\n found = False\n search_rad = 5\n while found is False:\n center_y1 = int(init_coords[b][a][0])\n center_x1 = int(init_coords[b][a][1])\n min_SSD = 999999\n # for pythagoras to ensure closest block gets picked when equality occurs of SSD value\n min_d = 999999\n # this finds the center of the block to compare\n for factor_y in range(-search_rad, search_rad + 1):\n center_y2 = center_y1 + block_size*factor_y\n y_dist = center_y1 - abs(center_y2)\n for factor_x in range(-search_rad, search_rad + 1):\n center_x2 = center_x1 + block_size*factor_x\n x_dist = center_x1 - abs(center_x2)\n # pythagoras\n d = math.sqrt((y_dist**2 + x_dist**2))\n if d < min_d:\n min_d = d\n\n SSD = get_SSD()\n if frame2[center_y2][center_x2][1] != 0 and frame2[center_y2][center_x2][2] != 0:\n found = True\n if SSD < min_SSD:\n min_SSD = SSD\n new_y = center_y2\n new_x = center_x2\n elif SSD == min_SSD and d < min_d:\n new_y = center_y2\n new_x = center_x2\n if found is False:\n # if no block is found repeat the search, increasing the search size by 1\n search_rad += 1\n # draw extracted vectors\n cv2.arrowedLine(frame1k, (int(center_x1), int(center_y1)), (int(new_x), int(new_y)), (150, 200, 30), 1, 4, 0, 0.3)\n avg_x += new_x\n avg_y += new_y\n init_coords[b][a][0] = new_y\n 
init_coords[b][a][1] = new_x\n a += 1\n cv2.imwrite('monkeyFrames/contrast_enhanced%d.png' % frame_count, frame1k)\n limb_coords[b][frame_count][0] = int(avg_y/num_blocks)\n limb_coords[b][frame_count][1] = int(avg_x/num_blocks)\n b += 1", "def concat_images(imga, imgb, xoffset=0, yoffset=0, direction='horizontal',\n ontop=True, adjust_z=False, center_offset=True):\n if direction == 'horizontal':\n max_dim = np.maximum.reduce([imga.shape, imgb.shape])\n\n center_a = np.array(np.divide(imga.shape, 2), dtype=int)\n center_b = np.array(np.divide(imgb.shape, 2), dtype=int)\n offset = (abs(yoffset), abs(xoffset))\n\n if center_offset:\n new_offset = np.subtract(center_a, np.add(center_b, offset))\n\n if (max_dim == imgb.shape).all():\n tmp = np.copy(imgb)\n imgb = np.copy(imga)\n imga = np.copy(tmp)\n ontop = toggle(ontop)\n xoffset *= -1\n yoffset *= -1\n\n # elif not (max_dim == imga.shape).all():\n # for i, m in enumerate(max_dim):\n # if m not in imga.shape:\n # new_offset[i] = center_a[i] - (center_b[i] + offset[i])\n # else:\n # new_offset[i] = center_a[i] + offset[i] - center_b[i]\n\n new_offset[new_offset > 0] = 0\n center_new = np.array(np.divide(max_dim, 2), dtype=int)\n new_img = np.full(np.add(max_dim, np.abs(new_offset)), np.nan)\n\n Sa0 = slice(int(center_new[0] - imga.shape[0]/2 + 0.5),\n int(center_new[0] + imga.shape[0]/2 + 0.5))\n Sa1 = slice(int(center_new[1] - imga.shape[1]/2 + 0.5),\n int(center_new[1] + imga.shape[1]/2 + 0.5))\n Sb0 = slice(int(center_new[0] + abs(yoffset) - imgb.shape[0]/2 + 0.5),\n int(center_new[0] + abs(yoffset) + imgb.shape[0]/2 + 0.5))\n Sb1 = slice(int(center_new[1] + abs(xoffset) - imgb.shape[1]/2 + 0.5),\n int(center_new[1] + abs(xoffset) + imgb.shape[1]/2 + 0.5))\n\n xdir = np.sign(xoffset)\n ydir = np.sign(yoffset)\n\n if ydir == 0:\n ydir = 1\n if xdir == 0:\n xdir = 1\n\n imga = imga[::ydir, ::xdir]\n imgb = imgb[::ydir, ::xdir]\n\n if adjust_z:\n top_img = 1 * new_img\n top_img[Sa0, Sa1] = imga\n top_img[Sb0, Sb1] = imgb\n low_img = 1 * new_img\n low_img[Sb0, Sb1] = imgb\n low_img[Sa0, Sa1] = imga\n\n diff = top_img - low_img\n m = np.nanmean(diff)\n s = np.nanstd(diff)\n mask = np.abs(diff) < m + s\n diff[mask] = np.nan\n add = np.nanmean(diff)\n\n print(add)\n\n imgb -= add\n\n if ontop:\n new_img[Sa0, Sa1] = imga\n new_img[Sb0, Sb1] = imgb\n else:\n new_img[Sb0, Sb1] = imgb\n new_img[Sa0, Sa1] = imga\n\n return new_img[::ydir, ::xdir]", "def tracklet_fast(g1: nx.graph, g2: nx.graph, seg_img1: np.ndarray, seg_img2: np.ndarray, maxtrackid: int, time: int, \n linelist: list, tracksavedir: str, cellcenter1: np.ndarray, cellcenter2: np.ndarray) \\\n -> (int, list):\n f1 = {}\n f2 = {}\n dict_associate = {}\n loc1 = g1.degree(weight=\"weight\")\n loc2 = g2.degree(weight=\"weight\")\n new_seg_img2 = np.zeros(seg_img2.shape)\n\n for ele1 in loc1:\n cell = ele1[0]\n f1[cell] = [cellcenter1[cell], ele1[1]]\n\n for ele2 in loc2:\n cell = ele2[0]\n f2[cell] = [cellcenter2[cell], ele2[1]]\n\n for cell in f2.keys():\n tmp_center = f2[cell][0]\n min_distance = seg_img2.shape[0]**2 + seg_img2.shape[1]**2 + seg_img2.shape[2]**2\n\n for ref_cell in f1.keys():\n ref_tmp_center = f1[ref_cell][0]\n distance = (tmp_center[0]-ref_tmp_center[0])**2 + (tmp_center[1] -\n ref_tmp_center[1])**2 + (tmp_center[2]-ref_tmp_center[2])**2\n if distance < min_distance:\n dict_associate[cell] = ref_cell\n min_distance = distance\n\n inverse_dict_ass = {}\n\n for cell in dict_associate:\n if dict_associate[cell] in inverse_dict_ass:\n 
inverse_dict_ass[dict_associate[cell]].append(cell)\n else:\n inverse_dict_ass[dict_associate[cell]] = [cell]\n\n maxtrackid = max(maxtrackid, max(inverse_dict_ass.keys()))\n\n for cell in inverse_dict_ass.keys():\n if len(inverse_dict_ass[cell]) > 1:\n for cellin2 in inverse_dict_ass[cell]:\n maxtrackid = maxtrackid + 1\n new_seg_img2[seg_img2 == cellin2] = maxtrackid\n string = '{} {} {} {}'.format(maxtrackid, time+1, time+1, cell)\n linelist.append(string)\n else:\n cellin2 = inverse_dict_ass[cell][0]\n new_seg_img2[seg_img2 == cellin2] = cell\n i = 0\n\n for line in linelist:\n i = i+1\n if i == cell:\n list_tmp = line.split()\n new_string = '{} {} {} {}'.format(list_tmp[0], list_tmp[1], time+1, list_tmp[3])\n linelist[i-1] = new_string\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:\n thread1 = executor.submit(save_img_as_tiff, seg_img1, time, tracksavedir)\n thread2 = executor.submit(save_img_as_tiff, new_seg_img2, time+1, tracksavedir)\n\n return maxtrackid, linelist", "def detectSpots(img, detectSpotsParameter = None, correctIlluminationParameter = None, removeBackgroundParameter = None,\n filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None,\n verbose = False, out = sys.stdout, **parameter):\n\n timer = Timer();\n \n # normalize data -> to check\n #img = img.astype('float');\n #dmax = 0.075 * 65535;\n #ids = img > dmax;\n #img[ids] = dmax;\n #img /= dmax; \n #out.write(timer.elapsedTime(head = 'Normalization'));\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[600:1000,:,800:830];\n \n # correct illumination\n correctIlluminationParameter = getParameter(detectSpotsParameter, \"correctIlluminationParameter\", correctIlluminationParameter);\n img1 = img.copy();\n img1 = correctIllumination(img1, correctIlluminationParameter = correctIlluminationParameter, verbose = verbose, out = out, **parameter) \n\n # background subtraction in each slice\n #img2 = img.copy();\n removeBackgroundParameter = getParameter(detectSpotsParameter, \"removeBackgroundParameter\", removeBackgroundParameter);\n img2 = removeBackground(img1, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter) \n \n # mask\n #timer.reset();\n #if mask == None: #explicit mask\n # mask = img > 0.01;\n # mask = binary_opening(mask, self.structureELement('Disk', (3,3,3)));\n #img[img < 0.01] = 0; # masking in place # extended maxima\n #out.write(timer.elapsedTime(head = 'Mask')); \n \n #DoG filter\n filterDoGParameter = getParameter(detectSpotsParameter, \"filterDoGParameter\", filterDoGParameter);\n dogSize = getParameter(filterDoGParameter, \"size\", None);\n #img3 = img2.copy(); \n img3 = filterDoG(img2, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter);\n \n # normalize \n # imax = img.max();\n # if imax == 0:\n # imax = 1;\n # img /= imax;\n \n # extended maxima\n findExtendedMaximaParameter = getParameter(detectSpotsParameter, \"findExtendedMaximaParameter\", findExtendedMaximaParameter);\n hMax = getParameter(findExtendedMaximaParameter, \"hMax\", None);\n imgmax = findExtendedMaxima(img3, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter);\n \n #center of maxima\n if not hMax is None:\n centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter);\n else:\n centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter);\n \n #cell size detection\n 
detectCellShapeParameter = getParameter(detectSpotsParameter, \"detectCellShapeParameter\", detectCellShapeParameter);\n cellShapeThreshold = getParameter(detectCellShapeParameter, \"threshold\", None);\n if not cellShapeThreshold is None:\n \n # cell shape via watershed\n imgshape = detectCellShape(img2, centers, detectCellShapeParameter = detectCellShapeParameter, verbose = verbose, out = out, **parameter);\n \n #size of cells \n csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = out, **parameter);\n \n #intensity of cells\n cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findCellIntensity(img2, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findCellIntensity(img3, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n #remove cell;s of size 0\n idz = csize > 0;\n \n return ( centers[idz], numpy.vstack((cintensity[idz], cintensity3[idz], cintensity2[idz], csize[idz])).transpose()); \n \n \n else:\n #intensity of cells\n cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findIntensity(img2, centers, verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findIntensity(img3, centers, verbose = verbose, out = out, **parameter);\n\n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n return ( centers, numpy.vstack((cintensity, cintensity3, cintensity2)).transpose());", "def main(im1_filename: Path, im2_filename: Path) -> None:\n im1 = np.array(Image.open(im1_filename).convert(\"RGB\"))\n im2 = np.array(Image.open(im2_filename).convert(\"RGB\"))\n\n im1 = im1[:, :, ::-1]\n id_face_loc = get_bounding_boxes(im1)\n im1 = im1[:, :, ::-1]\n face_encodings = face_recognition.face_encodings(im1, id_face_loc, 10, \"large\")[0]\n\n im2 = im2[:, :, ::-1]\n cam_face_loc = get_bounding_boxes(im2)\n im2 = im2[:, :, ::-1]\n face_encodings2 = face_recognition.face_encodings(im2, cam_face_loc, 10, \"large\")[0]\n\n dist = face_recognition.face_distance([face_encodings], face_encodings2)[0]\n if dist < 0.5:\n print(f\"[+] These images belong to the same person! ({dist})\")\n else:\n print(f\"[-] These images do not belong to the same person! 
({dist})\")", "def blending_example2():\n pic_earth = read_image(relpath(\"./externals/pic_earth.jpg\"), 2)\n pic_asteroid = read_image(relpath(\"./externals/pic_asteroid.jpg\"), 2)\n mask = read_image(relpath(\"./externals/mask_asteroid.jpg\"), 1)\n # making the mask binary (normalizing 2 original values)\n mask = strech_helper(mask).astype(np.bool)\n [R1, G1, B1] = np.dsplit(pic_earth, pic_earth.shape[2])\n [R2, G2, B2] = np.dsplit(pic_asteroid, pic_asteroid.shape[2])\n R1 = np.reshape(R1, (1024,1024))\n R2 = np.reshape(R2, (1024,1024))\n G1 = np.reshape(G1, (1024,1024))\n G2 = np.reshape(G2, (1024,1024))\n B1 = np.reshape(B1, (1024,1024))\n B2 = np.reshape(B2, (1024,1024))\n\n blend1 = pyramid_blending(R2, R1, mask, 3, 3, 3)\n blend2 = pyramid_blending(G2, G1, mask, 3, 3, 3)\n blend3 = pyramid_blending(B2, B1, mask, 3, 3, 3)\n\n blend1 = np.reshape(blend1, (blend1.shape[0], blend1.shape[1], 1))\n blend2 = np.reshape(blend2, (blend2.shape[0], blend3.shape[1], 1))\n blend3 = np.reshape(blend3, (blend3.shape[0], blend3.shape[1], 1))\n\n new_pic = np.concatenate((blend1, blend2, blend3), axis=2)\n # plotting the images\n fig = plt.figure()\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n ax1.imshow(pic_earth)\n ax2.imshow(pic_asteroid)\n ax3.imshow(mask, cmap='gray')\n ax4.imshow(new_pic)\n plt.show()\n\n return pic_earth, pic_asteroid, mask, new_pic", "def draw_matches(im1, im2, im1_pts, im2_pts, inlier_mask=None):\n height1, width1 = im1.shape[:2]\n height2, width2 = im2.shape[:2]\n canvas_height = max(height1, height2)\n canvas_width = width1 + width2\n\n canvas = np.zeros((canvas_height, canvas_width, 3), im1.dtype)\n canvas[:height1, :width1, :] = im1\n canvas[:height2, width1:width1+width2, :] = im2\n\n im2_pts_adj = im2_pts.copy()\n im2_pts_adj[:, 0] += width1\n\n if inlier_mask is None:\n inlier_mask = np.ones(im1_pts.shape[0], dtype=np.bool)\n\n # Converts all to integer for plotting\n im1_pts = im1_pts.astype(np.int32)\n im2_pts_adj = im2_pts_adj.astype(np.int32)\n\n # Draw points\n all_pts = np.concatenate([im1_pts, im2_pts_adj], axis=0)\n for pt in all_pts:\n cv2.circle(canvas, (pt[0], pt[1]), 4, _COLOR_BLUE, 2)\n\n # Draw lines\n for i in range(im1_pts.shape[0]):\n pt1 = tuple(im1_pts[i, :])\n pt2 = tuple(im2_pts_adj[i, :])\n color = _COLOR_GREEN if inlier_mask[i] else _COLOR_RED\n cv2.line(canvas, pt1, pt2, color, 2)\n\n return canvas", "def compare_images(img1, img2):\n #normalize scene pixel values\n img1_mean = img1.mean() \n img1_std = img1.std()\n for i in np.nditer(img1, op_flags=['readwrite']):\n i[...] = (i-img1_mean)/img1_std\n\n #normalize template pixel values\n img2_mean = img2.mean() \n img2_std = img2.std()\n for i in np.nditer(img2, op_flags=['readwrite']):\n i[...] = (i-img2_mean)/img2_std\n\n #sums error\n error_array = img1 - img2\n error_array = error_array.astype(np.int8)\n ss_error = 0\n for i in np.nditer(error_array):\n ss_error += abs(i/255.0)**0.5\n #print ss_error\n return ss_error", "def diff_image_feature(image0, image1):\n return 0", "def get_slice(P1, P2, name):\n \n centre_dist = distance_3D(P1, P2)\n plot_img = np.zeros((ceil(centre_dist / 2. + 1), centre_dist + 2 ))\n Xrange = np.arange(-centre_dist / 4., centre_dist / 4. + 1)\n \n # time goes along the vector between P1 and P2\n # since it might be at an angle, I can't loop in 1\n # pixel increments - this will miss certain slices. 
Therefore,\n # I need to loop through by 1/cosA, where A is angle between\n # the xy plane and vector P1->P2\n sampling = sample_rate(P1, P2)\n \n for time in np.linspace(0, centre_dist + 1,\n centre_dist * sampling):\n # Go up along the line\n new_pt = vector_3D(P1, P2, time)\n old_pt = vector_3D(P1, P2, time - centre_dist / 2. * sampling)\n\n if time == 0:\n input_file = name % int(round(new_pt[2], 0))\n img = io.imread(input_file)\n \n # Check if the previous slice is the same as the next\n # don't load it again if it is - save computation time\n if int(round(new_pt[2], 0)) != int(round(old_pt[2], 0)):\n \n input_file = name % int(round(new_pt[2], 0))\n img = io.imread(input_file)\n \n for X in Xrange:\n \n # Get along the X direction for every height\n x, y, z = vector_perpendicular_3D(new_pt, P2, 1, 0, X)\n \n pixel_value = interpolation(x, y, img)\n \n plot_img[X + centre_dist / 4., time] = pixel_value\n else:\n for X in Xrange:\n \n # Get along the X direction for every height\n x, y, z = vector_perpendicular_3D(new_pt, P2, 1, 0, X)\n\n pixel_value = interpolation(x, y, img)\n \n plot_img[X + centre_dist / 4., time] = pixel_value\n \n return plot_img" ]
[ "0.65603113", "0.620282", "0.6117331", "0.6108788", "0.59726435", "0.5869135", "0.57968843", "0.5794731", "0.5768454", "0.57521975", "0.5680387", "0.5629242", "0.5622987", "0.5622519", "0.5594791", "0.55890703", "0.55723226", "0.55641454", "0.5550029", "0.5542601", "0.5538139", "0.5535449", "0.5526483", "0.550816", "0.54875404", "0.5486822", "0.5479989", "0.5476584", "0.54734856", "0.54559785" ]
0.7586962
0
takes two images tilted with respect to one another and tries to find overlap img1 (as numpy array) img2 (as numpy array) tiltdiff (in degrees) negative, img1 is more compressed (tilted) positive, img2 is more compressed (tilted)
def getTiltedRotateShift(img1, img2, tiltdiff, angle=0, bin=1, msg=True): ### untilt images by stretching and compressing # choose angle s/t compressFactor = 1/stretchFactor # this only works if one image is untilted (RCT) of both images are opposite tilt (OTR) #halftilt = abs(tiltdiff)/2.0 halftiltrad = math.acos(math.sqrt(math.cos(abs(tiltdiff)/180.0*math.pi))) # go from zero tilt to half tilt compressFactor = math.cos(halftiltrad) # go from max tilt to half tilt stretchFactor = math.cos(halftiltrad) / math.cos(abs(tiltdiff)/180.0*math.pi) if tiltdiff > 0: if msg is True: apDisplay.printMsg("compress image 1") untilt1 = transformImage(img1, compressFactor, angle) untilt2 = transformImage(img2, stretchFactor, angle) xfactor = compressFactor else: if msg is True: apDisplay.printMsg("stretch image 1") untilt1 = transformImage(img1, stretchFactor, angle) untilt2 = transformImage(img2, compressFactor, angle) xfactor = stretchFactor ### filtering was done earlier filt1 = untilt1 filt2 = untilt2 if filt1.shape != filt2.shape: newshape = ( max(filt1.shape[0],filt2.shape[0]), max(filt1.shape[1],filt2.shape[1]) ) apDisplay.printMsg("Resizing images to: "+str(newshape)) filt1 = apImage.frame_constant(filt1, newshape, filt1.mean()) filt2 = apImage.frame_constant(filt2, newshape, filt2.mean()) ### cross-correlate cc = correlator.cross_correlate(filt1, filt2, pad=True) rad = min(cc.shape)/20.0 cc = apImage.highPassFilter(cc, radius=rad) cc = apImage.normRange(cc) cc = blackEdges(cc) cc = apImage.normRange(cc) cc = blackEdges(cc) cc = apImage.normRange(cc) cc = apImage.lowPassFilter(cc, radius=10.0) #find peak peakdict = peakfinder.findSubpixelPeak(cc, lpf=0) #import pprint #pprint.pprint(peak) pixpeak = peakdict['subpixel peak'] if msg is True: apDisplay.printMsg("Pixel peak: "+str(pixpeak)) apImage.arrayToJpegPlusPeak(cc, "guess-cross-ang"+str(abs(angle))+".jpg", pixpeak) rawpeak = numpy.array([pixpeak[1], pixpeak[0]]) #swap coord shift = numpy.asarray(correlator.wrap_coord(rawpeak, cc.shape))*bin if msg is True: apDisplay.printMsg("Found xy-shift btw two images" +";\n\t SNR= "+str(round(peakdict['snr'],2)) +";\n\t halftilt= "+str(round(halftiltrad*180/math.pi, 3)) +";\n\t compressFactor= "+str(round(compressFactor, 3)) +";\n\t stretchFactor= "+str(round(stretchFactor, 3)) +";\n\t xFactor= "+str(round(xfactor, 3)) +";\n\t rawpeak= "+str(numpy.around(rawpeak*bin, 1)) +";\n\t shift= "+str(numpy.around(shift, 1)) ) return shift, xfactor, peakdict['snr']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTiltedCoordinates(img1, img2, tiltdiff, picks1=[], angsearch=True, inittiltaxis=-7.2, msg=True):\n\tt0 = time.time()\n\t#shrink images\n\tbin = 2\n\tbinned1 = apImage.binImg(img1, bin)\n\tbinned2 = apImage.binImg(img2, bin)\n\t#apImage.arrayToJpeg(binned1, \"binned1.jpg\")\n\t#apImage.arrayToJpeg(binned2, \"binned2.jpg\")\n\tfilt1 = apImage.highPassFilter(binned1, apix=1.0, radius=20.0, localbin=4/bin)\n\tfilt2 = apImage.highPassFilter(binned2, apix=1.0, radius=20.0, localbin=4/bin)\n\t#apImage.arrayToJpeg(filt1, \"filt1.jpg\")\n\t#apImage.arrayToJpeg(filt2, \"filt2.jpg\")\n\n\tif angsearch is True:\n\t\tbestsnr = 0\n\t\tbestangle = None\n\t\t### rough refine\n\t\t#for angle in [-6, -4, -2,]:\n\t\t#\tsys.stderr.write(\".\")\n\t\t#\tshift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, angle, bin, msg=False)\n\t\t#\tif snr > bestsnr:\t\n\t\t#\t\tbestsnr = snr\n\t\t#\t\tbestangle = angle\n\t\tbestangle = inittiltaxis\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"Best tilt axis angle= %.1f; SNR=%.2f\"%(bestangle,bestsnr))\n\t\t### finer refine\n\t\tfor angle in [bestangle-1, bestangle-0.5, bestangle+0.5, bestangle+1]:\n\t\t\tif msg is True:\n\t\t\t\tsys.stderr.write(\".\")\n\t\t\tshift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, angle, bin, msg=False)\n\t\t\tif snr > bestsnr:\t\n\t\t\t\tbestsnr = snr\n\t\t\t\tbestangle = angle\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"Best tilt axis angle= %.1f; SNR=%.2f\"%(bestangle,bestsnr))\n\t\t### really fine refine\n\t\tfor angle in [bestangle-0.2, bestangle-0.1, bestangle+0.1, bestangle+0.2]:\n\t\t\tif msg is True:\n\t\t\t\tsys.stderr.write(\".\")\n\t\t\tshift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, angle, bin, msg=False)\n\t\t\tif snr > bestsnr:\t\n\t\t\t\tbestsnr = snr\n\t\t\t\tbestangle = angle\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"Best tilt axis angle= %.1f; SNR=%.2f\"%(bestangle,bestsnr))\n\n\t\tshift, xfactor, snr = getTiltedRotateShift(filt1, filt2, tiltdiff, bestangle, bin, msg=msg)\n\t\tif msg is True:\n\t\t\tapDisplay.printMsg(\"Best tilt axis angle= %.1f; SNR=%.2f\"%(bestangle,bestsnr))\n\telse:\n\t\tbestangle = 0.0\n\t\tshift, xfactor, snr = getTiltedRotateShift(img1, img2, tiltdiff, bestangle, bin)\n\n\tif msg and min(abs(shift)) < min(img1.shape)/16.0:\n\t\tapDisplay.printWarning(\"Overlap was too close to the edge and possibly wrong.\")\n\n\t### case 1: find tilted center of first image\n\tcenter = numpy.asarray(img1.shape)/2.0\n\tnewpoint = translatePoint(center, center, shift, bestangle, xfactor)\n\t#print \"newpoint=\", newpoint\n\thalfsh = (center + newpoint)/2.0\n\torigin = halfsh\n\n\t### case 2: using a list of picks\n\tif len(picks1) > 1:\n\t\t#get center most pick\n\t\tdmin = origin[0]/2.0\n\t\tfor pick in picks1:\n\t\t\tda = numpy.hypot(pick[0]-halfsh[0], pick[1]-halfsh[1])\n\t\t\tif da < dmin:\n\t\t\t\tdmin = da\n\t\t\t\torigin = pick\n\n\t# origin is pick from image 1\n\t# newpart is pick from image 2\n\tnewpart = translatePoint(origin, center, shift, bestangle, xfactor)\n\tnewpart2 = numpy.array([(origin[0]*xfactor-shift[0])*xfactor, origin[1]-shift[1]])\n\tif msg is True:\n\t\tapDisplay.printMsg(\"origin=(%d,%d); newpart=(%.1f,%.1f); newpart2=(%.1f,%.1f)\"\n\t\t\t%(origin[0],origin[1], newpart[0],newpart[1], newpart2[0],newpart2[1],))\n\t\tapDisplay.printMsg(\"completed in \"+apDisplay.timeString(time.time()-t0))\n\n\treturn origin, newpart, snr, bestangle\n\n\t### check to make sure points are not off the edge\n\twhile newpart[0] < 
10:\n\t\tnewpart += numpy.asarray((20,0))\n\t\torigin += numpy.asarray((20,0))\n\twhile newpart[1] < 10:\n\t\tnewpart += numpy.asarray((0,20))\n\t\torigin += numpy.asarray((0,20))\n\twhile newpart[0] > img1.shape[0]-10:\n\t\tnewpart -= numpy.asarray((20,0))\n\t\torigin -= numpy.asarray((20,0))\n\twhile newpart[1] > img1.shape[1]-10:\n\t\tnewpart -= numpy.asarray((0,20))\n\t\torigin -= numpy.asarray((0,20))\n\n\treturn origin, newpart", "def diff_image(images):\n prev_image = cv2.absdiff(images[0], images[1])\n cur_image = cv2.absdiff(images[1], images[2])\n return cv2.bitwise_and(prev_image, cur_image)", "def _diff_images(img_before, img_after):\n width_before, height_before = img_before.size\n width_after, height_after = img_after.size\n data_before = img_before.getdata()\n data_after = img_after.getdata()\n\n width, height = max(width_before, width_after), max(height_before, height_after)\n offset_ax = (width - width_before) // 2\n offset_ay = (height - height_before) // 2\n offset_bx = (width - width_after) // 2\n offset_by = (height - height_after) // 2\n\n diff = 0\n for y in range(height):\n for x in range(width):\n ax, ay = x - offset_ax, y - offset_ay\n bx, by = x - offset_bx, y - offset_by\n if (ax < 0 or bx < 0 or ax >= width_before or bx >= width_after or\n ay < 0 or by < 0 or ay >= height_before or by >= height_after):\n diff += 1\n else:\n if data_before[ax + ay *width_before] != data_after[bx + by * width_after]:\n diff += 1\n try:\n return round(diff / float(width * height), 4)\n except ZeroDivisionError:\n return 0.0", "def image_in_image(im1,im2,tp):\n # points to warp from\n m,n = im1.shape[:2]\n fp = array([[0,m,m,0],[0,0,n,n],[1,1,1,1]])\n # compute affine transform and apply\n H = homography.Haffine_from_points(tp,fp)\n im1_t = ndimage.affine_transform(im1,H[:2,:2],\n (H[0,2],H[1,2]),im2.shape[:2])\n alpha = (im1_t > 0)\n return (1-alpha)*im2 + alpha*im1_t", "def diff_image_feature(image0, image1):\n return 0", "def concat_images_corner(imga, imgb, xoffset=0, yoffset=0, direction='horizontal',\n ontop=True, adjust_z=False):\n if direction == 'horizontal':\n max_dim = np.maximum.reduce([imga.shape, imgb.shape])\n\n offset = (abs(yoffset), abs(xoffset))\n tmp_offset = np.array(offset)\n\n # if (max_dim == imgb.shape).all():\n # tmp = np.copy(imgb)\n # imgb = np.copy(imga)\n # imga = np.copy(tmp)\n # ontop = toggle(ontop)\n # xoffset *= -1\n # yoffset *= -1\n\n # center_new = np.array(np.divide(max_dim, 2), dtype=int)\n new_img = np.full(np.add(max_dim, np.abs(offset)), np.nan)\n\n Sa0 = slice(0, imga.shape[0])\n Sa1 = slice(0, imga.shape[1])\n Sb0 = slice(abs(yoffset), abs(yoffset) + imgb.shape[0])\n Sb1 = slice(abs(xoffset), abs(xoffset) + imgb.shape[1])\n\n xdir = np.sign(xoffset)\n ydir = np.sign(yoffset)\n\n if ydir == 0:\n ydir = 1\n if xdir == 0:\n xdir = 1\n\n imga = imga[::ydir, ::xdir]\n imgb = imgb[::ydir, ::xdir]\n\n if adjust_z:\n top_img = 1 * new_img\n top_img[Sa0, Sa1] = imga\n top_img[Sb0, Sb1] = imgb\n low_img = 1 * new_img\n low_img[Sb0, Sb1] = imgb\n low_img[Sa0, Sa1] = imga\n\n diff = top_img - low_img\n m = np.nanmean(diff)\n s = np.nanstd(diff)\n mask = np.abs(diff) < m + s\n diff[mask] = np.nan\n add = np.nanmean(diff)\n\n print(add)\n\n imgb -= add\n\n if ontop:\n new_img[Sa0, Sa1] = imga\n new_img[Sb0, Sb1] = imgb\n else:\n new_img[Sb0, Sb1] = imgb\n new_img[Sa0, Sa1] = imga\n\n return new_img[::ydir, ::xdir]", "def concat_images(imga, imgb, xoffset=0, yoffset=0, direction='horizontal',\n ontop=True, adjust_z=False, center_offset=True):\n if 
direction == 'horizontal':\n max_dim = np.maximum.reduce([imga.shape, imgb.shape])\n\n center_a = np.array(np.divide(imga.shape, 2), dtype=int)\n center_b = np.array(np.divide(imgb.shape, 2), dtype=int)\n offset = (abs(yoffset), abs(xoffset))\n\n if center_offset:\n new_offset = np.subtract(center_a, np.add(center_b, offset))\n\n if (max_dim == imgb.shape).all():\n tmp = np.copy(imgb)\n imgb = np.copy(imga)\n imga = np.copy(tmp)\n ontop = toggle(ontop)\n xoffset *= -1\n yoffset *= -1\n\n # elif not (max_dim == imga.shape).all():\n # for i, m in enumerate(max_dim):\n # if m not in imga.shape:\n # new_offset[i] = center_a[i] - (center_b[i] + offset[i])\n # else:\n # new_offset[i] = center_a[i] + offset[i] - center_b[i]\n\n new_offset[new_offset > 0] = 0\n center_new = np.array(np.divide(max_dim, 2), dtype=int)\n new_img = np.full(np.add(max_dim, np.abs(new_offset)), np.nan)\n\n Sa0 = slice(int(center_new[0] - imga.shape[0]/2 + 0.5),\n int(center_new[0] + imga.shape[0]/2 + 0.5))\n Sa1 = slice(int(center_new[1] - imga.shape[1]/2 + 0.5),\n int(center_new[1] + imga.shape[1]/2 + 0.5))\n Sb0 = slice(int(center_new[0] + abs(yoffset) - imgb.shape[0]/2 + 0.5),\n int(center_new[0] + abs(yoffset) + imgb.shape[0]/2 + 0.5))\n Sb1 = slice(int(center_new[1] + abs(xoffset) - imgb.shape[1]/2 + 0.5),\n int(center_new[1] + abs(xoffset) + imgb.shape[1]/2 + 0.5))\n\n xdir = np.sign(xoffset)\n ydir = np.sign(yoffset)\n\n if ydir == 0:\n ydir = 1\n if xdir == 0:\n xdir = 1\n\n imga = imga[::ydir, ::xdir]\n imgb = imgb[::ydir, ::xdir]\n\n if adjust_z:\n top_img = 1 * new_img\n top_img[Sa0, Sa1] = imga\n top_img[Sb0, Sb1] = imgb\n low_img = 1 * new_img\n low_img[Sb0, Sb1] = imgb\n low_img[Sa0, Sa1] = imga\n\n diff = top_img - low_img\n m = np.nanmean(diff)\n s = np.nanstd(diff)\n mask = np.abs(diff) < m + s\n diff[mask] = np.nan\n add = np.nanmean(diff)\n\n print(add)\n\n imgb -= add\n\n if ontop:\n new_img[Sa0, Sa1] = imga\n new_img[Sb0, Sb1] = imgb\n else:\n new_img[Sb0, Sb1] = imgb\n new_img[Sa0, Sa1] = imga\n\n return new_img[::ydir, ::xdir]", "def compare_images(image1, image2, method='diff', *, n_tiles=(8, 8)):\n if image1.shape != image2.shape:\n raise ValueError('Images must have the same shape.')\n\n img1 = img_as_float(image1)\n img2 = img_as_float(image2)\n\n if method == 'diff':\n comparison = np.abs(img2 - img1)\n elif method == 'blend':\n comparison = 0.5 * (img2 + img1)\n elif method == 'checkerboard':\n shapex, shapey = img1.shape\n mask = np.full((shapex, shapey), False)\n stepx = int(shapex / n_tiles[0])\n stepy = int(shapey / n_tiles[1])\n for i, j in product(range(n_tiles[0]), range(n_tiles[1])):\n if (i + j) % 2 == 0:\n mask[i * stepx:(i + 1)*stepx, j * stepy:(j + 1) * stepy] = True\n comparison = np.zeros_like(img1)\n comparison[mask] = img1[mask]\n comparison[~mask] = img2[~mask]\n else:\n raise ValueError('Wrong value for `method`. 
'\n 'Must be either \"diff\", \"blend\" or \"checkerboard\".')\n return comparison", "def transform_images(img1,img2):", "def img_compare(A, B):\r\n A = cv2.GaussianBlur(A, (5, 5), 5)\r\n B = cv2.GaussianBlur(B, (5, 5), 5)\r\n diff = cv2.absdiff(A, B) # absolute difference\r\n _, diff = cv2.threshold(diff, 200, 255, cv2.THRESH_BINARY)\r\n return np.sum(diff)", "def img_compare(file1, file2):\n # read image\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n\n # resize \n size = 128, 128\n img1_res = img_resize(img1, size)\n img2_res = img_resize(img2, size)\n\n img1_res.save(\"img_1.thumbnail\", \"JPEG\")\n img2_res.save(\"img_2.thumbnail\", \"JPEG\")\n\n # convert to gray scale\n img1_grayscale = img1_res.convert('LA')\n img1_grayscale.save(\"img_1_grayscale.png\")\n\n img2_grayscale = img2_res.convert('LA')\n img2_grayscale.save(\"img_2_grayscale.png\")\n\n # normalise\n img1_norm = normalize(np.array(img1_grayscale.getdata()).astype(float))\n img2_norm = normalize(np.array(img2_grayscale.getdata()).astype(float))\n\n try:\n # compare two images\n diff = img1_norm - img2_norm\n m_norm = sum(abs(diff)) # Manhattan norm\n z_norm = norm(diff.ravel(), 0) # Zero norm\n\n # print(\"Manhattan norm:\", m_norm, \"/ per pixel:\", m_norm/img1_norm.size)\n # print(\"Zero norm:\", z_norm, \"/ per pixel:\", z_norm*1.0/img1_norm.size)\n\n return m_norm/img1_norm.size, float(z_norm) / img1_norm.size\n except:\n return 100, 100", "def compare_images(first_img_path, second_img_path):\n img1 = Image.open(first_img_path)\n img2 = Image.open(second_img_path)\n\n diff = ImageChops.difference(img1, img2)\n print(diff.getbbox())", "def antialiased(\n img: ImageSequence, x1: int, y1: int, width: int, height: int, img2: ImageSequence\n) -> bool:\n x0 = max(x1 - 1, 0)\n y0 = max(y1 - 1, 0)\n x2 = min(x1 + 1, width - 1)\n y2 = min(y1 + 1, height - 1)\n pos = (y1 * width + x1) * 4\n zeroes = int(x1 == x0 or x1 == x2 or y1 == y0 or y1 == y2)\n min_delta = max_delta = 0.0\n min_x = min_y = max_x = max_y = 0\n\n # go through 8 adjacent pixels\n for x in range(x0, x2 + 1):\n for y in range(y0, y2 + 1):\n if x == x1 and y == y1:\n continue\n\n # brightness delta between the center pixel and adjacent one\n delta = color_delta(img, img, pos, (y * width + x) * 4, True)\n\n # count the number of equal, darker and brighter adjacent pixels\n if delta == 0:\n zeroes += 1\n # if found more than 2 equal siblings, it's definitely not anti-aliasing\n if zeroes > 2:\n return False\n\n # remember the darkest pixel\n elif delta < min_delta:\n min_delta = delta\n min_x = x\n min_y = y\n\n # remember the brightest pixel\n elif delta > max_delta:\n max_delta = delta\n max_x = x\n max_y = y\n\n # if there are no both darker and brighter pixels among siblings, it's not anti-aliasing\n if min_delta == 0 or max_delta == 0:\n return False\n\n # if either the darkest or the brightest pixel has 3+ equal siblings in both images\n # (definitely not anti-aliased), this pixel is anti-aliased\n return (\n has_many_siblings(img, min_x, min_y, width, height)\n and has_many_siblings(img2, min_x, min_y, width, height)\n ) or (\n has_many_siblings(img, max_x, max_y, width, height)\n and has_many_siblings(img2, max_x, max_y, width, height)\n )", "def diffImages(imgA, imgB):\n bandsImgA = imgA.split()\n bandsImgB = imgB.split()\n\n absDiff = ImageMath.eval(\"convert(abs(a0-b0) + abs(a1-b1) + abs(a2-b2), 'L')\",\n a0 = bandsImgA[0], b0 = bandsImgB[0],\n a1 = bandsImgA[1], b1 = bandsImgB[1],\n a2 = bandsImgA[2], b2 = bandsImgB[2])\n bandsImgOut = [\n 
ImageMath.eval(\"convert(a + 2*diff, 'L')\", a = bandsImgA[0], diff = absDiff),\n ImageMath.eval(\"convert(a - diff, 'L')\", a = bandsImgA[1], diff = absDiff),\n ImageMath.eval(\"convert(a - diff, 'L')\", a = bandsImgA[2], diff = absDiff),\n ]\n\n return Image.merge('RGB', bandsImgOut)", "def retrieve_overlap(img1, img2, lbl1=1, lbl2=1):\n xlen, ylen, zlen = img1.GetSize()\n\n # Make sure that our images are equal in size to prevent weird invisible bugs\n xlen2, ylen2, zlen2 = img2.GetSize()\n assert xlen == xlen2 and ylen == ylen2 and zlen == zlen2\n\n # Copy our image as to not alter the original data\n new_image = img1[:, :, :]\n for z in xrange(zlen):\n for y in xrange(ylen):\n for x in xrange(xlen):\n # Set any bit with overlap to 1, else set it to 0\n overlap = img1.GetPixel(x, y, z) == lbl1 and img2.GetPixel(x, y, z) == lbl2\n if overlap:\n new_image.SetPixel(x, y, z, 1)\n else:\n new_image.SetPixel(x, y, z, 0)\n return new_image", "def compare_images(img1, img2):\n #normalize scene pixel values\n img1_mean = img1.mean() \n img1_std = img1.std()\n for i in np.nditer(img1, op_flags=['readwrite']):\n i[...] = (i-img1_mean)/img1_std\n\n #normalize template pixel values\n img2_mean = img2.mean() \n img2_std = img2.std()\n for i in np.nditer(img2, op_flags=['readwrite']):\n i[...] = (i-img2_mean)/img2_std\n\n #sums error\n error_array = img1 - img2\n error_array = error_array.astype(np.int8)\n ss_error = 0\n for i in np.nditer(error_array):\n ss_error += abs(i/255.0)**0.5\n #print ss_error\n return ss_error", "def calc_difference(ndvi_tile1, ndvi_tile2, output):\n \n #open dataset and get Affine transformation and bounding properties \n with rio.open(ndvi1) as src1:\n meta = src1.meta.copy()\n transform = src1.meta[\"transform\"]\n x = meta['width']\n y = meta['height']\n band1 = src1.read()\n \n #open dataset \n with rio.open(ndvi2) as src2:\n #read the band as ndarray with the same dimension of src1\n band2 = src2.read(out_shape=(src1.height, src1.width), \n resampling=rio.enums.Resampling.bilinear)\n #create destination for reprojection of src2\n dst_crs = {'init': 'EPSG:32632'}\n proj_band2 = np.empty(src1.shape, dtype=np.float32)\n #reproject the src2 to match src1\n warp.reproject(band2, destination=proj_band2, src_transform=src2.transform, src_crs=src2.crs, \n dst_transform=transform, dst_crs=dst_crs) \n \n #calculate difference between reprojected band2 and band1\n difference = np.subtract(proj_band2, band1)\n #create outfile\n outfile = output\n #write outfile with the properties and resolution of src1\n with rio.open(outfile, 'w', **meta) as dst:\n dst.write(difference, window=rio.windows.Window(col_off=0, row_off=0, width=x, height=y))\n\n return outfile", "def diffSmoothImages(imgA, imgB):\n\n smoothImgA = smoothImage(imgA)\n smoothImgB = smoothImage(imgB)\n\n return diffImages(smoothImgA, smoothImgB)", "def vimage(cat1, cat2, dmax, psize, fwhm):\n\n NHALF = int(dmax/psize)\n NSIDE = 2*NHALF+1\n mshift = (NHALF+0.5)*psize\n img = np.zeros((NSIDE,NSIDE))\n x2s, y2s = cat2[:,0], cat2[:,1]\n for x1, y1 in cat1:\n ok = (x2s > x1-mshift) & (x2s < x1+mshift) & \\\n (y2s > y1-mshift) & (y2s < y1+mshift)\n for x2, y2 in cat2[ok]:\n ix = NHALF+int(round((x2-x1)/psize))\n iy = NHALF+int(round((y2-y1)/psize))\n img[iy,ix] += 1\n\n # smooth image\n img = gaussian_filter(img,fwhm/psize/2.3548,mode='constant')\n\n # identify maximum pixel\n ind = np.arange(NSIDE)\n ix, iy = np.meshgrid(ind, ind)\n peak = img == img.max()\n #if len(ix[peak]) > 1:\n # raise Exception(\"Found more than one 
maximum pixel\")\n\n # now have first approximation to the shift\n ixp = ix[peak][0]\n iyp = iy[peak][0]\n xp = psize*(ixp-NHALF)\n yp = psize*(iyp-NHALF)\n if ixp == 0 or ixp == NSIDE-1 or iyp == 0 or iyp == NSIDE-1:\n # max pixel at edge of array. Just return pixel position\n # as \"refined\" position\n xr = xp\n yr = yp\n\n else:\n # Make a quadratic approx to refine the peak position.\n # Estimate first and second partial derivatives from\n # 3x3 pixels centred on peak\n fx = (img[iyp,ixp+1] - img[iyp,ixp-1])/2.\n fy = (img[iyp+1,ixp] - img[iyp-1,ixp])/2.\n fxx = img[iyp,ixp-1] + img[iyp,ixp+1] - 2*img[iyp,ixp]\n fyy = img[iyp-1,ixp] + img[iyp+1,ixp] - 2*img[iyp,ixp]\n fxy = (img[iyp+1,ixp+1] + img[iyp-1,ixp-1] -\n img[iyp+1,ixp-1] - img[iyp-1,ixp+1])/4.\n b = np.array((fx,fy)).T\n A = np.array(((fxx,fxy),(fxy,fyy)))\n x = solve(A,b)\n xr = xp - psize*x[0]\n yr = yp - psize*x[1]\n return (img, xp,yp,xr,yr)", "def concat_3dimages_corners(imga, imgb, xoffset=0, yoffset=0, zoffset=0,\n transpose=True, ontop=True, center_offset=True,\n adjust_z=(0, 1)):\n print(\"Concating images with reference point being the lower left corner\")\n if transpose:\n print(\"Transpose images\")\n imga = np.transpose(imga, axes=(0, 2, 1))\n imgb = np.transpose(imgb, axes=(0, 2, 1))\n\n offset = (abs(zoffset), abs(yoffset), abs(xoffset))\n max_dim = np.maximum.reduce([imga.shape, np.add(imgb.shape, offset)])\n\n # center_a = np.array(np.divide(imga.shape, 2), dtype=int)\n # center_b = np.array(np.divide(imgb.shape, 2), dtype=int)\n\n # if (max_dim == imgb.shape).all():\n # tmp = np.copy(imgb)\n # imgb = np.copy(imga)\n # imga = np.copy(tmp)\n # ontop = toggle(ontop)\n # xoffset *= -1\n # yoffset *= -1\n # zoffset *= -1\n\n # tmp_offset = np.array(offset)\n # tmp_offset[tmp_offset > 0] = 0\n # new_img = np.full(np.add(max_dim, np.abs(offset)), np.nan)\n new_img = np.full(max_dim, np.nan)\n\n Sa0 = slice(0, imga.shape[0])\n Sa1 = slice(0, imga.shape[1])\n Sa2 = slice(0, imga.shape[2])\n Sb0 = slice(abs(zoffset), abs(zoffset) + imgb.shape[0])\n Sb1 = slice(abs(yoffset), abs(yoffset) + imgb.shape[1])\n Sb2 = slice(abs(xoffset), abs(xoffset) + imgb.shape[2])\n\n xdir = np.sign(xoffset)\n ydir = np.sign(yoffset)\n zdir = np.sign(zoffset)\n\n if ydir == 0:\n ydir = 1\n if xdir == 0:\n xdir = 1\n if zdir == 0:\n zdir = 1\n\n imga = imga[::zdir, ::ydir, ::xdir]\n imgb = imgb[::zdir, ::ydir, ::xdir]\n\n if adjust_z:\n for ix in adjust_z:\n top_img = 1 * new_img[ix]\n top_img[Sa1, Sa2] = imga[ix]\n top_img[Sb1, Sb2] = imgb[ix]\n low_img = 1 * new_img[ix]\n low_img[Sb1, Sb2] = imgb[ix]\n low_img[Sa1, Sa2] = imga[ix]\n\n diff = top_img - low_img\n m = np.nanmean(diff)\n s = np.nanstd(diff)\n mask = np.abs(diff) < m + s\n diff[mask] = np.nan\n add = np.nanmean(diff)\n\n print(add)\n\n imgb[ix] -= add\n\n print(\"new_img shape: \", new_img.shape)\n\n if ontop:\n new_img[Sa0, Sa1, Sa2] = imga\n new_img[Sb0, Sb1, Sb2] = imgb\n else:\n new_img[Sb0, Sb1, Sb2] = imgb\n new_img[Sa0, Sa1, Sa2] = imga\n\n new_img\n\n if transpose:\n print(\"Transpose back\")\n return np.transpose(new_img[::zdir, ::ydir, ::xdir], axes=(0, 2, 1))\n else:\n return new_img[::zdir, ::ydir, ::xdir]", "def image_pre_filtering(left_img: np.ndarray, right_img: np.ndarray) -> tuple:\n\n def clahe(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply Contrast Limited Adaptive Histogram Equalization\n :param image: the image to be filtered\n :return: the image filtered with CLAHE\n \"\"\"\n clahe_filter = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n 
return clahe_filter.apply(image)\n\n def logarithmic(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply Logarithmic Transform\n :param image: the image to be filtered\n :return: the image filtered with logarithmic transform\n \"\"\"\n c = max_disparity / math.log(1 + np.max(image))\n sigma = 1\n for i in range(0, image.shape[1]): # image width\n for j in range(0, image.shape[0]): # image height\n # compute logarithmic transform\n image[j, i] = int(c * math.log(1 + ((math.exp(sigma) - 1) * image[j, i])))\n return image\n\n def exponential(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Perform pre-processing - raise to the power, as this subjectively appears\n to improve subsequent disparity calculation\n :param image:\n :return:\n \"\"\"\n return np.power(image, 0.75).astype('uint8')\n\n def apply_filter(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Choose which filter to apply to both images, this could be a combination too\n :param image: the image to be filtered\n :return:\n \"\"\"\n # choose filters to apply\n return clahe(image)\n\n return apply_filter(left_img), apply_filter(right_img)", "def match3(img1, img2, coordinates1, coordinates2, PATCH_SIZE, threshold=0.7):\n\n\t#creating patches for all points from img1 and img2\n\tcoord1_patches = [make_patch(coordinate, PATCH_SIZE, img1) for coordinate in coordinates1]\n\tcoord2_patches = [make_patch(coordinate, PATCH_SIZE, img2) for coordinate in coordinates2]\n\n\t# creating a matrix with dissimilarity measures for all pairs\n\tall_matches = np.zeros((len(coordinates1), len(coordinates2)))\n\n\tfor (x, y), _ in np.ndenumerate(all_matches):\n\t\tall_matches[x,y] = count_difference(coord1_patches[x], coord2_patches[y])\n\n\t#looking for best left-to-right and right-to-left matches\n\tmatches = []\n\t#left-to-right\n\tfor i, coord1 in enumerate(coordinates1):\n\t\tbest_ltr_match = np.argmin(all_matches[i, :]) #best left-to-right match for coord1\n\t\tbest_rtl_match = np.argmin(all_matches[:, best_ltr_match]) #best match for a best match\n\t\tif (i == best_rtl_match): #hurray, there is a super match\n\n\t\t\tmatches.append([coord1, coordinates2[best_ltr_match], all_matches[i, best_ltr_match]])\n\t\n\treturn matches", "def image_align(first_image, second_image):\r\n\r\n high_diff = (second_image.shape[0] - first_image.shape[0]) // 2\r\n width_diff = (second_image.shape[1] - first_image.shape[1]) // 2\r\n\r\n align_image = second_image[high_diff: high_diff + first_image.shape[0],\r\n width_diff: width_diff + first_image.shape[1],\r\n :]\r\n\r\n\r\n assert align_image.shape == first_image.shape\r\n\r\n return align_image", "def stitch(KPS1, KPS2, H1, H2, match): #---- stich image to previous one\r\n #--- projection image1 from plane to cylindrical ---\r\n total = np.minimum(match.shape[0],100); # total pairing number\r\n bin1 = match[0:total,0].astype(int); # feature no at image 1\r\n R1 = KPS1.keyz[bin1, 0]; # keypoint Y at image 1\r\n C1 = KPS1.keyz[bin1, 1]; # keypoint X at image 1\r\n V1, U1 = pano_tools.project_p2c_points(R1, C1, H1);\r\n #--- image 2 ---\r\n bin2 = match[0:total,1].astype(int); # feature no at image 2\r\n R2 = KPS2.keyz[bin2, 0]; # keypoint Y at image 2\r\n C2 = KPS2.keyz[bin2, 1]; # keypoint X at image 2\r\n Rc2 = H2[0]/2; Rp2= R2 - Rc2; \r\n Cc2 = H2[1]/2; Cp2= C2 - Cc2;\r\n #--- --- \r\n # {phi1,S1,TU1,TV1} = M*M matrix: which is derived by chosen 2 pairs \r\n # {phi0,S0,TU0,TV0} = scalar: which is initial guess by removing outlier\r\n # \r\n phi1,S1,TU1,TV1= pano_tools.derive_p2c_formula(U1,V1,Cp2,Rp2);\r\n 
seq,phi0,S0,TU0,TV0 = pano_tools.remove_ill_matched_pair(phi1,S1,TU1,TV1); \r\n #--- linear regression [not necessary] ---\r\n # U1X = U1[seq]; C2X = C2[seq]; V1X = V1[seq]; R2X = R2[seq]; \r\n # phi0,S0,TU0,TV0,Err= pano_tools.linear_regression(V1X,U1X,R2X,C2X, phi0,S0,TU0,TV0,H2)\r\n H2[3]= phi0; H2[4]= S0; H2[5]= TV0; H2[6]= TU0;", "def blur3Diff(aImgkMin2, aImgkMin1, aImgk, aTimestamp, aROIPoints,\n aThreshold, aSaveImages=False):\n x0, y0, x1, y1 = xyFromROI(aROIPoints)\n\n ROIkMin2 = aImgkMin2[y0:y1, x0:x1]\n ROIkMin1 = aImgkMin1[y0:y1, x0:x1]\n ROIk = aImgk[y0:y1, x0:x1]\n\n # Create two sets of difference images, one from ROIkMin2, ROIkMin1 and one\n # from ROIkMin1 and ROI in order to compare the two diffs to evaluate which\n # region(s) are active/in motion in both of them.\n diffkMin1 = cv2.absdiff(ROIkMin2, ROIkMin1)\n diffk = cv2.absdiff(ROIkMin1, ROIk)\n\n # For each of the diff images we check each pixel and set it to 0 if below\n # a given threshold/intensity, else set it to 255\n # diffkMin1Return, diffkMin1 = cv2.threshold(diffkMin1, aThreshold, 255,\n # cv2.THRESH_BINARY)\n # diffkReturn, diffk = cv2.threshold(diffk, aThreshold, 255,\n # cv2.THRESH_BINARY)\n adaptiveThreshType = cv2.ADAPTIVE_THRESH_MEAN_C\n # adaptiveThreshType = cv2.ADAPTIVE_THRESH_GAUSSIAN_C\n adaptiveSize = 9\n\n gaussStdDev = 10\n\n diffkMin1 = cv2.GaussianBlur(diffkMin1, (5, 5), gaussStdDev)\n diffkMin1 = cv2.adaptiveThreshold(diffkMin1, 255,\n adaptiveThreshType,\n cv2.THRESH_BINARY,\n adaptiveSize, adaptiveSize)\n\n diffk = cv2.GaussianBlur(diffk, (5, 5), gaussStdDev)\n diffk = cv2.adaptiveThreshold(diffk, 255,\n adaptiveThreshType,\n cv2.THRESH_BINARY,\n adaptiveSize, adaptiveSize)\n\n # Now find the union of the regions detected moving in both diff images,\n # this gives us the movement present in frame k-1.\n motionkMin1 = cv2.bitwise_and(diffkMin1, diffk)\n\n # Now we can find movement in frame k by subtracting diffk (containing\n # movement in k, k-1) from motionkMin1 (containing movement in frame k-1)\n motion = cv2.absdiff(motionkMin1, diffk)\n\n # Remove specs that occur in the image before dilation\n # motion = removeSmallBlobs(motion, 5)\n\n # Now dilate and erode the produced images to fill in blobs and eliminate\n # small blobs (specks)\n motion = dilateThenErode(motion, aKernelSize=3, aIterationsDilate=4,\n aIterationsErode=2)\n\n # TODO Find a way to set this area as a function of... ROI? Look in to how\n # we can use the newly added getCutoffObjectArea() function to do this\n # NOTE: THE FUNCTION BELOW IS SLOW. 
AVOID IF POSSIBLE.\n # motion = removeSmallBlobs(motion, 200)\n\n motion = dilateThenErode(motion, aKernelSize=3, aIterationsDilate=4,\n aIterationsErode=2)\n\n # motion = removeSmallBlobs(motion, 200)\n\n return motion", "def compare_images(img1_path, img2_path):\n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n try:\n diff = ImageChops.difference(img1, img2)\n except ValueError:\n return False\n return diff.getbbox() is None", "def get_opt_rotate(obj_img, back_img,\n back_center_x, back_center_y,\n obj_center_x, obj_center_y,\n prev_rot_angle=0.,\n is_erosion=False):\n width = obj_img.shape[0]\n rot_img = ndimage.rotate(obj_img, prev_rot_angle, reshape=False)\n induce_x, induce_y = int(back_center_x - obj_center_x), int(back_center_y - obj_center_y)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y + width, induce_x:induce_x + width] -= rot_img\n neg_count = len(np.argwhere(combine_img < 0))\n if is_erosion:\n angle_amount = 4.\n else:\n angle_amount = 16.\n # check combine_img.dtype; rot_img.dtype; back_img\n curr_angle = prev_rot_angle\n while angle_amount > 0.5:\n angle_amount /= 2.\n\n rotate_1 = ndimage.rotate(obj_img, curr_angle + angle_amount, reshape=False)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y+width, induce_x:induce_x+width] -= rotate_1\n neg_count_1 = len(np.argwhere(combine_img < 0))\n\n rotate_2 = ndimage.rotate(obj_img, curr_angle - angle_amount, reshape=False)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y + width, induce_x:induce_x + width] -= rotate_2\n neg_count_2 = len(np.argwhere(combine_img < 0))\n\n if neg_count_1 < neg_count_2:\n if neg_count_1 < neg_count:\n neg_count = neg_count_1\n curr_angle = curr_angle + angle_amount\n else:\n if neg_count_2 < neg_count:\n neg_count = neg_count_2\n curr_angle = curr_angle - angle_amount\n # print(curr_angle)\n # print(neg_count, neg_count_1, neg_count_2)\n # print('Negative Pix Count Rotation: %d.' 
% neg_count)\n # print('Optimal Rotation: ', curr_angle)\n return curr_angle, neg_count", "def match(image1,image2,threshold,useRansac=False,t_orientation=30,t_scale=0.5):\r\n im1, keypoints1, descriptors1 = ReadKeys(image1)\r\n im2, keypoints2, descriptors2 = ReadKeys(image2)\r\n #\r\n # REPLACE THIS CODE WITH YOUR SOLUTION (ASSIGNMENT 5, QUESTION 3)\r\n #\r\n # Generate five random matches (for testing purposes)\r\n # matched_pairs = []\r\n # num = 5\r\n # for i in range(num):\r\n # matched_pairs.append([keypoints1[i],keypoints2[i]])\r\n # return DisplayMatches(im1, im2, matched_pairs)\r\n\r\n # END OF SECTION OF CODE TO REPLACE\r\n #\r\n\r\n #q3\r\n matched_pairs = []\r\n between_angles = np.arccos(np.dot(descriptors1, np.transpose(descriptors2)))\r\n for i, row in enumerate(between_angles):\r\n \tratio = sorted(row)[0] / sorted(row)[1]\r\n \tif ratio <= threshold:\r\n\t \tmatched_pairs.append([keypoints1[i], keypoints2[np.where(row == sorted(row)[0])[0][0]]])\r\n # print(matched_pairs)\r\n if useRansac is False:\r\n return DisplayMatches(im1, im2, matched_pairs)\r\n\t# \r\n\r\n #q4\r\n repetition = 10\r\n subsets = [[]] * repetition\r\n for i in range(repetition):\r\n r = random.randint(0, len(matched_pairs))\r\n for match in matched_pairs:\r\n ds1, ds2 = matched_pairs[r][1][2]/matched_pairs[r][0][2], match[1][2]/match[0][2]\r\n do1, do2 = (matched_pairs[r][1][3]-matched_pairs[r][0][3]), (match[1][3]-match[0][3])\r\n if abs(ds2 - ds1) <= t_scale * ds1 and abs(do2 - do1) % (2 * math.pi) <= t_orientation:\r\n subsets[i].append(match)\r\n\r\n max_i, max_len = 0, subsets[0]\r\n for i in range(10):\r\n l = len(subsets[i])\r\n if l > max_len:\r\n max_len = l\r\n max_i = i\r\n\r\n im3 = DisplayMatches(im1, im2, subsets[max_i])\r\n return im3", "def registration(im1, im2, num = 10, opt = 'py', outputPath = 'None'):\n\n # determin which one is the right side of the breast\n b_size = 5\n n_row, n_col = im1.shape\n side = 0\n if np.sum(im1[0:b_size,0:b_size]) < np.sum(im1[0:b_size,n_col-b_size:n_col]):\n side = 1 \n\n # flip the right side image\n if side == 1:\n im1 = np.fliplr(im1)\n else:\n im2 = np.fliplr(im2) \n\n # find edges of both images\n edge1 = findEdge(im1)\n edge2 = findEdge(im2)\n\n # tune edges of both side\n edge1 = tuneEdge(edge1,im1.shape)\n edge2 = tuneEdge(edge2,im2.shape)\n\n # samping from both side\n points1 = contour_sampling(edge1, num)\n points2 = contour_sampling(edge2, num)\n\n # for debugging .........................\n sam_im1 = np.zeros(im1.shape,np.float32)\n for point in points1:\n sam_im1[point[0],point[1]] = 1\n\n sam_im2 = np.zeros(im2.shape,np.float32)\n for point in points2:\n sam_im2[point[0],point[1]] = 1\n \n selem = disk(15)\n dilated1 = ndimage.convolve(sam_im1, selem, mode='constant', cval=0)\n dilated2 = ndimage.convolve(sam_im2, selem, mode='constant', cval=0)\n\n points1 = np.asarray(points1)\n points2 = np.asarray(points2)\n \n # Thin Plate Spline interpolation\n dst = np.zeros(im1.shape)\n # im1 as source\n if opt == 'py': \n tps = TPSpline.TPSpline()\n tps.setCorrespondences(points1, points2)\n dst = tps.warpImage(im1)\n return dst\n\n if opt == 'c':\n print \"Please run the interpolation with C++ exe file!\"\n print \"./TPSpline /home/yanbin/Tomosynthesis/libs/TPSpline/test/ps.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/pd.txt /home/yanbin/Tomosynthesis/libs/TPSpline/test/5016_test.tif /home/yanbin/Tomosynthesis/libs/TPSpline/test/dst.tif\"\n np.savetxt(outputPath + 'ps.txt', points1, '%d', delimiter=' ') # X is an array\n 
np.savetxt(outputPath + 'pd.txt', points2, '%d', delimiter=' ') # X is an array\n tiffLib.imsave(outputPath + 'im1.tif',im1)\n return None", "def concat_3dimages(imga, imgb, xoffset=0, yoffset=0, zoffset=0,\n transpose=True, ontop=True, center_offset=True,\n adjust_z=(0, 1)):\n if transpose:\n print(\"Transpose images\")\n imga = np.transpose(imga, axes=(0, 2, 1))\n imgb = np.transpose(imgb, axes=(0, 2, 1))\n\n max_dim = np.maximum.reduce([imga.shape, imgb.shape])\n\n center_a = np.array(np.divide(imga.shape, 2), dtype=int)\n center_b = np.array(np.divide(imgb.shape, 2), dtype=int)\n offset = (abs(zoffset), abs(yoffset), abs(xoffset))\n\n if center_offset:\n new_offset = np.subtract(center_a, np.add(center_b, offset))\n else:\n new_offset = np.array(offset)\n\n if (max_dim == imgb.shape).all():\n tmp = np.copy(imgb)\n imgb = np.copy(imga)\n imga = np.copy(tmp)\n ontop = toggle(ontop)\n xoffset *= -1\n yoffset *= -1\n zoffset *= -1\n\n new_offset[new_offset > 0] = 0\n center_new = np.array(np.divide(max_dim, 2), dtype=int)\n new_img = np.full(np.add(max_dim, np.abs(new_offset)), np.nan)\n\n Sa0 = slice(int(center_new[0] - imga.shape[0]/2 + 0.5),\n int(center_new[0] + imga.shape[0]/2 + 0.5))\n Sa1 = slice(int(center_new[1] - imga.shape[1]/2 + 0.5),\n int(center_new[1] + imga.shape[1]/2 + 0.5))\n Sa2 = slice(int(center_new[2] - imga.shape[2]/2 + 0.5),\n int(center_new[2] + imga.shape[2]/2 + 0.5))\n Sb0 = slice(int(center_new[0] + abs(zoffset) - imgb.shape[0]/2 + 0.5),\n int(center_new[0] + abs(zoffset) + imgb.shape[0]/2 + 0.5))\n Sb1 = slice(int(center_new[1] + abs(yoffset) - imgb.shape[1]/2 + 0.5),\n int(center_new[1] + abs(yoffset) + imgb.shape[1]/2 + 0.5))\n Sb2 = slice(int(center_new[2] + abs(xoffset) - imgb.shape[2]/2 + 0.5),\n int(center_new[2] + abs(xoffset) + imgb.shape[2]/2 + 0.5))\n\n xdir = np.sign(xoffset)\n ydir = np.sign(yoffset)\n zdir = np.sign(zoffset)\n\n if ydir == 0:\n ydir = 1\n if xdir == 0:\n xdir = 1\n if zdir == 0:\n zdir = 1\n\n imga = imga[::zdir, ::ydir, ::xdir]\n imgb = imgb[::zdir, ::ydir, ::xdir]\n\n if adjust_z:\n for ix in adjust_z:\n top_img = 1 * new_img[ix]\n top_img[Sa1, Sa2] = imga[ix]\n top_img[Sb1, Sb2] = imgb[ix]\n low_img = 1 * new_img[ix]\n low_img[Sb1, Sb2] = imgb[ix]\n low_img[Sa1, Sa2] = imga[ix]\n\n diff = top_img - low_img\n m = np.nanmean(diff)\n s = np.nanstd(diff)\n mask = np.abs(diff) < m + s\n diff[mask] = np.nan\n add = np.nanmean(diff)\n\n print(add)\n\n imgb[ix] -= add\n\n print(\"new_img shape: \", new_img.shape)\n\n if ontop:\n new_img[Sa0, Sa1, Sa2] = imga\n new_img[Sb0, Sb1, Sb2] = imgb\n else:\n new_img[Sb0, Sb1, Sb2] = imgb\n new_img[Sa0, Sa1, Sa2] = imga\n\n if transpose:\n print(\"Transpose back\")\n return np.transpose(new_img[::zdir, ::ydir, ::xdir], axes=(0, 2, 1))\n else:\n return new_img[::zdir, ::ydir, ::xdir]" ]
[ "0.7282096", "0.634349", "0.6294025", "0.6291031", "0.62548995", "0.62249696", "0.62239516", "0.6202705", "0.61410165", "0.6133832", "0.6100082", "0.608749", "0.6062438", "0.60573244", "0.6006357", "0.5999533", "0.59512717", "0.5941091", "0.59402233", "0.5899524", "0.58825576", "0.58824116", "0.5858907", "0.5797684", "0.5791337", "0.57752454", "0.5772077", "0.5762456", "0.57578975", "0.5754086" ]
0.70631665
1
rotates then stretches or compresses an image only along the xaxis
def transformImage(img, xfactor, angle=0, msg=False): if xfactor > 1.0: mystr = "_S" else: mystr = "_C" if msg is True: if xfactor > 1: apDisplay.printMsg("stretching image by "+str(round(xfactor,3))) else: apDisplay.printMsg("compressing image by "+str(round(xfactor,3))) ### image has swapped coordinates (y,x) from particles transMat = numpy.array([[ 1.0, 0.0 ], [ 0.0, 1.0/xfactor ]]) #print "transMat\n",transMat #apImage.arrayToJpeg(img, "img"+mystr+".jpg") stepimg = ndimage.rotate(img, -1.0*angle, mode='reflect') stepimg = apImage.frame_cut(stepimg, img.shape) #apImage.arrayToJpeg(stepimg, "rotate"+mystr+".jpg") newimg = ndimage.affine_transform(stepimg, transMat, mode='reflect') #apImage.arrayToJpeg(newimg, "last_transform"+mystr+".jpg") return newimg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform(self, previousimage):", "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def _image_transform(self, img, source, title):\n conf = source.conf[title]\n \n xmin = conf.get('xmin', 0)\n ymin = conf.get('ymin', 0)\n\n xmax = img.shape[-1] + xmin\n ymax = img.shape[-2] + ymin\n if \"xmax\" in conf:\n if(conf['xmax'] <= xmin):\n logging.warning(\"xmax <= xmin for title %s on %s. Ignoring xmax\", title, source.name())\n else:\n xmax = conf['xmax']\n if \"ymax\" in conf:\n if(conf['ymax'] <= ymin):\n logging.warning(\"ymax <= ymin for title %s on %s. Ignoring xmax\", title, source.name())\n else:\n ymax = conf['ymax']\n\n \n translate_transform = QtGui.QTransform().translate(ymin, xmin)\n\n # The order of dimensions in the scale call is (y,x) as in the numpy\n # array the last dimension corresponds to the x.\n scale_transform = QtGui.QTransform().scale((ymax-ymin)/img.shape[-2],\n (xmax-xmin)/img.shape[-1])\n \n #rotate_transform = QtGui.QTransform()\n #if source.data_type[title] == 'image':\n # if \"angle\" in conf:\n # rotate_transform = QtGui.QTransform(numpy.cos(conf[\"angle\"]), numpy.sin(conf[\"angle\"]), -numpy.sin(conf[\"angle\"]), numpy.cos(conf[\"angle\"]), 0, 0)\n\n transpose_transform = QtGui.QTransform()\n if source.data_type[title] == 'image':\n transpose_transform *= QtGui.QTransform(0, 1, 0,\n 1, 0, 0,\n 0, 0, 1)\n if(self.settingsWidget.ui.transpose.currentText() == 'Yes' or\n (self.settingsWidget.ui.transpose.currentText() == 'Auto' \n and \"transpose\" in conf)):\n transpose_transform *= QtGui.QTransform(0, 1, 0,\n 1, 0, 0,\n 0, 0, 1)\n \n transform = scale_transform * translate_transform * transpose_transform\n #transform = scale_transform * translate_transform * rotate_transform * transpose_transform\n \n # print('|%f %f %f|' % (transform.m11(), transform.m12(), transform.m13()))\n # print('|%f %f %f|' % (transform.m21(), transform.m22(), transform.m23()))\n # print('|%f %f %f|' % (transform.m31(), transform.m32(), transform.m33()))\n return transform", "def _augment(img):\r\n return flip(img, axis=2)", "def rotateIMG(self):\n self.blit_image = pygame.transform.rotate(self.blit_image, self.blitHeading - 45)\n self.rect = self.blit_image.get_rect()\n self.rect.center = (int(self.pos.x), int(self.pos.y))", "def _augment(img):\n return flip(img, axis=2)", "def rotateImage(self):\n self.cnvImgOrig.rotate(\"./images/origPic.tiff\")\n self.cnvImgTest.rotate(\"./images/testPic.tiff\")", "def _spin(self):\n center= self.rect.center\n self.dizzy= self.dizzy + 10 #12\n if self.dizzy >= 360:\n self.dizzy = 0\n self.image = self.original\n else:\n rotate= pygame.transform.rotate\n self.image= rotate(self.original, self.dizzy)\n self.rect= self.image.get_rect(center= center)", "def _spin(self):\n center = self.rect.center\n self.dizzy += 12 # rotate 12 degree clockwise\n\n if self.dizzy >= 360:\n self.dizzy = 0\n self.image = self.original # reset the image to its original ones after rotated\n else:\n self.image = pygame.transform.rotate(self.original, self.dizzy)\n\n self.rect = self.image.get_rect()\n self.rect.center = center # make sure the image would not move when spinning", "def preprocess(self, image):\n if self.rotate == 0:\n return image\n\n angle = self.rotate * -90\n return image.rotate(angle, expand=True).crop((0, 0, self._w, self._h))", "def _rotate_image_90(self, img: ndarray, k: int) -> 
ndarray:\n if img.shape[0] < img.shape[1]:\n self.y = np.rot90(img, k)\n return self.y\n else:\n return img", "def flip(self, horizontally):\n\t\tself.currentPixbuf = self.currentPixbuf.flip(horizontally)\n\t\tself.scaleCache[1] = 0\n\t\tgc.collect()", "def img_resize_rotate(file):\n orig_file = source_dir + file \n new_file = output_dir + file\n #print(\"source {} output {}\".format(orig_file, new_file))\n \n # Image.open('newauctionsheet.jpg').convert(mode=\"L\").show()\n img = Image.open( orig_file).resize((128,128)).rotate(90)\n new_img = img.convert('RGB').resize((128,128))\n new_img.save(output_dir + \"/\" + file, format=\"JPEG\")", "def rotate(self):\r\n self.rot = (self.vel.y * -3)\r\n if self.rot < -90:\r\n self.rot = -90\r\n \r\n new_image = pg.transform.rotate(self.bird_sprites[self.sprite_frame], self.rot)\r\n old_center = self.rect.center\r\n self.image = new_image\r\n self.rect = self.image.get_rect()\r\n self.rect.center = old_center\r\n # self.animate()\r", "def adjust(self, image):\n ...", "def update(self):\r\n self.__calculate_position()\r\n self.__calculate_angle()\r\n self.image = pygame.transform.rotate(self.origin_image, self.angle)", "def image_rotation(x):\n rands = tf.truncated_normal([tf.shape(x)[0]], stddev=0.05)\n return images_rotate(x, rands, interpolation='BILINEAR')", "def rotate(self, angle):\n\t\tself.currentPixbuf = self.currentPixbuf.rotate_simple(angle)\n\t\tself.scaleCache[1] = 0\n\t\tgc.collect()\n\t\tself.autoScale()", "def rotate(self):\r\n # Rotate the image.\r\n self.image = pg.transform.rotozoom(self.orig_image, -self.angle, 1)\r\n # Rotate the offset vector.\r\n offset_rotated = self.offset.rotate(self.angle)\r\n print(\"offset_rotated:\", offset_rotated)\r\n # Create a new rect with the center of the sprite + the offset.\r\n self.rect = self.image.get_rect(center=self.pos+offset_rotated)", "def mirrorImage(self):\n\n im = Image.open(self.ActivePhoto)\n out = im.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n out.save(self.ActivePhoto)\n self.photo.setPixmap(QtGui.QPixmap(self.ActivePhoto))\n print (\"Flipped image\")", "def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)", "def zoom_augmentation():\n # Get the width and the height of the zoomed version\n x_len, y_len = np.random.randint(250, 350, size=2)\n # Get left upper ,right and lower bound of the pixels in the original image\n left = np.random.randint(x_size-x_len)\n upper = np.random.randint(y_size-y_len)\n right, lower = left + x_len, upper+y_len\n # Crops the box and resizes it to the original image size\n box = (left, upper, right, lower)\n return lambda image: image.transform(image.size, Image.EXTENT, box)", "def rotate(self):\n tmp = self.width\n self.width = self.height\n self.height = tmp\n self.rotated = not self.rotated", "def rotatedView(img, angle, enlarge=True, extend=Views.extendBorder):\n cx = img.dimension(0) / 2.0\n cy = img.dimension(1) / 2.0\n toCenter = AffineTransform2D()\n toCenter.translate(-cx, -cy)\n rotation = AffineTransform2D()\n # Step 1: place origin of rotation at the center of the image\n rotation.preConcatenate(toCenter)\n # Step 2: rotate around the Z axis\n rotation.rotate(radians(angle))\n # Step 3: undo translation to the center\n rotation.preConcatenate(toCenter.inverse())\n rotated = RV.transform(Views.interpolate(extend(img),\n NLinearInterpolatorFactory()), rotation)\n if enlarge:\n # Bounds:\n bounds = repeat((sys.maxint, 0)) # initial upper- and lower-bound values \n # for min, 
max to compare against \n transformed = zeros(2, 'f')\n for corner in product(*zip(repeat(0), Intervals.maxAsLongArray(img))):\n rotation.apply(corner, transformed)\n bounds = [(min(vmin, int(floor(v))), max(vmax, int(ceil(v))))\n for (vmin, vmax), v in zip(bounds, transformed)]\n minC, maxC = map(list, zip(*bounds)) # transpose list of 2 pairs\n # into 2 lists of 2 values\n imgRot = Views.zeroMin(Views.interval(rotated, minC, maxC))\n else:\n imgRot = Views.interval(rotated, img)\n return imgRot", "def TranslateX(img: Image, magnitude: float) -> Image:\n return img.transform(\n img.size,\n PIL.Image.AFFINE,\n (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),\n fillcolor=FILLCOLOR,\n )", "def draw_image(self):\n self.PDF.saveState()\n self.PDF.scale(1, -1)\n # self.PDF.drawImage(\n # LOGO, 490, -78, width=80, preserveAspectRatio=True, mask=\"auto\"\n # )\n self.PDF.restoreState()", "def image_augmentation(img):\n return np.fliplr(img)", "def shift_image(image, tx, ty, row_axis, col_axis, channel_axis, fill_mode='constant'):\n \n image = apply_affine_transform(image,\n tx=tx,\n ty=ty,\n row_axis=row_axis,\n col_axis=col_axis,\n channel_axis=channel_axis,\n fill_mode=fill_mode,\n cval=0.0,\n order=1)\n return image", "def _rotate(self, angle):\n self.rotatedImage = pygame.transform.rotozoom(self.needle, angle, 1.0)\n self.rotatedImageRectangle = self.rotatedImage.get_rect()\n\n # compensate for rotation of needle\n self.rotatedImageRectangle.center = (self.needlePos)\n self.rotatedImageRectangle.center += np.array([np.cos(math.radians(angle)) * self.needleOffset[0],\n -np.sin(math.radians(angle)) * self.needleOffset[1]])\n\n # blit images\n self._blit_images()", "def positionZoomed(self):\n rect_size = self._image.get_rect()\n\n if rect_size.width > 720:\n self.x = (int((720 - rect_size.width ) / 2))\n\n if rect_size.height > 720:\n self.y = (int((720 - rect_size.height) / 2))" ]
[ "0.65015787", "0.64518785", "0.6217382", "0.6127605", "0.60567856", "0.60441583", "0.6004752", "0.59420073", "0.59081256", "0.5899423", "0.58687973", "0.584643", "0.5801105", "0.57971156", "0.57939214", "0.5779098", "0.57559", "0.57457566", "0.5745267", "0.5711234", "0.5704889", "0.5703567", "0.56845725", "0.5680351", "0.56756824", "0.5671185", "0.56419826", "0.5638396", "0.5600632", "0.55943495" ]
0.6457804
1
Checks if name either starts with '/' or it is the last component of a stream.
def check_name(self, name: str): if name[0] == "/" or self.check_end_streaming(name): return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_streaming(self, arg: str):\n if not arg:\n return False\n elif arg.startswith(\"sdo:\"):\n print(\"[check_streaming] File is for streaming\")\n tmp_list = arg.splitlines()\n tmp_list.pop(0)\n for x in tmp_list:\n if self.check_name(x) is False:\n return False\n return True\n else:\n return False", "def name_check(dirname):\r\n\tif dirname[-1] == \" \":\r\n\t\tdirname = dirname[:-1]\r\n\tif dirname[-1] != \"/\":\r\n\t\tdirname += \"/\"\r\n\treturn dirname", "def is_basic_name(name):\n if name is None:\n raise AdasDBError(\"Invalid name '%s'.\" % name)\n return name.find(PATH_SEPARATOR)", "def is_absolute_name(name):\n if name is None:\n raise AdasDBError(\"Invalid name '%s'.\" % name)\n return name.startswith(PATH_SEPARATOR)", "def is_path(path_or_stream):\n return isinstance(path_or_stream, anyconfig.compat.STR_TYPES)", "def validate_path_or_name(path_or_name: str, is_folder: bool=False) -> str:\n\n if is_folder:\n assert path_or_name.endswith('/')\n else:\n assert not path_or_name.endswith('/')\n\n return path_or_name", "def is_stream(self, stream_name: str) -> bool:\n return self._path_exist(stream_name=stream_name)", "def hasSuffix(self, s):\n node, off = self.followPath(s)\n if node is None:\n return False # fell off the tree\n if off is None:\n # finished on top of a node\n return '$' in node.out\n else:\n # finished at offset 'off' within an edge leading to 'node'\n return node.lab[off] == '$'", "def _check_name(self, symbol):\n if symbol.type == self.scanner.NAME:\n return True\n else:\n return False", "def __is_directory_name(filename):\n return filename[-1] == '/'", "def names_singleton(self):\r\n if self.stream:\r\n return True\r\n else:\r\n return os.path.isfile(self.object_name)", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def is_real_name(name):\n return name.strip(\"<> \") in names_set", "def is_real_name(name):\n name = \"<\" + name.strip() + \">\"\n for real_name in names:\n if name == real_name:\n return True\n return False", "def is_filename(name):\n test = re.search(\"[A-Za-z0-9_-]+\\.xml$\", name)\n if test:\n return True\n else:\n return False", "def is_path(self, s):\n return True", "def is_node_name_ok(node_name):\n # 节点名不可包含`/`特殊字符\n node_name = node_name.strip('/')\n return node_name.find('/') == -1", "def names_container(self):\r\n if not self.stream:\r\n return os.path.isdir(self.object_name)\r\n else:\r\n return False", "def is_shortcut_name(name: str) -> bool:\n return name.count(config.name_separator) == 1", "def check_filename(basename):\n return len(basename) <= MAXIMUM_FILENAME_LENGTH", "def hasSubstring(self, s):\n node, off = self.followPath(s)\n return node is not None", "def command_basename_startswith(self, op):\n return self.__command_basename.startswith(op)", "def verify_name(name):\n try:\n if name.index(' '):\n return False\n except ValueError:\n return True", "def has_name(self):\n return self.unpack_word(0x2) != 0", "def _is_file_valid(name: str) -> bool:\n return not name.startswith(\".\")", "def test_invalid_stream_rename(self) -> None:\n user_profile = self.example_user(\"hamlet\")\n self.login_user(user_profile)\n stream = self.subscribe(user_profile, \"stream_name1\")\n do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)\n # Check for empty name\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"\"})\n self.assert_json_error(result, \"Stream name can't be empty!\")\n # Check for long name\n result = 
self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"a\" * 61})\n self.assert_json_error(result, \"Stream name too long (limit: 60 characters).\")\n # Check for Cc characters\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"test\\n\\rname\"})\n self.assert_json_error(result, \"Invalid character in stream name, at position 5!\")\n # Check for Cn characters\n result = self.client_patch(f\"/json/streams/{stream.id}\", {\"new_name\": \"test\\uFFFEame\"})\n self.assert_json_error(result, \"Invalid character in stream name, at position 5!\")", "def _validate_name(name):\r\n\tif HOST_NAME != name and len(name) > 0 and ZOOM_PHRASES[0] not in name and name not in WAITING_ROOM:\r\n\t\treturn True\r\n\treturn False", "def _check_name(self):\n\t\tpass", "def _is_valid_social_username(value):\n return '/' not in value", "def _assert_valid_name(name, container):\n container.file.name_validation(container.directory, name)" ]
[ "0.6286656", "0.61531717", "0.6124441", "0.6101799", "0.6014477", "0.598726", "0.58650726", "0.5787407", "0.57717556", "0.57680005", "0.5682801", "0.5670426", "0.5670426", "0.56299", "0.5595348", "0.55865085", "0.5574163", "0.55718523", "0.55560887", "0.5553138", "0.553181", "0.5497996", "0.5484245", "0.54822963", "0.54726017", "0.5453865", "0.5445702", "0.54154485", "0.5414466", "0.5406787" ]
0.81663287
0
Checks if the file is in the streaming format.
def check_streaming(self, arg: str):
    if not arg:
        return False
    elif arg.startswith("sdo:"):
        print("[check_streaming] File is for streaming")
        tmp_list = arg.splitlines()
        tmp_list.pop(0)
        for x in tmp_list:
            if self.check_name(x) is False:
                return False
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_streaming(self) -> bool:\n return self.stream_status == StreamStatus.STREAMING", "def is_stream(self):\r\n return self.stream", "def is_streaming(self):\n if self.dev:\n e = ctypes.POINTER(rs_error)()\n is_streaming = lrs.rs_is_device_streaming(self.dev, ctypes.byref(e))\n _check_error(e)\n return bool(is_streaming)\n else:\n return False", "def _HasStream(self, stream_name):\n try:\n file_object = self._zipfile.open(stream_name, 'r')\n except KeyError:\n return False\n\n file_object.close()\n return True", "def is_stream(self, stream_name: str) -> bool:\n return self._path_exist(stream_name=stream_name)", "def can_open_stream(self):\n return True", "def is_streaming_supported(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_streaming_supported\")", "def channel_is_streaming(self, channel_name = ''): \n \n self.get_stream(channel_name)\n stream_json = self.stream['stream']\n if stream_json is None:\n return False\n else:\n print(stream_json['channel']['name'])\n print(stream_json['game'])\n print(stream_json['viewers'])\n print(stream_json['created_at'])\n return True", "def is_open(self):\n return self.stream.is_open()", "def is_readable(self, content_type):\n return False", "def is_file(self):\n return self.type == \"file\"", "def is_file_parsable(cls, filepath, beginning=None):\n\n # TODO can we redirect the loc calls from here so they aren't shown\n # since we're just testing if parsable and an error message \n # would give the wrong impression\n # or restructure the way our logger works, maybe accept a logger in\n # the subsequent calls\n\n super().is_file_parsable(filepath)\n\n # MAGIC USAA doesn't use a header and the first line will do\n lines = [l for l in cls.yield_header(filepath, rows=1)]\n try:\n first_line = lines[0]\n except IndexError:\n logging.error(\"file line count is 0: %s\" % filepath)\n return False\n is_parsable = cls._run_parse_checks(first_line, filepath)\n\n # NOTE b/c USAA does not use a header, check a few properties of the data\n return is_parsable", "def detect(stream):\n try:\n parse(stream)\n return True\n except (xml.parsers.expat.ExpatError, TypeError):\n return False", "def parser_file(self, raw_file):\n try:\n with io.open(raw_file, 'r') as fd:\n #u_print(\" Processor.parser_file() - Streaming file {}\".format(raw_file))\n return self.parse_stream(fd)\n\n except IOError as e:\n u_print(\" ERROR I/O ({0}): {1}\".format(e.errno, e.strerror))\n return False\n\n except Exception as e:\n u_print(\" Unexpected error: \".format(e))\n raise", "def valid_video_file(file):\r\n return file.lower().endswith(('.ogg', '.webm'))", "def is_device_streaming(self, device_id):\n e = ctypes.POINTER(rs_error)()\n lrs.rs_get_device.restype = ctypes.POINTER(rs_device)\n dev = lrs.rs_get_device(self.ctx, device_id, ctypes.byref(e))\n _check_error(e)\n is_streaming = lrs.rs_is_device_streaming(dev, ctypes.byref(e))\n _check_error(e)\n return is_streaming", "def __check_for_video_file(self):\n formats = ('avi', 'mpg', 'mpeg', 'mp4')\n if os.path.splitext(self.args.input.name)[-1] in (\".%s\" % ext for ext in formats):\n # we got a valid (at least according to extension) file\n pass\n else:\n logging.critical(\"Input is not a video file. 
Only supports %s\" % \", \".join(formats))\n sys.exit(10)", "def readstream(self, stream):\n\n self._reset()\n\n try:\n # tokenize input stream\n self._lexer = SettingLexer()\n self._lexer.readstream(stream)\n\n # parse tokens into AST\n self._parse()\n return True\n\n except IOError:\n self._reset()\n return False", "def _is_livestream(url):\n # XXX: Not a very robust way for checking the source protocol.\n return urlparse(url).scheme.lower() == \"rtsp\"", "def can_be_transformed(\n self, object: Union[io.BufferedIOBase, io.RawIOBase, io.BytesIO, AnyPDFType]\n ) -> bool:\n return isinstance(object, Stream)", "def is_file(self):\n return self.tipo == 'file' or self.tipo is None", "def is_valid(path):\n with open(path, 'rb') as handle:\n size = os.fstat(handle.fileno()).st_size\n try:\n mgz.header.parse_stream(handle)\n mgz.body.meta.parse_stream(handle)\n while handle.tell() < size:\n mgz.body.operation.parse_stream(handle)\n print('valid')\n return True\n except ConstructError:\n print('invalid')\n return False", "def _IsFile(self, file_message):\n message_type = file_message.message_type\n return (message_type == FileMessage.FILE_DOWNLOAD or\n message_type == FileMessage.FILE_UPLOAD or\n message_type == FileMessage.FILE_CLOUD_COPY or\n message_type == FileMessage.FILE_DAISY_COPY or\n message_type == FileMessage.FILE_LOCAL_COPY or\n message_type == FileMessage.FILE_REWRITE or\n message_type == FileMessage.FILE_HASH)", "def _check_stream_in_classic(self, fe_commit):\n if self._current_branch.stream_name:\n return\n\n depot_re = re.compile(r'^//([^/]+)/([^/]+)/.*$')\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_path(gwt_path).to_depot()\n m = depot_re.match(depot_path)\n if m:\n depot = m.group(1)\n if depot in self.stream_depots:\n stream = '//{}/{}'.format(m.group(1), m.group(2))\n human_msg = (\n _(\"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths in stream '{stream}' are read-only for branch '{b}'.\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , stream = stream\n , b = self._current_branch.branch_id ))\n raise PreflightException(human_msg)", "def isfile(line):\n return line and (line.strip()[:3] == \"FHS\" or isbatch(line))", "def closed(self):\n return self._stream is None", "def can_handle(file_io):\r\n\r\n try:\r\n file_io.seek(0)\r\n parsed = etree.parse(file_io)\r\n except XMLSyntaxError:\r\n # IF etree can't parse it, it's not our file.\r\n return False\r\n can_handle = False\r\n can_handle = DelXMLImporter._is_delicious_format(parsed,\r\n can_handle)\r\n\r\n # make sure we reset the file_io object so that we can use it again\r\n return can_handle", "def is_closed(self) -> bool:\n if __debug__:\n logger.debug(\"Checking if stream %s is closed\", str(self.id))\n\n # Ask for stream status\n req = StreamStatusRequest(self.id)\n DistroStreamClientHandler.request(req)\n\n req.wait_processed()\n error = req.get_error_code()\n if error != 0 and __debug__:\n logger.error(\"ERROR: Cannot retrieve stream status\")\n logger.error(\" - Internal Error Code: %s\", str(error))\n logger.error(\" - Internal Error Msg: %s\", str(req.get_error_msg()))\n\n return str2bool(req.get_response_msg())", "def maybe_text(instream):\n if instream.mode == 'w':\n # output binary streams *could* hold text\n # (this is not about the file type, but about the content)\n return True\n try:\n sample = instream.peek(_TEXT_SAMPLE_SIZE)\n except EnvironmentError:\n return None\n if 
set(sample) & set(_NON_TEXT_BYTES):\n logging.debug(\n 'Found unexpected bytes: identifying unknown input stream as binary.'\n )\n return False\n try:\n sample.decode('utf-8')\n except UnicodeDecodeError as err:\n # need to ensure we ignore errors due to clipping inside a utf-8 sequence\n if err.reason != 'unexpected end of data':\n logging.debug(\n 'Found non-UTF8: identifying unknown input stream as binary.'\n )\n return False\n logging.debug('Tentatively identifying unknown input stream as text.')\n return True", "def isFileObject(fileObj):\n if sys.version_info[0] == 2:\n return isinstance(fileObj, file)\n else:\n # for python 3:\n # has read() method for:\n # io.IOBase\n # io.BytesIO\n # io.StringIO\n # io.RawIOBase\n return hasattr(fileObj, 'read')" ]
[ "0.7130998", "0.710802", "0.6719451", "0.6635441", "0.6616881", "0.6497147", "0.6312404", "0.6073387", "0.60378087", "0.5994497", "0.5983883", "0.5969729", "0.5962305", "0.5950484", "0.5886839", "0.5875284", "0.58541244", "0.58405656", "0.58238244", "0.5818902", "0.57951295", "0.57700735", "0.5763222", "0.5737106", "0.5727938", "0.5714931", "0.57057923", "0.569859", "0.56964505", "0.5678413" ]
0.76708
0
Checks if the interest is in the buffer and returns the content object if it is.
def check_buffer(self, interest_name: str):
    if str(interest_name) in self.get_next_buffer:
        return self.get_next_buffer[str(interest_name)]
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n result = buffer_output.content\n else:\n resulting_content_object = self.get_content_from_queue_from_lower()\n if isinstance(resulting_content_object, Interest):\n print(\"[get_next_content] Resulting object is interest:\", resulting_content_object.name, \", instead of content object with name:\", next_name)\n else:\n print(\"[get_next_content] Resulting content object(desired name, resulting name):\", next_name, resulting_content_object.name)\n # Gets stored in buffer if interest doesn't correspond to needed result\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n while is_content_correct is False:\n #print(\"[get_next_content] Content wasn't correct\", resulting_content_object.name)\n buffer_output = self.check_buffer(next_name)\n # If desired interest is in buffer return it and break out of while loop\n if buffer_output:\n resulting_content_object = buffer_output\n break\n else:\n # Get content out of queue_from_lower and check if it is correct -> until correct one is returned\n #print(\"[get_next_content] Content wasn't correct and not avaiable in the buffer.\")\n resulting_content_object = self.get_content_from_queue_from_lower()\n #print(\"[get_next_content] Resulting content object:\", resulting_content_object.name, next_name)\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n\n result = resulting_content_object.content\n result = self.stream_part(result, resulting_content_object)\n return result", "def has_content(self):\n raise NotImplementedError()", "def fetch_bmark_content(bid):\r\n trans = transaction.begin()\r\n\r\n if not bid:\r\n raise Exception('missing bookmark id')\r\n bmark = Bmark.query.get(bid)\r\n if not bmark:\r\n raise Exception('Bookmark not found: ' + str(bid))\r\n hashed = bmark.hashed\r\n\r\n try:\r\n read = ReadUrl.parse(hashed.url)\r\n except ValueError:\r\n # We hit this where urllib2 choked trying to get the protocol type of\r\n # this url to fetch it.\r\n logger.error('Could not parse url: ' + hashed.url)\r\n logger.error('exc')\r\n read = None\r\n\r\n if read:\r\n logger.debug(read)\r\n logger.debug(read.content)\r\n\r\n logger.debug(\"%s: %s %d %s %s\" % (\r\n hashed.hash_id,\r\n read.url,\r\n len(read.content) if read.content else -1,\r\n read.is_error(),\r\n read.status_message))\r\n\r\n if not read.is_image():\r\n if not bmark.readable:\r\n bmark.readable = Readable()\r\n\r\n bmark.readable.content = read.content\r\n else:\r\n if not bmark.readable:\r\n bmark.readable = Readable()\r\n bmark.readable.content = None\r\n\r\n # set some of the extra metadata\r\n bmark.readable.content_type = read.content_type\r\n bmark.readable.status_code = read.status\r\n bmark.readable.status_message = read.status_message\r\n trans.commit()\r\n fulltext_index_bookmark.delay(\r\n bid,\r\n read.content if read else None)\r\n else:\r\n logger.error(\r\n 'No readable record for bookmark: ',\r\n str(bid), str(bmark.hashed.url))\r\n\r\n # There was a failure reading the thing.\r\n bmark.readable = Readable()\r\n bmark.readable.status = '900'\r\n bmark.readable.status_message = (\r\n 'No readable record '\r\n 'during existing processing')\r\n trans.commit()", "def hasContents():", "def get_content_object(self):\r\n return 
self.content_object", "def test_given_content(self):\r\n\r\n file_path = os.path.dirname(__file__)\r\n html_content = open(os.path.join(file_path, 'readable_sample.html'))\r\n\r\n read = ReadContent.parse(html_content)\r\n\r\n self.assertTrue(\r\n read.status == 1, \"The status is 1: \" + str(read.status))\r\n self.assertTrue(not read.is_image(), \"The content is not an image\")\r\n self.assertTrue(read.content is not None, \"Content should not be none\")\r\n self.assertTrue(\r\n 'Bookie' in read.content,\r\n u\"The word Bookie is in the content: \" + unicode(read.content))", "def check_for_correct_content(self, content_object: Content, content_name: str):\n if isinstance(content_name, Name):\n content_name = content_name.to_string() # inner comp is a name instead of a string\n # outter comp starts with sdo:\\n\n elif content_name.startswith(\"sdo:\\n\"):\n content_name = content_name[5:]\n\n content_object_name_as_string = content_object.name.components_to_string()\n if content_name == content_object_name_as_string:\n return True\n else:\n # Content is not from requested interest\n # if content is from this computation, store in buffer else put in self.queue_from_lower\n if content_object_name_as_string in self.sent_interests:\n self.get_next_buffer[content_object_name_as_string] = content_object\n else:\n self.queue_from_lower.put(content_object)\n return False", "def get_contents(self):\n return self.contents", "def has_contents(self):\n return len(self.byteruns())>0", "def read_content(self):\n pass", "def getContent(self) -> object:\n ...", "def get_content_from_queue_from_lower(self):\n queue_from_lower_entry = self.queue_from_lower.get()\n if isinstance(queue_from_lower_entry, list):\n if isinstance(queue_from_lower_entry[1], Nack):\n print(\"NACK:\", queue_from_lower_entry[1].interest, queue_from_lower_entry[1].reason)\n return queue_from_lower_entry[1]\n else:\n if isinstance(queue_from_lower_entry, Nack):\n print(\"NACK:\", queue_from_lower_entry.interest, queue_from_lower_entry.reason)\n return queue_from_lower_entry", "def _getContent(self, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):\r\n return None", "def get_content_object(self, **kwargs):\r\n return self.content_object", "def _read_into_buffer(self):\n raise NotImplementedError()", "def get_content(self):\n return self.content", "def get_contents(self):\n with closing(self.open()) as handle:\n return handle.read()", "def is_readable(self, content_type):\n return False", "def get_at_content(gc):\n return 1 - gc", "def get_at_content(gc):\n return 1 - gc", "def __contains__(self, item):\n return item in self.contents", "def contentRaw(request):\n paste = Paste.get(request.matchdict['idContent'])\n # TODO type/mime\n return paste.content", "def decode_content(self, raw_content):\n try:\n obj = pickle.loads(raw_content)\n return obj\n except Exception:\n raise IkatsException(\"Failed to load picked object. 
Context={}\".format(str(self)))", "def _retrieve_blob(self, object_key):\n return self.s3_resource.Object(self.CVE_BUCKET, object_key).get()['Body'].read()", "def getContent(self):\n current_content = self.content\n return current_content", "def peek(self):\n if len(self) == 0:\n if self.none_for_empty:\n return None\n raise ValueError(\"Buffer is empty\")\n return self.buf[self.front]", "def __contains__(self, item: Any) -> bool:\n try:\n return item in self.contents\n except TypeError:\n try:\n return item is self.contents\n except TypeError:\n return item == self.contents # type: ignore", "def __iter__(self):\n return self.contents.__iter__()", "def __iter__(self):\n return self.contents.__iter__()", "def get_content(self):\n return self.__response.content" ]
[ "0.611804", "0.5382398", "0.53668123", "0.53386456", "0.5272251", "0.51945955", "0.5134532", "0.51309985", "0.5036701", "0.501785", "0.49557382", "0.4912878", "0.4908423", "0.48984125", "0.48739424", "0.4873825", "0.48655587", "0.48433942", "0.4822918", "0.4822918", "0.48201054", "0.481292", "0.480439", "0.47856092", "0.47784394", "0.4761647", "0.47616184", "0.47590804", "0.47590804", "0.4749601" ]
0.5786912
1
Checks if the content from content_object corresponds to the interest requested with content_name. If the content is not relevant for this computation, it is put back into self.queue_from_lower.
def check_for_correct_content(self, content_object: Content, content_name: str):
    if isinstance(content_name, Name):
        content_name = content_name.to_string()  # inner comp is a name instead of a string
    # outter comp starts with sdo:\n
    elif content_name.startswith("sdo:\n"):
        content_name = content_name[5:]

    content_object_name_as_string = content_object.name.components_to_string()
    if content_name == content_object_name_as_string:
        return True
    else:
        # Content is not from requested interest
        # if content is from this computation, store in buffer else put in self.queue_from_lower
        if content_object_name_as_string in self.sent_interests:
            self.get_next_buffer[content_object_name_as_string] = content_object
        else:
            self.queue_from_lower.put(content_object)
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n result = buffer_output.content\n else:\n resulting_content_object = self.get_content_from_queue_from_lower()\n if isinstance(resulting_content_object, Interest):\n print(\"[get_next_content] Resulting object is interest:\", resulting_content_object.name, \", instead of content object with name:\", next_name)\n else:\n print(\"[get_next_content] Resulting content object(desired name, resulting name):\", next_name, resulting_content_object.name)\n # Gets stored in buffer if interest doesn't correspond to needed result\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n while is_content_correct is False:\n #print(\"[get_next_content] Content wasn't correct\", resulting_content_object.name)\n buffer_output = self.check_buffer(next_name)\n # If desired interest is in buffer return it and break out of while loop\n if buffer_output:\n resulting_content_object = buffer_output\n break\n else:\n # Get content out of queue_from_lower and check if it is correct -> until correct one is returned\n #print(\"[get_next_content] Content wasn't correct and not avaiable in the buffer.\")\n resulting_content_object = self.get_content_from_queue_from_lower()\n #print(\"[get_next_content] Resulting content object:\", resulting_content_object.name, next_name)\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n\n result = resulting_content_object.content\n result = self.stream_part(result, resulting_content_object)\n return result", "def __continas__ (self, name):\n return name in self.containments", "def __call__(self, q: str = \"\"):\n if q:\n return self.fixed_content in q\n return False", "def _contains_in_self_or_parent(self, name: str) -> bool:\n return name in self", "def is_object_recognition_appropriate(self, name):\n if name not in self.objecttime or (time() - self.objecttime[name] > self.OBJECT_TIMEOUT):\n self.objecttime[name] = time()\n return True", "def already_processed(self, obj):\n\n key = self.get_cache_key(obj)\n return self.simple_cache.has_key(key)", "def check_buffer(self, interest_name: str):\n if str(interest_name) in self.get_next_buffer:\n return self.get_next_buffer[str(interest_name)]\n else:\n return False", "def __contains__(self, name):\n\n return name in self._wdict", "def __contains__(self, item):\n return item in self.contents", "def is_applicable(self, obj: _TransitionTrigger) -> bool:\n return obj.src.startswith(self.src)", "def _is_reserved_name(content_name: str) -> bool:\n return content_name in RESERVED_NAMES", "def search(self, word):\n length = len(word)\n if length == 1:\n for letter in string.ascii_lowercase:\n key = \"{}/{}\".format(1, letter)\n if key in self.origin and letter != word:\n return True\n return False\n\n key = \"{}/{}\".format(len(word), word[0])\n ls = self.origin.get(key, [])\n if len(ls) == 0:\n return False\n\n for origin in ls:\n if self.only_modify_one_char(word, origin):\n return True\n return False", "async def contains(self, ctx, *text):\n search = 100\n if text[-1].isdigit():\n text, search = text[:-1], int(text[-1])\n await self.run_purge(\n ctx, search, lambda m: \" \".join(text).casefold() in m.content.casefold()\n )", "def get_content_from_queue_from_lower(self):\n queue_from_lower_entry = 
self.queue_from_lower.get()\n if isinstance(queue_from_lower_entry, list):\n if isinstance(queue_from_lower_entry[1], Nack):\n print(\"NACK:\", queue_from_lower_entry[1].interest, queue_from_lower_entry[1].reason)\n return queue_from_lower_entry[1]\n else:\n if isinstance(queue_from_lower_entry, Nack):\n print(\"NACK:\", queue_from_lower_entry.interest, queue_from_lower_entry.reason)\n return queue_from_lower_entry", "def __contains__(self, name):\n return name in self._tasks", "def _only_translate_if_referenced(self, object_: SituationObject) -> bool:\n return (\n object_.ontology_node == GROUND\n or object_.ontology_node == LEARNER\n or IS_SPEAKER in object_.properties\n or IS_ADDRESSEE in object_.properties\n )", "def __contains__(self, name):\r\n name = OrderBy(name).bare\r\n for order_by in self:\r\n if order_by.bare == name:\r\n return True\r\n return False", "def contains(name):", "def __contains__(self, rq):\n return rq in self._data", "def __contains__(self, item: Any) -> bool:\n try:\n return item in self.contents\n except TypeError:\n try:\n return item is self.contents\n except TypeError:\n return item == self.contents # type: ignore", "def contained(name, data): # noqa: N805", "def __contains__(self, name: str) -> bool:\n ...", "def __contains__(self, query):\n if not isinstance(query, str): # Checks if the query is entered as a string.\n raise TypeError('The query must be a string')\n if query in self._words:\n return True\n elif query.lower() in self._words:\n return True\n else:\n return False", "def check_for_metatitle(self, interest_name: str):\n if interest_name.endswith(\"/streaming/p*\"):\n return True\n else:\n return False", "def is_like(self, q):\n q = q.lower()\n return q in self.title.lower() or q in self.url.lower() or q in self.media_type.lower()", "def __eq__(self, other):\n contentsmatchfail = False\n equal = False\n for i in self.contents:\n if i in other.contents:\n pass\n else:\n contentsmatchfail = True\n for i in other.contents:\n if i in self.contents:\n pass\n else:\n contentsmatchfail = True\n if self.name == other.name and self.name == other.name and contentsmatchfail == False:\n equal = True\n return equal", "def file_content_has_ransom_keywords(file_content):\n blacklisted_keywords = keywords_pattern()\n words = re.findall(re.compile('\\w+'), file_content.lower())\n blacklisted_words = blacklisted_keywords.intersection(words)\n if len(blacklisted_words) > len(blacklisted_keywords) / 2:\n return True\n else:\n return False", "def filter ( self, name, context ):\n return (name == self.name_last)", "def __can_add_entry_by_name(self, _wiki_entry):\n for ename in self.__exclude_name_list:\n if(_wiki_entry.find(ename) >= 0):\n return False\n return True", "def can_recept(self, text, *args, **kwargs):\n for each_cur in self.flat_norm.keys():\n if each_cur.lower() in text.lower():\n return True\n\n else:\n return False" ]
[ "0.5501636", "0.516063", "0.4942079", "0.48911357", "0.48543528", "0.48305845", "0.48142743", "0.47831565", "0.47767526", "0.47362074", "0.47147942", "0.464042", "0.4610563", "0.45970887", "0.45742327", "0.4517563", "0.4517236", "0.45113727", "0.44908625", "0.44677654", "0.44617477", "0.4425103", "0.44187158", "0.44050232", "0.44002184", "0.4399347", "0.43979216", "0.43741697", "0.43662405", "0.4356921" ]
0.8077127
0
Checks if a metatitle is present (the interest name ends with '/streaming/p*').
def check_for_metatitle(self, interest_name: str):
    if interest_name.endswith("/streaming/p*"):
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_title(self):\n ph = _child(self.__nvXxPr.nvPr, 'p:ph')\n if ph is None:\n return False\n # idx defaults to 0 when idx attr is absent\n ph_idx = ph.get('idx', '0')\n # title placeholder is identified by idx of 0\n return ph_idx == '0'", "def valid_title(self, title):\n if title in self.timers.keys() and isinstance(title, str) and self.timers[title]['count']>0:\n return True\n else:\n return False", "def istitle(self) -> bool:\n pass", "def has_filename(self):\n if self.filename == \"untitled\":\n return False\n else:\n return True", "def handle_title(self, tag, attrs):\n self.title = 'present'", "def validateTitle(title):\n \n if not(title) or not(title.strip()):\n return \"You must supply a title.\"\n else:\n return None", "def check_story_exists(self) -> bool:\n title_check = self._soup.find(\"title\").string\n if title_check == u'FicWad: fresh-picked original and fan fiction':\n return False\n return True", "def is_bad_title(title):\n bad_examples = [\"under construction\", \"test page\", \"redirect\", \"index of\", \"none \", \"expired\", \"coming soon\",\n \"error \", \"domain pending\", \"at directnic\", \"pending validation\", \"website disabled\",\n \"US Zip Code Information\", # verified we need this, urls like 00000.us, 00001.us end up at zipcode.com\n \"domain default page\", \"non-existent domain\", \"v-webs hosting services\",\n \"be back soon\", \"something went wrong\", \"Lunarpages Web Hosting Placeholder Page\",\n \"Félicitations ! Votre domaine a bien été créé chez OVH !\", \"Domaine r&eacute;serv&eacute;\",\n \" - For Sale | Undeveloped\", \"Yahoo&#39;s Aabaco Small Business: Websites, Ecommerce, Email &amp; Local Listings\",\n \"service unavailable\", \"website disabled\", \"404 Not Found\", \"Not Found\", \"Page cannot be found\"\n ]\n for bad_title in bad_examples:\n if bad_title.lower() in title.lower():\n debug(bad_title)\n return hit(bad_title)\n\n exact_matches = [\"web hosting\", \"webhosting\"]\n for ma in exact_matches:\n if title.replace(\" \", \"\").replace(\"\\t\", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\").lower() == ma:\n debug(ma)\n return hit(ma)\n return False", "def song_has_lyrics():\n pass", "def getTitle(self):\n\n # print(self.soupObject.title.string)\n try:\n s = self.soupObject.find(\"meta\", attrs={\"name\": \"twitter:title\"})\n self.title = str(s['content'])\n self.title = self.title.replace(\"/\", \"\")\n self.title = self.title.strip()\n if not self.title:\n s = int(\"deliberateError\")\n\n # except\n except:\n self.title = \"Amazonsubtitles\"\n\n pass", "def dict_is_song(info_dict):\n if \"full album\" in info_dict[\"title\"].lower():\n return False\n if int(info_dict[\"duration\"]) > 7200:\n return False\n return True", "def has_metadata(self):\n if self.mimetype in Config.mimes_metadata:\n return True\n return False", "def _validate_title(self, attribute: attr.Attribute, value: str):\n\n if not isinstance(value, str) or len(value) <= 0:\n raise ValueError(\n f\"Window title must be a non-empty string, received {value!r}\"\n )", "def entry_has_keyword(keyword):\n\tdef filter_function(entry):\n\t\tif entry is None:\n\t\t\treturn False\n\t\ttitle = entry.get('title')\n\t\tif title is None:\n\t\t\treturn False\n\t\tif entry.get('issued') is None:\n\t\t\treturn False\n\t\treturn title.find(keyword) > -1\n\treturn filter_function", "def has_meta(self, meta_name):\n return self.has_meta_class(meta_name) or \\\n self.has_meta_function(meta_name)", "def verifyPageTitle(self, titleToVerify):\n try:\n actualTitle = 
self.getTitle()\n return self.util.verifyTextContains(actualTitle, titleToVerify)\n except:\n self.log.error(\"Failed to get page title\")\n print_stack()\n return False", "def has_story_title(self, title, stories):\n\tfor story in stories:\n\t break\n\telse:\n\t self.fail(\"Story with title '%s' not found\" % title)", "def has_story_title(self, title, stories):\n\tfor story in stories:\n\t break\n\telse:\n\t self.fail(\"Story with title '%s' not found\" % title)", "def is_subtitle(self):\n val = False\n if self.__dict__['codec_type']:\n if self.__dict__['codec_type'] == 'subtitle':\n val = True\n return val", "def SongTitle( path ):\n p = subprocess.Popen( ['ffprobe',path], stderr=subprocess.PIPE )\n\n output = p.communicate()[1].decode()\n if 'Invalid data found' in output:\n return None\n\n # find the first occurance of \"title : stuff\" with any number of spaces.\n res = re.search( r'title\\s+:\\s+([a-zA-Z0-9,\\(\\) ]+)', output )\n\n if res is None:\n return \"\"\n\n ret = res.group(1)\n\n return ret", "def is_subtitle(file):\n return file.lower().endswith(SUBTITLE_EXTENSIONS)", "def titleValidator(self, title):\n if type(title) != str:\n API.abort(400, error_messages[11]['Int_title'])\n\n # check if the contents of title have characters between a-z and A-Z\n elif not re.match(r\"(^[a-zA-Z_]+$)\", title) or title.isspace():\n API.abort(\n 400, error_messages[12]['wrong_format_title'])\n\n return True", "def contained_in_title(word, filename):\n title = get_title(filename)\n if word in title:\n return True\n else:\n return False", "def _get_youtube_data_name(self):\n data = search(self.name, self.disable_kw)\n\n # Handle if the data returned is None\n # That probably means the song wasn't found on YT\n # and we need to skip playing that song.\n if data is None:\n return False\n\n self.title = data.title\n self.URL = data.url\n self.stream_url = grab_link(data.url)\n return True", "def check_streaming(self, arg: str):\n if not arg:\n return False\n elif arg.startswith(\"sdo:\"):\n print(\"[check_streaming] File is for streaming\")\n tmp_list = arg.splitlines()\n tmp_list.pop(0)\n for x in tmp_list:\n if self.check_name(x) is False:\n return False\n return True\n else:\n return False", "def get_video(self):\n if self.parsing_template.video and self.parsing_template.video in self.headline.url:\n return True\n return False", "def test_get_title(double_title, single_title, empty_title):\n assert get_title(double_title) == \"Parton distributions with LHC data\"\n assert get_title(single_title) == \"The Large Hadron Collider\"\n assert get_title(empty_title) == \"\"\n\n no_title_key = {\n \"not_titles\": []\n }\n assert get_title(no_title_key) == \"\"", "def test_extract_twitter_tittle_meta_00(input_, expected):\n actual = regex.match_twitter_title_meta(input_)\n assert actual == expected", "def is_manifest(location):\n return as_posixpath(location).lower().endswith('meta-inf/manifest.mf')", "def check_title(self):\n currenttitle = self.driver.title\n assert self.TITLE in currenttitle, 'Title not expected. Actual: ' + currenttitle + ', Expected: ' + self.TITLE" ]
[ "0.6491945", "0.63269085", "0.63049346", "0.60652703", "0.5991051", "0.5728946", "0.57284164", "0.56637734", "0.56167585", "0.5610711", "0.55762154", "0.55655116", "0.5517774", "0.55089664", "0.5489789", "0.54672796", "0.546499", "0.546499", "0.5458705", "0.5458184", "0.5450383", "0.54456097", "0.5422919", "0.5406029", "0.5385906", "0.53737205", "0.5370091", "0.5361117", "0.53605187", "0.53466845" ]
0.8413392
0
Gets the negative amount of digits after '/streaming/p' in the given name (the negative index at which the part number starts).
def get_amount_of_digits(self, name: str):
    x = -1
    while name[x - 1].isdigit():
        x -= 1
    if name[:x].endswith("/streaming/p"):
        return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def return_episode_num(name):\n return int(name.split(\".\")[0].split(\"ep_\")[1]) # Use split to return only the episode number needed to sort the files in increasing order", "def extract_journal(name):\n match = re.search(\"\\d+\", name)\n if match != None: \n return name[:match.start()], int(name[match.start(): match.end()])\n else: \n return \"\", 0", "def get_following_name(self, name: Name):\n name = str(name)\n amount_of_digits = self.get_amount_of_digits(name)\n number = int(name[amount_of_digits:])\n number += 1\n following_name = name[:amount_of_digits]\n following_name += str(number)\n return following_name", "def _weight_name(self, name):\n return name.split('/')[-1]", "def _GetLastStreamNumber(self, stream_name_prefix):\n last_stream_number = 0\n for stream_name in self._GetStreamNames():\n if stream_name.startswith(stream_name_prefix):\n _, _, stream_number = stream_name.partition('.')\n\n try:\n stream_number = int(stream_number, 10)\n except ValueError:\n raise IOError(\n 'Unsupported stream number: {0:s}'.format(stream_number))\n\n if stream_number > last_stream_number:\n last_stream_number = stream_number\n\n return last_stream_number + 1", "def get_suffix(self):\n if self.device:\n return self.device\n else:\n return '%s%d' % (self.devletters(), self.get_index() + 1)", "def info_player_id(self, playername):\r\n number = 0\r\n name = playername.title().replace(\" \", \"+\")\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/team_news.phtml', \"User-Agent\": user_agent}\r\n req = self.session.get('http://stats.comunio.es/search.php?name=' + playername, headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n for i in soup.find_all('a', {'class', 'nowrap'}):\r\n number = re.search(\"([0-9]+)-\", str(i)).group(1)\r\n break # Solo devuelve la primera coincidencia\r\n return number", "def namelength(self):\n return self[\"namelength\"]", "def get_suffix(self):\n return '%s%d' % (self.disk.devletters(), self.get_index() + 1)", "def parse_num(path):\n nbasename = path.basename.lower()\n if nbasename.startswith(nprefix):\n try:\n return int(nbasename[len(nprefix) :])\n except ValueError:\n pass", "def getLength(string):\n return (0)", "def getNameIndex(name):\n try:\n location = len(name) - \"\".join(reversed(name)).index(\".\")\n index = int(name[location:])\n except Exception:\n index = 0\n return index", "def _get_freq(name):\n try:\n counts = int(name.split(\"_x\")[1])\n except:\n return 0\n return counts", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length", "def namelengthsrc(self):\n return self[\"namelengthsrc\"]", "def get_string_length(self):\n return int(self.read('H')[0])", "def trimname(name):\n while name[-1].isdigit():\n name = name[:-1]\n if name[-1] == '_':\n name = name[:-1]\n return name", "def pname(name):\n ranks = list(reversed(name.split(';')))\n for i, rank in enumerate(ranks):\n if rank in ['Others', 'Unassigned']:\n return rank\n if rank == '__':\n continue\n if rank.split('__')[1] is '':\n return ranks[i+1] + ';' + rank\n return rank", "def parse_duration_level(f):\n stem = Path(f).stem\n return stem.split(\"_\")[2]", "def acquisition_number_of(path_to_func_or_anat_or_json: Path) -> str:\n\n return path_to_func_or_anat_or_json.stem.split(\"_\")[-1].zfill(2)", "def get_accession_number(url):\n return url.split(\"/\")[-1]", "def last_segment(account_name):\n return 
account_name.split(':')[-1]", "def number(self):\n return re.match(r'^.*?([0-9]+)$', self._alias).groups()[0]", "def dot_printname(self):\n return self.printname.split('/')[0].replace('-', '_')", "def get_part_video_num(num):\r\n if len(str(num)) == 1:\r\n return '00' + str(num)\r\n elif len(str(num)) == 2:\r\n return '0' + str(num)\r\n else:\r\n return str(num)", "def CountSoftHyphen(url):\r\n return url.count('-')", "def order(name: str):\n if name.startswith('pred'):\n split = name.split('_')\n if len(str(split[-2])) > 10: # New file format, -2 is hash\n return int(split[-3])\n return int(split[-2])\n split = name.split('_')\n x = split[-1].split('.')[0]\n return int(x)", "def minor(self):\n return self.normalized().split('/', 1)[1]", "def leading(self, string: str) -> int:\n leading_amount = 0\n while string[leading_amount] == \"-\":\n leading_amount += 1\n return leading_amount", "def numeric_part(self) -> str:\n if self.is_old_style:\n return self.split('/')[1]\n return str(self)" ]
[ "0.6315995", "0.6267976", "0.5928007", "0.58780754", "0.5774592", "0.5626339", "0.5619285", "0.559705", "0.5593243", "0.55860585", "0.5539551", "0.55384886", "0.5524113", "0.5524029", "0.55239666", "0.55148023", "0.5513144", "0.5452538", "0.5429969", "0.5423451", "0.54014844", "0.5392005", "0.5379358", "0.53414136", "0.5328008", "0.5321838", "0.5281249", "0.5281063", "0.5262465", "0.5260374" ]
0.8547603
0
Gets the name for the next part with the help of the negative digit count from get_amount_of_digits.
def get_following_name(self, name: Name):
    name = str(name)
    amount_of_digits = self.get_amount_of_digits(name)
    number = int(name[amount_of_digits:])
    number += 1
    following_name = name[:amount_of_digits]
    following_name += str(number)
    return following_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_partname(self):\n return '{0:03.0f}{1}'.format(self.lon1, self.part)", "def get_suffix(self):\n return '%s%d' % (self.disk.devletters(), self.get_index() + 1)", "def find_next_name(self, etfName):\n etfName = etfName.split('-')[0]\n max_n = max(list(map(lambda x: int(x.split('-')[1]) if x.split('-')[0] == etfName else 0, self.etfs.keys())))\n return etfName + '-' + str(max_n + 1)", "def _next_partname(self):\n partname_str = '/ppt/slides/slide%d.xml' % (len(self)+1)\n return PackURI(partname_str)", "def getName(self):\n\n if self.vel == 0:\n return \"Rest\"\n\n order = self.nbr % 12 # Position in octave (Ex: 0 - C, 1 - C#,...)\n return ['C', 'C#/Db', 'D', 'D#/Eb',\n 'E', 'F', 'F#/Gb', 'G',\n 'G#/Ab', 'A', 'A#/Bb', 'B'][order]", "def incremental_part(self) -> int:\n if self.is_old_style:\n return int(self.numeric_part[4:])\n return int(self.split('.', 1)[1])", "def get_amount_of_digits(self, name: str):\n x = -1\n while name[x - 1].isdigit():\n x -= 1\n if name[:x].endswith(\"/streaming/p\"):\n return x", "def get_xml_part_name(xml_score_part):\n part_name_node = xml_score_part.find(\"part-name\")\n if part_name_node is not None:\n if part_name_node.get(\"print-object\", \"\") == \"no\":\n return \"\"\n return part_name_node.text if part_name_node is not None else \"\"", "def _getName(self):\n return self.id().split('.')[-2]", "def getDuPart(du):\n \n regex = \"z0(0|1)du(\\d+)\\(DB\\-SL\\-MSL\\-CH\\-SCH\\)\"\n \n try:\n m = re.search(regex,du)\n # this should be called zone,but calling it partition\n part = m.group(1)\n du = m.group(2)\n \n \n \n except:\n \n trace_error(\"Couldn't parse du str '%s' with regex '%s'\"%(du,regex))\n raise ParseError\n \n #if du.startswith('0\\d'):\n #du = du.lstrip('0')\n if re.search(\"0(\\d+)\",du):\n m = re.search(\"0(\\d+)\",du)\n du = m.group(1)\n \n return part + du", "def numeric_part(self) -> str:\n if self.is_old_style:\n return self.split('/')[1]\n return str(self)", "def dot_name(number):\n\tif number > 0:\n\t\treturn \"P {}\".format(number)\n\telse:\n\t\treturn \"O {}\".format(-number)", "def name_number(n):\n num = str(n)\n last_digits = [\"\", \"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\", \"ten\", \"eleven\",\n \"twelve\", \"thirteen\", \"fourteen\", \"fifteen\", \"sixteen\", \"seventeen\", \"eighteen\", \"nineteen\"]\n tens = [\"twenty\", \"thirty\", \"forty\", \"fifty\", \"sixty\", \"seventy\", \"eighty\", \"ninety\"]\n if n <= 19: # name numbers from 1 to 19 directly\n c = last_digits[int(num[-2:])]\n elif 19 < n < 100: # name numbers from 20 to 99 by concatenating name for tens digit with name for ones digit\n c = tens[int(num[-2]) - 2] + last_digits[int(num[-1])]\n elif n % 100 == 0 and n < 1000:\n # name numbers from 100 to 900 that are divisible by 100 by taking number in hundreds place and concatenating\n # with \"hundred\"\n c = last_digits[int(num[0])] + \"hundred\"\n elif n != 1000:\n # name numbers from 100 to 999 excluding those divisible by 100 by taking number in hundreds place, adding\n # \"hundredand\", then naming the other 2 decimal places recursively\n c = last_digits[int(num[0])] + \"hundredand\" + name_number(n % 100)\n else: # name 1000\n c = \"onethousand\"\n return c", "def get_name() -> str:", "def pname(name):\n ranks = list(reversed(name.split(';')))\n for i, rank in enumerate(ranks):\n if rank in ['Others', 'Unassigned']:\n return rank\n if rank == '__':\n continue\n if rank.split('__')[1] is '':\n return ranks[i+1] + ';' + rank\n return rank", "def 
numeric_part(self) -> str:\n return self.arxiv_id.numeric_part", "def get_suffix(self):\n if self.device:\n return self.device\n else:\n return '%s%d' % (self.devletters(), self.get_index() + 1)", "def get_part_video_num(num):\r\n if len(str(num)) == 1:\r\n return '00' + str(num)\r\n elif len(str(num)) == 2:\r\n return '0' + str(num)\r\n else:\r\n return str(num)", "def get_name(self):\n return self.id.split('.')[-1:][0]", "def divider(name=\"\", char=\"-\", n=75, func=info, **keys):\n if name:\n n2 = (n - len(name) - 2) // 2\n func(char*n2, name, char*n2, **keys)\n else:\n func(char*n, **keys)", "def _PartName(self,partindex,nparts):\n\n\t\tfrom tempfile import mkstemp\n\t\timport os\n\n\t\tp = '%d_%d' % (partindex, nparts * nparts - 1)\n\t\tfd,name = mkstemp(prefix = p, suffix = '.image')\n\t\tos.close(fd)\n\t\tself.result = name\n\t\treturn name", "def extract_journal(name):\n match = re.search(\"\\d+\", name)\n if match != None: \n return name[:match.start()], int(name[match.start(): match.end()])\n else: \n return \"\", 0", "def __next_ph_name(self, ph_type, id, orient):\n basename = slide_ph_basenames[ph_type]\n # prefix rootname with 'Vertical ' if orient is 'vert'\n if orient == PH_ORIENT_VERT:\n basename = 'Vertical %s' % basename\n # increment numpart as necessary to make name unique\n numpart = id - 1\n names = self.__spTree.xpath('//p:cNvPr/@name', namespaces=_nsmap)\n while True:\n name = '%s %d' % (basename, numpart)\n if name not in names:\n break\n numpart += 1\n # log.debug(\"assigned placeholder name '%s'\" % name)\n return name", "def word(self):\n return str(self.name.names[-1])", "def get_name():", "def getNameIndex(name):\n try:\n location = len(name) - \"\".join(reversed(name)).index(\".\")\n index = int(name[location:])\n except Exception:\n index = 0\n return index", "def _weight_name(self, name):\n return name.split('/')[-1]", "def partname(self):\n assert self.__partname, \"BasePart.partname referenced before assigned\"\n return self.__partname", "def updateName(g):\n try:\n n = int(g.group(2))\n except TypeError:\n n = 0\n\n return \"%s-%d\" % (g.group(1), n + 1)", "def last_segment(account_name):\n return account_name.split(':')[-1]" ]
[ "0.6544332", "0.63715714", "0.61771786", "0.61768734", "0.6149106", "0.60808223", "0.6070014", "0.6048259", "0.6003679", "0.59391767", "0.58499116", "0.5833618", "0.58285874", "0.5824863", "0.58153534", "0.58108354", "0.57850534", "0.577331", "0.57347023", "0.57323456", "0.5718484", "0.57002866", "0.56960666", "0.56825906", "0.5664025", "0.56526226", "0.5644143", "0.5644026", "0.56009734", "0.5599014" ]
0.6799442
0
Gets content from the queue from lower and checks whether the result is a list with the packetid as its first entry and the content object as its second entry.
def get_content_from_queue_from_lower(self):
    queue_from_lower_entry = self.queue_from_lower.get()
    if isinstance(queue_from_lower_entry, list):
        if isinstance(queue_from_lower_entry[1], Nack):
            print("NACK:", queue_from_lower_entry[1].interest, queue_from_lower_entry[1].reason)
        return queue_from_lower_entry[1]
    else:
        if isinstance(queue_from_lower_entry, Nack):
            print("NACK:", queue_from_lower_entry.interest, queue_from_lower_entry.reason)
        return queue_from_lower_entry
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n result = buffer_output.content\n else:\n resulting_content_object = self.get_content_from_queue_from_lower()\n if isinstance(resulting_content_object, Interest):\n print(\"[get_next_content] Resulting object is interest:\", resulting_content_object.name, \", instead of content object with name:\", next_name)\n else:\n print(\"[get_next_content] Resulting content object(desired name, resulting name):\", next_name, resulting_content_object.name)\n # Gets stored in buffer if interest doesn't correspond to needed result\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n while is_content_correct is False:\n #print(\"[get_next_content] Content wasn't correct\", resulting_content_object.name)\n buffer_output = self.check_buffer(next_name)\n # If desired interest is in buffer return it and break out of while loop\n if buffer_output:\n resulting_content_object = buffer_output\n break\n else:\n # Get content out of queue_from_lower and check if it is correct -> until correct one is returned\n #print(\"[get_next_content] Content wasn't correct and not avaiable in the buffer.\")\n resulting_content_object = self.get_content_from_queue_from_lower()\n #print(\"[get_next_content] Resulting content object:\", resulting_content_object.name, next_name)\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n\n result = resulting_content_object.content\n result = self.stream_part(result, resulting_content_object)\n return result", "def GetQueueList(handler, query):\n json_config = {}\n if 'TiVo' in query:\n tivoIP = query['TiVo'][0]\n with active_tivos_lock:\n if tivoIP in active_tivos:\n with active_tivos[tivoIP]['lock']:\n json_config['urls'] = [ status['url'] for status in active_tivos[tivoIP]['queue'] ]\n\n handler.send_json(json.dumps(json_config))", "def get_msg_by_content(self, content):\n msg_data = self.database.search(self.tname, MsgWithTag.get_msg_key(), content)\n if len(msg_data) != 0:\n return (msg_data[0][0], self.data_to_msg(msg_data[0]))\n return None", "def _get_data(self):\n while True:\n # self.logger.debug(\"data queue size is: {}\".format(len(self._dataqueue)))\n ans = self._parser.find_first_packet(self._dataqueue[:])\n if ans:\n self._dataqueue = ans[1]\n # self.logger.debug(\"found packet of size {}\".format(len(ans[0])))\n return ans[0]\n else:\n # self.logger.debug(\"Could not find packet in received data\")\n tmp = self.conn.recv(1024)\n self._dataqueue += tmp", "def check_packet_queue(self, queue, out):\n time.sleep(2)\n if queue == \"all\":\n self.verify(\"Queue= 0\" in out and \"Queue= 1\" in out and \"Queue= 2\" in out and \"Queue= 3\" in out,\n \"There is some queues doesn't work.\")\n elif queue == \"0\":\n self.verify(\"Queue= 0\" in out and \"Queue= 1\" not in out and \"Queue= 2\" not in out and \"Queue= 3\" not in out,\n \"RSS is enabled.\")\n lines = out.split(\"\\r\\n\")\n reta_line = {}\n queue_flag = 0\n packet_sumnum = 0\n # collect the hash result and the queue id\n for line in lines:\n line = line.strip()\n if queue_flag == 1:\n result_scanner = r\"RX-packets:\\s?([0-9]+)\"\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(line)\n packet_num = m.group(1)\n packet_sumnum = packet_sumnum + 
int(packet_num)\n queue_flag = 0\n elif line.strip().startswith(\"------- Forward\"):\n queue_flag = 1\n elif line.strip().startswith(\"RX-packets\"):\n result_scanner = r\"RX-packets:\\s?([0-9]+)\"\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(line)\n packet_rec = m.group(1)\n\n self.verify(packet_sumnum == int(packet_rec) == 128, \"There are some packets lost.\")", "def order_queue_content(self, origin_id):\n assert origin_id in self.origin_queue_lookup, 'origin does not exist'\n order_ids, head = [], self.origin_queue_lookup[origin_id]\n while head is not None:\n order_ids.append(head.order_id)\n head = head.next\n return [self.queued_orders[order_id] for order_id in order_ids]", "def read_queue(self):\n query = \"\"\"SELECT server,\n otp,\n modified,\n info,\n server_nonce\n FROM queue\"\"\"\n self._execute(query)\n return self._dictfetchall()", "def look(self):\n\t\tself.send(\"look\")\n\t\tmsg = self.recv()\n\n\t\tmsg_tokens = []\n\t\ttiles = []\n\n\t\tfor i in range(msg.size()):\n\t\t\tmsg_tokens.append(msg.get(i))\n\t\tfor tok in msg_tokens:\n\t\t\ttiles.append(tok.split(\"|\"))\n\n\t\treturn tiles", "def test_new_queue_from_list(self):\n data = [1, 3, 5, 7, 2, 4]\n queue = Queue_(data)\n self.assertFalse(queue.empty())\n self.assertEqual(queue.size(), len(data))\n self.assertEqual(queue.top(), data[0])", "def get_from_queue(self):\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)", "def _messages_list(self, queue):\n\n return queue.messages()", "def get_user_message_queue(self,user_id,order=0):\n key = key_gen(user_id)\n result = self.redis.lrange(key,0,-1)\n if result != []:\n for index, item in enumerate(result):\n item = eval(item)\n result[index] = self.get_message_detail(item['message_id'])\n if order == 1:\n result.reverse()\n return result", "def scan_content(content, cur_lists):\n\n #even if there are multiple tags for one list,\n #they should all have a separate key that was created in make_empty_lists()\n all_tags = list(cur_lists.keys())\n \n verified = find_verified(content)\n\n #print\n #print verified\n #print len(verified)\n\n #could customize these:\n skip_tags = [ 'skip', 'meh', 'blah', 'bad' ]\n\n #TODO:\n #for some content without titles, tags are sometimes placed in title\n #could scan titles for tags to place in appropriate list\n \n for item in verified:\n #now scan all segments in item for valid playlist tags...\n #do not recurse here! one level is sufficient now\n for segment in item.segments:\n skip = False\n for skip_tag in skip_tags:\n if skip_tag in segment.tags:\n skip = True\n\n if not skip and segment.tags:\n matched = False\n\n #add items to the appropriate list based on tags\n for tag in all_tags:\n if tag in segment.tags:\n cur_lists[tag].append(segment)\n matched = True\n \n #special cases: \n if not matched:\n for tag in segment.tags:\n if re.search(\"\\+\", tag):\n cur_lists['good'].append(segment)\n matched = True\n \n #this should be handled by check above now\n ## #sometimes 'skip' may be in the tag... e.g. 
\"skip?\"\n ## elif re.search(\"skip\", tag):\n ## matched = True\n \n if not matched: \n print(\"Couldn't match: \", segment.tags)\n\n #could append to misc, if wanted\n cur_lists['misc'].append(segment)\n\n\n else:\n #TODO:\n #if we put it on a list (not skip),\n #that is a good indication that it's worth remembering\n #this is a good chance to see\n #if any people / group related notes exist for the song\n #TODO:\n #or, just open the list\n #and apply it from there\n #that way the process could work for any list\n #(maybe just needs adaptation)\n pass\n \n #print segment.status\n\n print()\n #print content.debug(recurse=False)", "def test_get_element(self):\n data = (1, 2, 3, 4)\n queue = Queue_(data)\n self.assertEqual(queue.get(), data[0])", "def queue_to_list(queue):\n result = []\n while queue.qsize() != 0:\n result.append(queue.get())\n return result", "def getContentList(self, content, index=-1):\n try:\n if index == -1: # this is a return for a single instance site\n repattern = re.compile(self.RegEx, re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n else: # this is the return for a multisite\n repattern = re.compile(self.RegEx[index], re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None", "def get_data(queue, item_count):\n return [loads(queue.get()) for _ in range(item_count)]", "def peek_order_queue_content(self, origin_id, min_quantity_sum, min_order_count):\n assert origin_id in self.origin_queue_lookup, 'origin does not exist'\n order_quantity_sum = 0\n peek_orders, head = [], self.origin_queue_lookup[origin_id]\n while head is not None and (order_quantity_sum < min_quantity_sum or len(peek_orders) < min_order_count):\n peek_orders.append(self.queued_orders[head.order_id])\n order_quantity_sum += peek_orders[-1].quantity\n head = head.next\n return peek_orders", "def parse_list_payload(payload):\n # the payload begins with a constant header\n if payload[0] != 'cbu':\n raise ValueError('Invalid list payload header: {}'.format(payload[0]))\n\n # the first submessage is always present, so let's treat it light a header\n first_submsg = payload[1][0][0]\n if len(first_submsg) == 5:\n (unknown_int, unknown_none, unknown_str, unknown_none_or_list,\n timestamp) = first_submsg\n unknown_list = None\n elif len(first_submsg) == 6:\n (unknown_int, unknown_none, unknown_str, unknown_none_or_list,\n timestamp, unknown_list) = first_submsg\n else:\n raise ValueError('Cannot parse first submessage: {}'\n .format(first_submsg))\n\n # The type of a submessage is determined by its position in the array\n submsgs = payload[1][0][1:]\n for submsg_type, submsg in enumerate(submsgs):\n if submsg is None:\n pass\n elif submsg_type == 1:\n # parse chat message\n conversation_id = submsg[0][0][0]\n sender_ids = submsg[0][1]\n timestamp = submsg[0][2]\n content = submsg[0][6]\n type_ = content[2][0][0][0]\n if type_ == 0: # text\n type_, text, formatting = content[2][0][0]\n links = None\n elif type_ == 2: # link\n type_, text, formatting, links = content[2][0][0]\n else:\n raise ValueError('Unknown message type {} for message: {}'\n .format(type_, submsg))\n yield {\n 'conversation_id': conversation_id,\n 'timestamp': timestamp,\n 'sender_ids': tuple(sender_ids),\n 'text': text,\n }\n\n elif submsg_type == 2:\n # TODO: parse unknown\n # conversation_id, sender_ids, timestamp, 1, 20\n pass\n elif submsg_type == 3:\n # TODO: parse unknown\n # 
conversation_id, sender_ids, timestand, 1 or 2\n pass\n elif submsg_type == 6:\n # TODO: parse unknown\n # sender_ids, conversation_id, timestamp\n pass\n elif submsg_type == 11:\n # TODO: parse conversation update\n pass\n elif submsg_type == 12:\n # TODO: parse unknown\n pass\n else:\n raise ValueError('Unknown submessage type {} for submessage {}'\n .format(submsg_type, submsg))", "def parse_payload(self):\n while len(self.buffer) >= 10:\n \"\"\" check magic word \"\"\"\n if self.buffer[0:2] != self.mw:\n #LogDebug(\"drop all buffer due to incorrect magic word\")\n self.buffer = b\"\" # drop entire buffer\n\n \"\"\" extract the value from length field \"\"\"\n length = struct.unpack(\"I\", self.buffer[2:6])[0] + 1\n #print \"packet len\", length, \"buffer len\", len(self.buffer)\n if len(self.buffer) < length:\n #LogDebug(\"imcompleted packet will be processed later\")\n break\n\n \"\"\" verify the packet CRC \"\"\"\n calculated_crc = struct.pack(\"I\", binascii.crc32(self.buffer[:length-4]) & 0xFFFFFFFF)\n if calculated_crc != self.buffer[length-4:length]:\n pass\n else:\n payload = self.buffer[6:length-4]\n self.payloads.append(payload)\n self.buffer = self.buffer[length:]", "def get_queue_items(self, queue_name):\n proc = start_proc([\"/usr/bin/sudo\", \"rabbitmqctl\", \"list_queues\"],\n shell=False)\n for line in iter(proc.stdout.readline, \"\"):\n print(\"LIST QUEUES:\" + line)\n m = re.search(r\"%s\\s+([0-9]+)\" % queue_name, line)\n if m:\n return int(m.group(1))\n return None", "def dequeue_content(queue):\n if queue.front is None:\n raise IndexError\n queue.front = queue.front.rest\n if queue.front is None:\n queue.back = None", "def _hear_message_from_server(self):\n while self.is_alive:\n data = self._socket.recv(1024)\n content = loads(data)\n self._current_list = content\n print(\"Servidor: {}\".format(content))", "def _messages(self):\n q = [json.loads(i)['message'] for i in self.client.kv.get(\n 'rhumba.q.testqueue', [])]\n return q", "def getContentList(self, webcontent):\n try:\n repattern = re.compile(self.RegEx, re.IGNORECASE)\n foundlist = re.findall(repattern, webcontent)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None", "def inDownloadQueue(self, _src):\n for dl in self.downloadQueue:\n if _src in dl['src']:\n return True\n return False", "def ztest_get_item(self):\n \n queue = NMSQueue()\n \n result_set = queue.get_items_with_priority(1,1,0,1)\n \n for item in result_set:\n print(\"\\nItem = %s\\n\" % (item) )\n newitem = queue.get_item(item.uuid)\n print(\"\\nRetrieve the same from queue Item = %s\\n\" % (newitem) )", "def deserialize(self, data):\r\n #借助队列的结构来反序列化\r\n if not data:\r\n return None\r\n strlist = data.split(\"!\")\r\n return self._deser(strlist)", "def Peek(self):\n\t\tif not self.first:\n\t\t\traise ValueError(\"Empty Queue\")\n\t\treturn self.first.data", "def queue_to_stack(queue):\n stack = Stack()\n check_list = []\n\n while len(queue) != 0:\n check_list.append(queue.dequeue())\n\n check_list.reverse()\n\n while check_list != []:\n stack.push(check_list[0])\n check_list.remove(check_list[0])" ]
[ "0.555139", "0.5506232", "0.54767245", "0.5419503", "0.53994626", "0.5372635", "0.52298725", "0.5192108", "0.518906", "0.51844054", "0.51400787", "0.51395917", "0.51212037", "0.5103552", "0.5094152", "0.50883275", "0.5030852", "0.4973149", "0.4923798", "0.4916206", "0.49099907", "0.48940802", "0.4893319", "0.48509118", "0.48447794", "0.48392117", "0.48312467", "0.48301962", "0.4829749", "0.48213682" ]
0.7497363
0
Handles getting the content. Checks if the name is present in the buffer, otherwise gets it from the queue_from_lower. If the result is a metatitle, the following part is retrieved.
def get_content(self, next_name: str):
        buffer_output = self.check_buffer(next_name)
        if buffer_output:
            print("[get_next_content] Resulting content object out of the buffer:", buffer_output.name, buffer_output.content)
            resulting_content_object = buffer_output
            result = buffer_output.content
        else:
            resulting_content_object = self.get_content_from_queue_from_lower()
            if isinstance(resulting_content_object, Interest):
                print("[get_next_content] Resulting object is interest:", resulting_content_object.name, ", instead of content object with name:", next_name)
            else:
                print("[get_next_content] Resulting content object(desired name, resulting name):", next_name, resulting_content_object.name)
            # Gets stored in buffer if interest doesn't correspond to needed result
            is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)
            while is_content_correct is False:
                #print("[get_next_content] Content wasn't correct", resulting_content_object.name)
                buffer_output = self.check_buffer(next_name)
                # If desired interest is in buffer return it and break out of while loop
                if buffer_output:
                    resulting_content_object = buffer_output
                    break
                else:
                    # Get content out of queue_from_lower and check if it is correct -> until correct one is returned
                    #print("[get_next_content] Content wasn't correct and not avaiable in the buffer.")
                    resulting_content_object = self.get_content_from_queue_from_lower()
                    #print("[get_next_content] Resulting content object:", resulting_content_object.name, next_name)
                    is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)

        result = resulting_content_object.content
        result = self.stream_part(result, resulting_content_object)
        return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content_from_queue_from_lower(self):\n queue_from_lower_entry = self.queue_from_lower.get()\n if isinstance(queue_from_lower_entry, list):\n if isinstance(queue_from_lower_entry[1], Nack):\n print(\"NACK:\", queue_from_lower_entry[1].interest, queue_from_lower_entry[1].reason)\n return queue_from_lower_entry[1]\n else:\n if isinstance(queue_from_lower_entry, Nack):\n print(\"NACK:\", queue_from_lower_entry.interest, queue_from_lower_entry.reason)\n return queue_from_lower_entry", "def get_next_single_name(self, arg: str):\n current_name = arg\n if self.get_next_part_counter == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n if self.check_end_streaming(result) is False:\n next_name = self.get_following_name(current_name)\n self.sent_interests[str(next_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(next_name)))\n return result", "def get_content(name):\n raise NotImplementedError('derived class should overload me')", "def stream_part(self, result: str, resulting_content_object: Content):\n if self.check_for_metatitle(result):\n if str(resulting_content_object.name) not in self.get_next_buffer:\n self.get_next_buffer[str(resulting_content_object.name)] = resulting_content_object\n print(\"[Streaming] Part\", self.get_next_part_counter, \"starts here.\")\n next_name = str(resulting_content_object.name) + \"//streaming/p\" + str(self.get_next_part_counter)\n if self.classic is False:\n result = self.get_next_single_name(next_name)\n else:\n result = self.get_next_single_name_classic(next_name)\n print(\"[Streaming] Part\", self.get_next_part_counter, \"ends here with result:\", result)\n self.get_next_part_counter += 1\n if self.check_end_streaming(result):\n return None\n return result", "def __getitem__(self, name):\n if name in list(self.__getattribute__(\"contents\").keys()):\n return self.__getattribute__(\"contents\")[name]\n else:\n raise KeyError(\"No content named %s\" % name)", "def get_next_single_name_classic(self, arg: str):\n current_name = arg\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n return result", "def chunk_get(request, key_name):\n chunk = lookup_or_404(Chunk, key_name)\n return HttpResponse(chunk.value, mimetype='text/plain')", "def process(self, query):\n response_text = self.fetch(query)\n content = self.get_content(response_text)\n return content", "def get(self, name, failobj=None):\n name = name.lower()\n for k, v in self._headers:\n if k.lower() == name:\n return v\n return failobj", "def fetch(self,key):\n try:\n return self.__content[key]\n except KeyError:\n return None", "def get(name):\n #retrieve the snippet from the db - commnet from session of nicole darcy\n #i added the 'cursor= ' line because it said it was unused code, copied it from def put()\n# commenting lines below to replace with new code as per class lesson\n # cursor=connection.cursor()\n # row = cursor.fetchone()\n # connection.commit()\n with connection, connection.cursor() as cursor:\n cursor.execute(\"select message from snippets where keyword=%s\", (name,))\n row = cursor.fetchone()\n if not row:\n #No snippet was found with that name.\n return \"404: Snippet not Found\"\n return row[0]\n \n # warning for 'unreachable code' so i commented it out...\n # logging.error(\"FIXME: Unimplemented - get({!r})\".format(name))\n # print(\"this 
function is running\",get.__name__)\n # return \"\"", "def __getitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n return tag.text\n raise KeyError(name)", "def get_next_multiple_names(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n # Only first call puts two names (current_name and next_name) in the queue_to_lower. Next call only puts next_name\n if self.pos_name_list_multiple == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n self.pos_name_list_multiple += 1\n next_name = self.name_list_multiple[self.pos_name_list_multiple]\n if self.check_end_streaming(next_name) is False:\n self.sent_interests[str(next_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(next_name)))\n result = self.get_content(current_name)\n return result\n elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:\n self.name_list_multiple = None\n else:\n return None", "async def _get_infobox_data(name):\n\n url = 'http://runescape.wikia.com/wiki/%s?action=raw'\n\n while True:\n async with aiohttp.get(url % parse.quote(name)) as r:\n\n if r.status != 200:\n return None\n\n text = await r.text()\n redirect = re.search('#REDIRECT ?\\[\\[(.+)\\]\\]', text, re.I)\n if redirect:\n name = redirect.group(1)\n continue\n\n return _parse_infobox_data(text)", "def bb_queue(hit):\n try:\n songid = hit.group(1)\n song = Song.objects.get(id=songid)\n artists = song.artists\n except:\n return \"[queue]%s[/queue]\" % songid\n \n t = loader.get_template('webview/queue_tag.html')\n c = Context({\n 'song' : song,\n 'artists' : artists,\n 'MEDIA_URL' : settings.MEDIA_URL,\n })\n \n result = t.render(c)\n return result", "def get_msg_by_content(self, content):\n msg_data = self.database.search(self.tname, MsgWithTag.get_msg_key(), content)\n if len(msg_data) != 0:\n return (msg_data[0][0], self.data_to_msg(msg_data[0]))\n return None", "def getchunk( self, name ):\n nm= self.fullNameFor( name )\n if nm in self.named:\n return self.named[nm]\n raise Error( \"Cannot resolve {!r} in {!r}\".format(name,self.named.keys()) )", "def pop_message(self, queue_name):\r\n messages = self.pop_messages(queue_name, count=1)\r\n if messages['item_count'] > 0:\r\n return messages['items'][0]\r\n else:\r\n return None", "def get(self):\n try:\n return self.url_queue.get(timeout=self.timeout)\n except Exception as e:\n print(e)\n return None", "def get(name):\n # cursor = connection.cursor()\n # command = \"select message from snippets where keyword='%s'\"%name\n # cursor.execute(command)\n # connection.commit()\n # message_tuple=cursor.fetchone()\n \n with connection, connection.cursor() as cursor:\n cursor.execute(\"select message from snippets where keyword=%s\", (name,))\n row = cursor.fetchone()\n \n if len(row)>0:\n logging.debug(\"Get Snippet successfully.\")\n return row[0]\n else:\n logging.debug(\"No Snippet got\")\n return \"\"", "def _get_content_by_type(self, elem, attr_name=None):\n if not attr_name:\n resp = elem.text\n else:\n resp = elem.get(attr_name, \"\")\n \n return resp.strip()", "def process_name(device_index, mp3_filename, record):\n\n text = \"May I please ask your name?\"\n name = process_extract_name_organization_details(device_index, mp3_filename, text, record)\n\n if name is None:\n text = process_name_organization(device_index, mp3_filename, record)\n else:\n text = 
\"All right, and what company are you with?\"\n text = process_organization(device_index, mp3_filename, record, text, name)\n\n return text", "def _get_data(self):\n while True:\n # self.logger.debug(\"data queue size is: {}\".format(len(self._dataqueue)))\n ans = self._parser.find_first_packet(self._dataqueue[:])\n if ans:\n self._dataqueue = ans[1]\n # self.logger.debug(\"found packet of size {}\".format(len(ans[0])))\n return ans[0]\n else:\n # self.logger.debug(\"Could not find packet in received data\")\n tmp = self.conn.recv(1024)\n self._dataqueue += tmp", "def get_content(short_path):", "def head(self):\n if self.isquiet():\n raise QueueEmpty()\n\n qcurr = self.base + \".\" + str(self.curr)\n assert os.path.exists(qcurr)\n qt = open(qcurr, \"r\")\n data = qt.read()\n qt.close()\n return data", "def get_result_by_name(self, name):\n found = None\n for result in self.results:\n if result.heading == name:\n found = result\n break\n return found", "async def read_one(self) -> Text:\n message = \"\"\n headers = HTTPHeaders()\n\n line = await convert_yielded(self._readline())\n\n if line:\n while line and line.strip():\n headers.parse_line(line)\n line = await convert_yielded(self._readline())\n\n content_length = int(headers.get(\"content-length\", \"0\"))\n\n if content_length:\n raw = await self._read_content(length=content_length)\n if raw is not None:\n message = raw.decode(\"utf-8\").strip()\n else: # pragma: no cover\n self.log.warning(\n \"%s failed to read message of length %s\",\n self,\n content_length,\n )\n\n return message", "def chunk_reader(chunk_filenames, chunk_filename_queue):\n chunks = []\n done = chunk_filenames\n\n while True:\n if not chunks:\n chunks, done = done, chunks\n random.shuffle(chunks)\n if not chunks:\n print(\"chunk_reader didn't find any chunks.\")\n return None\n while len(chunks):\n filename = chunks.pop()\n done.append(filename)\n chunk_filename_queue.put(filename)\n print(\"chunk_reader exiting.\")\n return None", "def get_next_multiple_names_classic(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n self.pos_name_list_multiple += 1\n result = self.get_content(current_name)\n return result\n elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:\n self.name_list_multiple = None", "def _stream_from_name(self):\n # Need to check if searching locally is forbidden\n if not self.dont_cache_search:\n match = search_locally(self.name)\n if match:\n self.title = match[0]\n self.stream_url = match[1]\n else:\n if not self._get_youtube_data_name():\n return\n\n local_path = search_URL(self.URL)\n\n # Try to check if the URL is mapped locally.\n if local_path is not None:\n logger.debug(\"Replacing the stream URL with the local.\")\n self.stream_url = local_path\n else:\n self._dw()\n # Update the URL cache\n update_URL_cache(self.title, self.URL)\n else:\n self._get_youtube_data_name()\n direct_to_play(self.stream_url, self.show_lyrics, self.title)" ]
[ "0.6432524", "0.59715605", "0.57843566", "0.575296", "0.57495564", "0.55204153", "0.5494455", "0.5369599", "0.53649986", "0.52783656", "0.5223427", "0.5204933", "0.5191491", "0.5146155", "0.51180285", "0.5115465", "0.51074815", "0.50316614", "0.5002304", "0.50022376", "0.4946996", "0.4940058", "0.49155593", "0.48932692", "0.48887753", "0.48529634", "0.48492157", "0.4837975", "0.48369685", "0.48060134" ]
0.69451576
0
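For context on the row above: the buffer-first retrieval loop its query describes can be reduced to a short standalone sketch. This is an illustration only, not part of the dataset; the plain dict used as a buffer and the (name, payload) tuples read from a queue.Queue are hypothetical stand-ins for the framework's content objects and lower-layer queue.

import queue

def get_content_sketch(next_name: str, buffer_dict: dict, queue_from_lower: "queue.Queue"):
    # Serve from the local buffer first if the object was already received earlier.
    obj = buffer_dict.pop(next_name, None)
    while obj is None:
        # Otherwise block on the queue from the lower layer.
        name, payload = queue_from_lower.get()
        if name == next_name:
            obj = payload
        else:
            # Not the object that was asked for: stash it for a later request.
            buffer_dict[name] = payload
    return obj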
get_next for the single name case. Before returning the result, the next name is already put into the queue_to_lower. The first name is the only one that is put into the queue immediately before requesting it.
def get_next_single_name(self, arg: str):
        current_name = arg
        if self.get_next_part_counter == 0:
            self.sent_interests[str(current_name)] = True
            self.queue_to_lower.put((self.packetid, Interest(current_name)))
        result = self.get_content(current_name)
        if self.check_end_streaming(result) is False:
            next_name = self.get_following_name(current_name)
            self.sent_interests[str(next_name)] = True
            self.queue_to_lower.put((self.packetid, Interest(next_name)))
        return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_multiple_names(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n # Only first call puts two names (current_name and next_name) in the queue_to_lower. Next call only puts next_name\n if self.pos_name_list_multiple == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n self.pos_name_list_multiple += 1\n next_name = self.name_list_multiple[self.pos_name_list_multiple]\n if self.check_end_streaming(next_name) is False:\n self.sent_interests[str(next_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(next_name)))\n result = self.get_content(current_name)\n return result\n elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:\n self.name_list_multiple = None\n else:\n return None", "def get_next_single_name_classic(self, arg: str):\n current_name = arg\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n return result", "def next(self):\n try:\n e = self.pool[self.i]\n self.i = self.i + 1\n except IndexError:\n if not hasattr(self, 'j'):\n self.j = 0\n self.pool.extend([x.lower() for x in self.pool])\n try:\n e = self.pool[self.i % len(self.pool)] + self.pool[self.j]\n self.j = self.j + 1\n except IndexError:\n self.i += 1\n self.j = 0\n return self.next()\n return '_{}'.format(e)", "def next_name():\r\n\tif not q.empty():\r\n\t\tname = q.get()\r\n\t\tWAITING_ROOM.remove(name)\r\n\t\tprint(name)\r\n\telse:\r\n\t\tprint(\"Queue is Empty\")", "def _next_until_name_match(generator, ref_name):\n curr_name = ''\n while not curr_name == ref_name:\n curr_aln = generator.next()\n curr_name = curr_aln.read.name.split()[0]\n return curr_aln", "def get_next(self, arg: str):\n if self.check_for_singlename(arg):\n if self.classic is False:\n return self.get_next_single_name(arg)\n else:\n return self.get_next_single_name_classic(arg)\n if self.check_streaming(arg):\n if self.classic is False:\n return self.get_next_multiple_names(arg)\n else:\n return self.get_next_multiple_names_classic(arg)\n else:\n return self.get_next_inner_computation(arg)", "def get_next_multiple_names_classic(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n self.pos_name_list_multiple += 1\n result = self.get_content(current_name)\n return result\n elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:\n self.name_list_multiple = None", "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n result = buffer_output.content\n else:\n resulting_content_object = self.get_content_from_queue_from_lower()\n if isinstance(resulting_content_object, Interest):\n print(\"[get_next_content] Resulting object is interest:\", resulting_content_object.name, \", instead of content object with name:\", next_name)\n else:\n print(\"[get_next_content] Resulting content object(desired name, resulting name):\", 
next_name, resulting_content_object.name)\n # Gets stored in buffer if interest doesn't correspond to needed result\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n while is_content_correct is False:\n #print(\"[get_next_content] Content wasn't correct\", resulting_content_object.name)\n buffer_output = self.check_buffer(next_name)\n # If desired interest is in buffer return it and break out of while loop\n if buffer_output:\n resulting_content_object = buffer_output\n break\n else:\n # Get content out of queue_from_lower and check if it is correct -> until correct one is returned\n #print(\"[get_next_content] Content wasn't correct and not avaiable in the buffer.\")\n resulting_content_object = self.get_content_from_queue_from_lower()\n #print(\"[get_next_content] Resulting content object:\", resulting_content_object.name, next_name)\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n\n result = resulting_content_object.content\n result = self.stream_part(result, resulting_content_object)\n return result", "def get_seq_by_name(self, name: str) -> Optional['Sequencer']:\n for seq in self.Sequencers:\n if seq.Name.lower() == name.lower():\n return seq\n return None", "def find_action_by_name(self, name): # because I dont want possible actions to be a dictionary- it fucks stuff up\n return next(x for x in self.possible_actions if name.lower() == x.name.lower())", "def get_next_smaller(self, lookup_string: str) -> Optional[SupportsStr]:\n ...", "def initialize_get_next_single(self, arg: str):\n if self.check_streaming(arg) is False:\n return \"Not for streaming.\"\n self.name_list_single = arg.splitlines()\n self.name_list_single.pop(0)", "def get_by_name(self, name):\n return self.by_name.get(name.upper())", "def get(self):\n\n self.index += 1\n\n # If we've gone through the entire deque once, shuffle it again to\n # simulate ever-flowing random. 
self.shuffle() will run __init__(),\n # which will reset the index to 0.\n if self.index == len(self):\n self.shuffle()\n\n self.rotate(1)\n try:\n return self[0]\n except:\n return u\"hello ~ 舰长\"", "def get_next_word(self, user_word, computer_word):\n pass", "def byname(self, name):\n\n name = name.lower()\n for i in self.bots:\n if name == i.name:\n return i", "def next(self):\n try:\n return self.queue.get()\n except Empty:\n raise StopIteration", "def get_account_by_name(self, name):\n return next((account for account in self.accounts\n if account.name.lower() == name.lower()), None)", "def caseDisambiguator(self) -> FsmParse:\n parseString = self.parsesWithoutPrefixAndSuffix()\n if len(self.__fsmParses) == 1:\n return self.__fsmParses[0]\n if len(self.__fsmParses) == 0:\n return None\n defaultCase = self.__defaultCaseForParseString(self.__fsmParses[0].getWord().getName(), parseString,\n self.__fsmParses[0].getFinalPos())\n if defaultCase is not None:\n for i in range(len(self.__fsmParses)):\n fsmParse = self.__fsmParses[i]\n if defaultCase in fsmParse.transitionList():\n return fsmParse\n return None", "def _next(self):\n if self._queue_lock.acquire(False):\n raise AssertionError('Expected _queue_lock to be held here')\n\n queue_ids = list(self._queues.keys())\n if not queue_ids:\n raise StopIteration()\n\n # Find the queue whose head item has the lowest priority value\n best_queue_id = None\n best_priority_value = None\n for candidate_queue_id in queue_ids:\n selected_queue = self._queues[candidate_queue_id]\n if not len(selected_queue):\n raise AssertionError('Invariant violation: queue %r is empty' % (candidate_queue_id,))\n head_of_queue = selected_queue[0]\n priority_value = head_of_queue.priority_fn() if head_of_queue.priority_fn else 0\n if best_queue_id is None or priority_value < best_priority_value:\n best_queue_id = candidate_queue_id\n best_priority_value = priority_value\n queue_id = best_queue_id\n assert queue_id is not None\n\n next_task = self._queues[queue_id].popleft()\n if len(self._queues[queue_id]) == 0:\n del self._queues[queue_id]\n return next_task", "def __getitem__(self, name):\n return self._items[name.lower()][1]", "def get_contract_by_name(self, name):\n return next((contract for contract in self.contracts\n if contract.name.lower() == name.lower()), None)", "def _lookup_name(self, name):\n name = name.lower()\n if name not in self.lookup:\n return { name }\n names = functools.reduce(operator.or_, self.lookup[name])\n names.add(name)\n return names", "def first_lower(self, s):\n if len(s) == 0:\n return s\n else:\n return s[0].lower() + s[1:]", "def __get_next_visit_node(self) -> str:\r\n node = None\r\n\r\n for visit in self.__priority_queue:\r\n # We can't visit the start node\r\n if visit.node_name == self.__fr:\r\n continue\r\n\r\n # Finds the lowest value node\r\n if node is None or visit.cost < node.cost:\r\n node = visit\r\n\r\n return node.node_name", "def walk(self, name=None):\n if not name is None:\n name = name.upper()\n return self._walk(name)", "async def request(self) -> str:\n self._name = [None] * 2\n await self.addr_conn.conn.segment_scan_completed_event.wait()\n self.name_known.clear()\n for trh in self.trhs:\n trh.activate()\n await self.name_known.wait()\n return self.name", "def __findNextWord(self):\n self.activeWindow().searchCurrentWordForward()", "def get_content_from_queue_from_lower(self):\n queue_from_lower_entry = self.queue_from_lower.get()\n if isinstance(queue_from_lower_entry, list):\n if 
isinstance(queue_from_lower_entry[1], Nack):\n print(\"NACK:\", queue_from_lower_entry[1].interest, queue_from_lower_entry[1].reason)\n return queue_from_lower_entry[1]\n else:\n if isinstance(queue_from_lower_entry, Nack):\n print(\"NACK:\", queue_from_lower_entry.interest, queue_from_lower_entry.reason)\n return queue_from_lower_entry", "def get_by_name(self, name, isCaseSensitive=None):\n # log.debug(str(inspect.stack()[1][3]) + \"--> OC.get_by_name()\")\n\n if isCaseSensitive is None or isCaseSensitive is True:\n for obj in self.get_list():\n if obj.options['name'] == name:\n return obj\n else:\n for obj in self.get_list():\n if obj.options['name'].lower() == name.lower():\n return obj\n return None" ]
[ "0.6534286", "0.6476175", "0.60643685", "0.59021914", "0.5780743", "0.5695691", "0.5597527", "0.5528981", "0.5254184", "0.5216752", "0.5201601", "0.5185522", "0.5162252", "0.51276624", "0.51116544", "0.5095773", "0.5073627", "0.50501657", "0.50407803", "0.5026237", "0.5020459", "0.50053936", "0.4991328", "0.49700364", "0.49189347", "0.4907738", "0.48816863", "0.48756158", "0.48620445", "0.4861499" ]
0.66704434
0
get_next for the multiple name case. Before returning the result, the next name is already put into the queue_to_lower. The first name is the only one that is put into the queue immediately before requesting it.
def get_next_multiple_names(self, arg: str):
        self.initialize_get_next_multiple(arg)
        if self.pos_name_list_multiple < len(self.name_list_multiple)-1:
            current_name = self.name_list_multiple[self.pos_name_list_multiple]
            # Only first call puts two names (current_name and next_name) in the queue_to_lower. Next call only puts next_name
            if self.pos_name_list_multiple == 0:
                self.sent_interests[str(current_name)] = True
                self.queue_to_lower.put((self.packetid, Interest(current_name)))
            self.pos_name_list_multiple += 1
            next_name = self.name_list_multiple[self.pos_name_list_multiple]
            if self.check_end_streaming(next_name) is False:
                self.sent_interests[str(next_name)] = True
                self.queue_to_lower.put((self.packetid, Interest(next_name)))
            result = self.get_content(current_name)
            return result
        elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:
            self.name_list_multiple = None
        else:
            return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_single_name(self, arg: str):\n current_name = arg\n if self.get_next_part_counter == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n if self.check_end_streaming(result) is False:\n next_name = self.get_following_name(current_name)\n self.sent_interests[str(next_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(next_name)))\n return result", "def get_next_multiple_names_classic(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n self.pos_name_list_multiple += 1\n result = self.get_content(current_name)\n return result\n elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:\n self.name_list_multiple = None", "def get_next_single_name_classic(self, arg: str):\n current_name = arg\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n return result", "def next(self):\n try:\n e = self.pool[self.i]\n self.i = self.i + 1\n except IndexError:\n if not hasattr(self, 'j'):\n self.j = 0\n self.pool.extend([x.lower() for x in self.pool])\n try:\n e = self.pool[self.i % len(self.pool)] + self.pool[self.j]\n self.j = self.j + 1\n except IndexError:\n self.i += 1\n self.j = 0\n return self.next()\n return '_{}'.format(e)", "def _next_until_name_match(generator, ref_name):\n curr_name = ''\n while not curr_name == ref_name:\n curr_aln = generator.next()\n curr_name = curr_aln.read.name.split()[0]\n return curr_aln", "def get_next(self, arg: str):\n if self.check_for_singlename(arg):\n if self.classic is False:\n return self.get_next_single_name(arg)\n else:\n return self.get_next_single_name_classic(arg)\n if self.check_streaming(arg):\n if self.classic is False:\n return self.get_next_multiple_names(arg)\n else:\n return self.get_next_multiple_names_classic(arg)\n else:\n return self.get_next_inner_computation(arg)", "def next_name():\r\n\tif not q.empty():\r\n\t\tname = q.get()\r\n\t\tWAITING_ROOM.remove(name)\r\n\t\tprint(name)\r\n\telse:\r\n\t\tprint(\"Queue is Empty\")", "def _lookup_name(self, name):\n name = name.lower()\n if name not in self.lookup:\n return { name }\n names = functools.reduce(operator.or_, self.lookup[name])\n names.add(name)\n return names", "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n result = buffer_output.content\n else:\n resulting_content_object = self.get_content_from_queue_from_lower()\n if isinstance(resulting_content_object, Interest):\n print(\"[get_next_content] Resulting object is interest:\", resulting_content_object.name, \", instead of content object with name:\", next_name)\n else:\n print(\"[get_next_content] Resulting content object(desired name, resulting name):\", next_name, resulting_content_object.name)\n # Gets stored in buffer if interest doesn't correspond to needed result\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n while is_content_correct is False:\n 
#print(\"[get_next_content] Content wasn't correct\", resulting_content_object.name)\n buffer_output = self.check_buffer(next_name)\n # If desired interest is in buffer return it and break out of while loop\n if buffer_output:\n resulting_content_object = buffer_output\n break\n else:\n # Get content out of queue_from_lower and check if it is correct -> until correct one is returned\n #print(\"[get_next_content] Content wasn't correct and not avaiable in the buffer.\")\n resulting_content_object = self.get_content_from_queue_from_lower()\n #print(\"[get_next_content] Resulting content object:\", resulting_content_object.name, next_name)\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n\n result = resulting_content_object.content\n result = self.stream_part(result, resulting_content_object)\n return result", "def get_next_word(self, user_word, computer_word):\n pass", "def initialize_get_next_single(self, arg: str):\n if self.check_streaming(arg) is False:\n return \"Not for streaming.\"\n self.name_list_single = arg.splitlines()\n self.name_list_single.pop(0)", "def get_next_smaller(self, lookup_string: str) -> Optional[SupportsStr]:\n ...", "def complete_name(self, current: str) -> Iterator[Tuple[str, str]]:\n # check if current is trying to do so\n for name in self.names:\n if len(name) != 1:\n continue\n # current is not like -vvv\n if self._prefix_name(name) + len(current[2:]) * name != current:\n continue\n # check the max\n value: int = len(current) - 1\n if self._kwargs[\"max\"] and value >= self._kwargs[\"max\"]:\n # already max'ed, no further completion\n continue\n\n ncompletes: int = (\n min(self._kwargs[\"max\"] - value, 2)\n if self._kwargs[\"max\"]\n else 2\n )\n for i in range(ncompletes):\n yield current + name * (i + 1), self.desc[0].splitlines()[0]\n break\n\n else:\n yield from super().complete_name(current)", "def get_seq_by_name(self, name: str) -> Optional['Sequencer']:\n for seq in self.Sequencers:\n if seq.Name.lower() == name.lower():\n return seq\n return None", "def acronym_gen(name):\n return tuple(w[0] for w in name.split() if capitalized(w))", "def name_generator(suggested, forbidden_names):\n new_name = suggested.strip()\n while new_name.lower() in [x.lower() for x in forbidden_names]:\n new_name += str(random.choice(string.ascii_lowercase))\n return new_name.strip()", "def starts_with(self, matchstr, **kwargs):\r\n \r\n valid_kwargs = ['num_results', 'case_sensitive']\r\n validator.validate(kwargs.keys(), valid_kwargs)\r\n\r\n final_list = []\r\n case_sensitive = False\r\n num_results = 0\r\n \r\n if 'num_results' in kwargs:\r\n num_results = int(kwargs['num_results'])\r\n \r\n if len(matchstr) == 0:\r\n if num_results:\r\n return self.__sorted_names[0:num_results]\r\n return self.__sorted_names[:]\r\n\r\n if 'case_sensitive' in kwargs:\r\n if kwargs['case_sensitive']:\r\n case_sensitive = True\r\n\r\n tag_names_that_start_with_char = []\r\n \r\n if case_sensitive:\r\n if matchstr[0] not in self.__name_index:\r\n return []\r\n else:\r\n if matchstr[0].lower() not in self.__name_index and matchstr[0].upper() not in self.__name_index:\r\n return []\r\n \r\n if case_sensitive:\r\n idxs = self.__name_index[matchstr[0]]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']]\r\n else:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n \r\n else:\r\n if matchstr[0].lower() in self.__name_index:\r\n idxs = 
self.__name_index[matchstr[0].lower()]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']]\r\n else:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n\r\n if matchstr[0].upper() in self.__name_index:\r\n idxs = self.__name_index[matchstr[0].upper()]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char += [self.__sorted_names[idxs['first']]]\r\n else:\r\n tag_names_that_start_with_char += self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n \r\n if len(matchstr) == 1:\r\n if num_results == 0:\r\n return tag_names_that_start_with_char[:]\r\n else:\r\n return tag_names_that_start_with_char[0:num_results]\r\n \r\n if case_sensitive:\r\n for t in tag_names_that_start_with_char:\r\n if (t.find(matchstr) == 0):\r\n final_list.append(copy(t))\r\n if num_results > 0 and len(final_list) == num_results:\r\n return final_list\r\n else:\r\n for t in tag_names_that_start_with_char:\r\n if (t.lower().find(matchstr.lower()) == 0):\r\n final_list.append(copy(t))\r\n if num_results > 0 and len(final_list) == num_results:\r\n return final_list\r\n\r\n return final_list", "def alpha_case_insensitive():\n# fill it out\n return sorted(STRING_LIST, key=lambda s: s.lower())", "def initialize_get_next_multiple(self, arg: str):\n if self.name_list_multiple is None:\n self.name_list_multiple = arg.splitlines()\n self.name_list_multiple.pop(0)\n self.pos_name_list_multiple = 0", "def decamelize(name):\n pat = re.compile(r'([A-Z]*[^A-Z]*)(.*)')\n bits = []\n while True:\n head, tail = re.match(pat, name).groups()\n bits.append(head)\n if tail:\n name = tail\n else:\n break\n return '_'.join([bit.lower() for bit in bits])", "def caseDisambiguator(self) -> FsmParse:\n parseString = self.parsesWithoutPrefixAndSuffix()\n if len(self.__fsmParses) == 1:\n return self.__fsmParses[0]\n if len(self.__fsmParses) == 0:\n return None\n defaultCase = self.__defaultCaseForParseString(self.__fsmParses[0].getWord().getName(), parseString,\n self.__fsmParses[0].getFinalPos())\n if defaultCase is not None:\n for i in range(len(self.__fsmParses)):\n fsmParse = self.__fsmParses[i]\n if defaultCase in fsmParse.transitionList():\n return fsmParse\n return None", "def byname(self, name):\n\n name = name.lower()\n for i in self.bots:\n if name == i.name:\n return i", "def find_action_by_name(self, name): # because I dont want possible actions to be a dictionary- it fucks stuff up\n return next(x for x in self.possible_actions if name.lower() == x.name.lower())", "def next():", "def next():", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def walk(self, name=None):\n if not name is None:\n name = name.upper()\n return self._walk(name)", "def get_word(wordlist, args): #{{{\n iters = 0\n while iters < 500:\n if args.lowercase == True:\n word = random.choice(wordlist).strip().lower()\n return word\n elif args.lowercase == False:\n word = random.choice(wordlist).strip().lower().capitalize()\n return word\n\n if args.punctuation == False:\n if len(word) < args.max_length and word.isalpha() == True:\n return word\n iters += 1\n elif args.punctuation == True:\n if len(word) < args.max_length:\n return word\n iters += 1 #}}}", "def whentransit(sourcename) :\n return s.whenTransit(sourcename)", "def process_strings(self):\n for string in self.input:\n matcher = 
self.choose_algorithm()\n matcher.find_match(string, self.case_insensitive)\n self.__results = matcher.results\n\n if self.counter:\n self.__results = matcher.counts\n\n if self.__results:\n self.output(string)" ]
[ "0.62946945", "0.6215512", "0.6121404", "0.6006907", "0.56078154", "0.553223", "0.5502037", "0.53517884", "0.53446215", "0.5037705", "0.502239", "0.50016195", "0.4936732", "0.49000195", "0.48903698", "0.48809487", "0.4872933", "0.48728496", "0.48441896", "0.4841367", "0.48091424", "0.48079857", "0.47513497", "0.47428873", "0.47428873", "0.473384", "0.47129333", "0.47079477", "0.4707846", "0.47033814" ]
0.7067897
0
get_next for the classic multiple name case. The name is only put into the queue_to_lower immediately before requesting it.
def get_next_multiple_names_classic(self, arg: str):
        self.initialize_get_next_multiple(arg)
        if self.pos_name_list_multiple < len(self.name_list_multiple)-1:
            current_name = self.name_list_multiple[self.pos_name_list_multiple]
            self.sent_interests[str(current_name)] = True
            self.queue_to_lower.put((self.packetid, Interest(current_name)))
            self.pos_name_list_multiple += 1
            result = self.get_content(current_name)
            return result
        elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:
            self.name_list_multiple = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_multiple_names(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n # Only first call puts two names (current_name and next_name) in the queue_to_lower. Next call only puts next_name\n if self.pos_name_list_multiple == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n self.pos_name_list_multiple += 1\n next_name = self.name_list_multiple[self.pos_name_list_multiple]\n if self.check_end_streaming(next_name) is False:\n self.sent_interests[str(next_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(next_name)))\n result = self.get_content(current_name)\n return result\n elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:\n self.name_list_multiple = None\n else:\n return None", "def get_next_single_name(self, arg: str):\n current_name = arg\n if self.get_next_part_counter == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n if self.check_end_streaming(result) is False:\n next_name = self.get_following_name(current_name)\n self.sent_interests[str(next_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(next_name)))\n return result", "def next(self):\n try:\n e = self.pool[self.i]\n self.i = self.i + 1\n except IndexError:\n if not hasattr(self, 'j'):\n self.j = 0\n self.pool.extend([x.lower() for x in self.pool])\n try:\n e = self.pool[self.i % len(self.pool)] + self.pool[self.j]\n self.j = self.j + 1\n except IndexError:\n self.i += 1\n self.j = 0\n return self.next()\n return '_{}'.format(e)", "def _next_until_name_match(generator, ref_name):\n curr_name = ''\n while not curr_name == ref_name:\n curr_aln = generator.next()\n curr_name = curr_aln.read.name.split()[0]\n return curr_aln", "def get_next(self, arg: str):\n if self.check_for_singlename(arg):\n if self.classic is False:\n return self.get_next_single_name(arg)\n else:\n return self.get_next_single_name_classic(arg)\n if self.check_streaming(arg):\n if self.classic is False:\n return self.get_next_multiple_names(arg)\n else:\n return self.get_next_multiple_names_classic(arg)\n else:\n return self.get_next_inner_computation(arg)", "def get_next_single_name_classic(self, arg: str):\n current_name = arg\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n return result", "def next_name():\r\n\tif not q.empty():\r\n\t\tname = q.get()\r\n\t\tWAITING_ROOM.remove(name)\r\n\t\tprint(name)\r\n\telse:\r\n\t\tprint(\"Queue is Empty\")", "def next():", "def next():", "def initialize_get_next_multiple(self, arg: str):\n if self.name_list_multiple is None:\n self.name_list_multiple = arg.splitlines()\n self.name_list_multiple.pop(0)\n self.pos_name_list_multiple = 0", "def complete_name(self, current: str) -> Iterator[Tuple[str, str]]:\n # check if current is trying to do so\n for name in self.names:\n if len(name) != 1:\n continue\n # current is not like -vvv\n if self._prefix_name(name) + len(current[2:]) * name != current:\n continue\n # check the max\n value: int = len(current) - 1\n if self._kwargs[\"max\"] and value >= self._kwargs[\"max\"]:\n # already max'ed, no further completion\n continue\n\n ncompletes: int = (\n min(self._kwargs[\"max\"] - value, 2)\n 
if self._kwargs[\"max\"]\n else 2\n )\n for i in range(ncompletes):\n yield current + name * (i + 1), self.desc[0].splitlines()[0]\n break\n\n else:\n yield from super().complete_name(current)", "def initialize_get_next_single(self, arg: str):\n if self.check_streaming(arg) is False:\n return \"Not for streaming.\"\n self.name_list_single = arg.splitlines()\n self.name_list_single.pop(0)", "def get_next_word(self, user_word, computer_word):\n pass", "def __next__(self):\n\t\treturn next()", "def _get_next_name(self, flow):\n parent_ref = '%s ASSEMBLY' % flow.external_ref\n try:\n self.fg.get(parent_ref)\n except EntityNotFound:\n return parent_ref\n _ac = 0\n while True:\n parent_ref = '%s ASSEMBLY alt %d' % (flow.external_ref, _ac)\n try:\n self.fg.get(parent_ref)\n _ac += 1\n except EntityNotFound:\n return parent_ref", "def next(self) -> str:\n raise NotImplementedError", "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n result = buffer_output.content\n else:\n resulting_content_object = self.get_content_from_queue_from_lower()\n if isinstance(resulting_content_object, Interest):\n print(\"[get_next_content] Resulting object is interest:\", resulting_content_object.name, \", instead of content object with name:\", next_name)\n else:\n print(\"[get_next_content] Resulting content object(desired name, resulting name):\", next_name, resulting_content_object.name)\n # Gets stored in buffer if interest doesn't correspond to needed result\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n while is_content_correct is False:\n #print(\"[get_next_content] Content wasn't correct\", resulting_content_object.name)\n buffer_output = self.check_buffer(next_name)\n # If desired interest is in buffer return it and break out of while loop\n if buffer_output:\n resulting_content_object = buffer_output\n break\n else:\n # Get content out of queue_from_lower and check if it is correct -> until correct one is returned\n #print(\"[get_next_content] Content wasn't correct and not avaiable in the buffer.\")\n resulting_content_object = self.get_content_from_queue_from_lower()\n #print(\"[get_next_content] Resulting content object:\", resulting_content_object.name, next_name)\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n\n result = resulting_content_object.content\n result = self.stream_part(result, resulting_content_object)\n return result", "def __next__(self):\n return self.next()", "def __next__(self):\n return self.next()", "def __next__(self):\n return self.next()", "def _lookup_name(self, name):\n name = name.lower()\n if name not in self.lookup:\n return { name }\n names = functools.reduce(operator.or_, self.lookup[name])\n names.add(name)\n return names", "def acronym_gen(name):\n return tuple(w[0] for w in name.split() if capitalized(w))", "def __call__(self):\r\n return self.next()", "def next(self):\r\n pass", "def get_next_smaller(self, lookup_string: str) -> Optional[SupportsStr]:\n ...", "def name_generator(suggested, forbidden_names):\n new_name = suggested.strip()\n while new_name.lower() in [x.lower() for x in forbidden_names]:\n new_name += str(random.choice(string.ascii_lowercase))\n return new_name.strip()", "def next(self):\n pass", "def next(self):\n pass", "def next(self):\n pass", 
"def next(self):\n pass" ]
[ "0.73124295", "0.6605842", "0.64387214", "0.63707966", "0.6359002", "0.633987", "0.59783775", "0.5580597", "0.5580597", "0.5559593", "0.5515206", "0.5506333", "0.54304075", "0.53769016", "0.5359713", "0.5350913", "0.53147024", "0.5285153", "0.5285153", "0.5285153", "0.521028", "0.51202285", "0.5105742", "0.5060467", "0.5050828", "0.5047106", "0.50388724", "0.50388724", "0.50388724", "0.50388724" ]
0.6632271
1
Transform the inner name to the correct syntax so it can be parsed. Replaces the first and last '=' with a '"' and the '#' with an '_'.
def transform_inner(self, arg: str):
        first = arg.find("=")
        last = len(arg) - arg[::-1].find("=") - 1
        hash = arg.find("#")
        arg = list(arg)
        arg[first] = '"'
        arg[last] = '"'
        arg[hash] = "_"
        arg = "".join(arg)
        return arg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_name():\n def _extract_name(quoted_name):\n return e.String(quoted_name.subexpression.name)\n yield (\"(λ &[name] . str)\", _extract_name)", "def make_python_name(self, name):\n # FIXME see cindex.SpellingCache\n for k, v in [('<', '_'), ('>', '_'), ('::', '__'), (',', ''), (' ', ''),\n (\"$\", \"DOLLAR\"), (\".\", \"DOT\"), (\"@\", \"_\"), (\":\", \"_\"),\n ('-', '_')]:\n if k in name: # template\n name = name.replace(k, v)\n # FIXME: test case ? I want this func to be neutral on C valid\n # names.\n if name.startswith(\"__\"):\n return \"_X\" + name\n if len(name) == 0:\n pass\n elif name[0] in \"01234567879\":\n return \"_\" + name\n return name", "def test_expanded_name( self ):\n\t\tself.doBasicTest(\n\t\t\t'''s := something +\n\t\t\t>something< := r\n\t\t\tr := [ab]\n\t\t\tv := [c]\n\t\t\t''',\n\t\t\t's',\n\t\t\t'abammmab',\n\t\t\t(1,[\n\t\t\t\t('r',0,1, NullResult),\n\t\t\t\t('r',1,2, NullResult),\n\t\t\t\t('r',2,3, NullResult),\n\t\t\t],3)\n\t\t)", "def clean_name(x: str) -> str:\n x = x.replace(\", var.\", \" var.\")\n if \"{\" in x:\n x = x[:x.find(\"{\")-1]\n return x", "def fix_varname(s):\n t = str(s).translate(TRANS_VARS)\n if t[0] not in VALID_CHARS1:\n t = '_%s' % t\n while t.endswith('_'):\n t = t[:-1]\n return t", "def adjust_name_for_printing(name):\n if name is not None:\n name2 = name\n name = name.replace(\" \", \"_\").replace(\".\", \"_\").replace(\"-\", \"_m_\")\n name = name.replace(\"+\", \"_p_\").replace(\"!\", \"_I_\")\n name = name.replace(\"**\", \"_xx_\").replace(\"*\", \"_x_\")\n name = name.replace(\"/\", \"_l_\").replace(\"@\", '_at_')\n name = name.replace(\"(\", \"_of_\").replace(\")\", \"\")\n if re.match(r'^[a-zA-Z_][a-zA-Z0-9-_]*$', name) is None:\n raise NameError(\"name {} converted to {} cannot be further converted to valid python variable name!\".format(name2, name))\n return name\n return ''", "def encode_name_components(self, name: Name):\n first_quot = False\n new_component = \"\"\n for component in name.components:\n if '\"' in str(component):\n if first_quot is True:\n new_component += str(component)\n first_quot = False\n else:\n first_quot = True\n if first_quot:\n new_component += str(component)\n new_component = new_component.replace(\"'b'\", \"/\").replace(\"b'\", \"\")[:-1]\n if \"=\" not in new_component and '\"' in new_component:\n new_component = new_component.replace('\"', \"\")\n start_of_component = 0\n for i in range(0, len(name.components)):\n if \"_(\" in str(name.components[i]):\n start_of_component = i\n comp_list_len = len(name.components)\n for i in range(start_of_component, comp_list_len - 2):\n name.components.pop(len(name.components) - 2)\n name.components[-2] = new_component.encode(\"ascii\")\n return name", "def _expanded_id(name: str, sep: str = '_') -> str:\n return sep.join([el.lower()\n for el in re.split(r'([A-Z]+[^A-Z]*)', name)\n if el])", "def normalize_pipeline_name(name=''):\n normalized_name = name\n for bad in '\\\\/?%#':\n normalized_name = normalized_name.replace(bad, '_')\n return normalized_name", "def _process_name(name):\n\n # Unescape HTML entities\n name = unescape(name)\n\n # Remove bracketed stuff on the end\n name = NG_RE.sub('', name).strip() # Nomenclature groups\n name = END_RE.sub('', name).strip(', ') # Words\n name = RATIO_RE.sub('', name).strip(', ') # Ratios\n\n # Remove stuff off start\n name = START_RE.sub('', name).strip()\n\n # Remove balanced start and end brackets if none in between\n name = BRACKET_RE.sub('\\g<1>', name)\n\n # Un-invert CAS style names\n comps = 
name.split(', ')\n if len(comps) == 2:\n if comps[1].endswith('-'):\n name = comps[0]\n name = '%s%s' % (comps[1], name)\n elif len(comps) > 2:\n name = comps[0]\n for i in range(1, len(comps)):\n if comps[i].endswith('-'):\n name = '%s%s' % (comps[i], name)\n else:\n name = '%s %s' % (name, comps[i])\n return name", "def _var_name_sub(self, sprintf, quote=False):\n q = ''\n if quote:\n q = \"'\"\n name_list = map(lambda x: q + self.cdict[x][0] + q, sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(name_list)", "def _convert_name(self, name):\n if not self.re_name.match(name):\n org_name = name\n name = self.re_white.sub('_', name)\n name = self.re_alpha.sub('_', name)\n if not self.re_name.match(name):\n name = 'x_' + name2\n self.warn('Converting name <' + org_name + '> to <' + name + '>.')\n return name", "def _transformed_name(key: Text) -> Text:\n return key + \"_xf\"", "def sanitize_name(self, value):\n if self.sanitize_names:\n new_value = re.sub('[^a-zA-Z0-9_]', '_', value[:127])\n else:\n new_value = value\n return new_value", "def regular_edge_name(name: str) -> str:\n regular = \"\"\n for char in name:\n if char.isalpha() or char.isdigit():\n regular = f\"{regular}{char}\"\n else:\n regular = f\"{regular}_\"\n if not regular[0].isalpha():\n regular = f\"auto_legalized__{regular}\"\n return regular", "def _normalize_element_name(some_str, element_aliases_dict=None):\n some_str = _lowercase_despace_depunctuate(some_str)\n if element_aliases_dict:\n for key in element_aliases_dict.keys():\n if key == some_str:\n some_str = element_aliases_dict[key]\n return some_str", "def underToMixed(name):\n if name.endswith('_id'):\n return underToMixed(name[:-3] + \"ID\")\n return _underToMixedRE.sub(lambda m: m.group(0)[1].upper(),\n name)", "def _to_jsonc_name(member_name):\n\n characters = []\n uppercase_next = False\n for character in member_name:\n if character == '_':\n uppercase_next = True\n elif uppercase_next:\n characters.append(character.upper())\n uppercase_next = False\n else:\n characters.append(character)\n return ''.join(characters)", "def Escape(name):\n return re.sub(r'[^\\w#-]', '_', name)", "def mangle(raw_name: str) -> str:\n\n # Handle names with '.'.\n if '.' 
in raw_name:\n res = []\n for name in raw_name.split('.'):\n if invalid_identifier.search(name):\n res.append(mangle(name))\n else:\n res.append(name)\n return '.'.join(res)\n\n name = raw_name.lstrip('_')\n underscores = '_' * (len(raw_name) - len(name))\n return underscores + 'hyx_' + _mangle_re.sub(_match, name)", "def standard_name_remapper(orig_name):\n # Remove any trailing parentheses.\n # TODO(tjann): to check if this is safe.\n paren_start = orig_name.find(\"(\")\n if paren_start != -1:\n orig_name = orig_name[:paren_start]\n\n # Removes separating words.\n orig_name = orig_name.replace(\",\", \" \")\n orig_name = orig_name.replace(\"-\", \" \")\n orig_name = orig_name.replace(\"and \", \"\")\n return \"\".join([word.capitalize() for word in orig_name.split()])", "def TransformNames(self) -> _n_2_t_0[str]:", "def _sanitize_to_identifer(name):\n n = name.strip()\n n = re.sub('/', ' ', n)\n n = re.sub('-', ' ', n)\n n = re.sub(' +', '_', n)\n n = re.sub('[\\W]+', '', n)\n return n", "def _clean_name(cls, string):\n rep = {\n \"VPort\": \"Vport\",\n \"IPID\": \"IpID\"\n }\n\n rep = dict((re.escape(k), v) for k, v in rep.iteritems())\n pattern = re.compile(\"|\".join(rep.keys()))\n return pattern.sub(lambda m: rep[re.escape(m.group(0))], string)", "def MAKE_NAME(name):\n name = name.replace('$', 'DOLLAR')\n name = name.replace('.', 'DOT')\n if name.startswith('__'):\n return '_X' + name\n elif name[0] in '01234567879':\n return '_' + name\n return name", "def mangle_name(name):\n import re\n try:\n return re.sub('_+','_',re.sub('[^\\w_]','_',name).lower()).rstrip('_')\n except TypeError:\n raise TypeError(\n 'Trying to mangle name with invalid type of: ' + str(type(name)))", "def _unescape_identifier(self, value):\n\n return value.replace('\"\"', '\"')", "def cvarname(name):\n\treturn re.sub(r'[^\\w\\s]', '_', name)", "def alias(name):\n name = name.strip().replace(\" \", \"_\").replace(\"-\", \"_\")\n while \"__\" in name:\n name = name.replace(\"__\", \"_\")\n return \"\".join(x for x in name if x.isalnum() or x == \"_\").lower()", "def cast_name(key):\n special_symbols = set('{}{}'.format(punctuation, ' '))\n special_symbols.remove('_')\n new_key = ['_' if x in special_symbols else x for x in key]\n casted_key = ''.join(new_key)\n return casted_key" ]
[ "0.6225861", "0.6090764", "0.60897714", "0.59808266", "0.59418184", "0.5897039", "0.5874254", "0.5845735", "0.582517", "0.5824621", "0.58186024", "0.5805057", "0.57880336", "0.57758003", "0.57509226", "0.57499963", "0.57029814", "0.57001", "0.5684109", "0.56642014", "0.56628066", "0.56025743", "0.5588423", "0.557919", "0.5574757", "0.5562325", "0.55281067", "0.5518548", "0.55071247", "0.5498977" ]
0.7548356
0
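As an illustration of the transformation described in the row above (not part of the dataset; the sample string below is a made-up inner-computation name, and the standalone helper simply mirrors the method's logic outside the class):

def transform_inner_sketch(arg: str) -> str:
    # Replace the first and the last '=' with '"' and the '#' with '_'.
    first = arg.find("=")
    last = len(arg) - arg[::-1].find("=") - 1
    hash_pos = arg.find("#")
    chars = list(arg)
    chars[first] = '"'
    chars[last] = '"'
    chars[hash_pos] = "_"
    return "".join(chars)

print(transform_inner_sketch('/nfn/(=call 2 /lib/func /data#obj=)'))
# prints: /nfn/("call 2 /lib/func /data_obj")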
Encodes the name components so they can be handled by the lower layers.
def encode_name_components(self, name: Name):
        first_quot = False
        new_component = ""
        for component in name.components:
            if '"' in str(component):
                if first_quot is True:
                    new_component += str(component)
                    first_quot = False
                else:
                    first_quot = True
            if first_quot:
                new_component += str(component)
        new_component = new_component.replace("'b'", "/").replace("b'", "")[:-1]
        if "=" not in new_component and '"' in new_component:
            new_component = new_component.replace('"', "")
        start_of_component = 0
        for i in range(0, len(name.components)):
            if "_(" in str(name.components[i]):
                start_of_component = i
        comp_list_len = len(name.components)
        for i in range(start_of_component, comp_list_len - 2):
            name.components.pop(len(name.components) - 2)
        name.components[-2] = new_component.encode("ascii")
        return name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode_name(param):\n sname = param\n # replace all kind of unwanted chars in a python dictname.\n sname = sname.strip()\n for ch in ['/', ' + ', ' ', '#', '&', '-', ',', '+', ]:\n if ch in sname:\n sname = sname.replace(ch, \"_\")\n\n # replace brackets\n for ch in ['(', ')']:\n if ch in sname:\n sname = sname.replace(ch, \"\")\n\n # replace the numbers 2 and 10 with the text representation\n if '10' in sname:\n sname = sname.replace('10', 'TEN')\n\n if '2' in sname:\n sname = sname.replace('2', 'TWO')\n return sname.upper()", "def get_name(self, name):\n name.value = self._get_name(name.value.encode())", "def _encode_name(self, name):\n uuid_str = name.replace(\"-\", \"\")\n vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)\n vol_encoded = base64.urlsafe_b64encode(vol_uuid.bytes)\n if six.PY3:\n vol_encoded = vol_encoded.decode('ascii')\n return vol_encoded[:19]", "def encode_name(self,name):\n #print(\"Hackable label decoder in place\")\n if not isinstance(name,dnslib.dns.DNSLabel):\n name = dnslib.dns.DNSLabel(name)\n if len(name) > 253:\n raise dnslib.dns.DNSLabelError(\"Domain label too long: %r\" % name)\n name = list(name.label)\n while name:\n if tuple(name) in self.names:\n # Cached - set pointer\n pointer = self.names[tuple(name)]\n pointer = set_bits(pointer,3,14,2)\n self.pack(\"!H\",pointer)\n return\n else:\n self.names[tuple(name)] = self.offset\n element = name.pop(0)\n if len(element) > MAX_LABEL_LEN:\n raise dnslib.dns.DNSLabelError(\"Label component too long: %r\" % element)\n self.pack(\"!B\",len(element))\n self.append(element)\n self.append(b'\\x00')", "def TransformNames(self) -> _n_2_t_0[str]:", "def encoded_name(self):\n return slugify(str(self).lower())", "def _convert_name(self, name):\n if not self.re_name.match(name):\n org_name = name\n name = self.re_white.sub('_', name)\n name = self.re_alpha.sub('_', name)\n if not self.re_name.match(name):\n name = 'x_' + name2\n self.warn('Converting name <' + org_name + '> to <' + name + '>.')\n return name", "def normalize_name(self):\n name = self.ua_data.get('name', '')\n if not name:\n return\n self.ua_data['name'] = self.normalized_name.get(name.lower(), name)", "def name(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def name(self):\n if not self.has_name():\n return \"\"\n name_length = self.unpack_word(0x2)\n unpacked_string = self.unpack_string(0x14, name_length)\n if self.has_ascii_name():\n return unpacked_string.decode(\"windows-1252\")\n return unpacked_string.decode(\"utf-16le\")", "def get_name():", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def _build_name(self):\n self.ctrl_name = NameUtils.get_unique_name(self.asset,\n self.side,\n self.part,\n \"ctrl\")", "def _make_name(self, name=None):\n\n if name:\n new_name = name.split(\"/\")[-1].split(\".png\")[0]\n if new_name.startswith((\"AWS-\", \"Amazon-\")):\n new_name = new_name.split(\"-\", 1)[1]\n # Replace non-alphanumeric with underscores (1:1 mapping)\n new_name = re.sub(r'\\W+', '_', new_name)\n return new_name", "def get_name() -> str:", "def canonical_name(self, name):\n raise NotImplementedError", "def Name(self) -> str:", "def Name(self) -> str:", "def 
Name(self) -> str:", "def Name(self) -> str:", "def generate_name(self, name):\n return \"{}/{}.{}\".format(self.name, self._layer_counter, name)", "def __str__(self):\n return self.get('name', u'').encode('utf_8', 'replace')", "def elenaNamesToOlafNames(name):\n layer, region, num, position = locateTTHalfModule(name)\n reg = {'A': 'R3', 'B': 'R2', 'C':'R1'}\n return layer+'Layer'+reg[region]+'Module'+str(num+1)+position.capitalize()" ]
[ "0.6692533", "0.6645358", "0.6490253", "0.64649105", "0.63855964", "0.62322897", "0.6221056", "0.6217053", "0.61058635", "0.6068496", "0.6068496", "0.6068496", "0.6068496", "0.6068496", "0.6014892", "0.60105383", "0.6001321", "0.6001321", "0.6001321", "0.59878004", "0.5968081", "0.5963674", "0.59552354", "0.5952883", "0.5952883", "0.5952883", "0.5952883", "0.5946698", "0.59303725", "0.5928493" ]
0.75535154
0
Handles the inner computation part of get_next. Transforms and encodes the name, puts it into the queue_to_lower, and calls get_content() to retrieve the result.
def get_next_inner_computation(self, arg: str): print("[get_next - inner computation] starts here.") # Start of transformation and component encoding name_str = self.transform_inner(arg) # print("[get_next - inner computation] after transform:", arg) name_after_transform = Name(name_str) name = self.encode_name_components(name_after_transform) # End of transformation and component encoding print("[get_next - inner computation] after encoding:", name) self.queue_to_lower.put((self.packetid, Interest(name))) inner_result = self.get_content(name) print("[get_next - inner computation] ends here with result:", inner_result) return inner_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n result = buffer_output.content\n else:\n resulting_content_object = self.get_content_from_queue_from_lower()\n if isinstance(resulting_content_object, Interest):\n print(\"[get_next_content] Resulting object is interest:\", resulting_content_object.name, \", instead of content object with name:\", next_name)\n else:\n print(\"[get_next_content] Resulting content object(desired name, resulting name):\", next_name, resulting_content_object.name)\n # Gets stored in buffer if interest doesn't correspond to needed result\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n while is_content_correct is False:\n #print(\"[get_next_content] Content wasn't correct\", resulting_content_object.name)\n buffer_output = self.check_buffer(next_name)\n # If desired interest is in buffer return it and break out of while loop\n if buffer_output:\n resulting_content_object = buffer_output\n break\n else:\n # Get content out of queue_from_lower and check if it is correct -> until correct one is returned\n #print(\"[get_next_content] Content wasn't correct and not avaiable in the buffer.\")\n resulting_content_object = self.get_content_from_queue_from_lower()\n #print(\"[get_next_content] Resulting content object:\", resulting_content_object.name, next_name)\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n\n result = resulting_content_object.content\n result = self.stream_part(result, resulting_content_object)\n return result", "def get_next_single_name(self, arg: str):\n current_name = arg\n if self.get_next_part_counter == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n if self.check_end_streaming(result) is False:\n next_name = self.get_following_name(current_name)\n self.sent_interests[str(next_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(next_name)))\n return result", "def get_next_single_name_classic(self, arg: str):\n current_name = arg\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n return result", "def get_next_multiple_names(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n # Only first call puts two names (current_name and next_name) in the queue_to_lower. 
Next call only puts next_name\n if self.pos_name_list_multiple == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n self.pos_name_list_multiple += 1\n next_name = self.name_list_multiple[self.pos_name_list_multiple]\n if self.check_end_streaming(next_name) is False:\n self.sent_interests[str(next_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(next_name)))\n result = self.get_content(current_name)\n return result\n elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:\n self.name_list_multiple = None\n else:\n return None", "def get_next(self, arg: str):\n if self.check_for_singlename(arg):\n if self.classic is False:\n return self.get_next_single_name(arg)\n else:\n return self.get_next_single_name_classic(arg)\n if self.check_streaming(arg):\n if self.classic is False:\n return self.get_next_multiple_names(arg)\n else:\n return self.get_next_multiple_names_classic(arg)\n else:\n return self.get_next_inner_computation(arg)", "def get_next_multiple_names_classic(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n self.pos_name_list_multiple += 1\n result = self.get_content(current_name)\n return result\n elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:\n self.name_list_multiple = None", "async def __anext__(self):\n if self._next_query is None:\n raise StopAsyncIteration()\n page = await self._executor(self._next_query)\n self._next_query = page.next_query\n return page.content", "def get_content_from_queue_from_lower(self):\n queue_from_lower_entry = self.queue_from_lower.get()\n if isinstance(queue_from_lower_entry, list):\n if isinstance(queue_from_lower_entry[1], Nack):\n print(\"NACK:\", queue_from_lower_entry[1].interest, queue_from_lower_entry[1].reason)\n return queue_from_lower_entry[1]\n else:\n if isinstance(queue_from_lower_entry, Nack):\n print(\"NACK:\", queue_from_lower_entry.interest, queue_from_lower_entry.reason)\n return queue_from_lower_entry", "def _qprocess(self):\n while 1:\n t, args, kw = self.inq.get()\n ret = self.__call__(*args, **kw)\n self.outq.put((t, ret))", "def stream_part(self, result: str, resulting_content_object: Content):\n if self.check_for_metatitle(result):\n if str(resulting_content_object.name) not in self.get_next_buffer:\n self.get_next_buffer[str(resulting_content_object.name)] = resulting_content_object\n print(\"[Streaming] Part\", self.get_next_part_counter, \"starts here.\")\n next_name = str(resulting_content_object.name) + \"//streaming/p\" + str(self.get_next_part_counter)\n if self.classic is False:\n result = self.get_next_single_name(next_name)\n else:\n result = self.get_next_single_name_classic(next_name)\n print(\"[Streaming] Part\", self.get_next_part_counter, \"ends here with result:\", result)\n self.get_next_part_counter += 1\n if self.check_end_streaming(result):\n return None\n return result", "def next(self):\n try:\n e = self.pool[self.i]\n self.i = self.i + 1\n except IndexError:\n if not hasattr(self, 'j'):\n self.j = 0\n self.pool.extend([x.lower() for x in self.pool])\n try:\n e = self.pool[self.i % len(self.pool)] + self.pool[self.j]\n self.j = self.j + 1\n except IndexError:\n self.i += 1\n self.j = 0\n return self.next()\n return 
'_{}'.format(e)", "async def proccess_message(self, *args):\n\n await self.used_middlewares[0].compute(*args)", "def apply(self, external_callable, *args, **kwargs):\n self.work_request_queue.put((external_callable, args, kwargs))\n return self.result_queue.get()", "def process(self):\n\n return self._load_next_file()", "def run(self):\r\n while True:\r\n try:\r\n processor, iprot, oprot, otrans, callback = self.queue.get()\r\n if processor is None:\r\n break\r\n processor.process(iprot, oprot)\r\n callback(True, otrans.getvalue())\r\n except Exception:\r\n logging.exception(\"Exception while processing request\")\r\n callback(False, '')", "def process(self):\n try:\n if not self._successor:\n return self.loading_strategy()\n else:\n return self._successor.process_next(self.loading_strategy())\n except Exception as e:\n Oprint.err(e, 'lmdo')", "def _execute(self, name, worker):\n with self._rotation_lock:\n if len(self._active_workers) < self.__max_workers:\n self._total_workers += 1\n self._active_workers[name] = worker\n self._active_workers[name].start()\n else:\n if len(self._worker_queue) < self.__max_q_size:\n self._total_workers += 1\n self._worker_queue.append(worker)\n else:\n self._rotate_workers(worker)", "def process(self, query):\n response_text = self.fetch(query)\n content = self.get_content(response_text)\n return content", "def crawl_queue(q, result_set):\n _log = logging.getLogger(crawl_queue.__name__)\n while not q.empty():\n worker = q.get() #get an itme from the queue\n\n try:\n req = requests.get(worker[1], verify = False, timeout = (30,30), headers = create_fakeheader(ua,browser))\n cont = req.content\n result_set[worker[0]] = cont\n except:\n _log.warning(f' couldnt find a request for index {worker[0]}')\n result_set[worker[0]] = ''\n if q.qsize() % 100 == 0:\n _log.info(f'things left to process {q.qsize()}')\n q.task_done()\n return True", "def process(self, queue: SuperQueue):\n\n def apply_emphasis(queue_name):\n if self.emphasis[queue_name].is_checked:\n self.update()\n pause(self.emphasis[queue_name].pause_time)\n\n while queue:\n queue.re_orient()\n next_cell = queue[-1]\n self.field[next_cell].bg = \"active_cell\"\n\n if queue == self.clear_queue:\n apply_emphasis(\"clear_queue\")\n self.uncover(next_cell)\n\n elif queue == self.auto_queue:\n apply_emphasis(\"auto_queue\")\n queue.remove(next_cell)\n self.solve_block(next_cell)\n\n elif queue == self.hyper_queue:\n apply_emphasis(\"hyper_queue\")\n queue.remove(next_cell)\n self.solve_block(next_cell, first_round=False)\n\n queue.is_busy = False", "async def process_letter(letter: str, letter_counter: Counter) -> None:\r\n print(f'Started processing letter {letter}...')\r\n url = base_page_url + '&from=' + letter\r\n while url is not None:\r\n url = await process_page(url, letter, letter_counter)\r\n print(f'Done processing letter {letter}.')", "def _get_next_name(self, flow):\n parent_ref = '%s ASSEMBLY' % flow.external_ref\n try:\n self.fg.get(parent_ref)\n except EntityNotFound:\n return parent_ref\n _ac = 0\n while True:\n parent_ref = '%s ASSEMBLY alt %d' % (flow.external_ref, _ac)\n try:\n self.fg.get(parent_ref)\n _ac += 1\n except EntityNotFound:\n return parent_ref", "def process_job(q):\n del log_msg[:]\n logger.info('Processing Job %s', q.id)\n\n datatype = q.datatype\n input_dir = q.input_dir\n output_dir = q.output_dir\n processor = q.processor\n if datatype.lower() == 'laz':\n block_name = proper_block_name(input_dir)\n elif datatype.lower() == 'ortho':\n block_name = 
proper_block_name_ortho(input_dir)\n if datatype.lower() == 'laz' or datatype.lower() == 'ortho':\n logger.info('Verifying las tiles in directory...')\n log_msg.append('Verifying las tiles in directory...\\n')\n has_error, remarks = verify_dir(input_dir, datatype.lower())\n\n if has_error:\n assign_status(q, error=True)\n log_msg.append('Error in verify_las/verify_raster!\\n {0} \\n'.format(remarks))\n else:\n logger.info('Renaming tiles...')\n\n logger.info('BLOCK NAME %s', block_name)\n log_msg.append('BLOCK NAME {0}\\n'.format(block_name))\n\n in_coverage, block_uid = find_in_coverage(block_name)\n\n #: Check first if folder or `block_name` is in `Cephgeo_LidarCoverageBlock`\n #: If not found, `output_dir` is not created and data is not processed\n if in_coverage:\n logger.info('Found in Lidar Coverage model %s %s',\n block_name, block_uid)\n log_msg.append('Found in Lidar Coverage model {0} {1}\\n'.format(\n block_name, block_uid))\n\n rename_tiles(input_dir, output_dir, processor,\n block_name, block_uid, q)\n logger.info('Status %s Status Timestamp %s',\n q.status, q.status_timestamp)\n log_msg.append('Status {0} Status Timestamp {1}\\n'.format(\n q.status, q.status_timestamp))\n\n else:\n has_error = True\n logger.error('ERROR NOT FOUND IN MODEL %s %s', block_name, block_uid)\n log_msg.append('ERROR NOT FOUND IN MODEL {0} {1}\\n'.format(block_name, block_uid))\n assign_status(q, error=True)\n # for DEM\n else:\n logger.info('Handler not implemented for type: %s',\n str(q.datatype))\n log_msg.append('Handler not implemented for type: {0}\\n'.format(\n str(q.datatype)))\n assign_status(q, error=True)\n\n paragraph = ''\n for par in log_msg:\n paragraph = paragraph + par\n\n #: Save log messages from renaming tiles to `Automation_AutomationJob.log`\n with PSQL_DB.atomic() as txn:\n new_q = (Automation_AutomationJob\n .update(data_processing_log=paragraph, status_timestamp=datetime.now())\n .where(Automation_AutomationJob.id == q.id))\n new_q.execute()", "def _worker(\n self, work_queue: Queue, done_queue: Queue, build_results: bool = True\n ):\n for chunk in iter(work_queue.get, \"STOP\"):\n interactions = self._play_matches(chunk, build_results)\n done_queue.put(interactions)\n done_queue.put(\"STOP\")\n return True", "def process(self):", "def process(self):", "def process(self):", "def rec_from_queue( self, ):\r\n try:\r\n action, function, function_args = self.queue_fr_helper.get_nowait()\r\n except queue.Empty:\r\n action = \"\"\r\n function = None\r\n function_args = None\r\n\r\n return ( action, function, function_args )", "def get_external_result(self):\n while True:\n if len(self.result_queue) > 0:\n result = copy.deepcopy(self.result_queue[0])\n del self.result_queue[0]\n return result", "def __next__(self) -> type:\n\n return self._next_worker()" ]
[ "0.6603064", "0.61165065", "0.5809198", "0.5797553", "0.55958676", "0.51660335", "0.50432974", "0.50409937", "0.49529454", "0.48004133", "0.47770488", "0.47690475", "0.47548085", "0.47340557", "0.47298717", "0.470084", "0.46876696", "0.46718737", "0.4669386", "0.46650812", "0.4656006", "0.46518132", "0.46469587", "0.46371567", "0.46291822", "0.46291822", "0.46291822", "0.46069974", "0.4595465", "0.45930213" ]
0.7423148
0
The get_next function, which is used for the named functions. This function handles getting the desired content according to its case. Two cases are possible: the multi-name case for getting the next part if the length of the stream is given, and the handling of an inner computation where the name has to be changed to the correct format before getting the content.
def get_next(self, arg: str): if self.check_for_singlename(arg): if self.classic is False: return self.get_next_single_name(arg) else: return self.get_next_single_name_classic(arg) if self.check_streaming(arg): if self.classic is False: return self.get_next_multiple_names(arg) else: return self.get_next_multiple_names_classic(arg) else: return self.get_next_inner_computation(arg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_single_name(self, arg: str):\n current_name = arg\n if self.get_next_part_counter == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n if self.check_end_streaming(result) is False:\n next_name = self.get_following_name(current_name)\n self.sent_interests[str(next_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(next_name)))\n return result", "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n result = buffer_output.content\n else:\n resulting_content_object = self.get_content_from_queue_from_lower()\n if isinstance(resulting_content_object, Interest):\n print(\"[get_next_content] Resulting object is interest:\", resulting_content_object.name, \", instead of content object with name:\", next_name)\n else:\n print(\"[get_next_content] Resulting content object(desired name, resulting name):\", next_name, resulting_content_object.name)\n # Gets stored in buffer if interest doesn't correspond to needed result\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n while is_content_correct is False:\n #print(\"[get_next_content] Content wasn't correct\", resulting_content_object.name)\n buffer_output = self.check_buffer(next_name)\n # If desired interest is in buffer return it and break out of while loop\n if buffer_output:\n resulting_content_object = buffer_output\n break\n else:\n # Get content out of queue_from_lower and check if it is correct -> until correct one is returned\n #print(\"[get_next_content] Content wasn't correct and not avaiable in the buffer.\")\n resulting_content_object = self.get_content_from_queue_from_lower()\n #print(\"[get_next_content] Resulting content object:\", resulting_content_object.name, next_name)\n is_content_correct = self.check_for_correct_content(resulting_content_object, next_name)\n\n result = resulting_content_object.content\n result = self.stream_part(result, resulting_content_object)\n return result", "def get_next_multiple_names(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n # Only first call puts two names (current_name and next_name) in the queue_to_lower. 
Next call only puts next_name\n if self.pos_name_list_multiple == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n self.pos_name_list_multiple += 1\n next_name = self.name_list_multiple[self.pos_name_list_multiple]\n if self.check_end_streaming(next_name) is False:\n self.sent_interests[str(next_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(next_name)))\n result = self.get_content(current_name)\n return result\n elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:\n self.name_list_multiple = None\n else:\n return None", "def get_next_inner_computation(self, arg: str):\n print(\"[get_next - inner computation] starts here.\")\n # Start of transformation and component encoding\n name_str = self.transform_inner(arg)\n # print(\"[get_next - inner computation] after transform:\", arg)\n name_after_transform = Name(name_str)\n name = self.encode_name_components(name_after_transform)\n # End of transformation and component encoding\n print(\"[get_next - inner computation] after encoding:\", name)\n self.queue_to_lower.put((self.packetid, Interest(name)))\n inner_result = self.get_content(name)\n print(\"[get_next - inner computation] ends here with result:\", inner_result)\n return inner_result", "def get_next_single_name_classic(self, arg: str):\n current_name = arg\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n return result", "async def fetch_next_part(self) -> Union[MultipartReader, BodyPartReader]:\n ...", "def get_next_multiple_names_classic(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n self.pos_name_list_multiple += 1\n result = self.get_content(current_name)\n return result\n elif self.pos_name_list_multiple == len(self.name_list_multiple)-1:\n self.name_list_multiple = None", "def next():", "def next():", "def get_next():\n return \"some_value\"", "def next(self, default: Any = None) -> Any:\n while True:\n vtype, data = self._read_next_obj()\n if vtype is None or data is None:\n if default is None:\n raise StopIteration\n return default\n\n elif vtype == T_DESCRIPTOR_NAME:\n self._next_descr_name = data.decode(\"utf8\")\n self._next_descr = self._dpool.FindMessageTypeByName(self._next_descr_name)\n\n elif vtype == T_MESSAGE:\n if self.return_raw_object:\n return self._next_descr_name, self._next_descr, data\n else:\n return reflection.ParseMessage(self._next_descr, data)\n\n else:\n raise Exception(f\"Unknown message type {vtype}\")", "def initialize_get_next_single(self, arg: str):\n if self.check_streaming(arg) is False:\n return \"Not for streaming.\"\n self.name_list_single = arg.splitlines()\n self.name_list_single.pop(0)", "def get_next(self, name=None):\n self._get_next_call_count += 1\n if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:\n warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)\n\n # TODO(b/169442955): Investigate the need for this colocation constraint.\n with ops.colocate_with(self._iterator_resource):\n # pylint: disable=protected-access\n flat_ret = gen_dataset_ops.iterator_get_next(\n self._iterator_resource,\n output_types=self._flat_tensor_types,\n 
output_shapes=self._flat_tensor_shapes,\n name=name)\n return structure.from_tensor_list(self._element_spec, flat_ret)", "def next(self) -> str:\n raise NotImplementedError", "def data_next(self, *args, **kwargs):\n # there is this nasty tradeoff where if you implement this in this way\n # where data can take arguments, then _any_ downstream artifact that you\n # want also has to take those arguments as well, clearly undesireable\n # in cases where you would like to be able to do the transformation\n # without having to haul a bunch of stuff around with you\n # what this means is that either you have to accept a set of defaults that\n # are sane and will get you what you want, you identifier is incomplete and\n # thus you add arguments to your function to flesh it out, or\n # you have to drop down a level, configure your argument ahead of time\n # and then make the request again with slightly differen types\n\n # allowing the underlying abstraction to bubble up into optional kwarsg\n # frankly seems like a pretty good option, if it werent for the fact that\n # it is an absolute pain to maintain in the absense of mimicArgs\n # I feel like cl generics could make this much easier ...\n\n # OR OR OR the graph is successor stream of the actual instantiation of this stream\n # which means that ... the extra args would go in at init time??? no\n # that doesn't seem like the right tradeoff, any successor streams\n # basically have to present kwargs for any variables that cannot be\n # set to a sane default within the scope of the identifier system (sigh)\n # or at least in cases where it hasn't been demostrated that the variables\n # are simply a matter of representaiton, not differences in information\n # (i.e. that there isn't a function that can 1:1 interconvert)\n\n generator = self.metadata().data_next(yield_response_gen=True, **kwargs)\n format, *header_chunks, (resp, gen) = generator\n self.headers = resp.headers\n self.format = format\n # TODO populate header graph? 
not sure this is actually possible\n # maybe need to double wrap so that the header chunks always get\n # consumbed by the header object ?\n if self.format == 'application/rdf+xml':\n resp.close()\n return None\n\n return chain(header_chunks, gen)", "async def next(self) -> Optional[Union[MultipartReader, BodyPartReader]]:\n ...", "async def next(self) -> Optional[Union[MultipartReader, BodyPartReader]]:\n ...", "def stream_part(self, result: str, resulting_content_object: Content):\n if self.check_for_metatitle(result):\n if str(resulting_content_object.name) not in self.get_next_buffer:\n self.get_next_buffer[str(resulting_content_object.name)] = resulting_content_object\n print(\"[Streaming] Part\", self.get_next_part_counter, \"starts here.\")\n next_name = str(resulting_content_object.name) + \"//streaming/p\" + str(self.get_next_part_counter)\n if self.classic is False:\n result = self.get_next_single_name(next_name)\n else:\n result = self.get_next_single_name_classic(next_name)\n print(\"[Streaming] Part\", self.get_next_part_counter, \"ends here with result:\", result)\n self.get_next_part_counter += 1\n if self.check_end_streaming(result):\n return None\n return result", "def _next(self, filename):\n try:\n return self.tmp_read[filename]['reader'].__next__()\n except StopIteration:\n return None", "def __next__(self):\n return next(self.stream_chunker)", "def __next__(self):\n return self.read_message()", "def next(self):\n return self.read_message()", "def next(self):\r\n self._collect()\r\n if not self._heads and not self._refresh:\r\n return Stream.EOF\r\n minimum = self._pop()\r\n if minimum:\r\n line, stream = minimum\r\n self._refresh.add(stream)\r\n return (self._labels[stream], line)", "def next(self, type=None):\n i = self.stop\n s = self.sentence\n while i < len(s):\n if s[i].chunk is not None and type in (s[i].chunk.type, None):\n return s[i].chunk\n i += 1", "def __next__(self):\n\t\treturn next()", "async def get_next(continuation_token=None):\n if not continuation_token:\n return {\"nextLink\": \"page2\", \"value\": [\"value1.0\", \"value1.1\"]}\n else:\n return {\"nextLink\": None, \"value\": [\"value2.0\", \"value2.1\"]}", "async def get_next(continuation_token=None):\n if not continuation_token:\n return {\"nextLink\": \"page2\", \"value\": [\"value1.0\", \"value1.1\"]}\n else:\n return {\"nextLink\": None, \"value\": [\"value2.0\", \"value2.1\"]}", "def _next(self):\n i = 0\n while i < self.size:\n yield self.data[i]\n i += 1", "def __next__(self):\n return self.next()", "def __next__(self):\n return self.next()" ]
[ "0.68529546", "0.6562418", "0.6089761", "0.6070447", "0.6034299", "0.5983131", "0.5961039", "0.5956156", "0.5956156", "0.59460646", "0.58795226", "0.58600503", "0.5786577", "0.5783287", "0.57088214", "0.57027763", "0.57027763", "0.5626713", "0.5601194", "0.5573359", "0.54974437", "0.54853797", "0.5447034", "0.54343015", "0.5352495", "0.5308903", "0.5308903", "0.52657795", "0.5230434", "0.5230434" ]
0.73879164
0
The write_out function, which is used for the named functions. Stores the content object as parts in the content store. Before the first element is stored, a meta title is stored in the content store so the node that gets this content object can detect and start the stream.
def write_out(self, content_content: str): print("[write_out] Computation name: ", self.comp_name) # meta_title_content object creation to return as a first part if self.write_out_part_counter < 0: metatitle_content = Content(self.comp_name, "sdo:\n" + str(self.comp_name) + "/streaming/p*") self.queue_to_lower.put((self.packetid, metatitle_content)) # self.cs.add_content_object(metatitle_content) TODO not needed? # actual content_object for streaming self.write_out_part_counter += 1 content_name = self.comp_name content_name += "/streaming/p" + str(self.write_out_part_counter) content_object = Content(content_name, content_content) self.cs.add_content_object(content_object) print("[write_out] Last entry in content store:", self.cs.get_container()[-1].content.name, self.cs.get_container()[-1].content.content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, content):\n ...", "def write(self, content):\n pass", "def write(self, out):", "def last_write_out(self):\n end_name = self.comp_name\n self.write_out_part_counter += 1\n end_name += \"/streaming/p\" + str(self.write_out_part_counter)\n end_streaming_content_object = Content(end_name, \"sdo:endstreaming\")\n self.cs.add_content_object(end_streaming_content_object)\n print(\"[last_write_out] Last entry in content store:\", self.cs.get_container()[-1].content.name,\n self.cs.get_container()[-1].content.content)", "def writeOutput(self, output):", "def write_output(self,content):\n text=\"\"\"# typ eta phi pt jmass ntrk btag had/em dummy dummy\\n\"\"\"\n self.output.writelines(text)\n text=\"0 \"+str(self.nb_data)+\" \"+str(len(content))+\"\\n\"\n self.output.writelines(text)\n\n i=1\n for particle in content:\n text=str(i)+' '+particle.lhco_line()+'\\n'\n self.output.writelines(text)\n i+=1", "def write(self, data, meta):\n raise NotImplementedError", "def save(self, output, data):", "def write_output(self):", "def store(self, out, includeDocs=True):\n self._write(out, dict(prettyPrint=False), includeDocs=includeDocs)", "def write(self):", "def write(self):", "def dump_parts(self, io):\n\n # XXX refactor with Tempita\n title = \"Parts created by the docutils writer '%s'\" % self.strategy.name\n io.say(title + os.linesep)\n io.say(len(title) * '-')\n io.say(2 * os.linesep)\n io.say('Part keys: ' + 2 * os.linesep)\n\n parts = self.publish_parts(io)\n io.say(os.linesep.join(sorted(parts.keys())))\n io.say(2 * os.linesep)\n for part in parts:\n io.say(\"Value of part '%s':%s\" % (part, os.linesep))\n io.say(parts[part].encode('utf-8') + os.linesep)\n io.say(80*'-'+os.linesep)\n io.say(os.linesep)", "def write(self,out):\n with open( out, \"wb\") as fi:\n fi.write(html.tostring(self.book))", "def _write_content(i, content):\n fpath = io_mgr.get_parties_json(i)\n with open(fpath, 'w') as fstream:\n fstream.write(json.dumps(content, indent=4))", "def write_data():", "def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))", "def write(self):\n\t\traise NotImplementedError('%s: No write function implemented!' 
% self.name)", "def write(self, content):\n for f in self.files:\n f.write(content)\n f.flush() # Want this content is displayed immediately on file", "def write(name, keyword, domain, citation, author, description, species, version, contact, license, values, output):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n file=output,\n )", "def write(self, output_dir, names, data=None):\n assert len(names) == len(self.object_list), \"Give as many names as objects to save multi-object\"\n\n pos = 0\n for elt, name in zip(self.object_list, names):\n if data is None:\n elt.write(output_dir, name)\n\n else:\n if elt.type.lower() in ['surfacemesh', 'polyline', 'pointcloud', 'landmark']:\n elt.write(output_dir, name, data['landmark_points'][pos:pos + elt.get_number_of_points()])\n pos += elt.get_number_of_points()\n\n elif elt.type.lower() == 'image':\n elt.write(output_dir, name, data['image_intensities'])", "def write(self):\n raise NotImplementedError", "def save(self, output, data):\n pass", "def write(name, keyword, domain, citation, author, description, species, version, contact, license, values,\n functions, output, value_prefix):\n write_namespace(\n name, keyword, domain, author, citation, values,\n namespace_description=description,\n namespace_species=species,\n namespace_version=version,\n author_contact=contact,\n author_copyright=license,\n functions=functions,\n file=output,\n value_prefix=value_prefix\n )", "def save_meta(self):\n # jOut = os.path.join(self.meta[\"wdir\"], meta_file)\n with open(self.meta_filepath, \"w\") as f:\n json.dump(self.meta, f)", "def writeMeta(outDir, metaData, fulltextData):\n filename = join(outDir, 'articleMeta.tab')\n logging.debug('Appending metadata to %s' % filename)\n minId = pubConf.identifierStart['crawler']\n metaData['articleId'] = str(minId + int(metaData['pmid']))\n if 'main.html' in metaData:\n metaData['fulltextUrl'] = metaData['main.html']['url']\n elif 'landingUrl' in metaData:\n metaData['fulltextUrl'] = metaData['landingUrl']\n if not isfile(filename):\n codecs.open(filename, 'w', encoding='utf8').write(u'\\t'.join(metaHeaders) + '\\n')\n maxCommon.appendTsvDict(filename, metaData, metaHeaders)\n row = []\n for h in metaHeaders:\n row.append(metaData.get(h, ''))\n\n dbFname = join(outDir, 'articles.db')\n con, cur = maxTables.openSqliteCreateTable(dbFname, 'articles', metaHeaders, idxFields=['pmid', 'pmcId', 'doi'], intFields=['pmid', 'articleId', 'pmcId'], primKey='pmid', retries=100)\n writeOk = False\n tryCount = 100\n logging.log(5, '%s' % row)\n while not writeOk and tryCount > 0:\n try:\n try:\n maxTables.insertSqliteRow(cur, con, 'articles', metaHeaders, row)\n except sqlite3.IntegrityError:\n logging.warn('Already present in meta info db')\n\n writeOk = True\n except sqlite3.OperationalError:\n logging.info('sqlite db is locked, waiting for 60 secs')\n time.sleep(60)\n tryCount -= 1\n\n if not writeOk:\n raise Exception('Could not write to sqlite db')", "def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def write(data):", "def _write_h5_out(self, fout, save_hybrid_meta=True):\n\n with Outputs(fout, mode='a') as out:\n if 'meta' in out.datasets and save_hybrid_meta:\n hybrid_meta = 
to_records_array(self.hybrid_meta)\n out['meta'] = hybrid_meta\n\n for dset, data in self.profiles.items():\n out[dset] = data", "def write(self):\n pass" ]
[ "0.6725428", "0.63619083", "0.6285846", "0.6174999", "0.59593", "0.58944917", "0.58546984", "0.584093", "0.5826792", "0.58217865", "0.58128077", "0.58128077", "0.5803597", "0.5695644", "0.5638194", "0.5635738", "0.5605294", "0.55881625", "0.55847704", "0.5579301", "0.5565279", "0.55552953", "0.55352086", "0.55330455", "0.5521421", "0.5489609", "0.5487721", "0.5469778", "0.5460166", "0.54522794" ]
0.78408104
0
The last_write_out function, which is used for the named functions. Stores an end-of-streaming marker into the content store.
def last_write_out(self): end_name = self.comp_name self.write_out_part_counter += 1 end_name += "/streaming/p" + str(self.write_out_part_counter) end_streaming_content_object = Content(end_name, "sdo:endstreaming") self.cs.add_content_object(end_streaming_content_object) print("[last_write_out] Last entry in content store:", self.cs.get_container()[-1].content.name, self.cs.get_container()[-1].content.content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLastFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def lastsave(self):\r\n return self.format_inline('LASTSAVE')", "def hook_output(self, name: str, func: Callable[[\"self.value_type\"], None]):\n self.write_hooks[name] = func", "def get_output(self, last = 1):\n\t\tif last == -1:\n\t\t\ttmp = self.out_param[::]\n\t\t\tself.out_param = []\n\t\t\treturn tmp\n\t\treturn self.out_param[-last:]", "def lastsave(self):\n self.connect()\n self._write('LASTSAVE\\r\\n')\n return self._get_numeric_response()", "def writer_last_names(self, obj):\n\n return ' / '.join(\n writer.last_name.upper() for writer in set(\n obj.writers.order_by('last_name')))", "def getLast(self):\n if self.last != None:\n return self.last.filename\n else:\n return None", "def write():\n pass", "def get_my_last_event(self):\r\n return self._handler.get_my_last_event()", "def outWriteEvent(self):\r\n pass", "def get_masscan_last_output(self):\n return self._masscan_last_output", "def write_end(self) -> str:\n assert self._read_pipe_name\n return self._read_pipe_name", "def most_recent_read(self):\n self.read_pos = (self.write_pos - 1) % self.log_len\n return", "def writetif(self,outputname,):\n pass", "def outname(self, fileappend=''):\n if self._outname is None:\n self._outname = self.generate_outname(fileappend='')\n\n return self._outname", "def _write_leader_optime(self, last_operation):", "def write_function(output_file, function_name, n_vars, curr_function):\n curr_function[0] = function_name\n output_file.write(\"(\" + function_name + \")\" + \"\\n\")\n for i in range(int(n_vars)):\n write_push_constant(output_file, \"0\")", "def getLastWaveIdentifier(self): \n return self.lastWaveIdentifier", "def last_operation_name(self) -> Optional[str]:\n return pulumi.get(self, \"last_operation_name\")", "def get_last_save(self) -> Optional[int]:\n return self._save_marker", "def read_end(self) -> str:\n assert self._write_pipe_name\n return self._write_pipe_name", "def _write_nover():\n return []", "def get_last_save_info(self) -> Any:\n return self._bin_iter.get_last_save_info()", "def last(word):\n\treturn word[-1]", "def get_last(self, count):", "def xpathLastFunction(self, nargs):\n libxml2mod.xmlXPathLastFunction(self._o, nargs)", "def get_last(self):\n return \"%s%s%s\" % (\n # As per the original list there's a 1/39 (not conting Bob)\n # chance for a 'Mc' prefix to the lastname\n #\n # Can also, with low propability be \"von <lastname>\"\n weighted_choice([(\"\", 35), (\"Mc\", 3), (\"von \", 1)]),\n choice(self.second_chunks).title(),\n choice(self.third_chunks))", "def last_move(self):\n return self.last_action", "def last_node(self):\n return \"last_node\"", "def get_last_save(self) -> Optional[float]:\n return None if self._save_marker is None else self._save_marker + self._offset" ]
[ "0.6481691", "0.58430004", "0.5758233", "0.5632203", "0.5593506", "0.54959834", "0.5467705", "0.54547566", "0.5439232", "0.5418257", "0.53904366", "0.5386019", "0.5384885", "0.5378884", "0.53705084", "0.5370292", "0.53638005", "0.5259693", "0.5248828", "0.52414453", "0.5228014", "0.52194273", "0.5199417", "0.5198776", "0.5198424", "0.5192889", "0.51872736", "0.51800454", "0.517265", "0.516122" ]
0.62631506
1
Streaming function for inner nodes. Runs get_next and writes out the result until the end of the stream is reached.
def write_out_on_get_next(self, arg: Name): res = self.get_next(arg) while res and self.check_end_streaming(res) is False: self.write_out(res) res = self.get_next(arg) self.last_write_out()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _next(self):\n node = self.head\n while node != None:\n yield node.data\n node = node.right", "def next():", "def next():", "def __next__(self):\n return next(self.stream_chunker)", "def __next__(self):\n return next(self.buffered_streamer)", "def __iter__(self) -> SLNode:\n cur = self.head\n while cur is not None:\n yield cur\n cur = cur.next", "def __iter__(self) -> SLNode:\n cur = self.head\n while cur is not None:\n yield cur\n cur = cur.next", "def __iter__(self):\n node = self.head\n while node is not None:\n yield node._data\n node = node._next", "def __iter__(self):\n node = self.head.next[0]\n while node.next:\n yield node.value\n node = node.next[0]", "def __iter__(self):\n # set current node to front node\n current = self.front\n # while current != None\n while current:\n # send out current node's data\n yield current.data\n # move to next node\n current = current.prior", "def next(self):\r\n self._collect()\r\n if not self._heads and not self._refresh:\r\n return Stream.EOF\r\n minimum = self._pop()\r\n if minimum:\r\n line, stream = minimum\r\n self._refresh.add(stream)\r\n return (self._labels[stream], line)", "def run(self):\n node_runs = [\n (node, node.run())\n for node in self.nodes\n if node.loaded\n ]\n\n while True:\n for node, node_run in node_runs:\n if node in self.buffered_input and node.mode == Node.READ:\n for d, values in self.buffered_input[node].iteritems():\n if values and not node.has_inputs(d):\n node.write_input(d, values.pop(0))\n self.num_buffered_inputs -= 1\n break\n\n yield next(node_run)", "def __iter__(self):\n cur = self.head\n while cur is not None:\n yield cur.data\n cur = cur.next", "def traverse(self):\n current = self.head\n while current is not None:\n print current.value\n current = current.next", "def __next__(self) -> object:\n if not self.current_node:\n raise StopIteration\n\n current_node_value = self.current_node.value()\n self.current_node = self.current_node.next()\n return current_node_value", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def _next(self):\n i = 0\n while i < self.size:\n yield self.data[i]\n i += 1", "def iter(self):\n\n current = self.head\n while current:\n yield current\n current = current.next", "def __next__(self):\n if self.idx < len(self.nodes) - 1:\n self.idx += 1\n return self.nodes[self.idx]\n else:\n if self.mode is 'circular':\n self.idx = 0\n else:\n self.idx = len(self.nodes)\n raise StopIteration", "def __forward(self):\n if self.is_empty():\n raise StopIteration\n\n current = self._head\n yield current._data\n while current._next:\n current = current._next\n yield current._data", "def get_stream(ast):\n while True:\n yield evaluate_program(ast)", "def next(self):\n return _libsbml.XMLInputStream_next(self)", "def __iter__(self):\n if self.is_empty():\n raise StopIteration\n current = self._head\n yield current._data\n while current._next:\n current = current._next\n yield current._data", "def __call__(self):\r\n return self.next()" ]
[ "0.6188734", "0.61485165", "0.61485165", "0.6088194", "0.6049867", "0.5947307", "0.5947307", "0.5915881", "0.5863198", "0.5844089", "0.5831229", "0.58085644", "0.57978195", "0.57714903", "0.5739277", "0.5733169", "0.5733169", "0.5733169", "0.5733169", "0.5733169", "0.5733169", "0.5733169", "0.57281226", "0.57196444", "0.57163024", "0.5662551", "0.56480706", "0.564645", "0.56378037", "0.5610607" ]
0.66700655
0
Perform a timeout. Resets the counter of API calls made since the last timeout, as well as the session.
def doTimeout(self): log.info('Executed {} calls until timeout'.format(self.calls_to_timeout)) self.calls_to_timeout = 0 self.resetSession() time.sleep(self.timeout)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_timeout(self):\n self.logger.debug('id=%d, Session timed out!', self.id)\n self.close(SessionCloseErrorCode.SESSION_DIED)", "def reset_time_out(self):\n self.reconnect()\n self.reconnect_params()", "def timeout(self):\n self._status_update(\"Pyloton: Timeout\")\n time.sleep(3)", "def settimeout(self,timeout=10):\r\n # Update\r\n self.timeout = timeout", "def _on_timeout(self, info: str = None) -> None:\n self._timeout = None\n error_message = \"Timeout {0}\".format(info) if info else \"Timeout\"\n if self.final_callback is not None:\n print('raise')\n\n # self._handle_exception(\n # HTTPTimeoutError, HTTPTimeoutError(error_message), None\n # )", "def _idle(self):\n # self._purge_timedout()\n # ...", "def timeout(self):\n self.timeout_scan_flag=True\n self.timer.stop()\n self.status_sig.emit([\"Update_Status\",\"Timeout during acquisition\",'log'])\n self.status_sig.emit([\"Timeout\"])", "def set_timeout(self, timeout):\n self.timeout = timeout", "def timeout_handle():\r\n response = requests.get(base_url + '/get', params=None, timeout=3)\r\n print(response.json)\r\n print(response.status_code)", "def on_timeout(self):\n pass", "def set_timeout(self, timeout):\n pass", "def set_retry_timeout(self, retry_timeout):", "def TODO_testTimeout(self):\n return \"\"\"TODO: Highly dependent on hardcoded downstream timeout val\"\"\"\n\n # Assuming proxy's downstream_max is 1,\n # and number of threads is 1.\n\n self.client_connect(0)\n\n self.client_send('get time0\\r\\n', 0)\n self.mock_recv('get time0\\r\\n', 0)\n\n # Mock server is 'busy' at this point, so\n # downstream timeout logic should kick in,\n # without our mock server having to send anything.\n\n self.wait(210)\n\n self.client_recv('END\\r\\n', 0)\n\n # TODO: The number of server sessions should be 0,\n # except the close might not have propagated.", "def reset_timeout(self, timeout: float) -> None:\n self._reset_timeout = timeout", "def get_timeout(self) -> int:", "def setTimeout(self, timeout):\n self.timeout = timeout", "def timeout(self, timeout):\n\n self._timeout = timeout", "def pytest_timeout_cancel_timer(item):", "def _on_timeout(self, key: object, info: str = None) -> None:\n request, callback, timeout_handle = self.waiting[key]\n self.queue.remove((key, request, callback))\n\n error_message = \"Timeout {0}\".format(info) if info else \"Timeout\"\n timeout_response = UDPResponse(\n request,\n 599,\n #error=HTTPTimeoutError(error_message),\n request_time=self.io_loop.time() - request.start_time,\n )\n self.io_loop.add_callback(callback, timeout_response)\n del self.waiting[key]", "def setTimeOut(self, timeout=6.0):\n self.timeout = timeout", "def timeout(self):\n pf.debug(\"TIMEOUT\")\n self.acceptData(TIMEOUT)", "def reset_timeout (self, new_timeout):\n self.timer.cancel()\n self.timer = Timeout(new_timeout, TestIsTakingTooLong(new_timeout))", "def setdefaulttimeout(timeout):\r\n global _TIMEOUT\r\n _TIMEOUT = timeout", "def functionThatWillTimeOut():\n time.sleep(5)", "def on_timeout(self):\n logger.debug(\"on_timeout\")\n self.discard_env()\n self.transport.close()", "def pytest_timeout_set_timer(item, settings):", "def __init__(self, timeout=129600):\n self.timeout = timeout", "def _init_timeouts(self):\n cur_time = time()\n self._chunk_time = cur_time\n self._total_time = cur_time", "def __init__( self, timeout = 60.0 ):\n\n self.timeout = timeout\n self.alive = None", "def settimeout(self, value: int) -> None:\n ..." ]
[ "0.644774", "0.6438548", "0.6436007", "0.631921", "0.6305379", "0.62621534", "0.6204743", "0.61857486", "0.6181763", "0.61060214", "0.6093841", "0.6060867", "0.60562134", "0.5959294", "0.59495", "0.59370124", "0.59354204", "0.59328425", "0.59189075", "0.5898101", "0.58769494", "0.58192104", "0.5811019", "0.58095586", "0.58041984", "0.57939416", "0.5784738", "0.57714057", "0.5764275", "0.5760872" ]
0.76772344
0
Gets a new API key, configured with its corresponding access rights. If all keys are already used, start from the first one.
def getNextApiKey(self): self.resetSession(get_new_api_key=False) if self.key_idx == len(self.api_keys): self.key_idx = 0 self.session.auth = (self.api_keys[self.key_idx][0], '') self.number_of_max_req = self.api_keys[self.key_idx][1] self.key_idx += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def regenerate_API_key(self) -> None:\n session = create_session()\n new_key = generate_random_string(24)\n # Check if there is any user with exact same API key as just generated\n if new_key not in session.query(User.API_KEY).all():\n self.API_KEY = new_key\n session.merge(self)\n session.commit()\n else:\n while new_key in session.query(User.API_KEY).all():\n new_key = generate_random_string(24)\n self.API_KEY = new_key\n session.merge(self)\n session.commit()", "def get_api_access_key(self):\n if self.api_access_key in [None, '']:\n options = string.letters + string.digits\n self.api_access_key = ''.join([\n random.choice(options)\n for i in range(64)\n ])\n self.save()\n return self.api_access_key", "def get_api_key(api_key):\n api.get(api_key)", "def get_apikey_from_keyring(platform_id='public', # type: str\n base_url=None, # type: str\n keyring_entries_username=KR_DEFAULT_USERNAME, # type: str\n ):\n client = ODSClient(platform_id=platform_id, base_url=base_url, keyring_entries_username=keyring_entries_username)\n return client.get_apikey_from_keyring(ignore_import_errors=False)", "def create_api_keys(self, **kwargs):\n\n all_params = ['api_key']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_api_keys\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'api_key' in params:\n body_params = params['api_key']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiKeyWithPrivileges',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_api_key(instance):\n\n # TODO make this work with environment variables or else\n # by getting the api-key from ~/.config/flywheel/user.json\n # if the KEY_FILE is not present but that doesn't honor the\n # \"instance\" argument to this method\n\n with open(KEY_FILE) as json_file:\n keys = json.load(json_file)\n the_user = keys[\"default\"]\n for key, val in keys[\"ids\"][the_user].items():\n if instance.startswith(key):\n api_key = val\n if not api_key:\n print(f\"{CR}Could not find instance '{instance}'{C0}\")\n return api_key", "def temp_api_key(cloud):\n payload = {'name': 'pelion_e2e_dynamic_api_key'}\n r = cloud.account.create_api_key(payload, expected_status_code=201)\n resp = r.json()\n\n log.info('Created new developer api key for test case, id: {}'.format(resp['id']))\n\n yield resp\n\n log.info('Cleaning out the generated test case developer api key, id: {}'.format(resp['id']))\n cloud.account.delete_api_key(resp['id'], expected_status_code=204)", "def generate_api_key(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if 
key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method generate_api_key\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys/_generate'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiKey',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_key ():", "def get_api_key(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'api_key')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_api_keys(self, **kwargs):\n\n all_params = ['page', 'per_page', '_from', 'to', 'sort_dir', 'sort_field', 'filters']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_api_keys\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/apikeys'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page' in params:\n query_params['_page'] = params['page']\n if 'per_page' in params:\n query_params['_perPage'] = params['per_page']\n if '_from' in params:\n query_params['_from'] = params['_from']\n if 'to' in params:\n query_params['_to'] = params['to']\n if 'sort_dir' in params:\n query_params['_sortDir'] = params['sort_dir']\n if 'sort_field' in params:\n query_params['_sortField'] = params['sort_field']\n if 'filters' in params:\n query_params['_filters'] = params['filters']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[ApiKey]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def generate_access_token(self):\n return gen_api_key(length=self.token_length)", "def set_api_key(new_api_key):\n global api_key\n api_key = new_api_key", "def get_key(self, user, api_key):\n return True", "def set_api_access_keys(**kwargs):\n API_BASE_PARAMS['key'] = kwargs['key']", "def switch_key():\n with open(\"secret_key.txt\", 'r') as key_file:\n api_keys = key_file.read().splitlines()\n\n for api_key in api_keys:\n yield api_key", "def _get_api_key(self):\n self.api.apikey = self.api.action.user_show(id=self.username)['apikey']", "def 
api_keys(self):\n return SpaceApiKeysProxy(self._client, self.id)", "def get_new_key() -> rsa.RSAPrivateKeyWithSerialization:\n\n return rsa.generate_private_key(\n backend=default_backend(), public_exponent=65537, key_size=2048\n )", "def API_KEY(self):\n raise NotImplementedError()", "def api_key( self, trans, user_id, **kwd ):\n user = self.get_user( trans, user_id )\n key = self.create_api_key( trans, user )\n return key", "def API_KEY(self):\n return 2", "def generate_api_key(key_length: int = settings.api_app_auth_key_length) -> str:\n return secrets.token_urlsafe(64)[:key_length]", "def create_apikey(self, username, api_key):\r\n return 'ApiKey %s:%s' % (username, api_key)", "def test_create_api_key(self):\n pass", "async def generate_new_refesh_key(payload: dict = Depends(get_jwt_payload)):\n if payload[\"type\"] != \"refresh\":\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"You gave the access key, but we need the refresh key\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n\n # <- Your token revocation code should be here!\n\n access_token_data = jwt_claims.copy()\n access_token_data[\"sub\"] = payload[\"sub\"]\n access_token_data[\"exp\"] = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token_data[\"jti\"] = str(uuid.uuid4())\n\n return AccessToken(access_token=jwt.encode(access_token_data, SECRET_KEY, algorithm=ALGORITHM))", "def getAPIKey(self, keyID=None):\n kcfg = self.getKeyConfig(keyID)\n\n if \"key\" not in kcfg:\n raise ConfigException(\"invalid config: missing api key\")\n\n return kcfg[\"key\"]", "def API_KEY(self):\n return 11", "def _newKey(self, key):\n pass", "def API_KEY(self):\n return 10" ]
[ "0.6455453", "0.63159895", "0.6125672", "0.6044153", "0.60338324", "0.5977004", "0.5949956", "0.5945266", "0.5931769", "0.5898856", "0.5832408", "0.5827095", "0.58270764", "0.5792817", "0.57863086", "0.5750034", "0.5749857", "0.57467425", "0.5730262", "0.5726184", "0.57043725", "0.5699099", "0.5676141", "0.5656027", "0.56538767", "0.5633933", "0.56252736", "0.5593603", "0.5582291", "0.55779773" ]
0.6538258
0
Number of additions to favorites.
def count_favorite(self, obj): return obj.recipe_fav.count()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_favourites_page_count(self, page, page_size):\n total_roms = Roms(self._connection).page_size(page_size).page_offset(page).get_count()\n return int(float(total_roms) / page_size)", "def get_number_watched(self):\n movies_watched = 0\n for movie in self.movies:\n if movie.is_watched:\n movies_watched += 1\n return movies_watched", "def __len__(self):\n return sum(1 for item in self.wishlist.values())", "def __len__(self):\n return sum(1 for item in self.wishlist.values())", "def count():", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def count(self):\n # TODO not implemented yet\n return 0", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def bookmarks_count(self) -> int:\n return pulumi.get(self, \"bookmarks_count\")", "def count(self):\n\n raise NotImplementedError", "def count_fingers(self):\n return self._finger.count(True)", "def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres", "def _count_explores(self) -> int:\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count", "def fileCount(self):\n pass", "def add_count(self):\n self.count += 1", "def update_adds(self):\n self.nb_added = self.chefs.count()\n self.save()", "def count(self):\n return self.size()", "def count(self):\n return sum(1 for _ in self)", "def num_reviews(self):\n return self.review_set.count()", "def count(self):\n return len(self)", "def count(self):\n return self.get_count()", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def hives_count(self) -> int:\n return self.hives.count()", "def get_number_of_movies(self):\n raise NotImplementedError", "def count(self) -> int:\n return self.__count", "def getFileCount(self) -> int:\n ...", "def n_featured():\r\n sql = text('''select count(*) from featured;''')\r\n results = db.engine.execute(sql)\r\n for row in results:\r\n count = row[0]\r\n return count" ]
[ "0.69606006", "0.6493855", "0.6325194", "0.6325194", "0.6303647", "0.62787765", "0.62787765", "0.62787765", "0.62787765", "0.62150556", "0.6180549", "0.6174093", "0.61588585", "0.6152416", "0.6150293", "0.61248976", "0.6096311", "0.6087514", "0.60759777", "0.60732627", "0.60600597", "0.60509723", "0.6006602", "0.59868", "0.5982884", "0.5972997", "0.5972983", "0.5953849", "0.59509003", "0.5946554" ]
0.7623228
0
Move files/folders from source path to the show's directory. Directory is specified by user, or if possible, loaded from data.json
def move_files():
    if "defaultdirectory" in data:
        print("\n*** Default video source directory:", data["defaultdirectory"])
        srcdir = input("Enter '1' to use default video source directory\n"
                       "Otherwise, please enter the full path where your videos are located.\n"
                       "Example: C:\\user\\downloads\\ \n").strip()
        if srcdir.startswith('1'):
            srcdir = data["defaultdirectory"]
    else:
        srcdir = input("Please enter the full path where your videos are located.\n"
                       "Example: C:\\user\\downloads\\ \n").strip()
    while not os.path.isdir(srcdir):
        srcdir = input("\n*** Invalid directory. Please enter the full path where your "
                       "videos are located.\n Example: C:\\user\\downloads\\ \n").strip()
    data["defaultdirectory"] = srcdir
    save_json()
    print()
    for filename in os.listdir(srcdir):
        # Only look for folders/files in the format "S##E##". Example: "X-Files S01E02"
        if re.search('[sS]\\d{2}[eE]\\d{2}', filename):
            found = False
            filepath = os.path.join(srcdir, filename)
            name = filename.lower()
            for key in data:
                keywords = key.split()
                if all(word in name for word in keywords):
                    found = True
                    try:
                        shutil.move(filepath, data[key])
                    except Exception as e:
                        print("*** Error with {}".format(filename))
                        print("The file might be open in another program")
                        print(repr(e))
                    else:
                        print("*** Moved:", (filename[:41] + "...") if len(filename) > 44 else filename)
                    break
            if not found:
                # Prints the shows that matched the episode formatting, but were not
                # configured to be processed
                print("* Not processed:", (filename[:35] + '...') if len(filename) > 38 else filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def askdirectory_source(self):\n\n self.fromdir = tkFileDialog.askdirectory(**self.dir_opt_source)\n self.dir_opt_source['initialdir'] = self.fromdir\n self.source_label.set(self.fromdir)", "def prepare_src_folder(self, src_folder: str) -> None:", "def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")", "def move_from_temp_directory(self):", "def from_dir_changed(self):\n text = self.from_dir.toPlainText().strip()\n if os.path.exists(text):\n sqlite.w('update settings set source_path = (?) where id is 1', text)\n all_files = self.get_all_files_from_path(text, extension='PDF')\n self.pdf_files = self.make_all_files_dictionary(all_files)\n\n if not self.pdf_files:\n return\n\n self.reset_widgets(all=True)\n self.draw_pdf_files()", "def select_destionation(self):\n dest_folder = self.view.folder_explorer.getExistingDirectory()\n\n # PySide\n if isinstance(dest_folder, list) and os.path.isdir(dest_folder[0]):\n self.view.folder_line.setText(dest_folder[0])\n # PySide2\n if os.path.isdir(dest_folder):\n self.view.folder_line.setText(dest_folder)", "def on_moved(self, event):\n super(myEventHandler,self).on_moved(event)\n #moveto events from external folders have no src_path\n source = event.src_path\n dest = event.dest_path\n if event.is_directory:\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n pass\n #file = splitpath[1]\n #pathtoonedir = self.onedir.getonedirrectory()\n #oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n #newpath = splitdest[0].replace(pathtoonedir ,\"\")\n #if oldpath is \"\":\n # oldpath = os.path.sep\n #self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n oldname = source\n newname = dest\n pathtoonedir = self.onedir.getonedirrectory()\n oldname = oldname.replace(pathtoonedir ,\"\")\n newname = newname.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(oldname,newname)\n else:\n #if it comes from outside the folder structure\n if source is None:\n try:\n #use os.path.split to get file name and path\n splitpath = split(dest)\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! \" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n else:\n #file was moved!\n #check if name stays the same i.e. 
it's a move not a rename!\n splitpath = split(source)\n splitdest = split(dest)\n if splitpath[1] == splitdest[1]:\n try:\n #where are we moving from\n file = splitpath[1]\n pathtoonedir = self.onedir.getonedirrectory()\n oldpath = splitpath[0].replace(pathtoonedir ,\"\")\n #calculate new path\n newpath = splitdest[0].replace(pathtoonedir ,\"\")\n if oldpath is \"\":\n oldpath = os.path.sep\n self.onedir.movefile(file,newpath,oldpath)\n except OSError as e:\n print \"Error copying file! \" + e\n exit(1)\n else:\n #rename!!!!!!!!\n file = splitpath[1]\n newname = splitdest[1]\n pathtoonedir = self.onedir.getonedirrectory()\n path = splitpath[0].replace(pathtoonedir ,\"\")\n if path is \"\":\n path = os.path.sep\n else:\n path = path[1:]\n self.onedir.rename(file,path,newname)", "def cp_dir_or_files(self):\n if self.recursive:\n if self.cmdtype == 'upload' and not self.srcpath.endswith(os.path.sep):\n basename = os.path.basename(self.srcpath)\n self.destpath = join_obs_path(self.destpath, basename)\n elif self.cmdtype == 'download' and not self.srcpath.endswith('/'):\n bucket, key = split_bucket_key(self.srcpath)\n basename = key.split('/')[-1]\n if basename:\n self.destpath = os.path.join(self.destpath, basename)\n elif not self.srcpath.endswith('/'):\n bucket, key = split_bucket_key(self.srcpath)\n basename = key.split('/')[-1]\n if basename:\n self.destpath = join_obs_path(self.destpath, basename)", "def destination_chooser(self):\n dialog = QFileDialog()\n dialog.setFileMode(QFileDialog.Directory)\n dialog.setOption(QFileDialog.ShowDirsOnly)\n dialog.exec_()\n self.lblDestPath.setEnabled(True)\n self.lblDestPath.setText(os.path.abspath(dialog.directory().absolutePath()))\n self.update_table_view()\n self.copyButton.setEnabled(True)", "def do_stage(self, mirror_only=False):\n super().do_stage(mirror_only)\n stsrc = self.stage.source_path\n srcpath = os.path.join( stsrc, self.build_directory )\n ppath = ancestor (srcpath)\n shutil.move(stsrc, stsrc+\"_old\")\n mkdirp(ppath)\n shutil.move(stsrc+\"_old\",srcpath)", "def populateOutputFolder(self):\n filePath = pm.fileDialog2(fileMode=2,\n startingDirectory=self.startDir,\n fileFilter=' Shifter Game Assembly folder')\n if not filePath:\n return\n if not isinstance(filePath, string_types):\n filePath = filePath[0]\n self.gtUIInst.path_lineEdit.setText(filePath)", "def change_dir(filename):", "def set_source_path(self, folder):\n self.source_path = folder", "def rename(source_dir,dest_dir):\n keep_going(text=\"This script will backup the original folder to dest_dir/Source/** and remove the original folder. It will make copies of the original files and rename them in directories called Darks, Flats, etc. Do you wish to continue? 
Answer Y or N.\")\n\n ## Backup Original Source Folder\n dir_util.copy_tree(source_dir, dest_dir + '/Source')\n\n data = []\n for file in os.listdir(\"./\" + source_dir): # put in your path directory\n if file.endswith(\".fits\"): # what does the file end with?\n data.append(os.path.join(source_dir, file))\n\n n = len(data)\n obj, itime, filt, renamed, datemod, count, flatmod, mod = ([] for i in range(8))\n for i in range(0, n):\n header = fits.getheader(data[i])\n Name, Date, Number, Ext = data[i].split(\".\")\n obj.append(header['OBJECT'])\n itime.append(header['ITIME'])\n filt.append(header['FWINAME'])\n mod.append((header['OBJECT'] + header['FWINAME']))\n flatmod.append((header['OBJECT'] + header['FWINAME'] + Date))\n datemod.append(datetime.strptime(Date, \"%Y%m%d\").date())\n if flatmod[i] in flatmod:\n count = flatmod.count(flatmod[i])\n if ('Lamp' in obj[i] or 'Flat' in obj[i]):\n renamed.append((dest_dir + '/Flats/' + str(datemod[i]) + '/' + 'K' + header['OBJECT'] + str(count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Flats/' + str(datemod[i]) + '/'), exist_ok=True)\n elif ('Dark' in obj[i]) or ('dark' in obj[i]):\n renamed.append((dest_dir + '/Darks/' + str(datemod[i]) + '/' + 'K' + header['OBJECT'] + str(count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Darks/' + str(datemod[i]) + '/'), exist_ok=True)\n elif ('Sky' in obj[i]) or ('sky' in obj[i]):\n renamed.append((dest_dir + '/Skys/' + str(datemod[i]) + '/' + 'K' + header['OBJECT'] + header['FWINAME'] + str(\n count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Skys/' + str(datemod[i]) + '/'), exist_ok=True)\n else:\n renamed.append((dest_dir + '/Objects/' + header['OBJECT'].upper() + '/' + str(datemod[i]) + '/' + 'K' + list(header['CAMNAME'])[0].title() + header['OBJECT'].upper() +\n header['FWINAME'] + str(\n count) + \".fits\"))\n os.makedirs(os.path.dirname(dest_dir + '/Objects/' + header['OBJECT'].upper() + '/' + str(datemod[i]) + '/'), exist_ok=True)\n os.rename(data[i], renamed[i])\n\n ## REMOVE LEFT OVER original Folders\n shutil.rmtree(source_dir)\n\n lists = [data, mod, datemod, itime, flatmod, renamed]\n data_headers = pd.concat([pd.Series(x) for x in lists], axis=1)\n\n return data_headers", "def test_6e_move_data_btw_folders(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif (GST.default_folder_to_be_used):\n if not (default_folders_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare default directories\")\n elif (not GST.dir1_exists) or (not GST.dir2_exists):\n raise unittest.SkipTest(\"Skipped for failed to prepare dirs\")\n elif not GST.moving_data_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare moving data tests.\")\n self.dismiss_dialogs()\n function = js_func[\"move_file\"] % (GST.gs_file_paths[\"file_to_move_to_folder_source_path\"], GST.gs_file_paths[\"move_to_folder_target_path\"])\n try:\n self.send_request(function, \"move_file()\")\n except Exception as e:\n raise MoveException(\"Failed to move the data between folders. \\n\" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise MoveException(\"Failed to move the data between folders. 
\\n\" + response)", "def move_dirs(args):\n src = args[0]\n dst = args[1]\n print(\"Moving from: {}\".format(src))\n print(\" to: {}\".format(dst))\n shutil.move(src, dst)\n return", "def askdirectory_dest(self):\n\n self.todir = tkFileDialog.askdirectory(**self.dir_opt_dest)\n self.dir_opt_dest['initialdir'] = self.todir\n self.target_label.set(self.todir)", "def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)", "def relocate(self, source, destination):\n destination_dir = os.path.dirname(destination)\n if not os.path.exists(destination_dir):\n self.subdir(destination_dir)\n os.rename(source, destination)", "def DestDialog(self):\r\n self.dest_dir = tk.filedialog.askdirectory(title = \"Select Destination Directory for image data\")", "def on_modified(self, event):\n super(myEventHandler,self).on_modified(event)\n if event.is_directory:\n try:\n source = event.src_path\n dest = event.src_dest\n pathtoonedir = self.onedir.getonedirrectory()\n source = source.replace(pathtoonedir ,\"\")\n dest = dest.replace(pathtoonedir ,\"\")\n self.onedir.renamedirectory(source, dest)\n except Exception as e:\n print e\n exit(1)\n else:\n source = event.src_path\n try:\n #use os.path.split to get file name and path\n splitpath = split(source)\n file = splitpath[1]\n if file.startswith('.'):\n return\n pathtoonedir = self.onedir.getonedirrectory()\n relpath = splitpath[0].replace(pathtoonedir ,\"\")\n self.onedir.sendfile(file, relpath)\n except OSError as e:\n print \"Error copying file! 
\" + e.strerror\n exit(1)\n except IOError as e:\n print \"IOerror creating file \" + e.strerror\n exit(1)", "def on_moved(self, event):\n print(\"Moved\")\n time.sleep(5)\n self.moveFile(event.dest_path)", "def change_dir(self, src: str = None, dest: str = None):\n\n if not is_empty(src):\n self._srcDir = src\n\n if not is_empty(dest):\n self._destDir = dest", "def mv(self, src_path, dst_path):\n try:\n postdata = codecs.encode(json.dumps({ 'src': src_path, 'dst': dst_path }), 'utf-8')\n self._urlopen('/api/fileops/move', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to move '{}' to '{}'\".format(src_path, dst_path))", "def walk():\n os.chdir('Lyrics')\n for directory_name, subdirectories, filenames in os.walk('.'):\n print(\"Directory:\", directory_name)\n print(\"\\tcontains subdirectories:\", subdirectories)\n print(\"\\tand files:\", filenames)\n print(\"(Current working directory is: {})\".format(os.getcwd()))\n for filename in filenames:\n shutil.move(os.path.join(directory_name, filename),\n os.path.join(directory_name) + '/' + get_fixed_filename(filename))", "def mv(self, src: int, dest: int) -> bool:\n url = 'https://webapi.115.com/files/move'\n result = self.s.post(url, data={'pid': dest, 'fid[0]': src}, headers={'Origin': origin['webapi'], 'Referer': referer['115'].format(self.default_dir)}).json()\n if result['errno'] == '':\n _ = functools.reduce(dict.__getitem__, self._dirs_lookup[src], self.dirs) # TODO: need to test\n self._dirs_lookup[src] = self._dirs_lookup[dest].append(dest)\n parent = functools.reduce(dict.__getitem__, self._dirs_lookup[src], self.dirs)\n if src not in parent:\n parent.update({src: _})\n else:\n parent.get(src).update(_)\n return True", "def on_moved(self, event):\n\n # build the relative source and destination paths\n source_path = event.src_path.replace(self.root_path, \".\")\n destination_path = event.dest_path.replace(self.root_path, '.')\n is_directory = event.is_directory\n\n # propagate the moved event if server connection is established\n if self.protocol.connected:\n self.protocol.send_move_event(is_directory, source_path, destination_path)\n else:\n logging.info(\"Connection with server has not been established, changes will not be propagated.\")", "def process_source_and_destination(self):\n self.rsync_command += [str(self.source_path) + '/' if self.source_content else '',\n str(self.current_destination_path)]\n logging.debug('Directories: %s -> %s',\n str(self.source_path) + '/' if self.source_content else '',\n str(self.current_destination_path))", "def syncfolder():" ]
[ "0.6254314", "0.60398024", "0.59097177", "0.58858806", "0.58225375", "0.5794603", "0.5771358", "0.5735009", "0.5731622", "0.5720588", "0.569069", "0.5670436", "0.56292623", "0.5626693", "0.56261504", "0.5596827", "0.5595239", "0.5590468", "0.5582738", "0.5580043", "0.55735177", "0.55503905", "0.5541647", "0.5522409", "0.5511312", "0.54824775", "0.54810834", "0.54431736", "0.5435586", "0.543553" ]
0.6373189
0
Add directories to the data dictionary. User provides keywords for each show and the directory for that show. Keywords are used so 'X.Files', 'XFiles', and 'X Files' will all be matched by the keywords 'X' and 'Files'
def add_directory():
    showKeywords = input("\nInput mandatory keywords for the show title seperated by a space.\n"
                         "Example: X files\n").lower().strip()
    while re.search('[^A-Za-z0-9 ]+', showKeywords) or showKeywords.startswith('defaultdirectory'):
        showKeywords = input("Invalid keywords, please input alphanumeric characters only\n" +
                             "Input mandatory keywords for the show title seperated by a space.\n"
                             "Example: X files\n").lower().strip()
    while showKeywords.lower() in data:
        showKeywords = input("Show already in database, enter new show keywords:\n")
    showPath = input("\nInput path for the folder for {}:\n".format(showKeywords) +
                     "Example: C:\\videos\\x files\n").strip()
    if not os.path.exists(showPath):
        os.makedirs(showPath)
        print("\n*** Directory did not exist. Created directory: '{}'".format(showPath))
        print("*** Move '{}' shows to directory: '{}'".format(showKeywords, showPath))
    data[showKeywords] = showPath
    save_json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_directory_entries(self, key, config):\n # Initialize key variables\n updated = False\n dir_dict = {\n 'log_directory': 'log',\n 'ingest_cache_directory': 'cache',\n }\n directory = general.root_directory()\n\n # Setup the key value to a known good default\n if key in config['main']:\n # Verify whether key value is empty\n if config['main'][key] is not None:\n # Create\n if os.path.isdir(config['main'][key]) is False:\n config['main'][key] = ('%s/%s') % (\n directory, dir_dict[key])\n updated = True\n else:\n config['main'][key] = ('%s/%s') % (directory, dir_dict[key])\n updated = True\n else:\n config['main'][key] = ('%s/%s') % (directory, dir_dict[key])\n updated = True\n\n # Return\n return (updated, config)", "def _parse_directories(d):\n for k, v in d.items():\n if isinstance(v, abc.Mapping):\n _parse_directories(v)\n else:\n d[k] = os.path.expandvars(v)\n return d", "def _set_dirs(self, datafolder):\n self.List_of_dir = []\n self.List_of_files = dict()\n folders = os.listdir(datafolder)\n folders.sort()\n for i in folders:\n if os.path.isdir(os.path.join(datafolder,i)) and i != '.ipynb_checkpoints': # ignore .ipynb_checkpoints, allowing the generator to work in Amazon\n self.List_of_dir.append(os.path.join(datafolder,i))\n self.List_of_files[os.path.join(datafolder,i)]=[]\n for file in os.listdir(os.path.join(datafolder, i, 'Input')):\n if file.split('.')[-1] == 'hdf5':\n self.List_of_files[os.path.join(datafolder,i)].append(file.split('.')[-2])\n self._nb_dir = len(self.List_of_dir)", "def set_keywords(self):\n\n if len(self.get_keywords()) == 0 and len(self.get_files()) > 0:\n self.keywords = self.files[0].get_parent()[\"title\"].split(\" \")\n for keyword in self.keywords:\n if str(keyword) in str(self.text):\n self.keywords = []", "def collect_keyword_dataframe(collect_all: bool = False,\n path: str = SAVED_ARTICLES_PATH\n ) -> pd.DataFrame:\n sentences: List[Dict[str, str]] = []\n keywords = os.listdir(path)\n for keyword in keywords: \n kw_directory = os.path.join(path, keyword)\n sentences = collect_article_text(\n kw_directory, sentences, keyword, collect_all\n )\n keyword_df = pd.DataFrame(sentences)\n return keyword_df", "def create(self):\n\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n process_name = sample_info[\"process_name_specific\"]\n for charge_selection in self.charge_selections:\n key_dir = getKey(process_name, charge_selection)\n for dir_type in [ DKEY_CFGS, DKEY_HIST, DKEY_LOGS, DKEY_RLES ]:\n initDict(self.dirs, [ key_dir, dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_LOGS ]:\n self.dirs[key_dir][dir_type] = os.path.join(self.configDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name)\n else:\n self.dirs[key_dir][dir_type] = os.path.join(self.outputDir, dir_type, self.channel,\n \"_\".join([ charge_selection ]), process_name)\n for dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_HIST, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n initDict(self.dirs, [ dir_type ])\n if dir_type in [ DKEY_CFGS, DKEY_SCRIPTS, DKEY_LOGS, DKEY_DCRD, DKEY_PLOT, DKEY_HADD_RT ]:\n self.dirs[dir_type] = os.path.join(self.configDir, dir_type, self.channel)\n else:\n self.dirs[dir_type] = os.path.join(self.outputDir, dir_type, self.channel)\n ##print \"self.dirs = \", self.dirs\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n 
create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n inputFileLists = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n logging.info(\"Checking input files for sample %s\" % sample_info[\"process_name_specific\"])\n inputFileLists[sample_name] = generateInputFileList(sample_name, sample_info, self.max_files_per_job, self.debug)\n \n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info[\"use_it\"] or sample_info[\"sample_category\"] in [ \"additional_signal_overlap\", \"background_data_estimate\" ]:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable_analyze, process_name)) \n\n is_mc = (sample_info[\"type\"] == \"mc\")\n lumi_scale = 1. if not (self.use_lumi and is_mc) else sample_info[\"xsection\"] * self.lumi / sample_info[\"nof_events\"]\n apply_genWeight = sample_info[\"apply_genWeight\"] if (is_mc and \"apply_genWeight\" in sample_info.keys()) else False\n sample_category = sample_info[\"sample_category\"]\n triggers = sample_info[\"triggers\"]\n apply_trigger_bits = (is_mc and (self.era == \"2015\" or (self.era == \"2016\" and sample_info[\"reHLT\"]))) or not is_mc\n\n for charge_selection in self.charge_selections:\n for central_or_shift in self.central_or_shifts:\n\n inputFileList = inputFileLists[sample_name]\n for jobId in inputFileList.keys():\n if central_or_shift != \"central\" and not is_mc:\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttH\") and sample_category != \"signal\":\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttW\") and sample_category != \"TTW\":\n continue\n if central_or_shift.startswith(\"CMS_ttHl_thu_shape_ttZ\") and sample_category != \"TTZ\":\n continue\n\n # build config files for executing analysis code\n key_dir = getKey(process_name, charge_selection)\n key_analyze_job = getKey(process_name, charge_selection, central_or_shift, jobId)\n\n ntupleFiles = inputFileList[jobId]\n if len(ntupleFiles) == 0:\n print \"Warning: ntupleFiles['%s'] = %s --> skipping job !!\" % (key_job, ntupleFiles)\n continue\n self.jobOptions_analyze[key_analyze_job] = {\n 'ntupleFiles' : ntupleFiles,\n 'cfgFile_modified' : os.path.join(self.dirs[key_dir][DKEY_CFGS], \"analyze_%s_%s_%s_%s_%i_cfg.py\" % \\\n (self.channel, process_name, charge_selection, central_or_shift, jobId)),\n 'histogramFile' : os.path.join(self.dirs[key_dir][DKEY_HIST], \"%s_%s_%s_%i.root\" % \\\n (process_name, charge_selection, central_or_shift, jobId)),\n 'logFile' : os.path.join(self.dirs[key_dir][DKEY_LOGS], \"analyze_%s_%s_%s_%s_%i.log\" % \\\n (self.channel, process_name, charge_selection, central_or_shift, jobId)),\n 'sample_category' : sample_category,\n 'triggers' : sample_info[\"triggers\"],\n 'charge_selection' : charge_selection,\n 'jet_minPt' : self.jet_minPt,\n 'jet_maxPt' : self.jet_maxPt,\n 'jet_minAbsEta' : self.jet_minAbsEta,\n 'jet_maxAbsEta' : self.jet_maxAbsEta,\n 'hadTau_selection_denominator' : self.hadTau_selection_denominator,\n 'hadTau_selections_numerator' : self.hadTau_selections_numerator,\n 'absEtaBins' : self.absEtaBins,\n ##'use_HIP_mitigation_mediumMuonId' : sample_info[\"use_HIP_mitigation_mediumMuonId\"],\n 'use_HIP_mitigation_mediumMuonId' : True,\n 'is_mc' : is_mc,\n 
'central_or_shift' : central_or_shift,\n 'lumi_scale' : 1. if not (self.use_lumi and is_mc) else sample_info[\"xsection\"] * self.lumi / sample_info[\"nof_events\"],\n 'apply_genWeight' : sample_info[\"genWeight\"] if (is_mc and \"genWeight\" in sample_info.keys()) else False,\n 'apply_trigger_bits' : (is_mc and (self.era == \"2015\" or (self.era == \"2016\" and sample_info[\"reHLT\"]))) or not is_mc,\n }\n self.createCfg_analyze(self.jobOptions_analyze[key_analyze_job])\n\n # initialize input and output file names for hadd_stage1\n key_hadd_stage1 = getKey(process_name, charge_selection)\n if not key_hadd_stage1 in self.inputFiles_hadd_stage1:\n self.inputFiles_hadd_stage1[key_hadd_stage1] = []\n self.inputFiles_hadd_stage1[key_hadd_stage1].append(self.jobOptions_analyze[key_analyze_job]['histogramFile'])\n self.outputFile_hadd_stage1[key_hadd_stage1] = os.path.join(self.dirs[DKEY_HIST], \"histograms_harvested_stage1_%s_%s_%s.root\" % \\\n (self.channel, process_name, charge_selection))\n\n # initialize input and output file names for hadd_stage2\n key_hadd_stage1 = getKey(process_name, charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n if not key_hadd_stage2 in self.inputFiles_hadd_stage2:\n self.inputFiles_hadd_stage2[key_hadd_stage2] = []\n self.inputFiles_hadd_stage2[key_hadd_stage2].append(self.outputFile_hadd_stage1[key_hadd_stage1])\n self.outputFile_hadd_stage2[key_hadd_stage2] = os.path.join(self.dirs[DKEY_HIST], \"histograms_harvested_stage2_%s_%s.root\" % \\\n (self.channel, charge_selection))\n\n logging.info(\"Creating configuration files for executing 'comp_jetToTauFakeRate'\")\n for charge_selection in self.charge_selections:\n key_comp_jetToTauFakeRate_job = getKey(charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job] = {\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"comp_jetToTauFakeRate_%s_cfg.py\" % charge_selection),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_HIST], \"comp_jetToTauFakeRate_%s.root\" % charge_selection),\n 'logFile' : os.path.join(\n self.dirs[DKEY_LOGS], \"comp_jetToTauFakeRate_%s.log\" % charge_selection),\n 'looseRegion' : \"jetToTauFakeRate_%s/denominator/\" % charge_selection,\n 'tightRegion' : \"jetToTauFakeRate_%s/numerator/\" % charge_selection,\n 'absEtaBins' : self.absEtaBins,\n 'ptBins' : self.ptBins\n }\n self.createCfg_comp_jetToTauFakeRate(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job])\n self.targets.append(self.jobOptions_comp_jetToTauFakeRate[key_comp_jetToTauFakeRate_job]['outputFile'])\n\n logging.info(\"Creating configuration files to run 'makePlots'\")\n for charge_selection in self.charge_selections:\n key_makePlots_job = getKey(charge_selection)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_cfg.py\" % self.channel),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s.png\" % self.channel),\n 'histogramDir' : \"jetToTauFakeRate_%s\" % charge_selection,\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n self.cfgFile_make_plots = self.cfgFile_make_plots_denominator\n 
for absEtaBin in [ \"absEtaLt1_5\", \"absEta1_5to9_9\" ]:\n key_makePlots_job = getKey(charge_selection, absEtaBin, \"denominator\")\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_%s_denominator_%s_cfg.py\" % (self.channel, charge_selection, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s_%s_denominator_%s.png\" % (self.channel, charge_selection, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s/denominator/%s\" % (charge_selection, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n for hadTau_selection_numerator in self.hadTau_selections_numerator:\n key_makePlots_job = getKey(charge_selection, absEtaBin, \"numerator\", hadTau_selection_numerator)\n key_hadd_stage2 = getKey(charge_selection)\n self.jobOptions_make_plots[key_makePlots_job] = {\n 'executable' : self.executable_make_plots,\n 'inputFile' : self.outputFile_hadd_stage2[key_hadd_stage2],\n 'cfgFile_modified' : os.path.join(\n self.dirs[DKEY_CFGS], \"makePlots_%s_%s_numerator_%s_%s_cfg.py\" % (self.channel, charge_selection, hadTau_selection_numerator, absEtaBin)),\n 'outputFile' : os.path.join(\n self.dirs[DKEY_PLOT], \"makePlots_%s_%s_numerator_%s_%s.png\" % (self.channel, charge_selection, hadTau_selection_numerator, absEtaBin)),\n 'histogramDir' : \"jetToTauFakeRate_%s/numerator/%s/%s\" % (charge_selection, hadTau_selection_numerator, absEtaBin),\n 'label' : None,\n 'make_plots_backgrounds' : [ \"TT\", \"TTW\", \"TTZ\", \"EWK\", \"Rares\" ],\n }\n self.createCfg_makePlots(self.jobOptions_make_plots[key_makePlots_job])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_analyze)\n self.sbatchFile_analyze = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_analyze_%s.py\" % self.channel)\n self.createScript_sbatch_analyze(self.executable_analyze, self.sbatchFile_analyze, self.jobOptions_analyze)\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable_comp_jetToTauFakeRate)\n self.sbatchFile_comp_jetToTauFakeRate = os.path.join(self.dirs[DKEY_SCRIPTS], \"sbatch_comp_jetToTauFakeRate.py\")\n self.createScript_sbatch(self.executable_comp_jetToTauFakeRate, self.sbatchFile_comp_jetToTauFakeRate, self.jobOptions_comp_jetToTauFakeRate)\n\n lines_makefile = []\n self.addToMakefile_analyze(lines_makefile)\n self.addToMakefile_hadd_stage1(lines_makefile)\n self.addToMakefile_hadd_stage2(lines_makefile)\n self.addToMakefile_comp_jetToTauFakeRate(lines_makefile)\n self.addToMakefile_make_plots(lines_makefile)\n self.createMakefile(lines_makefile)\n \n logging.info(\"Done\")", "def gen_folders(rho, kappa, km, pa, analysis, dbase, analysisdbase):\n \n path1 = 'density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa)\n path2 = analysis + '_density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa) + '.txt' \n datafolder = dbase + path1 + '/'\n analysisfile = analysisdbase + path2 \n\n return datafolder, analysisfile", "def _listfiles(dirname):\n for root, dirs, files in os.walk(dirname):\n fkey = os.path.basename(root)\n f = []\n for name in files:\n key = 
os.path.splitext(name)[0]\n listfilters[key] = os.path.join(root, name)\n f.append(key)\n if f:\n showfilters[fkey] = f", "def CollectDatasets(redirector_str):\n \n \n # uploadDir = 'srv/' for lpcjobqueue shell or TTbarAllHadUproot/ for coffea casa and WinterFell\n \n if 'cmsxrootd' in redirector_str:\n uploadDir = 'srv'\n else:\n uploadDir = 'TTbarAllHadUproot'\n \n uploadDir = ''\n \n filedir = 'nanoAODv9Files/'\n Years = ['UL16', 'UL17', 'UL18']\n VFP = ['preVFP', 'postVFP'] # preVFP unavailable in Winterfell for the moment\n # VFP = ['postVFP'] # Only for simple test in WinterFell\n filesets = {} # To be filled and returned by this function\n \n # ---- Before concatenation with +=, lists should be declard ---- # \n \n for y in Years:\n if '16' in y:\n for v in VFP:\n filesets[y+v+'_QCD'] = []\n filesets[y+v+'_TTbar_700_1000'] = []\n filesets[y+v+'_TTbar_1000_Inf'] = []\n # ---- JetHT and SingleMu ---- #\n for l in ['', 'B', 'C', 'D', 'E', 'F']:\n filesets[y+'preVFP_JetHT'+l+'_Data'] = []\n filesets[y+'preVFP_SingleMu'+l+'_Data'] = []\n for l in ['', 'F', 'G', 'H']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n \n elif '17' in y:\n filesets[y+'postVFP_QCD'] = []\n filesets[y+'postVFP_TTbar'] = []\n for l in ['', 'B', 'C', 'D', 'E', 'F']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n else:\n filesets[y+'postVFP_QCD'] = []\n filesets[y+'postVFP_TTbar'] = []\n for l in ['', 'A', 'B', 'C', 'D']:\n filesets[y+'postVFP_JetHT'+l+'_Data'] = []\n filesets[y+'postVFP_SingleMu'+l+'_Data'] = []\n \n # ---- Loop through years and VFP status, filling the filesets dictionary with the MC file locations from corresponding txt files ---- #\n \n for y in Years:\n if '16' in y:\n for v in VFP:\n # ---- QCD ---- #\n ulqcdfilename = filedir + 'QCD/QCD_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulqcdfilename) as f:\n ulqcdfiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_QCD'] += ulqcdfiles\n \n # ---- TTbar ---- #\n ulttbar700to1000filename = filedir + 'TT/TT_Mtt-700to1000_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulttbar700to1000filename) as f:\n ulttbar700to1000files = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n ulttbar1000toInffilename = filedir + 'TT/TT_Mtt-1000toInf_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulttbar1000toInffilename) as f:\n ulttbar1000toInffiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_TTbar_700_1000'] += ulttbar700to1000files\n filesets[y+v+'_TTbar_1000_Inf'] += ulttbar1000toInffiles\n \n # ---- JetHT ---- #\n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist:\n if 'pre' in v:\n if 'Run2016' in filename: #preVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2016 = [redirector_str + s.strip() for s in f.readlines() if ('HIPM' in s and not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2016 \n elif 'post' in v:\n if 'Run2016' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2016 = [redirector_str + s.strip() for s in f.readlines() if ('HIPM' not in s and not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2016\n \n # ---- Z' Dark Matter Mediator ---- #\n ulZprimeDMfilename = filedir + 'ZprimeDMToTTbar/ZprimeDMToTTbar_NanoAODv9_' + y + '_' + v + '.txt'\n ulDMfiles=[]\n k=0\n for i in range(1000, 5500, 500):\n with 
open(ulZprimeDMfilename) as f:\n ulDMfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"ResoIncl_MZp\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_DM'+str(i)] = ulDMfiles[k]\n k += 1\n \n# # ---- RS KK Gluon ---- #\n# ulRSGluonfilename = filedir + 'RSGluonToTT/RSGluonToTT_NanoAODv9_' + y + '_' + v + '.txt'\n# ulRSGluonfiles=[]\n# l=0\n# for i in range(1000, 5500, 500):\n# with open(ulRSGluonfilename) as f:\n# ulRSGluonfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"RSGluonToTT_M-\"+str(i) in s and not s.startswith('#'))])\n# filesets[y+v+'_RSGluon'+str(i)] += ulRSGluonfiles[l]\n# l += 1\n \n else: # UL17 and UL18\n v = VFP[1] # No preVFP after 2016 Run vertex problem was fixed\n \n # ---- QCD ---- #\n ulqcdfilename = filedir + 'QCD/QCD_NanoAODv9_' + y + '_' + v + '.txt'\n with open(ulqcdfilename) as f:\n ulqcdfiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n filesets[y+v+'_QCD'] += ulqcdfiles\n\n# # ---- TTbar ---- #\n# ulttbar700to1000filename = filedir + 'TT/TT_Mtt-700to1000_NanoAODv9_' + y + '_' + v + '.txt'\n# with open(ulttbar700to1000filename) as f:\n# ulttbar700to1000files = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n# ulttbar1000toInffilename = filedir + 'TT/TT_Mtt-1000toInf_NanoAODv9_' + y + '_' + v + '.txt'\n# with open(ulttbar1000toInffilename) as f:\n# ulttbar1000toInffiles = [redirector_str + s.strip() for s in f.readlines() if not s.startswith('#')]\n# filesets[y+v+'_TTbar_700_1000'] += ulttbar700to1000files\n# filesets[y+v+'_TTbar_1000_Inf'] += ulttbar1000toInffiles\n \n # ---- JetHT ---- #\n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist: \n if 'Run2017' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2017 = [redirector_str + s.strip() for s in f.readlines() if (not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2017\n elif 'Run2018' in filename: #postVFP\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2018 = [redirector_str + s.strip() for s in f.readlines() if (not s.startswith('#'))] \n filesets[y+v+'_JetHT_Data'] += jetdatafiles2018\n\n # ---- Z' Dark Matter Mediator ---- #\n ulZprimeDMfilename = filedir + 'ZprimeDMToTTbar/ZprimeDMToTTbar_NanoAODv9_' + y + '_' + v + '.txt'\n ulDMfiles=[]\n k=0\n for i in range(1000, 5500, 500):\n with open(ulZprimeDMfilename) as f:\n ulDMfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"ResoIncl_MZp\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_DM'+str(i)] = ulDMfiles[k]\n k += 1\n \n # ---- RS KK Gluon ---- #\n ulRSGluonfilename = filedir + 'RSGluonToTT/RSGluonToTT_NanoAODv9_' + y + '_' + v + '.txt'\n ulRSGluonfiles=[]\n l=0\n for i in range(1000, 5500, 500):\n with open(ulRSGluonfilename) as f:\n ulRSGluonfiles.append([redirector_str + s.strip() for s in f.readlines() if (\"RSGluonToTT_M-\"+str(i) in s and not s.startswith('#'))])\n filesets[y+v+'_RSGluon'+str(i)] = ulRSGluonfiles[l]\n l += 1\n \n \n # ---- JetHT Eras---- #\n \n datafilelist = os.listdir(filedir + 'JetHT/')\n for filename in datafilelist:\n \n if 'Run2016B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2016b = [redirector_str + s.strip() for s in b.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTB_Data'] += jetdatafiles2016b\n elif 'Run2016C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2016c = [redirector_str + s.strip() for s in 
c.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTC_Data'] += jetdatafiles2016c\n elif 'Run2016D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2016d = [redirector_str + s.strip() for s in d.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTD_Data'] += jetdatafiles2016d\n elif 'Run2016E' in filename:\n with open(filedir + 'JetHT/' + filename) as e:\n jetdatafiles2016e = [redirector_str + s.strip() for s in e.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_JetHTE_Data'] += jetdatafiles2016e\n elif 'Run2016F' in filename:\n with open(filedir + 'JetHT/' + filename) as fold:\n jetdatafiles2016fold = [redirector_str + s.strip() for s in fold.readlines() if ('HIPM' in s and not s.startswith('#'))]\n with open(filedir + 'JetHT/' + filename) as fnew:\n jetdatafiles2016fnew = [redirector_str + s.strip() for s in fnew.readlines() if ('HIPM' not in s and not s.startswith('#'))]\n filesets['UL16preVFP_JetHTF_Data'] += jetdatafiles2016fold\n filesets['UL16postVFP_JetHTF_Data'] += jetdatafiles2016fnew\n elif 'Run2016G' in filename:\n with open(filedir + 'JetHT/' + filename) as g:\n jetdatafiles2016g = [redirector_str + s.strip() for s in g.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_JetHTG_Data'] += jetdatafiles2016g\n elif 'Run2016H' in filename:\n with open(filedir + 'JetHT/' + filename) as h:\n jetdatafiles2016h = [redirector_str + s.strip() for s in h.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_JetHTH_Data'] += jetdatafiles2016h\n \n if 'Run2017B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2017b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTB_Data'] += jetdatafiles2017b\n elif 'Run2017C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2017c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTC_Data'] += jetdatafiles2017c\n elif 'Run2017D' in filename:\n with open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2017d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTD_Data'] += jetdatafiles2017d\n elif 'Run2017E' in filename:\n with open(filedir + 'JetHT/' + filename) as e:\n jetdatafiles2017e = [redirector_str + s.strip() for s in e.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTE_Data'] += jetdatafiles2017e\n elif 'Run2017F' in filename:\n with open(filedir + 'JetHT/' + filename) as f:\n jetdatafiles2017f = [redirector_str + s.strip() for s in f.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_JetHTF_Data'] += jetdatafiles2017f\n \n if 'Run2018A' in filename:\n with open(filedir + 'JetHT/' + filename) as a:\n jetdatafiles2018a = [redirector_str + s.strip() for s in a.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTA_Data'] += jetdatafiles2018a\n elif 'Run2018B' in filename:\n with open(filedir + 'JetHT/' + filename) as b:\n jetdatafiles2018b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTB_Data'] += jetdatafiles2018b\n elif 'Run2018C' in filename:\n with open(filedir + 'JetHT/' + filename) as c:\n jetdatafiles2018c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTC_Data'] += jetdatafiles2018c\n elif 'Run2018D' in filename:\n with 
open(filedir + 'JetHT/' + filename) as d:\n jetdatafiles2018d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_JetHTD_Data'] += jetdatafiles2018d\n \n\n \n # ---- Single Muon ---- #\n datafilelist = os.listdir(filedir + 'SingleMu/')\n for filename in datafilelist:\n \n if 'Run2016B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2016b = [redirector_str + s.strip() for s in b.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuB_Data'] += jetdatafiles2016b\n elif 'Run2016C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2016c = [redirector_str + s.strip() for s in c.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuC_Data'] += jetdatafiles2016c\n elif 'Run2016D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2016d = [redirector_str + s.strip() for s in d.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuD_Data'] += jetdatafiles2016d\n elif 'Run2016E' in filename:\n with open(filedir + 'SingleMu/' + filename) as e:\n jetdatafiles2016e = [redirector_str + s.strip() for s in e.readlines() if not s.startswith('#')] \n filesets['UL16preVFP_SingleMuE_Data'] += jetdatafiles2016e\n elif 'Run2016F' in filename:\n with open(filedir + 'SingleMu/' + filename) as fold:\n jetdatafiles2016fold = [redirector_str + s.strip() for s in fold.readlines() if ('HIPM' in s and not s.startswith('#'))]\n with open(filedir + 'SingleMu/' + filename) as fnew:\n jetdatafiles2016fnew = [redirector_str + s.strip() for s in fnew.readlines() if ('HIPM' not in s and not s.startswith('#'))]\n filesets['UL16preVFP_SingleMuF_Data'] += jetdatafiles2016fold\n filesets['UL16postVFP_SingleMuF_Data'] += jetdatafiles2016fnew\n elif 'Run2016G' in filename:\n with open(filedir + 'SingleMu/' + filename) as g:\n jetdatafiles2016g = [redirector_str + s.strip() for s in g.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_SingleMuG_Data'] += jetdatafiles2016g\n elif 'Run2016H' in filename:\n with open(filedir + 'SingleMu/' + filename) as h:\n jetdatafiles2016h = [redirector_str + s.strip() for s in h.readlines() if not s.startswith('#')] \n filesets['UL16postVFP_SingleMuH_Data'] += jetdatafiles2016h\n \n if 'Run2017B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2017b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuB_Data'] += jetdatafiles2017b\n elif 'Run2017C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2017c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuC_Data'] += jetdatafiles2017c\n elif 'Run2017D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2017d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuD_Data'] += jetdatafiles2017d\n elif 'Run2017E' in filename:\n with open(filedir + 'SingleMu/' + filename) as e:\n jetdatafiles2017e = [redirector_str + s.strip() for s in e.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuE_Data'] += jetdatafiles2017e\n elif 'Run2017F' in filename:\n with open(filedir + 'SingleMu/' + filename) as f:\n jetdatafiles2017f = [redirector_str + s.strip() for s in f.readlines()[::3] if not s.startswith('#')] \n filesets['UL17postVFP_SingleMuF_Data'] += 
jetdatafiles2017f\n \n if 'Run2018A' in filename:\n with open(filedir + 'SingleMu/' + filename) as a:\n jetdatafiles2018a = [redirector_str + s.strip() for s in a.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuA_Data'] += jetdatafiles2018a\n elif 'Run2018B' in filename:\n with open(filedir + 'SingleMu/' + filename) as b:\n jetdatafiles2018b = [redirector_str + s.strip() for s in b.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuB_Data'] += jetdatafiles2018b\n elif 'Run2018C' in filename:\n with open(filedir + 'SingleMu/' + filename) as c:\n jetdatafiles2018c = [redirector_str + s.strip() for s in c.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuC_Data'] += jetdatafiles2018c\n elif 'Run2018D' in filename:\n with open(filedir + 'SingleMu/' + filename) as d:\n jetdatafiles2018d = [redirector_str + s.strip() for s in d.readlines()[::3] if not s.startswith('#')] \n filesets['UL18postVFP_SingleMuD_Data'] += jetdatafiles2018d\n \n \n # print(filesets['UL16postVFP_JetHT_Data'])\n # print('==========================================================================================================')\n # print(filesets['UL16postVFP_TTbar'])\n \n return filesets", "def set_keys(self, keys):\r\n \r\n self._keydict=[]\r\n self._keys=[]\r\n\r\n # Connect userfriendly keywords to the characters that indicates the energy in Dirac.\r\n for key in keys:\r\n tmpdat=[]\r\n infile=open(self._datafile, 'r')\r\n if not infile:\r\n if self._resultfile:\r\n self._resultfile.write(\"Error (parsedirac, set_keys): Datafile on how to interpret the dirac files does not exists. \\n\"\r\n \" File=\"+self._datafile)\r\n else:\r\n print \"Error (parsedirac, set_keys): Datafile on how to interpret the dirac files does not exists. \"\r\n print \" File=\"+self._datafile\r\n sys.exit()\r\n for tmpc in infile:\r\n # Read the line starting with the current keyword into the list tmpdat.\r\n ilast=0\r\n quoted=False\r\n for i in range(0, len(tmpc)):\r\n if tmpc[i:i+1]=='#' and not(quoted): break\r\n elif (tmpc[i:i+1]==' ' and not(quoted)) or i==len(tmpc)-1:\r\n if ilast==0 and key==tmpc[ilast:i]:\r\n tmpdat=[tmpc[ilast:i]]\r\n ilast=i\r\n elif ilast>0:\r\n if tmpc[ilast:ilast+1]=='\"': tmpdat.append(tmpc[ilast+1:i-1])\r\n else: tmpdat.append(tmpc[ilast+1:i])\r\n else: break\r\n ilast=i\r\n elif tmpc[i:i+1]=='\"':\r\n quoted=not(quoted)\r\n if quoted: ilast=i\r\n if ilast: break\r\n else:\r\n if self._resultfile: self._resultfile.write(\"WARNING: Key not found in parsedirac.dat. Key= \"+key)\r\n else: print \"WARNING: Key not found in parsedirac.dat. 
Key=\"+key\r\n sys.exit()\r\n if 0<len(tmpdat):\r\n # Make a dictonary from tmpdat.\r\n tmpkey=[]\r\n j=0\r\n for i in range(0, len(diracparser._keyname)):\r\n if i<len(tmpdat):\r\n if i==len(diracparser._keyname)-2:\r\n tmpkey.append(diracparser._keyname[i])\r\n for j in range(i, len(tmpdat)-1):\r\n tmpkey[i][1].append(tmpdat[j])\r\n j=len(tmpdat)-1\r\n else: \r\n tmpkey.append((diracparser._keyname[i][0], tmpdat[j]))\r\n j=j+1\r\n else:\r\n if i==len(diracparser._keyname)-1:\r\n try:\r\n tmpn=float(tmpdat[-1])\r\n except:\r\n tmpn=0\r\n if (tmpn): tmpkey.append((diracparser._keyname[i][0], tmpn))\r\n else: tmpkey.append(diracparser._keyname[i])\r\n else: tmpkey.append(diracparser._keyname[i])\r\n\r\n\r\n self._keydict.append(dict(tmpkey))\r\n self._keys.append(key)\r\n self._energy=self.readenergy(self._filelist)", "def __init__(self, data_dirs, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.data_dirs = strax.to_str_tuple(data_dirs)\n\n for d in self.data_dirs:\n try:\n os.makedirs(d)\n except FileExistsError:\n pass\n else:\n self.log.debug(f\"Created data dir {d}\")", "def create_station_dics(data_directories):\n \n files_all = {} \n for k,v in data_directories.items() :\n files = os.listdir(v)\n \n for f in files:\n station = f.split('_')[0] \n if station not in files_all.keys():\n files_all[station] = {}\n \n if k == 'ncar': # separating ncar temperature and wind files \n if 'trhc' in f:\n k = 'ncar_t'\n elif 'windc' in f:\n k = 'ncar_w'\n files_all[station][k] = ''\n files_all[station][k] = v + '/' + f # compelte path to the netCDF file \n\n #print('check') \n \n \n return files_all", "def _detector_dir(self):\n detector_dir = {}\n for name in self._detectors:\n# if name in self._evt_dets:\n# detector_dir[name.upper()] = name\n detector_dir[name] = name\n return detector_dir", "def set_directories(self, directories):\n\t\tself.directoryModel.clear()\n\t\tfor directoryTagName, directory in directories:\n\t\t\tdirectoryTagName = directoryTagName[:DIRECTORY_TAG_NAME_MAX]\n\t\t\tself.directoryModel.append_row(directoryTagName=directoryTagName, directory=directory)", "def set_folders(self):\n self._tst_dir = self._val_dir = self._trn_dir = self._ds_root\n self._lbl_dir = f\"{self._ds_root}/flow\"\n self._pred_lbl_dir = f\"{self._ds_root}/flow_pred\"", "def search(self,path,key_words):\t#key_words must be tuple\n\t\ttry:\n\t\t\tall=os.walk(path,False)\t#os.walk() is a generator , the return is a tuple which is (dirpath,dirnames,filenames)\n\t\texcept:\n\t\t\tpass\n\t\telse:\n\t\t\tfor item in all:\n\t\t\t\tfilepath=item[0]\n\t\t\t\tfor filename in item[2]:\n\t\t\t\t\tfor key_word in key_words:\t#find all key_word\n\t\t\t\t\t\tif key_word in filename.lower():\t#ignore case of word , and only search filename\n\t\t\t\t\t\t\tself.result.append(os.path.join(filepath,filename))", "def generate_clips_kwds(base_path='../datasets/TIMIT/TRAIN/'):\n if not os.path.exists(kw_path):\n os.mkdir(kw_path)\n else:\n shutil.rmtree(kw_path)\n os.mkdir(kw_path)\n\n non_kw_clips = []\n kw_clips = {k: [] for k in keywords}\n\n for dialect in listdir(base_path):\n print(\"Dialect:\", dialect)\n\n for speaker in listdir(base_path + dialect):\n bp = base_path + dialect + '/' + speaker + '/'\n\n for file in listdir(bp):\n if file.split('.')[-1] == 'WRD':\n with open(bp + file, 'r') as f:\n dat = f.readlines()\n\n # read the word level annotations and crop out the keywords\n all_dat = [x.strip().split(' ') for x in dat]\n words = [x[-1] for x in all_dat]\n\n # check if words in utterance and keyword 
have ONE and ONLY ONE keyword\n inter = set(keywords).intersection(set(words))\n if len(inter) == 1:\n cur_keyword = inter.pop()\n word_line = all_dat[words.index(cur_keyword)]\n kw_clips[cur_keyword].append(\n (bp + file, (int(word_line[0]) // downsample_rate, int(word_line[1]) // downsample_rate))\n )\n else:\n non_kw_clips.append(bp + file)\n\n chosen_kw_clips = {k: [] for k in keywords}\n kw_templates = {k: [] for k in keywords}\n\n for word, l in kw_clips.items():\n # choose desired number fo templates and keyword utterances\n idxs = np.random.choice(len(l), utter_per_kw + templates_per_kw)\n for idx in idxs[:utter_per_kw]:\n chosen_kw_clips[word].append(l[idx][0])\n for idx in idxs[utter_per_kw:]:\n kw_templates[word].append(l[idx])\n\n # Save keyword wav files\n for word, l in kw_templates.items():\n for idx in range(len(l)):\n save_name = kw_path + word + '_' + str(idx) + '.wav'\n start_t, stop_t = l[idx][1]\n wav_path = l[idx][0][:-3] + 'wav'\n (rate, sig) = wav.read(wav_path)\n only_kw = sig[start_t:stop_t]\n wav.write(save_name, rate, only_kw)\n\n np.random.shuffle(non_kw_clips)\n print(\n \"non-kw clips: {0} ; kw-clips: {1}\".format(len(non_kw_clips), sum([len(x) for x in chosen_kw_clips.values()])))\n return non_kw_clips[:total_nonkw_utter], chosen_kw_clips", "def process_keyword(self, keyword, index, file_path):\n if keyword not in self.word_dict:\n self.word_dict[keyword] = []\n\n word_dict_entry = self.word_dict[keyword]\n\n if not self.keyword_file_presence_exists(keyword, file_path):\n word_dict_entry.append(KeywordFilePresence(file_path))\n\n word_dict_entry[-1].add_index(index)", "def populate(self, dr=None):\n if dr is not None: self.directory = dr\n \n for k in OM.glob_strings:\n string =self.directory+\"/\"+OM.glob_strings[k]\n print(\"OM::populate -- Checking \",k,\" (\",string,\")\", end=\"\")\n fnames = glob.glob(string)\n print(\"... 
found\",len(fnames), \"files\")\n setattr(self, k, fnames)\n #print(k, lst)", "def addDrizKeywords(self, hdr, versions):\n\n # Extract some global information for the keywords\n _geom = 'User parameters'\n\n _imgnum = 0\n for pl in self.parlist:\n\n # Start by building up the keyword prefix based\n # on the image number for the chip\n # _keyprefix = 'D%03d'%_imgnum\n _imgnum += 1\n\n drizdict = DRIZ_KEYWORDS.copy()\n # Update drizdict with current values\n # Any limit on the length of the strings was removed as an update to\n # new versions of the FITS standard and to accommodate MVM processing.\n drizdict['VER']['value'] = pl['driz_version']\n drizdict['DATA']['value'] = pl['data']\n drizdict['DEXP']['value'] = pl['exptime']\n drizdict['OUDA']['value'] = pl['outFinal']\n drizdict['OUWE']['value'] = pl['outWeight']\n if pl['outContext'] is None:\n outcontext = \"\"\n else:\n outcontext = pl['outContext']\n drizdict['OUCO']['value'] = outcontext\n if self.single:\n drizdict['MASK']['value'] = pl['singleDrizMask']\n else:\n drizdict['MASK']['value'] = pl['finalMask']\n\n # Process the values of WT_SCL to be consistent with\n # what IRAF Drizzle would output\n if 'wt_scl_val' in pl:\n _wtscl = pl['wt_scl_val']\n else:\n if pl['wt_scl'] == 'exptime': _wtscl = pl['exptime']\n elif pl['wt_scl'] == 'expsq': _wtscl = pl['exptime'] * pl['exptime']\n else: _wtscl = pl['wt_scl']\n\n drizdict['WTSC']['value'] = _wtscl\n drizdict['KERN']['value'] = pl['kernel']\n drizdict['PIXF']['value'] = pl['pixfrac']\n drizdict['OUUN']['value'] = self.units\n if pl['fillval'] is None:\n _fillval = 'INDEF'\n else:\n _fillval = pl['fillval']\n drizdict['FVAL']['value'] = _fillval\n drizdict['WKEY']['value'] = pl['driz_wcskey']\n\n drizdict['SCAL'] = {'value': pl['scale'], 'comment': 'Drizzle, pixel size (arcsec) of output image'}\n drizdict['ISCL'] = {'value': pl['idcscale'], 'comment': 'Drizzle, default IDCTAB pixel size(arcsec)'}\n\n # Now update header with values\n writeDrizKeywords(hdr, _imgnum, drizdict)\n del drizdict\n\n # Add version information as HISTORY cards to the header\n if versions is not None:\n ver_str = \"AstroDrizzle processing performed using: \"\n hdr.add_history(ver_str)\n for k in versions.keys():\n ver_str = ' ' + str(k) + ' Version ' + str(versions[k])\n hdr.add_history(ver_str)", "def CreateDirs(self):\n# First, create a list of directories.\n dnames = []\n tags = ['', '_m', '_mf']\n for entry in self.info.keys():\n if self.info[entry]['type'] == 'epi':\n for tag in tags:\n fname = self.info[entry].get('imgfile%s' % tag, None)\n if fname is not None:\n dnames.append(os.path.dirname(fname))\n else:\n if self.info[entry].get('outdir',None) is not None:\n dnames.append(self.info[entry]['outdir'])\n\n# Create them if they don't already exist.\n for dname in dnames:\n if not os.path.exists(dname):\n self.MakeDir(dname)\n if self.verbose:\n print 'mkdir %s' % dname", "def TvScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for key, location in tvshows.iteritems():\r\n for filename in fnmatch.filter(filenames, key):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(location, filename))\r\n print color.GREEN + 'File succesfully moved!' 
+ color.ENDC\r\n # Send the file through another filter in case of lower case filenames \r\n for filename in fnmatch.filter(filenames, key.lower()):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(location, filename))\r\n print color.GREEN + 'File succesfully moved!' + color.ENDC\r\n print 'Finished Scanning For TV Shows'", "def process_folder(root, path=\"\"):\n myDict = {}\n if path:\n if root.cd(path):\n for key in ROOT.gDirectory.GetListOfKeys():\n filterKey(root, key, path, myDict, \"__List\")\n else:\n for key in ROOT.gDirectory.GetListOfKeys():\n mypath = ROOT.gDirectory.GetPathStatic()\n filterKey(root, key, mypath, myDict, \"\")\n ROOT.gDirectory.cd(mypath)\n return myDict", "def records_to_filepaths(\n records: Union[kapture.RecordsCamera, kapture.RecordsWifi, kapture.RecordsLidar, kapture.RecordsGnss],\n kapture_dirpath: str\n) -> Dict[str, str]:\n return {filename: path_secure(path.join(kapture_dirpath, RECORD_DATA_DIRNAME, filename))\n for _, _, filename in kapture.flatten(records)}", "def buildDataMap(dataDir,trDir=None,trExt=None):\n \n dataMap = {}\n \n if not trDir:\n trDir = 'TrainingObjects/'\n if not trExt:\n trExt = 'TrainingObject.h5'\n\n dataMap['object'] = {'{}{}'.format(dataDir,trDir): trExt}\n\n dataMap['midline'] = {'{}Midlines/'.format(dataDir) : \n 'Midline_Indices.mat'}\n dataMap['matching'] = {'{}MatchingLibraries/Test/MatchingMatrices/'.format(dataDir) : \n 'MatchingMatrix.0.05.Frequencies.mat'}\n \n return dataMap", "def get_keyword_usage_in_dir(self, path):\n\t\tkeyword_usage = {}\n\t\tkeyword_usage['@fc@'] = 0\n\n\t\tdef count_keywords(file_path):\n\t\t\tkeyword_usage['@fc@'] += 1\n\t\t\ttry:\n\t\t\t\tfor kw in self.kw.GetKeywordsFromPath(file_path):\n\t\t\t\t\tif kw in keyword_usage.keys():\n\t\t\t\t\t\tkeyword_usage[kw] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tkeyword_usage[kw] = 1\n\t\t\texcept (OSError, IOError), e: pass\n\n\t\tif self.recursive.get_active(): max = 99\n\t\telse: max = 1\n\n\t\tmap( lambda x: count_keywords(x),\n\t\t\tfsapi.minifind(path, maxdepth=max, itype=stat.S_IFREG))\n\n\t\tassert logging.debug('Dir %s, %d files, kwu: %s.' % (styles.stylize(styles.ST_PATH, path), keyword_usage['@fc@'], keyword_usage))\n\n\t\t# look if each keyword is on all file or on only some files\n\t\tkuk = keyword_usage.keys()\n\t\tfor kw in self.kw.keywords.keys():\n\t\t\tif kw in kuk:\n\t\t\t\tif keyword_usage[kw] == keyword_usage['@fc@']:\n\t\t\t\t\tkeyword_usage[kw] = self.checked\n\t\t\t\telse:\n\t\t\t\t\tkeyword_usage[kw] = self.inconsistent\n\t\t\telse:\n\t\t\t\tkeyword_usage[kw] = self.unchecked\n\n\t\tassert logging.debug('Dir %s, %d files, final kwu: %s.' 
% (styles.stylize(styles.ST_PATH, path), keyword_usage['@fc@'], keyword_usage))\n\n\t\treturn keyword_usage", "def user_directories():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('User Directories', level=1)\r\n userdirectories = get_qlik_sense.get_userdirectory()\r\n num_of_udc = len(userdirectories)\r\n table = document.add_table(rows=num_of_udc+1, cols=6)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n row.cells[1].text = 'userDirectoryName'\r\n row.cells[2].text = 'configured'\r\n row.cells[3].text = 'operational'\r\n row.cells[4].text = 'type'\r\n row.cells[5].text = 'syncOnlyLoggedInUsers'\r\n for directory in range(num_of_udc):\r\n row = table.rows[directory+1]\r\n row.cells[0].text = str(userdirectories[directory][0])\r\n row.cells[1].text = str(userdirectories[directory][1])\r\n row.cells[2].text = str(userdirectories[directory][2])\r\n row.cells[3].text = str(userdirectories[directory][3])\r\n row.cells[4].text = str(userdirectories[directory][4])\r\n row.cells[5].text = str(userdirectories[directory][5])\r\n\r\n # document.add_page_break()\r", "def find_data_dirs(cls, directory,\n basedata_dir=c.WORDS_DIR,\n markables_dir=c.MARKABLES_DIR,\n dirs_to_ignore=c.DIRS_TO_IGNORE):\n dirs_to_ignore = set(dirs_to_ignore)\n for subdir, subsubdirs, _ in os.walk(directory):\n if os.path.basename(subdir) in dirs_to_ignore:\n continue\n has_words = basedata_dir in subsubdirs\n has_markables = markables_dir in subsubdirs\n if has_words and has_markables:\n logger.debug(f\"subdir: {subdir}\")\n yield subdir\n elif has_markables:\n logger.warn(\n f\"{subdir} has a markables directory ({markables_dir}),\"\n f\" but no words directory ({basedata_dir}).\"\n )\n elif has_words:\n logger.warn(\n f\"{subdir} has a words directory ({basedata_dir}), but no\"\n f\" markables directory ({markables_dir}).\"\n )", "def files_debug(dirfiles):\r\n\r\n files_path = {}\r\n \r\n normal_folder = [\"K002\"]\r\n OR_folder = [\"KA01\"]\r\n IR_folder = [\"KI01\"]\r\n MIX_folder = [\"KB23\"] # VERIFICAR\r\n\r\n settings_files = [\"N15_M07_F10_\", \"N09_M07_F10_\", \"N15_M01_F10_\", \"N15_M07_F04_\"]\r\n\r\n n = 20\r\n\r\n # Normal\r\n for folder in normal_folder:\r\n for idx, setting in enumerate(settings_files):\r\n for i in range(1, n+1):\r\n key = \"Normal_\" + folder + \"_\" + str(idx) + \"_\" + str(i)\r\n files_path[key] = os.path.join(dirfiles, folder, setting + folder +\r\n \"_\" + str(i) + \".mat\")\r\n\r\n # OR\r\n for folder in OR_folder:\r\n for idx, setting in enumerate(settings_files):\r\n for i in range(1, n+1):\r\n key = \"OR_\" + folder + \"_\" + str(idx) + \"_\" + str(i)\r\n files_path[key] = os.path.join(dirfiles, folder, setting + folder +\r\n \"_\" + str(i) + \".mat\")\r\n\r\n # IR\r\n for folder in IR_folder:\r\n for idx, setting in enumerate(settings_files):\r\n for i in range(1, n+1):\r\n key = \"IR_\" + folder + \"_\" + str(idx) + \"_\" + str(i)\r\n files_path[key] = os.path.join(dirfiles, folder, setting + folder +\r\n \"_\" + str(i) + \".mat\")\r\n\r\n return files_path", "def generate_folder_summaries(final_dictionary):\n for cat in final_dictionary:\n if len(cat[1]) == 0:\n pass\n else:\n category_name = cat[0]\n summary_path = \"./%s/summary.txt\" % category_name\n with open(summary_path, 'w') as sumry:\n for match_file in cat[1]:\n 
sumry.write(match_file[0] + '\\n')" ]
[ "0.55344474", "0.55103064", "0.5479557", "0.54403985", "0.54251444", "0.5390415", "0.53592753", "0.53480476", "0.53404677", "0.5294248", "0.5284923", "0.5277684", "0.5273796", "0.5273391", "0.52395517", "0.52274686", "0.52246755", "0.5173277", "0.51578385", "0.5152383", "0.51160526", "0.5104612", "0.5097997", "0.5091522", "0.5083942", "0.5080549", "0.5057069", "0.50535184", "0.50483435", "0.50363815" ]
0.74111634
0
Remove directory from data dictionary Remove show/directory from list and update data.json
def remove_directory(): count = 1 # Creates a dict to map user selection numbers to keys of the data dict deleteDict = {} print('\n') for key in sorted(data): if not key.startswith('defaultdirectory'): print("{}. {} --> {}".format(count, key, data[key])) deleteDict[count] = key count += 1 print("{}. Cancel".format(count)) selection = input("\nSelect the number of the directory you want to delete:\n").strip() while (not selection.isdigit()) or (int(selection) not in deleteDict) and \ (int(selection) != count): selection = input("Invalid selection. Select the number of the directory you want " "to delete:\n").strip() selection = int(selection) if selection != count: print("\n*** {} has been deleted".format(deleteDict[selection])) del data[deleteDict[selection]] save_json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def anonymize(data_dir):\n\n for root, dirs, files in os.walk(dst_data_dir, topdown=False):\n for dir_name in sorted(dirs):\n dir_path = os.path.join(root, dir_name)\n json_path = os.path.join(root, dir_name, 'data.json')\n print(json_path)\n\n with open(json_path) as f:\n data = json.load(f)\n\n data.pop('user_email')\n data.pop('user_name')\n data.pop('survey_data')\n\n with open(json_path, 'w') as f:\n json.dump(data, f)", "def clear_data():\n dir_list = [\"generated/*\", \"pub/static/*\", \"var/cache/*\", \"var/page_cache/*\", \"var/view_preprocessed/*\", \"var/tmp/*\"]\n\n for item in dir_list:\n print(\"[ - ] Removing\", item, \"\\n\")\n subprocess.run([\"rm\", \"-rf\", item])", "def remove_data(data=None): #clear\n data = get_data(data)\n shutil.rmtree(data)", "def _delete_data (self, path):\n head, tail = os.path.split(path)\n for subdir, dirs, files in os.walk(head):\n for file in files:\n if tail in file:\n os.remove(os.path.join(subdir, file))", "def parse_json_data(settings, dataset):\n for directory in dataset: # for directory in list of directories\n directory[\"data\"] = []\n for record in directory[\"rawdata\"]: # each record is the raw JSON data of a file in a directory\n jsonrootpath = get_json_root_path(record)\n globaloptions = get_json_global_options(record)\n #for item in record[\"client_stats\"]:\n # if \"job options\" in item.keys():\n # print(item[\"job options\"][\"iodepth\"])\n process_json_record(settings, directory, record, jsonrootpath, globaloptions)\n #print(\"================================\")\n #print(directory[\"data\"])\n #for directory in dataset:\n # for item in directory[\"data\"]:\n # print(item[\"iodepth\"])\n directory[\"data\"] = sort_list_of_dictionaries(directory[\"data\"])\n return dataset", "def process_folder(root, path=\"\"):\n myDict = {}\n if path:\n if root.cd(path):\n for key in ROOT.gDirectory.GetListOfKeys():\n filterKey(root, key, path, myDict, \"__List\")\n else:\n for key in ROOT.gDirectory.GetListOfKeys():\n mypath = ROOT.gDirectory.GetPathStatic()\n filterKey(root, key, mypath, myDict, \"\")\n ROOT.gDirectory.cd(mypath)\n return myDict", "def remove_useless_file(rootPath):\n all_file_paddle_list = get_all_paddle_file(rootPath)\n ut_file_map_new = {}\n ut_file_map = \"%s/build/ut_file_map.json\" % rootPath\n with open(ut_file_map, 'r') as load_f:\n load_dict = json.load(load_f)\n for key in load_dict:\n if key in all_file_paddle_list:\n ut_file_map_new[key] = load_dict[key]\n\n with open(\"%s/build/ut_file_map.json\" % rootPath, \"w\") as f:\n json.dump(ut_file_map_new, f, indent=4)\n print(\"remove_useless_file ut_file_map success!!\")", "def _unstage_folder(dir_path):\n for dir_item in os.listdir(dir_path):\n full_path = os.path.join(dir_path, dir_item)\n if os.path.isfile(full_path) and dir_item != 'load.go':\n os.remove(full_path)", "def clear_data_base():\n\n\tcommand = 'rm object_models/*.json'\n\tos.system(command)\n\tprint(\"data base cleared\")", "def update(self):\n if os.path.isdir(self.full_path):\n self.file_list = os.listdir(self.full_path)\n else:\n self.file_list = []", "def purge_cache():\n for (dir_path, dir_names, file_names) in os.walk(CACHE, topdown=False):\n for file_name in file_names:\n if is_json_file(file_name):\n path = os.path.join(dir_path, file_name)\n print(\"Removing file “%s”\" % path)\n os.remove(path)\n for directory in dir_names:\n path = os.path.join(dir_path, directory)\n if not os.listdir(path):\n print(\"Removing directory “%s”\" % path)\n os.rmdir(path)", "def 
prune_unused_data(args):\n\n unused_list = ['image_00', 'image_01', 'image_02', 'image_03', 'velodyne_points']\n date_dirs = ['2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03']\n\n for date_dir in date_dirs:\n path1 = os.path.join(args.path_data_base, date_dir)\n if not os.path.isdir(path1):\n continue\n date_dirs2 = os.listdir(path1)\n\n for date_dir2 in date_dirs2:\n path2 = os.path.join(path1, date_dir2)\n if not os.path.isdir(path2):\n continue\n print(path2)\n for folder in unused_list:\n path3 = os.path.join(path2, folder)\n if os.path.isdir(path3):\n print(path3)\n shutil.rmtree(path3)", "def cleanDataDir(self):\n for strFile in os.listdir(self.strDataDir):\n os.remove(os.path.join(self.strDataDir, strFile))", "def rm_empty_json_in_path(path):\n assert os.path.isdir(path), \"[path] not a valid directory\"\n \n # Ensure directories are given with ending '/' for recursion\n if path[-1] != '/':\n path += '/'\n \n print('Browsing \"' + path + '\"')\n \n for f in os.listdir(path):\n filepath = path + f\n if os.path.isfile(filepath) and '.jsonl' in filepath:\n try:\n if os.path.getsize(filepath) == 0:\n print('Removing ' + filepath)\n os.remove(filepath)\n \n # Shouldn't happen, but just to make sure.\n except OSError as e:\n print(e)\n pass\n \n elif os.path.isdir(filepath):\n # Browse one dir deeper\n rm_empty_json_in_path(path + f + '/')", "def add_directory():\r\n showKeywords = input(\"\\nInput mandatory keywords for the show title seperated by a space.\\n\"\r\n \"Example: X files\\n\").lower().strip()\r\n while re.search('[^A-Za-z0-9 ]+', showKeywords) or showKeywords.startswith('defaultdirectory'):\r\n showKeywords = input(\"Invalid keywords, please input alphanumeric characters only\\n\" +\r\n \"Input mandatory keywords for the show title seperated by a space.\\n\"\r\n \"Example: X files\\n\").lower().strip()\r\n while showKeywords.lower() in data:\r\n showKeywords = input(\"Show already in database, enter new show keywords:\\n\")\r\n showPath = input(\"\\nInput path for the folder for {}:\\n\".format(showKeywords) +\r\n \"Example: C:\\\\videos\\\\x files\\n\").strip()\r\n if not os.path.exists(showPath):\r\n os.makedirs(showPath)\r\n print(\"\\n*** Directory did not exist. 
Created directory: '{}'\".format(showPath))\r\n print(\"*** Move '{}' shows to directory: '{}'\".format(showKeywords, showPath))\r\n data[showKeywords] = showPath\r\n save_json()", "def getcleanjson(self):\n dico=self.dico.copy() # shalow copie as we remove second level data in the copied\n for key in [\"basefiles\",\"grader\",\"soluce\"]:\n if key in dico:\n del dico[key]\n return str(json.dumps(dico))", "def eraseDatas(folderToRemove='datas'):\n directoryToRemove = os.path.join(pathtofolder(), folderToRemove)\n for i in os.listdir(directoryToRemove):\n os.remove(os.path.join(directoryToRemove, i))\n os.rmdir(directoryToRemove) # Now the folder is empty of files\n pass", "def delEvery():\n delMain()\n delFile()\n delPuls()\n delSat()\n delFreq()\n delTemp()\n delGly()\n delDlr()\n label['text'] = \"All json files have been deleted !\"", "def handleCleanMetadataKeep(self):\n logging.debug(\"Removing all metadata found...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.removeAllMeta(filePath)", "def export_directories(self):\n print('=== Exporting all directory data...')\n\n for directory in self.client.directories:\n print('- Exporting directory:', directory.name)\n\n json = {\n 'id': self.get_id(directory),\n 'href': directory.href,\n 'name': directory.name,\n 'description': directory.description,\n 'status': directory.status,\n 'createdAt': directory.created_at.isoformat(),\n 'modifiedAt': directory.modified_at.isoformat(),\n 'customData': self.get_custom_data(directory),\n 'groups': [],\n }\n\n for group in directory.groups:\n json['groups'].append({\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n })\n\n json['provider'] = {\n 'href': directory.provider.href,\n 'providerId': directory.provider.provider_id,\n 'agent': None,\n }\n\n try:\n json['provider']['createdAt'] = directory.provider.created_at.isoformat()\n json['provider']['modifiedAt'] = directory.provider.modified_at.isoformat()\n except AttributeError:\n json['provider']['createdAt'] = None\n json['provider']['modifiedAt'] = None\n\n try:\n json['provider']['clientId'] = directory.provider.client_id\n except AttributeError:\n json['provider']['clientId'] = None\n\n try:\n json['provider']['clientSecret'] = directory.provider.client_secret\n except AttributeError:\n json['provider']['clientSecret'] = None\n\n try:\n json['provider']['redirectUri'] = directory.provider.redirect_uri\n except AttributeError:\n json['provider']['redirectUri'] = None\n\n try:\n json['provider']['agent'] = {\n 'id': self.get_id(directory.provider.agent),\n 'href': directory.provider.agent.href,\n 'status': directory.provider.agent.status,\n 'createdAt': directory.provider.agent.created_at.isoformat(),\n 'modifiedAt': directory.provider.agent.modified_at.isoformat(),\n 'config': {\n 'directoryHost': directory.provider.agent.directory_host,\n 'directoryPort': directory.provider.agent.directory_port,\n 'sslRequired': directory.provider.agent.ssl_required,\n 'agentUserDn': directory.provider.agent.agent_user_dn,\n 'agentUserDnPassword': directory.provider.agent.agent_user_dn_password,\n 'baseDn': directory.provider.agent.base_dn,\n 'pollInterval': directory.provider.agent.poll_interval,\n 'referralMode': directory.provider.agent.referral_mode,\n 'ignoreReferralIssues': directory.provider.agent.ignore_referral_issues,\n 'accountConfig': 
directory.provider.agent.account_config,\n 'groupConfig': directory.provider.agent.group_config,\n },\n 'download': {\n\n },\n }\n except AttributeError:\n pass\n\n if directory.password_policy:\n json['passwordPolicy'] = {\n 'id': self.get_id(directory.password_policy),\n 'href': directory.password_policy.href,\n #'createdAt': directory.password_policy.created_at.isoformat(),\n #'modifiedAt': directory.password_policy.modified_at.isoformat(),\n 'resetEmailStatus': directory.password_policy.reset_email_status,\n 'resetEmailTemplates': [],\n 'resetSuccessEmailStatus': directory.password_policy.reset_success_email_status,\n 'resetSuccessEmailTemplates': [],\n 'resetTokenTtl': directory.password_policy.reset_token_ttl,\n 'strength': {\n 'href': directory.password_policy.strength.href,\n #'createdAt': directory.password_policy.strength.created_at.isoformat(),\n #'modifiedAt': directory.password_policy.strength.modified_at.isoformat(),\n 'maxLength': directory.password_policy.strength.max_length,\n 'minDiacritic': directory.password_policy.strength.min_diacritic,\n 'minLength': directory.password_policy.strength.min_length,\n 'minLowerCase': directory.password_policy.strength.min_lower_case,\n 'minNumeric': directory.password_policy.strength.min_numeric,\n 'minSymbol': directory.password_policy.strength.min_symbol,\n 'minUpperCase': directory.password_policy.strength.min_upper_case,\n },\n }\n\n try:\n for template in directory.password_policy.reset_email_templates:\n json['passwordPolicy']['resetEmailTemplates'].append({\n 'id': self.get_id(template),\n 'href': template.href,\n 'createdAt': template.created_at.isoformat(),\n 'modifiedAt': template.modified_at.isoformat(),\n 'fromName': template.from_name,\n 'name': template.name,\n 'description': template.description,\n 'fromEmailAddress': template.from_email_address,\n 'textBody': template.text_body,\n 'htmlBody': template.html_body,\n 'defaultModel': template.default_model,\n 'mimeType': template.mime_type,\n 'subject': template.subject,\n })\n except AttributeError:\n pass\n\n try:\n for template in directory.password_policy.reset_success_email_templates:\n json['passwordPolicy']['resetSuccessEmailTemplates'].append({\n 'id': self.get_id(template),\n 'href': template.href,\n 'createdAt': template.created_at.isoformat(),\n 'modifiedAt': template.modified_at.isoformat(),\n 'fromName': template.from_name,\n 'name': template.name,\n 'description': template.description,\n 'fromEmailAddress': template.from_email_address,\n 'textBody': template.text_body,\n 'htmlBody': template.html_body,\n 'mimeType': template.mime_type,\n 'subject': template.subject,\n })\n except AttributeError:\n pass\n\n tenant = self.get_id(directory.tenant)\n self.write('%s/%s/directories/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def _clean_datafile_set(self):\n file_list = self._meta['sets']['data file']['items']\n for item in file_list[:]:\n collection = item.split('@')[0]\n variable = item.split('@')[1]\n if not variable in self:\n file_list.remove(item)\n elif collection == 'masks':\n for s in self._get_source_ref(variable):\n while s in file_list:\n file_list.remove(s)\n elif self._is_array_item(variable):\n parent = self.parents(variable)[0]\n if not parent in file_list:\n idx = file_list.index(item)\n file_list[idx] = parent\n while item in file_list:\n file_list.remove(item)\n f_list = []\n for item in file_list:\n if not item in f_list: f_list.append(item)\n self._meta['sets']['data file']['items'] = f_list\n return None", "def 
clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)", "def trim_json_files():\n # List directory for training data files\n file_list = os.listdir(settings.PATH_TRAINING_DATA)\n file_names = []\n for file in file_list:\n file_name = file.split('.')[0]\n file_names.append(file_name)\n\n for idx, file in enumerate(file_list):\n # Import json file\n with open(settings.PATH_TRAINING_DATA + file_list[idx], 'r') as json_file:\n data = json.load(json_file)\n json_file.close()\n # Trim training set for items with no classification\n for article in list(data['TrainingData']):\n if data['TrainingData'][article]['topics'] == []:\n data['TrainingData'].pop(article)\n\n # Save trimmed training data\n with open('E:\\Python\\MultiLabel\\data\\TrimmedTrainingData\\{}.json'.format(file_names[idx]), 'w') as outfile:\n json.dump(data, outfile, indent=4, sort_keys=True)\n outfile.close()", "def cleanup(dict):\n from itertools import groupby\n from operator import itemgetter\n tuplelist = []\n for dirname, data in groupby(sorted(dict.items(),key=itemgetter(1)),key=itemgetter(1)):\n data = list(data)\n mx = max(data,key=lambda x:len(x[0]))\n tuplelist += [x for x in data if len(x[0]) == len(mx[0])]\n tuplelist.sort()\n dict = {}\n for dirname, data in tuplelist:\n #print(dirname, data)\n dict[dirname] = data\n return dict", "async def delete_raw_data():\n await expire_directories(\".rewrite\", REWRITE_DAYS)\n await expire_directories(\"undelete\", UNDELETE_DAYS)\n\n cutoff = datetime.now(timezone.utc) - timedelta(days=DATA_LAKE_DAYS)\n # wraparound to previous month, just in case\n last_month = cutoff - timedelta(days=cutoff.day + 1)\n for day in (\n last_month,\n cutoff,\n ):\n await expire_directories(\n storage.iothub_data_dir + day.strftime(\"/%Y/%m\"), DATA_LAKE_DAYS,\n )", "def reset_data():\n shutil.copy2(\n 'data/one_producer_many_consumers.ORIG.json',\n 'data/one_producer_many_consumers.json'\n )", "def _set_dirs(self, datafolder):\n self.List_of_dir = []\n self.List_of_files = dict()\n folders = os.listdir(datafolder)\n folders.sort()\n for i in folders:\n if os.path.isdir(os.path.join(datafolder,i)) and i != '.ipynb_checkpoints': # ignore .ipynb_checkpoints, allowing the generator to work in Amazon\n self.List_of_dir.append(os.path.join(datafolder,i))\n self.List_of_files[os.path.join(datafolder,i)]=[]\n for file in os.listdir(os.path.join(datafolder, i, 'Input')):\n if file.split('.')[-1] == 'hdf5':\n self.List_of_files[os.path.join(datafolder,i)].append(file.split('.')[-2])\n self._nb_dir = len(self.List_of_dir)", "def unpackage_datasets(dirname, dataobject_format=False):\n with open(join(dirname, 'room-data.json')) as f:\n lm = json.load(f)['Landmarks']\n res = {s: unpackage_dataset(join(dirname, s), dataobject_format) for s in ['train', 'valid', 'test']}\n res['landmarks'] = lm\n return res", "def fix_path(self):\n paths = self.data['path'].tolist()\n prefixes = [re.findall(r'[A-Z\\-0-9]+', path) for path in paths]\n prefix_good = [str(prefix[0]) + \".json\" for prefix in prefixes]\n self.data['path'] = prefix_good", "def removeRtree(self):\n try:\n os.remove(str(self.dim)+'d_index.data')\n os.remove(str(self.dim)+'d_index.index')\n print('Files removed')\n except:\n print('No such files')" ]
[ "0.6780833", "0.62113416", "0.6203642", "0.5911219", "0.58356", "0.57853866", "0.57088006", "0.5708134", "0.56424284", "0.5641765", "0.56255066", "0.5620272", "0.56011605", "0.5587325", "0.55452627", "0.5542413", "0.5533419", "0.55274105", "0.5480318", "0.54572684", "0.54435855", "0.54355866", "0.5428346", "0.5417279", "0.54124564", "0.5399763", "0.53949356", "0.5377527", "0.535649", "0.5323067" ]
0.679928
0
Had sni ovoce, pokud je seznam ovoce prazdny, vola se funkce na vytvoreni ovoce
def snez (seznam_ovoce, seznam_tahu, souradnice, radky, sloupce): seznam_tahu.append(souradnice) #snezeni seznam_ovoce.pop(seznam_ovoce.index(souradnice)) #vymazani ze seznamu ovoce if (len(seznam_tahu)) == radky * sloupce: #v poli jiz neni ani jedno volne policko, konec return() if seznam_ovoce == []: vytvor_ovoce(seznam_ovoce, seznam_tahu,radky, sloupce) #volam funkci, ktera vytvori dalsi ovoce
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cliquer_sur_unité(self):", "def mezclar_bolsa(self):", "def actualizar_velocidad(self):\r\n pass", "def uvozi(self, encoding=\"UTF-8\"):\n insert = self.stroskovno_mesto.dodajanje(stevilo=1)\n super().uvozi(encoding=encoding, insert=insert)", "def inscricao(self):\n\n return True", "def VotoSi(request, id_solicitud):\n #tipoItemExistente = TipoItem.objects.get(id=id_tipoitem)\n #tipoItemNuevo = TipoItem.objects.get(id=id_tipoitem)\n solicitud = SolicitudItem.objects.get(id=id_solicitud)\n solicitud.votos = solicitud.votos + 1\n solicitud.votossi = solicitud.votossi + 1\n solicitud.save()\n messages.info(request, \"Su voto se registro correctamente.\")\n if solicitud.completo:\n return HttpResponseRedirect('/admin/todo/item/')\n else:\n return HttpResponseRedirect('/admin/todo/solicituditem/')", "def atender(self):\n\n if self.enfila>0: #Para que atiendan solamente si e que hay alguien en la fila\n\n self.enfila-=1\n self.fila.pop(0) #Saco primer elemento de la fila (Atienden al primer cliente)", "def duplica(self, request, pk=None):\n preventivo_da_clonare = get_object_or_404(PreventivoFornitore, pk=pk)\n \n #print(\"preventivo vecchio: id={}, codice={}, data={}\".format(preventivo_da_clonare.id, preventivo_da_clonare.codice, preventivo_da_clonare.data))\n #for r in preventivo_da_clonare.righe.all():\n # print(\"riga vecchia: id={}, quantità={}, articolo={}, descrizione={}, cancellato={}\".format(r.id, r.quantita, r.articolo, r.articolo_descrizione, r.cancellato))\n\n preventivo_nuovo = get_object_or_404(PreventivoFornitore, pk=pk)\n # resettando l'id e salvando si crea un altro record che ha gli stessi campi...\n preventivo_nuovo.id = None\n preventivo_nuovo.save()\n preventivo_nuovo.data = date.today()\n preventivo_nuovo.codice = PreventivoFornitore.objects.next_codice()\n preventivo_nuovo.accettato = False\n preventivo_nuovo.save()\n \n #print(\"preventivo nuovo: id={}, codice={}\".format(preventivo_nuovo.id, preventivo_nuovo.codice))\n #print(\"preventivo nuovo: data={}\".format(preventivo_nuovo.data))\n for r in preventivo_da_clonare.righe.non_cancellati():\n rn = RigaPreventivoFornitore()\n rn.preventivo = preventivo_nuovo\n rn.articolo = r.articolo\n rn.articolo_descrizione = r.articolo_descrizione\n rn.articolo_prezzo = r.articolo_prezzo\n rn.sconto_percentuale = r.sconto_percentuale\n rn.articolo_unita_di_misura = r.articolo_unita_di_misura\n rn.accettata = False\n rn.quantita = r.quantita\n rn.totale = r.totale\n rn.note = r.note\n rn.save()\n preventivo_nuovo.aggiorna_totale()\n\n #for r in preventivo_nuovo.righe.all():\n # print(\"riga nuova: id={}, quantità={}, articolo={}, descrizione={}\".format(r.id, r.quantita, r.articolo, r.articolo_descrizione))\n\n serializer = PreventivoFornitoreSerializer(preventivo_nuovo)\n return Response(serializer.data, status=status.HTTP_201_CREATED)", "def pohyb(seznam_tahu, seznam_ovoce, tah,radky, sloupce):\n\n x= seznam_tahu [len(seznam_tahu)-1][0] # [x,y] souradnice noveho tahu\n y= seznam_tahu [len(seznam_tahu)-1][1]\n\n if tah == \"s\": #sever\n y -= 1\n elif tah == \"j\": #jih\n y += 1\n elif tah == \"v\": #vychod\n x += 1\n elif tah == \"z\": #zapad\n x -= 1\n else:\n print(\"Zadal jsi spatne pismeno.\" )\n return()\n\n if x<0 or x>sloupce-1 or y<0 or y>radky-1: #tah mimo pole\n print(\"Tah neni mozny, je mimo hraci pole. Opakuj tah.\")\n elif [x,y] in seznam_tahu: #jiz obsazene policko hadem\n print(\"Tah neni mozny, had uz na nem je. 
Opakuj tah.\")\n elif [x,y] in seznam_ovoce: #policko s ovocem, vola se funkce snez\n snez (seznam_ovoce, seznam_tahu,[x,y],radky, sloupce)\n else:\n seznam_tahu.append([x,y]) #tah na volne policko, prida se tah a odebere posledni bod\n seznam_tahu.pop(0)", "def update_vie(self):\n self.essais_restant[\"text\"] = \"Vous disposez de {} vies\".format(self.jeu.get_nb_vie())", "def actualizar_puntaje(self):\r\n pass", "def dodajPrzedmiot(self, przedmiot: Przedmiot):\n self.przedmioty[przedmiot.nazwa]=przedmiot", "def VotoNo(request, id_solicitud):\n #tipoItemExistente = TipoItem.objects.get(id=id_tipoitem)\n #tipoItemNuevo = TipoItem.objects.get(id=id_tipoitem)\n solicitud = SolicitudItem.objects.get(id=id_solicitud)\n solicitud.votos = solicitud.votos + 1\n solicitud.votosno = solicitud.votosno + 1\n solicitud.save()\n messages.info(request, \"Su voto se registro correctamente.\")\n if solicitud.completo:\n return HttpResponseRedirect('/admin/todo/item/')\n else:\n return HttpResponseRedirect('/admin/todo/solicituditem/')", "def modificacion(self, socio):\n\n aux = self.buscar(socio.id)\n print('El socio a modificar en capa de datos:', aux.id, aux.nombre)\n\n if aux == None:\n return False\n else:\n #persona = session.query(Socio).filter(Socio.dni == aux.id)\n aux.nombre = socio.nombre\n aux.apellido = socio.apellido\n aux.dni = socio.dni\n\n session.commit()\n\n return aux", "def actualizar_poderes(self, event):\r\n pass", "def estaVazia(self):\n if self.getRaiz() is None:\n return True\n else:\n return False", "def vytvor_ovoce (seznam_ovoce, seznam_tahu,radky, sloupce):\n while True:\n nove_ovoce = [randrange(sloupce),randrange(radky)]\n if nove_ovoce in seznam_tahu or nove_ovoce in seznam_ovoce:\n continue\n else:\n break\n seznam_ovoce.append(nove_ovoce)", "def piskvorky(pole):\n\n print('Ahoj. Toto je hra 1D piskvorky. 
Pocitac hra so symbolmi \\'O\\', ty hras so symbolmi \\'X\\'.') \n while \"-\" in pole:\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_hraca(pole, cislo_policka)\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_pocitaca(pole, cislo_policka, mozne_policka1, mozne_policka2)\n if \"-\" not in pole:\n vyhodnot(pole)\n return print(\"Dakujem za hru.\")", "def successeurs(self,etat):\n pass", "def __init__(self):\n self.nombre_roues = 4\n self.nombre_fauteils = 1\n self.moteur = False\n self.volant = True", "def czyRuch(mapObj, gameStateObj, ludzikRuchW):\n\n # Sprawdzenie czy ludzik moze sie w danym kierunku poruszyc:\n playerx, playery = gameStateObj['ludzik']\n\n\t# zmienna pomocnicza \n skrzynki = gameStateObj['skrzynki']\n\n # Ruch ludzika\n if ludzikRuchW == UP:\n xOffset = 0\n yOffset = -1\n elif ludzikRuchW == RIGHT:\n xOffset = 1\n yOffset = 0\n elif ludzikRuchW == DOWN:\n xOffset = 0\n yOffset = 1\n elif ludzikRuchW == LEFT:\n xOffset = -1\n yOffset = 0\n\n # Sprawdzenie czy ruch jest mozliwy\n if czyMur(mapObj, playerx + xOffset, playery + yOffset):\n return False\n else:\n if (playerx + xOffset, playery + yOffset) in skrzynki:\n # Na drodze jest strzynka wiec nastepuje jej przesuniecie\n if not czyZablokowany(mapObj, gameStateObj, playerx + (xOffset*2), playery + (yOffset*2)):\n # Przesun skrzynke\n ind = skrzynki.index((playerx + xOffset, playery + yOffset))\n skrzynki[ind] = (skrzynki[ind][0] + xOffset, skrzynki[ind][1] + yOffset)\n else:\n return False\n # Przesuniecie ludzika\n gameStateObj['ludzik'] = (playerx + xOffset, playery + yOffset)\n return True", "def PoziomUkonczony(levelObj, gameStateObj):\n for cel in levelObj['cele']:\n\n if cel not in gameStateObj['skrzynki']:\n # Znaleziono niepokryty cel\n return False\n\n return True", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def prebaci_dan_nazad(self):\r\n value = int(self.brojDanaCombo.currentText()) #integer broj dana\r\n self.emit(QtCore.SIGNAL('promjeni_datum(PyQt_PyObject)'), -value)\r\n msg = 'request pomak {0} dana unazad'.format(value)\r\n logging.info(msg)", "def verif_victoire(self):\n\n if self._mot_en_cours == self._mot_a_trouver :\n return True\n else :\n return False", "def piskvorky(pole):\n\n print('Ahoj. Toto je hra 1D piskvorky. Pocitac hra so symbolmi \\'O\\', ty hras so symbolmi \\'X\\'.') \n while \"-\" in pole:\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_hraca(pole, str_policka)\n if vyhodnot(pole) == False:\n break\n else:\n pole = tah_pocitaca(pole, symbol=\"O\")\n if \"-\" not in pole:\n vyhodnot(pole)\n return print(\"Dakujem za hru.\")" ]
[ "0.62168837", "0.6099113", "0.60562164", "0.5946035", "0.5821264", "0.5761619", "0.5727161", "0.5719703", "0.5699508", "0.5662515", "0.56390893", "0.5636412", "0.5547472", "0.55309504", "0.55096954", "0.5478416", "0.5466994", "0.54400057", "0.54392844", "0.54176474", "0.53980327", "0.5396072", "0.5387375", "0.5387375", "0.5387375", "0.5387375", "0.5387375", "0.5386344", "0.5379728", "0.53685576" ]
0.632577
0
ensure client_authentication_required() is properly called
def test_authentication_required(self): self.auth.validate_token_request(self.request) self.mock_validator.client_authentication_required.assert_called_once_with(self.request)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_replace_o_auth_client_authorization(self):\n pass", "def test_patch_o_auth_client_authorization(self):\n pass", "def requires_auth(self):\n return True", "def _require_login(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + str(self.token))", "def test_read_o_auth_client_authorization(self):\n pass", "def test_create_o_auth_client_authorization(self):\n pass", "def test_authentication_required(self):\n self.client.logout()\n response = self.client.get(self.path, content_type=JSON_CONTENT_TYPE)\n assert response.status_code == 401", "def client_authentication_required(self, request, *args, **kwargs):\n\n if request.grant_type == 'password':\n client = self._clientgetter(request.client_id)\n return (not client) or client.client_type == 'confidential' or client.client_secret\n elif request.grant_type == 'authorization_code':\n client = self._clientgetter(request.client_id)\n return (not client) or client.client_type == 'confidential'\n return 'Authorization' in request.headers and request.grant_type == 'refresh_token'", "def test_patch_o_auth_client(self):\n pass", "def authenticate_client(self, request, *args, **kwargs):\n auth = request.headers.get('Authorization', None)\n log.debug('Authenticate client %r', auth)\n if auth:\n try:\n _, s = auth.split(' ')\n client_id, client_secret = decode_base64(s).split(':')\n client_id = to_unicode(client_id, 'utf-8')\n client_secret = to_unicode(client_secret, 'utf-8')\n except Exception as e:\n log.debug('Authenticate client failed with exception: %r', e)\n return False\n else:\n client_id = request.client_id\n client_secret = request.client_secret\n\n client = self._clientgetter(client_id)\n if not client:\n log.debug('Authenticate client failed, client not found.')\n return False\n\n request.client = client\n\n if client.client_secret and client.client_secret != client_secret:\n log.debug('Authenticate client failed, secret not match.')\n return False\n\n log.debug('Authenticate client success.')\n return True", "async def check_client(self, client_id: Identity) -> AuthResult:\n raise NotImplementedError", "def client_required(f):\n @wraps(f)\n def client_decorator(*args, **kwargs):\n if session.get('logged_in') and session.get('type') == 'Client':\n return f(*args, **kwargs)\n else:\n abort(401)\n return client_decorator", "def _do_authenticate(self, http_client):\n ks_kwargs = {\n 'username': self.opts.get('username'),\n 'password': self.opts.get('password'),\n 'tenant_name': self.opts.get('tenant_name'),\n 'auth_url': self.opts.get('auth_url'),\n }\n self._http_client = http_client\n self._ksclient = ksclient.Client(**ks_kwargs)", "def test_create_o_auth_client(self):\n pass", "def authorizeClients(self):\n pass", "def test_auth_required(self):\n\n res = self.client.get(SERVICES_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_read_o_auth_client(self):\n pass", "def test_replace_o_auth_client(self):\n pass", "def test_auth_client_instantiated():\n client = ConfigureClients()\n assert client.auth_client", "def test_list_o_auth_client_authorization(self):\n pass", "def test_authentication_required(self, method):\n self.client.logout()\n response = getattr(self.client, method)(self.path, content_type=JSON_CONTENT_TYPE)\n assert response.status_code == 401", "def test_loggin_required(self):\n response = self.client.get(RESGATE_URL)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def _authenticated_or_die(self):\n if not self._authenticated:\n raise Exception('The 
client is not authenticated!')", "def test_authentication_required(self, method):\n self.client.logout()\n response = getattr(self.client, method)(self.path())\n assert response.status_code == 401", "def __init__(self, client_authentication=None):\n super(OAuthClientAuthHandler, self).__init__()\n self._client_authentication = client_authentication", "def initial(self, request, *args, **kwargs):\n super(DRFMixin, self).initial(request, *args, **kwargs)\n\n # fetch the client associated with the user\n user = request.user\n if user.is_authenticated:\n if not user.is_active:\n raise PermissionDenied(detail='User is inactive')\n client_user = ClientUser.objects.get_or_none(user=user)\n if client_user:\n client = client_user.client\n if not client.is_enabled:\n raise PermissionDenied(detail='Client is disabled')\n request.client = client\n else:\n _logger.warning('no client defined for request user %s', request.user.username)", "def test_login_required(self):\n self.client.logout()\n response = self.client.get(self.path)\n assert response.status_code == 401", "def test_authentication_required(self, method):\n self.client.logout()\n response = getattr(self.client, method)(self.path)\n assert response.status_code == 401", "def auth_required(self, cls):\n assert cls.authentication_classes == [JWTKeyAuthentication]", "def setUp(self):\r\n super(CLITestAuthKeystoneWithId, self).setUp()\r\n self.client = client.HTTPClient(user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)" ]
[ "0.70707756", "0.7026512", "0.6958877", "0.6917434", "0.69145656", "0.68942106", "0.68654615", "0.6813348", "0.6727116", "0.6723939", "0.6688306", "0.6673083", "0.6637916", "0.6620205", "0.66194654", "0.6589787", "0.6528338", "0.6500637", "0.64950204", "0.64694965", "0.6442299", "0.6442", "0.6417739", "0.6390335", "0.6387679", "0.63859385", "0.63847697", "0.6374329", "0.63729966", "0.63513905" ]
0.7124051
0
Ping (GET /ping) A simple ping to check health of the image server.
def ping(): return json_response({ 'ping': 'pong', 'version': __version__, 'imgapi': False, })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ping():\n\treturn HTTPResponse(status=200)", "async def ping(self):\n uri = \"/fapi/v1/ping\"\n success, error = await self.request(\"GET\", uri)\n return success, error", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "async def ping(self) -> APIReturn:\n return await self._request(\"GET\", \"/ping\")", "def ping():\n requestor = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n logger.info(f\"Health check requested by ip='{requestor}'\")\n return make_response(\n jsonify(status=\"Serving\",\n body=\"pong\"), 200)", "def ping():\n logging.info(\"Ping received...\")\n\n health = Predictor.load() is not None\n\n status = 200 if health else 404\n return Response(response=\"\\n\", status=status, mimetype=\"application/json\")", "def ping():\r\n health1 = ScoringService.get_model1() is not None # You can insert a health check here\r\n ping_response = \"Docker for Discover non prime users\"\r\n status = 200 if (health1) else 404\r\n return flask.Response(response=ping_response, status=status, mimetype='application/json')", "def ping(self):\n request = Empty()\n response = self.stub.Ping(request, timeout=5)", "def Ping(self):\n\n self.helpers(self.config).Info(\"Ping Weaviate...\")\n # get the meta endpoint\n _, _ = self.Get(\"/meta\")\n # would fail is not available.\n self.helpers(self.config).Info(\"Pong from Weaviate...\")", "def ping(self):\n\n url = self.api_url('ping')\n\n return requests.get(url, headers=self.auth_header).json()", "def ping(self):\n return 'ping'", "def ping(self):\n response = self._request(\"GET\", [ROUTE_PING])\n\n if response.status_code == 200:\n logging.info(\"OK\")\n return True\n logging.error(\"FAILED\")\n return False", "async def ping(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"ping\"], *args, **kwargs)", "def ping():\n return jsonify({'response': 'pong'}), 200", "def ping(self):\n self.connect()\n self._write('PING\\r\\n')\n return self._get_simple_response()", "def test_ping(self):\n response = self.app.get('/ping')\n\n assert response.status_code == 200\n assert response.data == b\"pong\"", "def ping():\r\n return make_response(\"pong!\", 200)", "def ping(self):\n pass", "def ping(event, context):\n logger.info(\"Ping requested.\")\n return _get_response(200, \"PONG!\")", "def test_ping(self):\n responses.add(responses.GET, \"http://localhost:3030/{0}/ping\".format(\"$\"), \"2017-09-18T11:41:19.915+00:00\", status=200)\n fuseki = GraphStore()\n result = fuseki._graph_health()\n self.assertTrue(result)", "async def _ping_(self)->str:\n return PING_RESPONSE", "def Ping(self): # real signature unknown; restored from __doc__\n pass", "def ping(self) -> Response:\n raise NotImplementedError", "def ping(self) -> None:\n ...", "def ping_response():\n\n return Response(\"ok\", status=200)", "def ping(self, *args):\n return self._command(b'PING', *args, handler=\"PONG\")", "def test_ping(self):\n response = self.client.get(reverse(\"api_hello:ping\"))\n self.assertTrue(response.json()[\"status\"])" ]
[ "0.7904067", "0.78574514", "0.7829969", "0.7829969", "0.7829969", "0.7829969", "0.77984995", "0.7741611", "0.7618637", "0.74545103", "0.7417597", "0.72806305", "0.71756196", "0.7174422", "0.71716267", "0.7128376", "0.7119203", "0.71128374", "0.70566857", "0.7026292", "0.7016241", "0.6992468", "0.69908625", "0.69882435", "0.69206357", "0.6919117", "0.69130623", "0.68763596", "0.6873253", "0.68722916" ]
0.7998161
0
Select a move to make from the given board position (game_state). The algorithm uses a combination of a neural network and a Monte Carlo tree search to search the decision tree stemming from the given board position. It returns the move associated with the most visited branch stemming from the root. This method creates a tree structure representing the search history of the algorithm and is used to save evaluations of board positions and statistics regarding nodes visited. If return_visit_counts is true, the distribution of visits over the branches of the root in the search tree will be returned along with the selected move. This distribution can be used to train the neural network.
def select_move(self, game_state, return_visit_counts=False): # Start with a tree consisting of a root node only. The root node # is associated with the given board position. root = self.create_node(game_state) # If no legal moves can be made from the given board position, pass # the turn. This happens when all of the players pieces are surrounded, # if the player has no pieces left or if the game is over. if not root.branches: if return_visit_counts: return Act.pass_turn(), {} return Act.pass_turn() for i in range(self.num_rounds): # On each iteration, walk down the tree to a leaf node and select # a move to make from the corresponding leaf game state. node = root next_move = self.select_branch(node) while node.has_child(next_move): node = node.get_child(next_move) next_move = self.select_branch(node) # Create a new tree node for the selected move and add it to # the tree. If the leaf node corresponds to a finished game # then don't create a new node and assign a value to the node # based on who won. if node.state.is_not_over(): if next_move: new_state = copy.deepcopy(node.state) new_state.take_turn_with_no_checks(Act.play(next_move)) child_node = self.create_node(new_state, move=next_move, parent=node) move = next_move value = -1 * child_node.value else: # If the current player can't make any moves from the # selected gamestate then next_move will be 'None' meaning # the player passes the turn. new_state = copy.deepcopy(node.state) new_state.take_turn_with_no_checks(Act.pass_turn()) child_node = self.create_node(new_state, move=next_move, parent=node) move = next_move value = -1 * child_node.value else: # If the game in the current state is over, then the last # player must have won the game. Thus the value/reward for the # other player is 1. The current node is not updated with # the new reward as no branches can stem from a finished game # state. move = node.last_move node = node.parent value = 1 # Update the nodes traversed to get to the leaf node with the # new value for the new move. while node is not None: node.record_visit(move, value) move = node.last_move node = node.parent value *= -1 # Get the visit counts of the branches if they were requested. if return_visit_counts: visit_counts = {} for move in root.branches.keys(): visit_counts[move] = root.branches[move].visit_count # Get a list of possible moves sorted according to visit count, # the move with the highest visit count should be first in the list. moves = [move for move in root.moves()] moves = sorted(moves, key=root.visit_count, reverse=True) # Loop through the sorted moves and return the first legal one. for move in moves: if not game_state.is_move_illegal(move): if return_visit_counts: return Act.play(move), visit_counts return Act.play(move) # If no legal move is found then pass the turn. if return_visit_counts: return Act.pass_turn(), visit_counts return Act.pass_turn()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mcts_search(self, state):\n assert state.current_player() == self.player\n root = SearchNode(None, 1)\n for _ in range(self.max_simulations):\n visit_path, working_state = self._apply_tree_policy(root, state)\n if working_state.is_terminal():\n node_value = working_state.player_return(self.player)\n else:\n node_value = self.evaluator.evaluate(\n working_state, self.player, self._random_state)\n\n for node in visit_path:\n node.total_reward += node_value * node.player_sign\n node.explore_count += 1\n\n most_visited = root.most_visited_child()\n\n if self.verbose:\n print(\"Root:\", root.to_str())\n print(\"Children:\")\n print(root.children_str(working_state))\n print(\"Children of chosen:\")\n chosen_state = state.clone()\n chosen_state.apply_action(most_visited.action)\n print(most_visited.children_str(chosen_state))\n\n return most_visited.action", "def select_move(self, from_search=False, verbose=False) -> Tuple[int, str]:\n node = self.current\n c0 = 1\n N = sum(e.visits for e in node.edges) + c0\n eps = self.cpuct * len(node.edges) * np.sqrt(N)\n ucb = [e.value + eps * e.prior / (c0 + e.visits) for e in node.edges]\n\n if from_search:\n #\n tau = 0.01 + self.temperature *\\\n (1.0 - self.current.level() / self.max_steps)\n\n probs = [(e.search_outcome + 0.01)**(1/tau) for e in node.edges]\n probs = np.array(probs) / sum(probs)\n dnoise = np.random.dirichlet(np.ones(len(probs)))\n eps = self.dnoise\n probs = (1.0 - eps) * probs + eps * dnoise\n\n # chooce proportionally\n index = np.random.choice(np.arange(len(probs)), p=probs)\n # index = np.argmax(probs)\n msg = \"Chooosing {} with probability {:.4f}\"\n print(msg.format(node.edges[index].cmd, probs[index]))\n else:\n node = self.current\n index = np.argmax(ucb)\n\n if verbose:\n msg = \"NODE: Visits: {}, Reward: {:.2f}, \" +\\\n \"Score: {:.2f}, Envscore: {}\"\n print(msg.format(\n node.visits, node.reward, node.score, node.envscore))\n # print(\"CMD HISTORY:\", node.cmd_history(cmds_only=True))\n\n values = [e.value for e in node.edges]\n counts = [e.search_outcome for e in node.edges]\n cmds = [e.cmd for e in node.edges]\n priors = [e.prior for e in node.edges]\n ix = range(len(node.edges))\n\n msg = ''.join((\"EDGE {}: V: {:.2f}, P: {:.2f}, S: {}, \",\n \"UCB: {:.2f}, cmd: {} SP: {:.2f}\"))\n if not from_search:\n probs = np.zeros(len(node.edges))\n\n for i, c, p, v, n, u, r in zip(ix, cmds, priors,\n values, counts, ucb, probs):\n print(msg.format(i, v, p, n, u, c, r))\n\n edge = node.edges[index]\n edge.search_outcome += 1\n\n return index, edge.cmd", "def move(self, state):\n \n self.depth_limit=1\n self.best_utility=-2\n action=None\n while not self.is_time_up():\n self.terminal=True\n self.cache={}\n action=self.alpha_beta_search(state,0)\n if self.terminal==True:\n break\n self.depth_limit=self.depth_limit+1\n \n return action", "def find_next_move(board: Type[Board], turn: int, colour) -> Union[Tuple[int, int], Tuple[Tuple[int, int], Tuple[int, int]]]:\n # for this version, restart search for each turn\n # not sure if we can persist the tree\n root_node = _new_tree_node(board, turn-1, get_opponent_colour(colour))\n print(\"root_node already has \", root_node.winning, '/', root_node.visited) # DEBUG\n \n start_time = datetime.datetime.now()\n elapsed = datetime.timedelta(0)\n simulation_rounds = 0\n while (elapsed <= config.MC_TIME_LIMIT):\n promising_node, path = select(root_node)\n node_to_explore = promising_node\n\n if promising_node.board.get_status(is_placing=promising_node.current_turn <= 24) \\\n == 
BoardStatus.ON_GOING:\n children_nodes = _get_children_nodes(node_to_explore)\n if (len(children_nodes) > 0):\n action, node_to_explore = random.choice(children_nodes)\n path.append(node_to_explore)\n\n playout_result = random_playout(node_to_explore)\n\n back_prop(path, playout_result)\n\n elapsed = datetime.datetime.now() - start_time\n simulation_rounds += 1\n\n print(f\"\\n\\n\\n[MC] {simulation_rounds} rounds of simulation run.\\n\\n\\n\")\n winning_action, winning_node = _get_child_with_max_score(root_node)\n return winning_action", "def find_best_move(state: GameState) -> None:", "def search_my_move(self, env:Chess, is_root_node=False) -> float:\n if env.over():\n if env.victor == Victor.draw:\n return 0\n # assert env.whitewon != env.white_to_move # side to move can't be winner!\n return -1\n\n state = board_state_key(env)\n\n with self.node_lock[state]:\n if state not in self.tree:\n leaf_p, leaf_v = self.expand_and_evaluate(env)\n self.tree[state].p = leaf_p\n return leaf_v # I'm returning everything from the POV of side to move\n\n # SELECT STEP\n action_t = self.select_action_q_and_u(env, is_root_node)\n\n virtual_loss = self.play_conf.virtual_loss\n\n my_visit_stats = self.tree[state]\n my_stats = my_visit_stats.a[action_t]\n\n my_visit_stats.sum_n += virtual_loss\n my_stats.n += virtual_loss\n my_stats.w += -virtual_loss\n my_stats.q = my_stats.w / my_stats.n\n\n env.make_move(action_t.uci())\n leaf_v = self.search_my_move(env) # next move from enemy POV\n leaf_v = -leaf_v\n\n # BACKUP STEP\n # on returning search path\n # update: N, W, Q\n with self.node_lock[state]:\n my_visit_stats.sum_n += -virtual_loss + 1\n my_stats.n += -virtual_loss + 1\n my_stats.w += virtual_loss + leaf_v\n my_stats.q = my_stats.w / my_stats.n\n\n return leaf_v", "def find_best_move(state) -> None:\n C: float = 2 ** 0.5 # Start with sqrt(2)\n idx: int = -1\n win_ratio: float = 0\n\n # First create the root node for the game\n root = State(None, None, state)\n root = expand(root)\n root = run_each_move(root, state)\n print(\"New Move UCBs: \")\n for m in range(len(root.move_ucbs)):\n print(\" \", root.move_ucbs[m])\n \n #while True:\n for _ in range(10000):\n _, win_ratio = MCTS(root, state, True, win_ratio) \n #print(\"\\n\\n\\nRoot Attempts: \", root.attempts)\n #print(\"Back in main\\n\\n\\n\")\n \n # Pick one of the highest ranked by UCB's", "def search_my_move(self, env: ChessEnv, is_root_node=False) -> float:\n\t\tif env.done:\n\t\t\tif env.winner == Winner.draw:\n\t\t\t\treturn 0\n\t\t\t# assert env.whitewon != env.white_to_move # side to move can't be winner!\n\t\t\treturn -1\n\n\t\tstate = state_key(env)\n\n\t\twith self.node_lock[state]:\n\t\t\tif state not in self.tree:\n\t\t\t\tleaf_p, leaf_v = self.expand_and_evaluate(env)\n\t\t\t\tself.tree[state].p = leaf_p\n\t\t\t\treturn leaf_v # I'm returning everything from the POV of side to move\n\t\t\t#assert state in self.tree\n\n\t\t\t# SELECT STEP\n\t\t\taction_t = self.select_action_q_and_u(env, is_root_node)\n\n\t\t\tvirtual_loss = self.play_config.virtual_loss\n\n\t\t\tmy_visit_stats = self.tree[state]\n\t\t\tmy_stats = my_visit_stats.a[action_t]\n\n\t\t\tmy_visit_stats.sum_n += virtual_loss\n\t\t\tmy_stats.n += virtual_loss\n\t\t\tmy_stats.w += -virtual_loss\n\t\t\tmy_stats.q = my_stats.w / my_stats.n\n\n\t\tenv.step(action_t.uci())\n\t\tleaf_v = self.search_my_move(env) # next move from enemy POV\n\t\tleaf_v = -leaf_v\n\n\t\t# BACKUP STEP\n\t\t# on returning search path\n\t\t# update: N, W, Q\n\t\twith 
self.node_lock[state]:\n\t\t\tmy_visit_stats.sum_n += -virtual_loss + 1\n\t\t\tmy_stats.n += -virtual_loss + 1\n\t\t\tmy_stats.w += virtual_loss + leaf_v\n\t\t\tmy_stats.q = my_stats.w / my_stats.n\n\n\t\treturn leaf_v", "def move(self, state):\n result = None\n self.currentDepthLimit = 0\t\n\tself.transposition = {}\n\tself.counter = 0\n\n\twhile True:\n u = float(\"inf\")\n\t v = float(\"-inf\")\n\t self.counter = 0\n\t result = None\n\t self.transposition = {}\n\t for a in state.actions():\n new = self.min_value(state.result(a), float(\"-inf\"), float(\"inf\"),self.currentDepthLimit)\n\t if new > v:\n\t v = new\n\t result = a\n\n\t elif new == v:\n\t if a.index < result.index:\n\t result = a\n\t if self.is_time_up():\n\t return result\n\t \n\t self.currentDepthLimit += 1\n\t \"\"\"If we never use evaluate function, it means all state are terminated, so return whatever the result is\"\"\"\n\t if self.counter == 0:\n\t break\n\t if self.is_time_up():\n \t return result\n\treturn result", "def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 1\n return new_boards", "def create_node(self, game_state, move=None, parent=None):\n # Pass the game state to the neural network to both evaluate the \n # how good the board position is and get the prior probability \n # distribution over possible next moves (ie the predicted distribution \n # of visit counts).\n move_priors, value = self.network.predict(game_state)\n \n # If a root node is being created, then add some dirichlet noise\n # to the prior probabilities to help exploration.\n if parent == None:\n dirichlet_noise = np.random.dirichlet([self.alpha]*96)\n for (i, move) in enumerate(move_priors.keys()):\n move_priors[move] = (move_priors[move] + dirichlet_noise[i])/2\n \n # Create the node for the given game state, 
with the predicted value\n # and priors, and attach it to the tree.\n new_node = TreeNode(game_state, value, move_priors, parent, move)\n if parent is not None:\n parent.add_child(move, new_node)\n return new_node", "def select_move(self, game_state):\n raise NotImplementedError()", "def select_moves(self, worker_idx: int, state: np.ndarray, state_depth: int = 0) -> np.ndarray:\n worker = self.workers[worker_idx]\n database = self.databases[worker_idx]\n\n # Start simulation on new state\n worker.set_start_state(state, state_depth)\n worker.simulation(clear_tree=True)\n\n # Block and wait for results\n results = worker.results()\n\n if self.verbose >= 2:\n print(\"{} Nodes per second\".format(np.sum(results['total_nodes']) / self.calculation_time))\n\n return self._temperature_policy(database.get(0, self.env.hash(state))[0])", "def select_branch(self, node):\n total_n = node.total_visit_count\n \n def branch_score(move):\n q = node.expected_value(move)\n p = node.prior(move)\n n = node.visit_count(move)\n return q + self.c * p * np.sqrt(total_n)/(1+n)\n \n moves = node.moves()\n if moves:\n return max(moves, key=branch_score)\n else:\n # If moves is empty then no legal moves can be made from the game\n # state corresponding to the given node.\n return None", "def get_move(self, state):\n for n in range(self._n_playout):\n state_copy = copy.deepcopy(state)\n self._playout(state_copy)\n print(n)\n print(\"Judge done\")\n return max(self._root._children.items(),\n key=lambda act_node: act_node[1]._n_visits)[0]", "def action(self):\n\n self.start_timer()\n\n minimax_probability = self.norm.cdf(self.root.branching)\n use_minimax = boolean_from_probability(minimax_probability)\n if self.time_consumed > 53:\n # Time is starting to run low, use the faster option\n use_minimax=True\n\n if self.time_consumed < 59:\n if self.root.turn < 4:\n result = book_first_four_moves(self.root)\n elif use_minimax:\n result = minimax_paranoid_reduction(self.root)\n else:\n result = monte_carlo_tree_search(\n self.root,\n playout_amount=3,\n node_cutoff=4,\n outer_cutoff=4,\n num_iterations=1200,\n turn_time=0.75,\n exploration_constant=1.7,\n use_slow_culling=False,\n verbosity=0,\n use_prior=True,\n num_priors=4,\n use_fast_prune_eval=False,\n use_fast_rollout_eval=False,\n )\n else:\n result = greedy_choose(self.root)\n\n self.end_timer()\n\n return result", "def calculate_random_move(self, vehicleamount, visit):\n self.depth += 1\n\n while True:\n number = (randint(0, 1))\n vehicle_id = (randint(0, vehicleamount-1))\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if number == 0:\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if self.get_board().tostring() in visit:\n self.vehicles[vehicle_id].x += 1\n else:\n return self\n else:\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if self.get_board().tostring() in visit:\n self.vehicles[vehicle_id].x -= 1\n else:\n return self\n else: #vertical\n if number == 0:\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if self.get_board().tostring() in visit:\n self.vehicles[vehicle_id].y += 1\n else:\n return self\n else:\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if 
self.get_board().tostring() in visit:\n self.vehicles[vehicle_id].y -= 1\n else:\n return self", "def monteCarloTreeSearch(chessboard):\n init_state = NodeState(chessboard)\n init_state.setCurrentTurn(State.BLACK)\n init_node = TreeNode(init_state)\n\n for _ in range(COMPUTATION_LIMIT):\n expanded_node = treePolicy(init_node)\n reward = defaultPolicy(expanded_node)\n backPropagation(expanded_node, reward)\n \n best_child_node = findBestChild(init_node, False)\n return best_child_node.getState().getBestMovement()", "def search(board:Board, max_depth=3) -> DiGraph:\n\n n = 0 # node label which also serves as a node counter\n depth = 0\n \n G = nx.DiGraph()\n G.add_node(0, winner=None, player=0, board=board.state, board_p = board.display)\n \n # First branch in look ahead\n newleavelist=[]\n parent_node = n\n parent_board = Board(G.nodes[n]['board'][0], G.nodes[n]['board'][1])\n\n for move in ALL_MOVES:\n moves_available = parent_board.moves_available(player=0)\n if move not in moves_available:\n continue\n \n # Do move\n new_board = parent_board.update_board(Move(player=0, move=move))\n \n # Add move node to graph\n n=n+1\n G.add_node(n, winner=new_board.is_winner, player=1, board=new_board.state, board_p = new_board.display)\n G.add_edge(parent_node, n, move=move)\n if new_board.is_winner:\n continue\n newleavelist.append(n)\n \n depth=1\n # subsequent branches\n while depth < max_depth:\n leavelist = newleavelist[:]\n newleavelist = []\n for leave in leavelist: \n # Get parent board\n parent_board = Board(G.nodes[leave]['board'][0], G.nodes[leave]['board'][1])\n for move in ALL_MOVES:\n moves_available = parent_board.moves_available(player=depth%2)\n if move not in moves_available:\n continue\n # Do move\n new_board = parent_board.update_board(Move(player=depth%2, move=move))\n # Add move node to graph\n n=n+1\n G.add_node(n, winner=new_board.is_winner, player=1-depth%2, \n board=new_board.state, board_p=new_board.display)\n G.add_edge(leave, n, move=move)\n if new_board.is_winner:\n continue\n \n newleavelist.append(n)\n depth=depth+1\n return G", "def decide(self, game, state, available_moves, opponent_moves):\n\t\tstatecopy = copy.deepcopy(state)\n\t\troot = GameNode(game, None, statecopy, available_moves, None)\n\t\ttree = GameTree(root)\n\t\tminimaxAB = AlphaBeta(tree)\n\t\tbest_state = minimaxAB.alpha_beta_search(tree.root)\n\t\tmove = best_state.action\n\t\treturn [move.row, move.column, move.shift]", "def exploreNext(neighbor, move):\n if (neighbor != None and tuple(neighbor) not in explored):\n nextState = State(neighbor)\n nextState.path = currentState.path.copy()\n nextState.path.append(move)\n stateQueue.append(nextState)", "def get_move(self, rootstate, itermax, verbose = True, **kwargs):\n\n # print(rootstate.playerHands[rootstate.playerToMove])\n\n assert len(rootstate.playerHands[rootstate.playerToMove]) == 2\n\n move, victim, guess = get_move(rootstate, ismcts=True)\n\n if move:\n return move, victim, guess\n\n rootnode = Node()\n counter = 0\n for i in range(itermax):\n node = rootnode\n # determinize\n state = rootstate.clone_and_randomize(vanilla=kwargs.get('vanilla', True))\n node = self.select(state, node)\n node = self.expand(state, node)\n self.simulate(state)\n self.backpropagate(state, node)\n\n # Output some information about the tree - can be omitted\n if verbose:\n print(rootnode.children_to_string())\n\n return self.select_final_move(rootnode, rootstate)", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n self.NumAgents = 
gameState.getNumAgents()\n self.root = gameState\n # alpha-beta-search\n # one call of Max_Value is enough to iterate the whole tree and keep the optimal move\n self.Max_Value(gameState, -10000, 10000, self.index, self.depth)\n return self.move\n util.raiseNotDefined()", "def search(self, depth, state, curr_player):\n \n # enumerate all legal moves from this state\n legal_moves = []\n for i in range(7):\n # if column i is a legal move...\n if self.isLegalMove(i, state):\n # make the move in column i for curr_player\n temp = self.makeMove(state, i, curr_player)\n legal_moves.append(temp)\n \n # if this node (state) is a terminal node or depth == 0...\n if depth == 0 or len(legal_moves) == 0 or self.gameIsOver(state):\n # return the heuristic value of node\n return self.value(state, curr_player)\n \n # determine opponent's color\n if curr_player == self.colors[0]:\n opp_player = self.colors[1]\n else:\n opp_player = self.colors[0]\n\n alpha = -99999999\n for child in legal_moves:\n if child == None:\n print(\"child == None (search)\")\n alpha = max(alpha, -self.search(depth-1, child, opp_player))\n return alpha", "def search(self, is_max, possible_moves, state, depth, alpha, beta):\n temp_state = state.deepcopy()\n best_move = None\n best_move_val = float('-inf') if is_max else float('inf')\n \n for move in possible_moves:\n for to in move['to']:\n \n if time() > self.thinking_time:\n return best_move, best_move_val\n \n temp_state.board.move_pawn(move['from'], to)\n temp_state.next_turn()\n _, val = self.minimax(temp_state, not(is_max), depth+1, alpha, beta)\n \n temp_state.board.move_pawn(to, move['from'])\n temp_state.undo_turn()\n \n if is_max and val > best_move_val:\n alpha = max(val, alpha)\n best_move_val = val\n best_move = (move['from'], to)\n \n if not(is_max) and val < best_move_val:\n beta = min(val, beta)\n best_move_val = val\n best_move = (move['from'], to)\n \n if beta <= alpha: #pruning\n return best_move, best_move_val\n \n return best_move, best_move_val", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.alphabeta(self, sample_space, affinity, depth_limit, -10000, 10001, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def make_move(self, state, actions):\n\n # Here we are doing an exploration so don't update scores\n # for selected state\n if random.random() < self._exploration:\n action = random.choice(actions)\n self._last_state = self._get_state_for_action(state, action)\n return action\n\n # So we are not doing exploration, here we want to update it\n max_selection = None\n for a, s in self._states_from_actions(state, actions).items():\n state_value = self._state_values[s]\n if max_selection is None or state_value > self._state_values[max_selection[1]]:\n max_selection = (a, s)\n\n if self._last_state is not None:\n self._update_state_value(self._last_state, max_selection[1])\n\n self._last_state = max_selection[1]\n return max_selection[0]", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.minimax(self, 
sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def dfs(self, game):\n\n if len(self.state_val) % 1000 == 0:\n print('\\r', len(self.state_val), end='')\n\n curr_state = game.get_state_hash()\n if self.state_val.has_key(curr_state): # already seen this state\n return self.state_val[curr_state]\n\n result = game.check_win()\n\n if result is True: # current player wins\n value = 1. # black wins\n if game.current_player() == 1: # white wins\n value = -value\n self.state_val[curr_state] = value\n #print(game.game_state)\n return value\n \n if result is None: # draw\n value = 0.\n self.state_val[curr_state] = value\n return value\n\n # game continues\n values = []\n for move in game.get_valid_moves():\n if not game.check_move(move): # invalid move (this should never happen)\n continue\n values.append(self.dfs(game))\n game.delete_move() # return to previous state\n\n min_node = game.current_player() == 0 # we explored white moves => we are in min node of minimax tree\n value = 0.\n if min_node:\n value = min(values)\n else:\n value = max(values)\n self.state_val[curr_state] = value\n return value", "def move(self, board):\r\n self.start_time = time.time()\r\n disk_total = self.get_disk_count(self.my_color, board) + self.get_disk_count(self.opponent_color, board)\r\n\r\n if disk_total < 15:\r\n # In early-game, we can allow a deeper minimax search since there's not too many possible moves.\r\n self.minimax_max_depth = 7\r\n\r\n elif disk_total < 45:\r\n # In mid-game, minimax tree has the most branches. Therefore, we must give it space to breathe.\r\n self.minimax_max_depth = 5\r\n else:\r\n # In the very end-game, minimax tree has the least branches, so we can allow a full search.\r\n self.minimax_max_depth = 8\r\n\r\n possible_moves = self.find_possible_moves(board, self.my_color)\r\n\r\n # If there's only one move available, return it\r\n if len(possible_moves) == 1:\r\n return possible_moves[0]\r\n\r\n # If we can take a corner, take it and don't consider any other options.\r\n # This rarely backfires and allows to save a tiny bit of time\r\n corners = [(0,0), (0,7), (7,0), (7,7)]\r\n for corner in corners:\r\n if corner in possible_moves:\r\n return corner\r\n\r\n # Grow a minimax tree to find the best available move\r\n alpha_init = -10000000\r\n beta_init = 10000000\r\n\r\n available_moves = self.minimax(board, 0, self.my_color, alpha_init, beta_init)\r\n print(available_moves)\r\n if available_moves != 0:\r\n best_value = max(available_moves.values())\r\n for move in available_moves:\r\n if available_moves[move] == best_value:\r\n return move\r\n\r\n return None" ]
[ "0.6655577", "0.6577943", "0.6247116", "0.62271196", "0.6220835", "0.6209169", "0.6201025", "0.61390984", "0.6119295", "0.6110233", "0.60752344", "0.6050629", "0.6048138", "0.6048097", "0.60161257", "0.60156405", "0.5996535", "0.59938693", "0.5979693", "0.59731513", "0.5936745", "0.59129137", "0.59056914", "0.5876798", "0.58708864", "0.58601534", "0.5847247", "0.58467436", "0.58341736", "0.57965046" ]
0.8414711
0
This method creates a tree node for the given board position and adds it to the tree structure. It will be linked to the given parent node and the given move is stored as the last move taken to produce the given game state. This is useful for traversing and updating the tree structure when other nodes are added to it.
def create_node(self, game_state, move=None, parent=None): # Pass the game state to the neural network to both evaluate the # how good the board position is and get the prior probability # distribution over possible next moves (ie the predicted distribution # of visit counts). move_priors, value = self.network.predict(game_state) # If a root node is being created, then add some dirichlet noise # to the prior probabilities to help exploration. if parent == None: dirichlet_noise = np.random.dirichlet([self.alpha]*96) for (i, move) in enumerate(move_priors.keys()): move_priors[move] = (move_priors[move] + dirichlet_noise[i])/2 # Create the node for the given game state, with the predicted value # and priors, and attach it to the tree. new_node = TreeNode(game_state, value, move_priors, parent, move) if parent is not None: parent.add_child(move, new_node) return new_node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AddChild(self, move, state, isGameOver):\n node = Node(Move = move, state = state, isGameOver = isGameOver, parent = self)\n self.untried_moves.remove(move) # this move is now not available\n self.child.append(node)\n return node", "def create_tree(self, tree):\n # print(self)\n if len(self.available_combinations()) > 1:\n comb1 = random.choice(self.available_combinations())\n comb2 = random.choice(self.available_combinations())\n\n if self.last_move == 5:\n next_move = 7\n else:\n next_move = 5\n\n # print(next_move)\n\n board1 = copy.deepcopy(self)\n board2 = copy.deepcopy(self)\n\n board1.board[comb1[0]][comb1[1]] = next_move\n board1.last_move = 7\n tree.insert_left(board1)\n board2.board[comb2[0]][comb2[1]] = next_move\n board2.last_move = 7\n tree.insert_right(board2)\n\n board1.create_tree(tree.get_left_child())\n board2.create_tree(tree.get_left_child())", "def __init__(self, board_state, parent=None, move=None, path_cost=0):\n self.board_state = board_state\n self.parent = parent\n self.move = move\n self.path_cost = path_cost\n self.depth = 0\n if parent:\n # If a parent node exists increase the current depth by 1\n self.depth = parent.depth + 1", "def _new_tree_node(board, current_turn, side):\n global visited\n node_key = (board.get_hash_value(), current_turn)\n if (node_key not in visited):\n visited.add(node_key)\n treenodes[node_key] = TreeNode(board, current_turn, side)\n return treenodes[node_key]", "def child_node(self, problem, move):\n # Generate the next node\n next_node = problem.result(self.board_state, move)\n # Return a newly created child node with the current node set as its\n # parent\n return Node(next_node, self, move,\n problem.path_cost(self.path_cost, self.board_state, move,\n next_node))", "def expand_child(self, move):\n is_leaf, child_board = self.simulate_move(move)\n child = MCTSNode(board=child_board, score=random.random(), parent=self, is_leaf=is_leaf) #CNN.predict(simulated_board) instead of 0\n self.children.append(child)\n return child", "def constructTree(n, board, player, action, possible_position=None):\r\n max_branch_num = 7\r\n # 限制分支因子,不考虑过多局面\r\n node = Node(player=player, action=action)\r\n successors = []\r\n if possible_position == None:\r\n # 没有给定可选位置,我们自己现场计算\r\n logDebug(\"Point 1.\")\r\n possible_position = possible_pos(board)\r\n logDebug(\"Point 2.\")\r\n if possible_position == None:\r\n # 真的没有可选位置,😅\r\n return None\r\n\r\n is_critical = critical(board)\r\n new_board = copy.deepcopy(board)\r\n if is_critical == '4_1':\r\n # 我方四子连珠\r\n for pos in possible_position:\r\n new_board[pos[0]][pos[1]] = player\r\n new_special_pattern = is_special_pattern(board = new_board, player = player)\r\n old_special_pattern = is_special_pattern(board = board, player = player)\r\n if new_special_pattern[\"H4\"] != old_special_pattern[\"H4\"] or new_special_pattern[\"C4\"] != old_special_pattern[\"C4\"]:\r\n node = Node(player=player, action=action)\r\n successors = []\r\n successors.append(Node(player = 3-player, isLeaf = True, value = 1000000000, action = pos))\r\n # action 是到达这个节点,我们落子的位置\r\n node.successor = successors\r\n return node\r\n \r\n top_position = []\r\n\r\n if n == 1:\r\n # 树的深度只有一层\r\n if len(possible_position) < max_branch_num:\r\n for pos in possible_position:\r\n # :pos: 坐标 (x, y)\r\n # :prob_position: 可选位置,坐标的列表\r\n copy_board = copy.deepcopy(board)\r\n # 棋盘当前状态的拷贝(或许可以直接用深拷贝拷贝列表,不用一个一个位置去循环)\r\n copy_board[pos[0]][pos[1]] = player\r\n # 在当前位置放置当前棋手的棋子\r\n # player == 1 or 2\r\n temp_value = board_score(copy_board)\r\n # 
:util::board_evaluation:返回当前整个棋局的评分\r\n # 大评分对我们好,小评分对对方好\r\n # print temp_value\r\n # successors.append(Node(player=3-player, isLeaf=True, value=board_evaluation(board_copy), action=pos))\r\n successors.append(Node(player=3 - player, isLeaf=True, value=temp_value, action=pos))\r\n # 一层搜索树,下一个节点就是叶节点\r\n # player = 3 - player 完成棋手轮换\r\n # TODO: need to delete\r\n else:\r\n # 如果分支因子过大,只考虑落子后局面最好的前k个\r\n for pos in possible_position:\r\n board_copy = copy.deepcopy(board)\r\n board_copy[pos[0]][pos[1]] = player\r\n temp_value = board_score(board_copy)\r\n # :util::board_evaluation: 返回当前整个棋局的评分\r\n top_position.append(temp_value)\r\n temp = copy.deepcopy(top_position[:])\r\n # deepcopy\r\n temp.sort(reverse=True)\r\n # 从大到小排列\r\n for v in temp[0:max_branch_num]:\r\n pos = possible_position[top_position.index(v)]\r\n successors.append(Node(player=3 - player, isLeaf=True, value=v, action=pos))\r\n # 一层,后继节点是叶节点\r\n\r\n else:\r\n # 多层搜索树🌲\r\n if len(possible_position) < max_branch_num:\r\n # i = 0\r\n for pos in possible_position:\r\n # i += 1\r\n # print pos, 'else called', i\r\n copy_board = copy.deepcopy(board)\r\n copy_board[pos[0]][pos[1]] = player\r\n # print board_copy\r\n successors.append(constructTree(n-1, copy_board, 3-player, pos, update_possible_pos(pos, possible_position)))\r\n # 递归的调用\r\n else:\r\n for pos in possible_position:\r\n board_copy = copy.deepcopy(board)\r\n board_copy[pos[0]][pos[1]] = player\r\n top_position.append(board_score(board_copy))\r\n temp = copy.deepcopy(top_position[:])\r\n temp.sort(reverse=True)\r\n for v in temp[0:max_branch_num]:\r\n pos = possible_position[top_position.index(v)]\r\n copy_board = copy.deepcopy(board)\r\n copy_board[pos[0]][pos[1]] = player\r\n successors.append(constructTree(n - 1, copy_board, 3 - player, pos, update_possible_pos(pos, possible_position)))\r\n node.successor = successors\r\n return node", "def makeMove(self, board, move):\n\t\trotation, this_board = self.__getNormalizedAndRotatedBoard(board)\n\t\tthis_state = TicTacToeHelper.serializeBoard(this_board)\n\n\t\tthis_move = TicTacToeHelper.rotateMove(move, rotation)\n\n\t\tself.__state_history.append((this_state, this_move))", "def move(self, move: tuple) -> None:\n if move in self.root.children:\n child = self.root.children[move]\n child.parent = None\n self.root = child\n self.root_state.play(child.move)\n return\n\n # if for whatever reason the move is not in the children of\n # the root just throw out the tree and start over\n self.root_state.play(move)\n self.root = Node()", "def computer_move(self):\n tree = LinkedBinaryTree(self)\n self.create_tree(tree)\n left_points = self._calculate_points(tree.get_left_child())\n right_points = self._calculate_points(tree.get_right_child())\n\n if left_points < right_points:\n next_board = tree.get_right_child().key\n else:\n next_board = tree.get_left_child().key\n self.board = next_board.board", "def move(self, position):\n if self.win:\n raise ValueError('Game finished!')\n\n if self.board[position] != Board.CELL_EMPTY:\n raise ValueError('Cell not empty')\n\n self.board[position] = self.player\n self.__root = BinaryNode(self.board, position, self.player)\n\n self.change_player()", "def insert(self, session: sqlalchemy.orm.session.Session, recursive: bool = True,\n parent_id: Optional[int] = None, use_progress_bar: bool = True,\n progress_bar: Optional[tqdm.tqdm] = None) -> ORMTaskTreeNode:\n if use_progress_bar:\n if not progress_bar:\n progress_bar = tqdm.tqdm(desc=\"Inserting TaskTree into database\", leave=True, 
position=0,\n total=len(self) if recursive else 1)\n\n # insert code\n code = self.code.insert(session)\n\n # convert self to orm object\n node = self.to_sql()\n node.code = code.id\n\n # get and set metadata\n metadata = MetaData().insert(session)\n node.metadata_id = metadata.id\n\n # set parent to id from constructor\n node.parent = parent_id\n\n # add the node to database to retrieve the new id\n session.add(node)\n session.commit()\n\n if progress_bar:\n progress_bar.update()\n\n # if recursive, insert all children\n if recursive:\n [child.insert(session, parent_id=node.id, use_progress_bar=use_progress_bar, progress_bar=progress_bar)\n for child in self.children]\n\n return node", "def add_node(node, parent, card_names):\n # First create the new node and append it to its parent's children\n new_node = dict(children=[], hierarchy=1, distance=node.dist)\n # Append the name only if the node is a leaf\n if node.id < len(card_names):\n new_node.update(name=card_names[node.id])\n\n parent['children'].append(new_node)\n\n for child in parent['children']:\n if child['hierarchy'] >= parent['hierarchy']:\n parent['hierarchy'] = child['hierarchy'] + 1\n\n # Recursively add the current node's children\n if node.left:\n add_node(node.left, new_node, card_names)\n if node.right:\n add_node(node.right, new_node, card_names)", "def create_node(self, name, parent):\n\n try:\n node = self.map[name]\n return node\n except:\n node = Node(name,parent=parent.name)\n parent.children.add(node)\n\n node.parent = parent.name\n\n self.map[name] = node\n\n return node", "def make_move(the_board, color):\n root = Node(the_board, None, color)\n value = max_value(root, -math.inf, math.inf, 5, time.time())\n child = root.get_child_with_value(value)\n if child is None:\n return (-1,-1)\n else:\n return child.get_move()", "def select_move(self, game_state, return_visit_counts=False):\n \n # Start with a tree consisting of a root node only. The root node\n # is associated with the given board position.\n root = self.create_node(game_state)\n \n # If no legal moves can be made from the given board position, pass \n # the turn. This happens when all of the players pieces are surrounded,\n # if the player has no pieces left or if the game is over. \n if not root.branches:\n if return_visit_counts:\n return Act.pass_turn(), {}\n return Act.pass_turn()\n \n for i in range(self.num_rounds):\n # On each iteration, walk down the tree to a leaf node and select\n # a move to make from the corresponding leaf game state.\n node = root\n next_move = self.select_branch(node)\n while node.has_child(next_move):\n node = node.get_child(next_move)\n next_move = self.select_branch(node)\n \n # Create a new tree node for the selected move and add it to\n # the tree. 
If the leaf node corresponds to a finished game\n # then don't create a new node and assign a value to the node\n # based on who won.\n if node.state.is_not_over():\n if next_move:\n new_state = copy.deepcopy(node.state)\n new_state.take_turn_with_no_checks(Act.play(next_move))\n child_node = self.create_node(new_state, \n move=next_move, parent=node)\n move = next_move\n value = -1 * child_node.value \n else:\n # If the current player can't make any moves from the\n # selected gamestate then next_move will be 'None' meaning\n # the player passes the turn.\n new_state = copy.deepcopy(node.state)\n new_state.take_turn_with_no_checks(Act.pass_turn())\n child_node = self.create_node(new_state, \n move=next_move, parent=node)\n move = next_move\n value = -1 * child_node.value\n else:\n # If the game in the current state is over, then the last\n # player must have won the game. Thus the value/reward for the\n # other player is 1. The current node is not updated with\n # the new reward as no branches can stem from a finished game\n # state.\n move = node.last_move\n node = node.parent\n value = 1\n \n # Update the nodes traversed to get to the leaf node with the \n # new value for the new move.\n while node is not None:\n node.record_visit(move, value)\n move = node.last_move\n node = node.parent\n value *= -1\n \n # Get the visit counts of the branches if they were requested.\n if return_visit_counts:\n visit_counts = {}\n for move in root.branches.keys():\n visit_counts[move] = root.branches[move].visit_count\n \n # Get a list of possible moves sorted according to visit count,\n # the move with the highest visit count should be first in the list.\n moves = [move for move in root.moves()]\n moves = sorted(moves, key=root.visit_count, reverse=True)\n \n # Loop through the sorted moves and return the first legal one.\n for move in moves:\n if not game_state.is_move_illegal(move):\n if return_visit_counts:\n return Act.play(move), visit_counts\n return Act.play(move)\n \n # If no legal move is found then pass the turn.\n if return_visit_counts:\n return Act.pass_turn(), visit_counts\n return Act.pass_turn()", "def make_move(self, move, player):\n if not self.test_valid_move( move):\n return False\n self.game_state[move[0]][move[1]] = player", "def expand(self):\n\t\tfor move in self.moves:\n\t\t\tm = self.Game.create_move(self.State, move.row, move.column, move.shift, False)\n\t\t\tchildstate = self.Game.apply_move(copy.deepcopy(self.State), m)\n\t\t\tchild = GameNode(self.Game, m, childstate, self.Game.get_moves(childstate), self)\n\t\t\tself.addChild(child)", "def __init__(self, move: tuple = None, parent: object = None):\n self.move = move\n self.parent = parent\n self.N = 0 # times this position was visited\n self.Q = 0 # average reward (wins-losses) from this position\n self.Q_RAVE = 0 # times this move has been critical in a rollout\n self.N_RAVE = 0 # times this move has appeared in a rollout\n self.children = {}\n self.outcome = GameMeta.PLAYERS['none']", "def node_create(self, parent, path):\n\n q = (\"insert into nodes (parent, path) \"\n \"values (?, ?)\")\n props = (parent, path)\n return self.execute(q, props).lastrowid", "def generate_new_node(self, parent, rand_node):\n dist = np.linalg.norm(parent.state - rand_node.state)\n if dist < self.Delta: # In case rand_node is very close to parent\n new_state = rand_node.state\n else:\n new_state = parent.state + (rand_node.state - parent.state) / dist * self.Delta\n new_node = Node(new_state)\n return new_node", "def make_move(self):\n 
self.owner = self.game.current_turn\n self.status = 'X' if self.owner == self.game.creator else 'O'\n ####\n #Random turn??\n ####\n self.save(update_fields=['status', 'owner'])\n\n # Add log entry for move\n self.game.add_log(f'cell made at ({self.row}, {self.col}) by {self.owner}')\n\n # Set the current turn for the other player if game is not over\n # Check if find winner\n if self.game.check_win(cell=self) or\\\n self.game.get_all_game_cells().filter(status='EMPTY').count() == 0:\n print(\"Winnnnnnnn\")\n self.game.mark_complete(winner=self.owner)\n\n # Switch player turn\n self.game.switch_turn()\n\n # Let the game know about the move and result\n self.send_game_update()", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def update_with_move(self, last_move):\n if last_move in self._root._children:\n self._root = self._root._children[last_move]\n self._root._parent = None\n else:\n self._root = TreeNode(None, 1.0)", "def make_move(self, move: Any) -> 'StonehengeState':\n if type(move) == str:\n new_state = StonehengeState(not self.p1_turn, self.side_length)\n # copy the board information from current state\n # make copy of current state information\n hori_lst_copy = []\n for lst in self.hori_lst:\n temp = []\n for item in lst:\n temp.append(item)\n hori_lst_copy.append(temp)\n left_lst_copy = []\n for lst in self.left_lst:\n temp = []\n for item in lst:\n temp.append(item)\n left_lst_copy.append(temp)\n right_lst_copy = []\n for lst in self.right_lst:\n temp = []\n for item in lst:\n temp.append(item)\n right_lst_copy.append(temp)\n\n hori_result_copy = []\n for item in self.hori_result:\n hori_result_copy.append(item)\n left_result_copy = []\n for item in self.left_result:\n left_result_copy.append(item)\n right_result_copy = []\n for item in self.right_result:\n right_result_copy.append(item)\n\n new_state.hori_lst = hori_lst_copy\n new_state.hori_result = hori_result_copy\n new_state.left_lst = left_lst_copy\n new_state.left_result = left_result_copy\n new_state.right_lst = right_lst_copy\n new_state.right_result = right_result_copy\n # update the new state with str move\n # parallel nested list data structure\n lst = [new_state.hori_lst, new_state.left_lst, new_state.right_lst]\n result = [new_state.hori_result, new_state.left_result, new_state.right_result]\n # update the cell\n for i in range(len(lst)):\n for j in range(len(lst[i])):\n for k in range(len(lst[i][j])):\n if lst[i][j][k] == move:\n # should use the player name of last state, so opposite names\n if new_state.p1_turn:\n lst[i][j][k] = \"2\"\n else:\n lst[i][j][k] = \"1\"\n # update ley-line marks\n # the ley-line may belong to a player after this move\n p1_taken = 0\n p2_taken = 0\n if result[i][j] != \"@\":\n continue\n for item in lst[i][j]:\n if item == \"1\":\n p1_taken += 1\n if item == \"2\":\n p2_taken += 1\n if float(p1_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"1\"\n if float(p2_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"2\"\n ###### CHECK FOR SHALLOW 
COPY PROBLEM, IF ATTRIBUTE IS UPDATE IN NEW STATE\n return new_state", "def apply_move(self, move):\r\n next_board = copy.deepcopy(self.board)\r\n next_board.place(self.next_player, move)\r\n return GameState(next_board, self.next_player.other, move)", "def make_move(board, position, player):\n # only valid moves are passed in here\n board[position-1] = player", "def expand(self, uct):\n state = uct.my_board\n if state.game_over:\n winner = state.judge()\n reward = 1 if winner == state.side_color else -1\n self.backup(uct, reward, winner) # 操蛋这里的winner之前写成STATE.SIDE_COLOR了\n moves = state.generate_moves(uct.state.side_color) # 可以与神经网络输出并行\n\n children = uct.my_children\n if not moves:\n node = UCT() # if it has no move to go, then create a fake node which just change the color\n node.initialize_state(uct.state) # also copy the board history and occupied discs\n node.state.turn_color()\n node.parent_action = None\n node.parent = uct\n\n node.psa = 1 # pass for sure\n children.append(node)\n else:\n for move in moves:\n node = UCT()\n curr_state = uct.state\n node.initialize_state(curr_state)\n new_state = node.state\n new_state.disc_place(curr_state.side_color, x=move[0], y=move[1])\n new_state.turn_color()\n node.parent_action = move\n node.parent = uct\n children.append(node)\n # return self.choose_best_child(uct=uct)" ]
[ "0.7166232", "0.7162426", "0.6847326", "0.6512595", "0.6462386", "0.6367318", "0.6355977", "0.6332234", "0.6067168", "0.6048204", "0.6035471", "0.60014105", "0.59406286", "0.5914846", "0.5905294", "0.5870249", "0.5810087", "0.57710886", "0.57569855", "0.57233304", "0.5707177", "0.57039803", "0.5695668", "0.5695668", "0.5695668", "0.5695668", "0.5687319", "0.5667411", "0.56441164", "0.56428885" ]
0.7931713
0
This method evaluates the current bot against a given opponent bot by letting them play a number of games against each other. The number of games played is specified by 'num_games'. A random starting position for the games is generated if a maximum number of white and black pieces is given by the parameters 'num_white_pieces' and 'num_black_pieces', otherwise the regular starting position is used. If the number of turns taken in a game exceeds the given maximum, then the game ends and is recorded as a win for the opponent bot.
def evaluate_against_bot(self, opponent_bot, num_games, num_white_pieces = None, num_black_pieces = None, max_num_of_turns = 1000): zero_bot_player = 1 score = 0 num_games_won_as_black = 0 num_games_won_as_white = 0 # Play 'num_games' games of brandubh for i in range(num_games): print('\rPlaying game {0}, score: w = {1}, b = {2}.'.format(i, num_games_won_as_white, num_games_won_as_black),end='') # If a maximum number of white or black pieces is given, then # use a random starting position for the game. if num_white_pieces or num_black_pieces: starting_board = random_starting_position(num_white_pieces, num_black_pieces) game = GameState.new_game(starting_board) else: game = GameState.new_game() # Get both bots to play a game of brandubh. turns_taken = 0 while game.is_not_over() and turns_taken < max_num_of_turns: if game.player == zero_bot_player: action = self.select_move(game) else: action = opponent_bot.select_move(game) game.take_turn_with_no_checks(action) turns_taken += 1 # At the end of the game, increment counts keeping track of how # many games the current bot won against the opponent bot and # get the bots to switch sides for the next game. if turns_taken < max_num_of_turns: score += zero_bot_player*game.winner if zero_bot_player == game.winner: if zero_bot_player == 1: num_games_won_as_white += 1 else: num_games_won_as_black += 1 zero_bot_player *= -1 else: score -= 1 zero_bot_player *= -1 print(' done.') # Return the evaluation score of the bot along with fraction of games # won as black/white, the total number of games and the number of # epochs the bot has trained for before being evaluated. return [score/num_games, 2*num_games_won_as_white/num_games, 2*num_games_won_as_black/num_games, num_games, len(self.loss_history)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_against_rand_bot(self, num_games, \n num_white_pieces = None, \n num_black_pieces = None):\n print('Evaluating against random bot')\n results = self.evaluate_against_bot(self.rand_bot, num_games,\n num_white_pieces, \n num_black_pieces)\n self.evaluation_history_ran.append(results)", "def play(self, board, opponent):\n\t\t\n\t\tstart_time = time.time()\n\t\t\n\t\tdef simulate(board, is_opponent_turn, depth, opponent, start_time, a, b):\n\t\t\t\"\"\"\n\t\t\tRecursion function computing the score of a given move.\n\t\t\t\n\t\t\tThe opponent boolean argument determines if the following turn is\n\t\t\tplayed by the opponent or not.\n\t\t\t\"\"\"\n\t\t\t\n\t\t\tif is_opponent_turn:\n\t\t\t\tplayer = opponent\n\t\t\telse:\n\t\t\t\tplayer = self\n\t\t\t\t\n\t\t\tpossible_moves = player._possible_moves(board)\n\t\t\tmove_scores = {}\n\t\t\t\n\t\t\tif not possible_moves:\n\t\t\t\t# The score is infinite, with the sign corresponding to the\n\t\t\t\t# winning color\n\t\t\t\tscore = (board.count(player.color) - board.count(-player.color)) * inf\n\t\t\t\tif isnan(score):\n\t\t\t\t\tscore = 0\n\t\t\t\treturn None, score, None\n\t\t\t\n\t\t\tfor move, mod_sq in possible_moves.items():\n\t\t\t\t# We compute the next board state\n\t\t\t\tnext_board = player._simulate_move(board, move, mod_sq)\n\t\t\t\t\n\t\t\t\tif depth == 1:\n\t\t\t\t\t# If the depth is 1, we use the evaluation function\n\t\t\t\t\tmove_scores.update(\n\t\t\t\t\t\t\t{move: (player.eval_board(next_board), mod_sq)})\n\t\t\t\telse:\n\t\t\t\t\t# Otherwise, we iterate the recursion function again\n\t\t\t\t\t_, score, _ = simulate(next_board, not is_opponent_turn, depth - 1, opponent, start_time, a, b)\n\t\t\t\t\tmove_scores.update({move: (score, mod_sq)})\n\t\t\t\t\t\n\t\t\t\t\t# Alpha-beta pruning\n\t\t\t\t\tif is_opponent_turn:\n\t\t\t\t\t\ta = max(a, score)\n\t\t\t\t\t\tif b <= a:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tb = min(b, score)\n\t\t\t\t\t\tif b <= a:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\tif time.time() - start_time > self.time_out and self.time_out:\n\t\t\t\t\t# If there is a non-zero timeout, that has been reached,\n\t\t\t\t\t# the search is stopped\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t# We take the move with the highest score if it is the\n\t\t\t# opponent's turn, otherwise the lowest\n\t\t\tif is_opponent_turn:\n\t\t\t\t# If several moves have the same score, we choose one\n\t\t\t\t# randomly between them\n\t\t\t\tmax_score = max(move_scores.values(), key=lambda e: e[0])[0]\n\t\t\t\tmax_values = [(mv, sc, sq) for mv, (sc, sq) in move_scores.items()\n\t\t\t\t\t\t\t if sc == max_score]\n\t\t\t\treturn random.choice(max_values)\n\t\t\t\n\t\t\telse:\n\t\t\t\tmin_score = min(move_scores.values(), key=lambda e: e[0])[0]\n\t\t\t\tmin_values = [(mv, sc, sq) for mv, (sc, sq) in move_scores.items()\n\t\t\t\t\t\t\t if sc == min_score]\n\t\t\t\treturn random.choice(min_values)\n\t\t\n\t\tbest_move, _, mod_sq = simulate(board, False, self.max_depth, opponent, start_time, -inf, inf)\n\t\t\n\t\treturn best_move, mod_sq", "def play(self):\n print(\"Board size: {}x{} with {} games using pieces: {}\".format(self.size[0], self.size[1], self.num_games, self.pieces))\n print(\"Player 1 using layout '{}' and play strategy '{}'\".format(self.layouts[0], self.plays[0]))\n print(\"Player 2 using layout '{}' and play strategy '{}'\".format(self.layouts[1], self.plays[1]))\n print(\"Running...\")\n self.start_time = time.time()\n\n for game in range(self.num_games):\n if self.verbose: print(\"Playing game {}:\".format(game))\n 
players = (Player(\"Player 1\", self.size[0], self.size[1], self.pieces, self.layouts[0], self.plays[0], self.verbose),\n Player(\"Player 2\", self.size[0], self.size[1], self.pieces, self.layouts[1], self.plays[1], self.verbose))\n\n finished = False\n game_round = 0\n\n while not finished:\n game_round += 1\n for i in range(2):\n player = players[i]\n opponent = players[0] if i == 1 else players[1]\n\n attack_pos = player.get_next_attack()\n player.set_attack_result(attack_pos, *opponent.is_hit(attack_pos))\n\n if opponent.is_player_dead() is True:\n self.wins[i] += 1\n self.tries[i] += game_round\n finished = True\n if self.verbose: print(\"Player {} won the game on round {}\\n\".format(i+1, game_round))\n break", "def playGames(self, num, verbose=False):\n\n num = int(num / 2)\n prevWon = 0\n newWon = 0\n draws = 0\n for _ in tqdm(range(num), desc=\"Arena.playGames (1)\"):\n gameResult = self.playGame(verbose=verbose)\n if gameResult == 1:\n prevWon += 1\n elif gameResult == -1:\n newWon += 1\n else:\n draws += 1\n\n self.landlord, self.farmers = self.farmers, self.landlord\n\n for _ in tqdm(range(num), desc=\"Arena.playGames (2)\"):\n gameResult = self.playGame(verbose=verbose)\n if gameResult == -1:\n prevWon += 1\n elif gameResult == 1:\n newWon += 1\n else:\n draws += 1\n\n return prevWon, newWon, draws", "def computer_vs_computer(number_of_games, print_board=False):\n games_won = 0\n games_lost = 0\n games_tied = 0\n\n for i in range(number_of_games):\n board = Board(3)\n players = [YellowJ('X', ['O']), RandomPlayer('O', ['X'])]\n \n if print_board:\n print('GAME', (i+1))\n\n while not board.game_over():\n player_to_move = players[board.get_number_of_moves()%2]\n move = player_to_move.move(board)\n\n board.move(int(move[0]), int(move[1]), player_to_move.get_piece())\n if print_board:\n board.print_board()\n\n if board.game_over() and board.winner_found():\n winner = players[(board.get_number_of_moves()+1)%2]\n \n if print_board:\n print('%s won' % (winner.get_piece()))\n\n if winner.get_piece() == 'X':\n games_won += 1\n else:\n games_lost += 1\n else:\n games_tied += 1\n\n if print_board:\n print()\n print('-------')\n\n\n print('%d - Games Won' % (games_won))\n print('%d - Games Lost' % (games_lost))\n print('%d - Games Tied' % (games_tied))", "def playGames(self, num, sonete,secuence,mcts,verbose=False):\n eps_time = AverageMeter()\n bar = Bar('Arena.playGames', max=num)\n end = time.time()\n eps = 0\n maxeps = int(num)\n finalScore1=0\n finalScore2=0\n\n num = int(num/2)\n oneWon = 0\n twoWon = 0\n draws = 0\n gameResults=[]\n global nround\n actions=[]\n self.player1, self.player2 = self.player1, self.player1\n board = self.game.getInitBoard()\n for i in range(100):\n nround = i\n #action,sonete = self.playGame(sonete,sequences,nround,verbose=verbose)\n pi = mcts.getActionProb(sonete, temp=1)\n #actions.append(action)\n\n eps_time.update(time.time() - end)\n end = time.time()\n\n\n return actions#finalScore1, finalScore2#oneWon, twoWon, draws", "def test_play_game_hard(self):\r\n wins = [0,0,0]\r\n\r\n for i in range(1,10):\r\n a_player_1_id = 1\r\n a_player_2_id = 2\r\n a_players = [RandomPlayer(a_player_1_id), RandomPlayer(a_player_2_id)]\r\n a_x_dist = i\r\n a_y_dist = i\r\n a_num_to_win = 3\r\n a_game = Game(a_players,a_x_dist,a_y_dist,a_num_to_win)\r\n a_game.play_game()\r\n\r\n wins[a_game.winner] += 1\r\n\r\n print(wins)", "def play_board(bots, n_iter=25, grid_size=5, sleep_time=0.3):\n \n food_icon = chr(1160)\n counter = grid_size * 4\n food_list = []\n # finds 
positions for food \n while counter is not 0:\n food_list.append([random.randrange(grid_size),\n random.randrange(grid_size)])\n counter -= 1\n \n # If input is a single bot, put it in a list so that procedures work\n if not isinstance(bots, list):\n bots = [bots]\n \n # Update each bot to know about the grid_size they are on\n for bot in bots:\n bot.grid_size = grid_size\n\n for it in range(n_iter):\n\n # Create the grid\n grid_list = [['.'] * grid_size for ncols in range(grid_size)]\n \n # bots will eat food if in same location\n eat(bots, food_list)\n \n # Add bot(s) to the grid\n for bot in bots:\n grid_list[bot.position[0]][bot.position[1]] = bot.character \n \n # Add food to the grid\n for food_loc in food_list:\n grid_list[food_loc[0]][food_loc[1]] = food_icon\n\n \n # Clear the previous iteration, print the new grid, and wait\n clear_output(True)\n print('\\n'.join([' '.join(lst) for lst in grid_list]))\n sleep(sleep_time)\n\n # Update bot position(s) for next turn\n for bot in bots:\n bot.move()", "def play_multiple_games(players, num_games=10, seed=2):\n total_games_winners = {}\n for player in players:\n if player.name not in total_games_winners:\n total_games_winners[player.name] = 0\n random.seed(seed)\n for game in range(num_games):\n print('-------- Game', game, '--------')\n random.shuffle(players)\n print('Initial game positions: ', players, '\\n')\n if all(x > 1 for x in [p.amount for p in players]):\n rotation_winners = play_multiple_rotations(players)\n for player_name in total_games_winners:\n total_games_winners[player_name] += rotation_winners[player_name]\n print()\n # print('Final Win Count: ', total_games_winners)\n print(players)", "def bot():\n table = [ \n \"-\", \"-\", \"-\", \n \"-\", \"-\", \"-\", \n \"-\", \"-\", \"-\", \n ]\n choices = choice()\n turn = [0,1,2,3,4,5,6,7,8]\n \n while len(turn) != 0:\n \n # Player1 turn\n move_index, turn = table_check(table, turn) # Check if the index is valid\n table[move_index] = choices[0] # Fill X or O to the table base on the index chosen\n display_board(table) # Display to let them see for 2nd player's turn\n\n # The game cannot be won unless 5 moves has been played, so when turn has been reduced to 4 moves or less, check win\n # Check win before tie since last move might make it a win\n if len(turn) <= 4:\n win_condition, win = win_check(table)\n if win_condition == True:\n print(f\"\\nYou won!!!\\nThanks for playing!\")\n retry()\n\n # \"X\" will be the one who finish the game, so after filling the X into the table\n # We need to check if it's the last turn, if yes than break\n if len(turn) == 0:\n break\n \n # Bot's turn\n move_index = random.choice(turn) # Bot moves can just be chosen randomly from the\n turn.remove(move_index) # available moves from turn, so doesnt need to table_check()\n table[move_index] = choices[1] # Fill X or O to the table base on the index chosen\n print(\"Bot is thinking....\")\n time.sleep(random.randint(1,2)) # Make it realistic\n\n # The game cannot be won unless 5 moves has been played, so when turn has been reduced to 4 moves or less, check win\n if len(turn) <= 4:\n win_condition, win = win_check(table)\n if win_condition == True:\n display_board(table)\n print(f\"The bot won!!!\\nThanks for playing!\")\n retry()\n\n\n print(\"\\nDRAW!\")\n retry()", "def evaluate_against_old_bot(self, num_games,\n num_white_pieces = None, \n num_black_pieces = None,\n prefix=\"model_data/old_bot/\"):\n print('Evaluating against old bot')\n old_bot = ZeroBot(1)\n old_bot.load_old_bot(prefix)\n 
results = self.evaluate_against_bot(old_bot, num_games,\n num_white_pieces, \n num_black_pieces)\n self.evaluation_history_old.append(results)", "def play_game(plus_player_func, minus_player_func, board_size=5, winning_length=4, log=False):\n board_state = _new_board(board_size)\n player_turn = 1\n\n while True:\n _available_moves = list(available_moves(board_state))\n if len(_available_moves) == 0:\n # draw\n if log:\n print(\"no moves left, game ended a draw\")\n return 0.\n if player_turn > 0:\n move = plus_player_func(board_state, 1)\n else:\n move = minus_player_func(board_state, -1)\n\n if move not in _available_moves:\n # if a player makes an invalid move the other player wins\n if log:\n print(\"illegal move \", move)\n return -player_turn\n\n board_state = apply_move(board_state, move, player_turn)\n print(board_state)\n\n winner = has_winner(board_state, winning_length)\n if winner != 0:\n if log:\n print(\"we have a winner, side: %s\" % player_turn)\n return winner\n player_turn = -player_turn", "def test_play_game(self):\r\n\r\n \r\n a_players = [RandomPlayer(1), RandomPlayer(2)]\r\n a_x_dist = 3\r\n a_y_dist = 3\r\n a_num_to_win = 1\r\n a_game = Game(a_players, a_x_dist, a_y_dist, a_num_to_win)\r\n\r\n #Game is played to competion\r\n a_game.play_game()\r\n\r\n a_history = a_game.get_history()\r\n\r\n #Go through each move and check to be sure it's valid\r\n for i in range(1,len(a_history)):\r\n #Get copy of the board\r\n prev_board = a_history[i-1]\r\n cur_board = a_history[i]\r\n\r\n #Check if the board chosen is in valid states\r\n self.assertTrue(cur_board in prev_board.get_states(a_players[0].get_id()) or cur_board in prev_board.get_states(a_players[1].get_id()),\\\r\n \"An invalid board state was added to the history\")\r\n\r\n if i == len(a_history) - 1:\r\n self.assertTrue(cur_board.check_win(a_num_to_win, a_players[0].get_id()) or cur_board.check_win(a_num_to_win, a_players[1].get_id()) or cur_board.check_tie())\r\n else: \r\n self.assertFalse(cur_board.check_win(a_num_to_win, a_players[0].get_id()) or cur_board.check_win(a_num_to_win, a_players[1].get_id()) or cur_board.check_tie())", "def play(self):\n if self.rounds == 0:\n # When the game has not begun yet, the game must\n # give the players their pieces and a corner to start.\n max_x = ((self.board).size[1] - 1)\n max_y = ((self.board).size[0] - 1)\n starts = [(0, 0), (max_y, max_x), (0, max_x), (max_y, 0)]\n\n for i in xrange(len(self.players)):\n (self.players[i]).add_pieces(self.all_pieces)\n (self.players[i]).start_corner(starts[i])\n\n # if there is no winner, print out the current player's name and\n # let current player perform a move\n if self.winner() == \"None\":\n current = self.players[0]\n print\n \"Current player: \" + current.name\n proposal = current.do_move(self)\n if proposal == None:\n # move on to next player, increment rounds\n first = (self.players).pop(0)\n self.players = self.players + [first]\n self.rounds += 1\n\n\n # ensure that the proposed move is valid\n elif self.valid_move(current, proposal.points):\n # update the board with the move\n (self.board).update(current, proposal.points)\n # let the player update itself accordingly\n current.update_player(proposal, self.board)\n # remove the piece that was played from the player\n current.remove_piece(proposal)\n # place the player at the back of the queue\n first = (self.players).pop(0)\n self.players = self.players + [first]\n # increment the number of rounds just played\n self.rounds += 1\n\n # interrupts the game if an invalid move is 
proposed\n else:\n raise Exception(\"Invalid move by \" + current.name + \".\")\n\n else:\n print\n \"Game over! And the winner is: \" + self.winner()", "def play_strategic_game():\n board, winner = create_board(), 0\n board[1,1] = 1\n while winner == 0:\n for player in [2,1]:\n board = random_place(board, player)\n winner = evaluate(board)\n if winner != 0:\n break\n return winner", "def check_boards(self):\n succesful = True\n marker = self.game.player\n print(f\"-----Starting check_winning_boards-----\")\n winning_boards= [\n [\n [marker]*3,\n [\" \"]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [marker]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [\" \"]*3,\n [marker]*3\n ],\n [\n [marker, \" \", \" \"],\n [marker, \" \", \" \"],\n [marker, \" \", \" \"]\n ],\n [\n [\" \",marker, \" \"],\n [\" \",marker, \" \"],\n [\" \",marker, \" \"]\n ],\n [\n [\" \", \" \",marker],\n [\" \", \" \",marker],\n [\" \", \" \",marker]\n ],\n [\n [marker, \" \", \" \"]\n ,[\" \", marker,\" \"],\n [\" \", \" \",marker]\n ],\n [\n [\" \", \" \", marker],\n [\" \",marker, \" \"],\n [marker, \" \", \" \"]\n ]\n ]\n for board in winning_boards:\n if self.game.check_win_conditions(board) != -10:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n marker = self.game.ai_player\n print(f\"-----Starting check_winning_boards-----\")\n winning_boards= [\n [\n [marker]*3,\n [\" \"]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [marker]*3,\n [\" \"]*3\n ],\n [\n [\" \"]*3,\n [\" \"]*3,\n [marker]*3\n ],\n [\n [marker, \" \", \" \"],\n [marker, \" \", \" \"],\n [marker, \" \", \" \"]\n ],\n [\n [\" \",marker, \" \"],\n [\" \",marker, \" \"],\n [\" \",marker, \" \"]\n ],\n [\n [\" \", \" \",marker],\n [\" \", \" \",marker],\n [\" \", \" \",marker]\n ],\n [\n [marker, \" \", \" \"]\n ,[\" \", marker,\" \"],\n [\" \", \" \",marker]\n ],\n [\n [\" \", \" \", marker],\n [\" \",marker, \" \"],\n [marker, \" \", \" \"]\n ]\n ]\n for board in winning_boards:\n if self.game.check_win_conditions(board) != 10:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n \n tie_boards = [\n [ \n [\"O\",\"O\",\"X\"],\n [\"X\",\"O\",\"O\"],\n [\"X\",\"X\",\" \"]\n ],\n [\n [\"O\",\"X\",\" \"],\n [\" \",\"X\",\" \"],\n [\" \",\"O\",\" \"]\n ],\n [\n ['O', 'O', 'X'],\n ['X', 'X', 'O'],\n ['O', 'O', 'X']\n ]\n ]\n for board in tie_boards:\n if self.game.check_win_conditions(board) != 0:\n succesful = False\n print(f\"board failed checkWins \\n{board}\")\n\n print(f\"-----Ending check_winning_boards-----\")", "def play_games(binary, map_width, map_height, bot_commands, number_of_runs):\n print(\"Comparing Bots!\")\n result = {}\n if not(len(bot_commands) == 4 or len(bot_commands) == 2):\n raise IndexError(\"The number of bots specified must be either 2 or 4.\")\n for current_run in range(0, number_of_runs):\n match_output = _play_game(binary, map_width, map_height, bot_commands)\n winner = _determine_winner(match_output)\n result[winner] = result.setdefault(winner, 0) + 1\n print(\"Finished {} runs.\".format(current_run + 1))\n print(\"Win Ratio: {}\".format(result))", "def simulate(board, is_opponent_turn, depth, opponent, start_time, a, b):\n\t\t\t\n\t\t\tif is_opponent_turn:\n\t\t\t\tplayer = opponent\n\t\t\telse:\n\t\t\t\tplayer = self\n\t\t\t\t\n\t\t\tpossible_moves = player._possible_moves(board)\n\t\t\tmove_scores = {}\n\t\t\t\n\t\t\tif not possible_moves:\n\t\t\t\t# The score is infinite, with the sign corresponding to the\n\t\t\t\t# winning color\n\t\t\t\tscore = (board.count(player.color) - board.count(-player.color)) 
* inf\n\t\t\t\tif isnan(score):\n\t\t\t\t\tscore = 0\n\t\t\t\treturn None, score, None\n\t\t\t\n\t\t\tfor move, mod_sq in possible_moves.items():\n\t\t\t\t# We compute the next board state\n\t\t\t\tnext_board = player._simulate_move(board, move, mod_sq)\n\t\t\t\t\n\t\t\t\tif depth == 1:\n\t\t\t\t\t# If the depth is 1, we use the evaluation function\n\t\t\t\t\tmove_scores.update(\n\t\t\t\t\t\t\t{move: (player.eval_board(next_board), mod_sq)})\n\t\t\t\telse:\n\t\t\t\t\t# Otherwise, we iterate the recursion function again\n\t\t\t\t\t_, score, _ = simulate(next_board, not is_opponent_turn, depth - 1, opponent, start_time, a, b)\n\t\t\t\t\tmove_scores.update({move: (score, mod_sq)})\n\t\t\t\t\t\n\t\t\t\t\t# Alpha-beta pruning\n\t\t\t\t\tif is_opponent_turn:\n\t\t\t\t\t\ta = max(a, score)\n\t\t\t\t\t\tif b <= a:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tb = min(b, score)\n\t\t\t\t\t\tif b <= a:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\tif time.time() - start_time > self.time_out and self.time_out:\n\t\t\t\t\t# If there is a non-zero timeout, that has been reached,\n\t\t\t\t\t# the search is stopped\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t# We take the move with the highest score if it is the\n\t\t\t# opponent's turn, otherwise the lowest\n\t\t\tif is_opponent_turn:\n\t\t\t\t# If several moves have the same score, we choose one\n\t\t\t\t# randomly between them\n\t\t\t\tmax_score = max(move_scores.values(), key=lambda e: e[0])[0]\n\t\t\t\tmax_values = [(mv, sc, sq) for mv, (sc, sq) in move_scores.items()\n\t\t\t\t\t\t\t if sc == max_score]\n\t\t\t\treturn random.choice(max_values)\n\t\t\t\n\t\t\telse:\n\t\t\t\tmin_score = min(move_scores.values(), key=lambda e: e[0])[0]\n\t\t\t\tmin_values = [(mv, sc, sq) for mv, (sc, sq) in move_scores.items()\n\t\t\t\t\t\t\t if sc == min_score]\n\t\t\t\treturn random.choice(min_values)", "def next_game(self, winner):\n self.winner_list_temp.append(winner)\n if not self.opponents_queue:\n self.winner_list.append(self.winner_list_temp)\n self.opponents_queue = update_opponents(self.winner_list_temp, self.waiting_players)\n self.tournament_depth += 1\n self.winner_list_temp = []\n if self.winner_list:\n if ((len(self.start_player_list) == 3) & (self.tournament_depth == 2)) | \\\n ((len(self.start_player_list) == 4) & (self.tournament_depth == 2)) | \\\n ((len(self.start_player_list) == 5) & (self.tournament_depth == 3)) | \\\n ((len(self.start_player_list) == 6) & (self.tournament_depth == 3)) | \\\n ((len(self.start_player_list) == 7) & (self.tournament_depth == 4)) | \\\n ((len(self.start_player_list) == 8) & (self.tournament_depth == 3)):\n self.winner_state = 1\n self.opponents = []\n return ()\n if len(self.winner_list[self.tournament_depth - 1]) == 3:\n player1 = self.winner_list[self.tournament_depth - 1][0]\n player2 = self.winner_list[self.tournament_depth - 1][1]\n self.waiting_players.append(self.winner_list[self.tournament_depth - 1][-1])\n self.opponents_queue = [[player1, player2]]\n if (len(self.winner_list[self.tournament_depth - 1]) == 1) & (len(self.winner_list) == 2) & (len(self.start_player_list) != 4):\n player1 = self.winner_list[self.tournament_depth - 1][0]\n player2 = self.winner_list[0][2]\n self.opponents_queue = [[player1, player2]]\n if (len(self.waiting_players) % 2 == 1) & (len(self.start_player_list) == 3):\n self.waiting_players.remove(self.opponents[0])\n if (len(self.waiting_players) == 2) & (len(self.start_player_list) == 5):\n self.waiting_players.remove(self.opponents_queue[1][0])\n 
self.waiting_players.remove(self.opponents_queue[1][1])\n self.all_opponents.append(self.opponents_queue.copy())\n self.opponents = self.opponents_queue[0]\n self.opponents_queue.remove(self.opponents)", "def start(self):\n with self.players['w'], self.players['b']:\n\n game = 0\n\n while game < self.num_games:\n\n # Print info.\n print \"Game %d - %s [%s] (White) VS: %s [%s] (Black)\" % (game + 1,\n self.players['w'].name,\n type(self.players['w']).__name__,\n self.players['b'].name,\n type(self.players['b']).__name__)\n # Reset board\n self.board.reset()\n\n # Signal to players that a new game is being played.\n [p.new_game() for p in self.players.itervalues()]\n\n curr_player_idx = 'w'\n\n game_pgn = chess.pgn.Game()\n game_pgn.headers[\"White\"] = self.players['w'].name\n game_pgn.headers[\"Black\"] = self.players['b'].name\n game_pgn.headers[\"Date\"] = time.strftime(\"%Y.%m.%d\")\n game_pgn.headers[\"Event\"] = \"Test\"\n game_pgn.headers[\"Round\"] = game\n game_pgn.headers[\"Site\"] = \"My PC\"\n\n _, time_taken = self.play(curr_player_idx, game_pgn=game_pgn)\n\n result = self.board.result(claim_draw=True)\n if result == '1-0':\n winner = self.players['w']\n elif result == '0-1':\n winner = self.players['b']\n else:\n winner = None\n self.data['draws'] += 1\n print \"Draw.\" \n\n if winner is not None:\n self.data['wins'][winner.name] += 1\n print \"%s wins.\" % winner.name\n\n for color, p in self.players.iteritems():\n print \"Player %s took %f seconds in total\" % (p.name, time_taken[color])\n p.time_taken = 0\n\n game_pgn = game_pgn.root()\n game_pgn.headers[\"Result\"] = result\n with open(resource_filename('guerilla', 'data/played_games/') + self.players['w'].name + '_' +\n self.players['b'].name + '_' + str(game) + '.pgn', 'w') as pgn:\n try:\n pgn.write(str(game_pgn))\n except AttributeError as e:\n print \"Error writing pgn file: %s\" % (e)\n\n self.swap_colours()\n game += 1", "def play_game(self):\n while self.over is False:\n self.board.print_board()\n winner = self.check_over()\n if winner != -1:\n return winner\n self.p1.get_move(self.board)\n self.board.print_board()\n winner = self.check_over()\n if winner != -1:\n return winner\n self.p2.get_move(self.board)", "def game(white_engine, black_engine, game_time=60.0, verbose=False):\n\n # Initialize variables\n board = Board()\n totaltime = { -1 : game_time*60, 1 : game_time*60 }\n engine = { -1 : black_engine, 1 : white_engine }\n\n if verbose:\n print \"INITIAL BOARD\\n--\\n\"\n board.display(totaltime)\n\n # Do rounds \n # 每方最多走60次\n for move_num in range(60):\n moves = []\n for color in [-1, 1]:\n start_time = timeit.default_timer()\n move = get_move(board, engine[color], color, move_num, totaltime)\n end_time = timeit.default_timer()\n # Update user totaltime\n time = round(end_time - start_time, 1)\n totaltime[color] -= time\n\n if time > game_time or totaltime[color] < 0:\n raise RuntimeError(color)\n\n # Make a move, otherwise pass\n if move is not None:\n board.execute_move(move, color)\n moves.append(move)\n\n if verbose:\n print \"--\\n\"\n print \"Round \" + str(move_num + 1) + \": \" + player[color] + \" plays in \" + move_string(move) + '\\n'\n board.display(totaltime)\n\n if not moves:\n # No more legal moves. Game is over.\n break\n\n print \"FINAL BOARD\\n--\\n\"\n board.display(totaltime)\n\n return board", "def randwalk(number_of_players=4, number_of_pieces=4):\n\n # `ghost_players` is a LUDOpy specific way to specify the number of\n # players. 
So, if we want 2 players, the code below will generate a list:\n #\n # [3, 2, 1, 0]\n #\n # and slice it so it omits players 2 and 3.\n #\n # [3, 2, 1, 0][:2] == [3, 2]\n\n g = ludopy.Game(\n ghost_players=list(reversed(range(0, 4)))[:-number_of_players],\n number_of_pieces=number_of_pieces\n )\n there_is_a_winner = False\n\n while not there_is_a_winner:\n (dice, move_pieces, player_pieces, enemy_pieces, player_is_a_winner,\n there_is_a_winner), player_i = g.get_observation()\n\n if len(move_pieces):\n piece_to_move = \\\n move_pieces[np.random.randint(0, len(move_pieces))]\n else:\n piece_to_move = -1\n\n _, _, _, _, _, there_is_a_winner = g.answer_observation(piece_to_move)\n\n return g", "def play_game(self):\n self.welcome()\n while (self.winner is None) and (not self.exit_flag) and (not self.board.full()):\n self.play_round()\n self.exit_game()", "def play_game():\n board = create_board()\n while True:\n for player in [1, 2]:\n random_place(board, player)\n result = evaluate(board)\n if result != 0:\n return result", "def win(self, player):\n if player == 1:\n a = self.player_one.moves\n else:\n a = self.player_two.moves\n winning_moves = []\n for i in range(1, 9, 3):\n winning_moves.append(range(i, i + 3))\n for i in range(1, 4):\n winning_moves.append(range(i, i + 7, 3))\n winning_moves.append([1, 5, 9])\n winning_moves.append([3, 5, 7])\n for move in winning_moves:\n flg = True\n for index in move:\n if index not in a:\n flg = False\n break\n if flg:\n return True, player\n if len(self.player_one.moves) + len(self.player_two.moves) == 9:\n self.print_space()\n self.display_board()\n self.print_space()\n print \" Games is drawn\"\n self.logging.debug(\"Game is draw, nobody won\")\n self.logging.debug(\"Enjoy the game again :)\")\n sys.exit(100)\n return False, player", "def eval_board(board, colour, num_defeated):\n \n beats_dict = {\"r\": \"s\", \"p\": \"r\", \"s\": \"p\"}\n\n # reorganises board data into a dictionary of tokens and their positions\n own_tokens = {'r': [], 'p': [], 's': []}\n opp_tokens = {'r': [], 'p': [], 's': []}\n \n for pos, token in board.items():\n if token:\n token = token[0]\n if colour == \"upper\":\n if token.isupper():\n own_tokens[token.lower()].append(pos)\n else: # token is lower\n opp_tokens[token].append(pos)\n else: # colour = \"lower\"\n if token.isupper():\n opp_tokens[token.lower()].append(pos)\n else: # token is lower\n own_tokens[token].append(pos)\n\n score = num_defeated * 1.5\n\n # assign values to own tokens as [# beatable]/[# identical * closest enemy dist]\n for hand, positions in own_tokens.items():\n if len(positions) > 0: # check that there are instances of the hand first\n beats = beats_dict[hand]\n targets = opp_tokens[beats]\n init_value = len(targets)/len(positions)\n \n for position in positions:\n min_target_dist = inf\n for target in targets:\n target_dist = dist(position, target)\n if target_dist < min_target_dist:\n min_target_dist = target_dist\n if min_target_dist == 0: # this shouldn't happen as the opponent should have died\n print(\"position: {}, target position: {}\".format(position, target))\n \n score += init_value/min_target_dist\n\n # subtract number of opponent tokens on the board\n for hand, positions in opp_tokens.items():\n for position in positions:\n score -= 1\n\n return score", "def get_scoreboard(self):\n cases = len(self.start_player_list)\n\n opponent03 = \"N/A\"\n opponent04 = \"N/A\"\n opponent05 = \"N/A\"\n opponent06 = \"N/A\"\n opponent07 = \"N/A\"\n opponent08 = \"N/A\"\n opponent09 = \"N/A\"\n 
opponent10 = \"N/A\"\n opponent11 = \"N/A\"\n opponent12 = \"N/A\"\n opponent13 = \"N/A\"\n opponent14 = \"N/A\"\n winner = \"N/A\"\n\n display = \"\\n\"\n\n # Game with 3 players\n if cases == 3:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n if len(self.all_opponents) == 2:\n opponent03 = self.all_opponents[1][0][0]\n opponent04 = self.all_opponents[1][0][1]\n if len(self.winner_list) == 2:\n winner = self.winner_list[1][0]\n\n first_game = [opponent01, opponent02]\n max_string = len(max(first_game, key=len))\n string_size1 = len(opponent01)\n padding1 = \"\".ljust((int(max_string-string_size1)), '-')\n string_size2 = len(max([opponent01, opponent02], key=len))\n padding2 = \"\".ljust(string_size2, ' ')\n string_size3 = len(opponent02)\n padding3 = \"\".ljust((int(max_string)-string_size3), '-')\n string_size4 = len(padding2+\"|---\")\n padding4 = \"\".ljust(string_size4, ' ')\n string_size6 = max(len(opponent03), len(opponent04)) - len(opponent03)\n padding6 = \"\".ljust(string_size6, '-')\n string_size7 = max(len(opponent03), len(opponent04)) - len(opponent04)\n padding7 = \"\".ljust(string_size7, '-')\n string_size5 = len(padding4+opponent04+padding7) - len(opponent02+padding3) - 1\n padding5 = \"\".ljust(string_size5, ' ')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding2+\"|---\"+opponent03+padding6+\"|\"+\"\\n\"\n display += opponent02+padding3+\"|\"+padding5+\"|---\"+winner+\"\\n\"\n display += padding4+opponent04+padding7+\"|\"+\"\\n\"\n\n # Game with 4 players\n if cases == 4:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n opponent03 = self.all_opponents[0][1][0]\n opponent04 = self.all_opponents[0][1][1]\n if len(self.winner_list_temp) == 1:\n opponent05 = self.winner_list_temp[0]\n if len(self.winner_list) >= 1:\n opponent05 = self.winner_list[0][0]\n opponent06 = self.winner_list[0][1]\n if self.winner_state == 1:\n winner = self.winner_list[1][0]\n\n first_game = [opponent01, opponent02, opponent03, opponent04]\n max_string = len(max(first_game, key=len))\n string_size1 = len(opponent01)\n padding1 = \"\".ljust(int(max_string-string_size1), '-')\n string_size2 = max_string\n padding2 = \"\".ljust(string_size2, ' ')\n string_size3 = len(opponent02)\n padding3 = \"\".ljust(int(max_string-string_size3), '-')\n string_size5 = len(opponent03)\n padding5 = \"\".ljust(int(max_string-string_size5), '-')\n string_size6 = max_string\n padding6 = \"\".ljust(string_size6, ' ')\n string_size7 = len(opponent04)\n padding7 = \"\".ljust(int(max_string-string_size7), '-')\n string_size8 = max(len(opponent05), len(opponent06)) - len(opponent05)\n padding8 = \"\".ljust(string_size8, '-')\n string_size9 = max(len(opponent05), len(opponent06)) - len(opponent06)\n padding9 = \"\".ljust(string_size9, '-')\n string_size4 = max(len(\"\"+padding2+\"|---\"+opponent05+padding8+\"|\"), (len(\"\"+padding6+\"|---\"+opponent06+padding9+\"|\"))) - 1\n padding4 = \"\".ljust(string_size4, ' ')\n string_size10 = len(padding4) - len(\"\"+opponent02+padding3+\"|\")\n padding10 = \"\".ljust(string_size10, ' ')\n string_size11 = len(padding4) - len(\"\"+opponent03+padding5+\"|\")\n padding11 = \"\".ljust(string_size11, ' ')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding2+\"|---\"+opponent05+padding8+\"|\"+\"\\n\"\n display += opponent02+padding3+\"|\"+padding10+\"|\"+\"\\n\"\n display += padding4+\"|---\"+winner+\"\\n\"\n display += opponent03+padding5+\"|\"+padding11+\"|\"+\"\\n\"\n 
display += padding6+\"|---\"+opponent06+padding9+\"|\"+\"\\n\"\n display += opponent04+padding7+\"|\"+\"\\n\"\n\n # Game with 5 players\n if cases == 5:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n if len(self.all_opponents) >= 2:\n opponent03 = self.all_opponents[1][0][0]\n opponent04 = self.all_opponents[1][0][1]\n opponent05 = self.all_opponents[1][1][0]\n opponent06 = self.all_opponents[1][1][1]\n if len(self.winner_list_temp) == 1:\n opponent07 = self.winner_list_temp[0]\n if len(self.winner_list) >= 2:\n opponent07 = self.winner_list[1][0]\n opponent08 = self.winner_list[1][1]\n if len(self.winner_list) == 3:\n winner = self.winner_list[2][0]\n\n first_game = [opponent01, opponent02]\n max_string = len(max(first_game, key=len))\n string_size1 = len(opponent01)\n padding1 = \"\".ljust(int(max_string-string_size1), '-')\n string_size2 = max_string\n padding2 = \"\".ljust(string_size2, ' ')\n string_size3 = len(opponent02)\n padding3 = \"\".ljust(int(max_string-string_size3), '-')\n string_size4 = max(len(\"\"+opponent01+padding1), len(\"\"+opponent01+padding1)) + 4\n padding4 = \"\".ljust(string_size4, ' ')\n string_size5 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06))\n padding5 = \"\".ljust((string_size5 - len(opponent03)), '-')\n string_size7 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06))\n padding7 = \"\".ljust((string_size7 - len(opponent04)), '-')\n string_size8 = len(\"\"+padding2+\"|---\")\n padding8 = \"\".ljust(string_size8, ' ')\n string_size6 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06)) + 3\n padding6 = \"\".ljust(string_size6, ' ')\n string_size9 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06)) - len(opponent05)\n padding9 = \"\".ljust(string_size9, '-')\n string_size10 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06)) - len(opponent06)\n padding10 = \"\".ljust(string_size10, '-')\n string_size11 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06)) + len(padding4)\n padding11 = \"\".ljust(string_size11, ' ')\n string_size12 = max(len(opponent07), len(opponent08)) - len(opponent07)\n padding12 = \"\".ljust(string_size12, '-')\n string_size13 = max(len(opponent07), len(opponent08)) - len(opponent08)\n padding13 = \"\".ljust(string_size13, '-')\n string_size14 = max(len(opponent07), len(opponent08)) + 3\n padding14 = \"\".ljust(string_size14, ' ')\n string_size15 = max(len(opponent07), len(opponent08)) + 3\n padding15 = \"\".ljust(string_size15, ' ')\n string_size16 = len(\"\"+padding11+\"|---\"+opponent08+padding13)\n padding16 = \"\".ljust(string_size16, ' ')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding2+\"|---\"+opponent03+padding5+\"|\"+\"\\n\"\n display += opponent02+padding3+\"|\"+padding6+\"|---\"+opponent07+padding12+\"|\"+\"\\n\"\n display += padding4+opponent04+padding7+\"|\"+padding14+\"|\"+\"\\n\"\n display += padding16+\"|---\"+winner+\"\\n\"\n display += padding8+opponent05+padding9+\"|\"+padding15+\"|\"+\"\\n\"\n display += padding11+\"|---\"+opponent08+padding13+\"|\"+\"\\n\"\n display += padding8+opponent06+padding10+\"|\"+\"\\n\"\n\n # Game with 6 players\n if cases == 6:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n opponent03 = self.all_opponents[0][1][0]\n opponent04 = self.all_opponents[0][1][1]\n opponent05 = self.all_opponents[0][2][0]\n opponent06 = self.all_opponents[0][2][1]\n if 
len(self.winner_list_temp) >= 1:\n opponent07 = self.winner_list_temp[0]\n if len(self.winner_list_temp) >= 2:\n opponent08 = self.winner_list_temp[1]\n if len(self.winner_list) >= 1:\n print(self.winner_list)\n opponent07 = self.winner_list[0][0]\n opponent08 = self.winner_list[0][1]\n opponent09 = self.winner_list[0][2]\n if len(self.all_opponents) >= 3:\n opponent10 = self.all_opponents[2][0][0]\n if len(self.winner_list) == 3:\n winner = self.winner_list[2][0]\n\n first_game = [opponent01, opponent02, opponent03, opponent04, opponent05, opponent06,]\n max_string = len(max(first_game, key=len))\n string_size1 = len(opponent01)\n padding1 = \"\".ljust(int(max_string-string_size1), '-')\n string_size2 = len(opponent02)\n padding2 = \"\".ljust(int(max_string-string_size2), '-')\n string_size3 = len(opponent03)\n padding3 = \"\".ljust(int(max_string-string_size3), '-')\n string_size4 = len(opponent04)\n padding4 = \"\".ljust(int(max_string-string_size4), '-')\n string_size5 = len(opponent05)\n padding5 = \"\".ljust(int(max_string-string_size5), '-')\n string_size6 = len(opponent06)\n padding6 = \"\".ljust(int(max_string-string_size6), '-')\n string_size7 = max_string\n padding7 = \"\".ljust(string_size7, ' ')\n string_size8 = max(len(opponent07), len(opponent08), len(opponent09)) - len(opponent07)\n padding8 = \"\".ljust(string_size8, '-')\n string_size9 = max(len(opponent07), len(opponent08), len(opponent09)) - len(opponent08)\n padding9 = \"\".ljust(string_size9, '-')\n string_size10 = max(len(opponent07), len(opponent08), len(opponent09)) - len(opponent09)\n padding10 = \"\".ljust(string_size10, '-')\n string_size12 = len(\"\"+padding7+\"|---\"+opponent09+padding10)\n padding12 = \"\".ljust(string_size12, ' ')\n string_size14 = max(len(opponent07), len(opponent08), len(opponent09)) + 3\n padding14 = \"\".ljust(string_size14, ' ')\n string_size15 = len(\"\"+padding12+\"|---\"+opponent10) - len(\"\"+padding7+\"|---\"+opponent09)\n padding15 = \"\".ljust(string_size15, '-')\n string_size16 = len(padding7+\"|---\"+opponent09+padding15) - len(opponent05+padding5+\"|\")\n padding16 = \"\".ljust(string_size16, ' ')\n string_size17 = len(padding7+\"|---\"+opponent09+padding15)\n padding17 = \"\".ljust(string_size17, ' ')\n string_size18 = len(opponent09 + padding15) + 3\n padding18 = \"\".ljust(string_size18, ' ')\n string_size19 = len(\"|---\"+opponent10) - 1\n padding19 = \"\".ljust(string_size19, ' ')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding7+\"|---\"+opponent07+padding8+\"|\"+\"\\n\"\n display += opponent02+padding2+\"|\"+padding14+\"|\"+\"\\n\"\n display += padding12+\"|---\"+opponent10+\"|\"+\"\\n\"\n display += opponent03+padding3+\"|\"+padding14+\"|\"+padding19+\"|\"+\"\\n\"\n display += padding7+\"|---\"+opponent08+padding9+\"|\"+padding19+\"|\"+\"\\n\"\n display += opponent04+padding4+\"|\"+padding18+\"|---\"+winner+\"\\n\"\n display += padding17+\"|\"+\"\\n\"\n display += opponent05+padding5+\"|\"+padding16+\"|\"+\"\\n\"\n display += padding7+\"|---\"+opponent09+padding15+\"|\"+\"\\n\"\n display += opponent06+padding6+\"|\"+\"\\n\"\n\n # Game with 7 players\n if cases == 7:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n if len(self.all_opponents) >= 2:\n\n opponent03 = self.all_opponents[1][0][0]\n opponent04 = self.all_opponents[1][0][1]\n opponent05 = self.all_opponents[1][1][0]\n opponent06 = self.all_opponents[1][1][1]\n opponent07 = self.all_opponents[1][2][0]\n opponent08 = self.all_opponents[1][2][1]\n if 
len(self.winner_list_temp) >= 1:\n opponent09 = self.winner_list_temp[0]\n if len(self.winner_list_temp) >= 2:\n opponent10 = self.winner_list_temp[1]\n if len(self.winner_list) >= 2:\n opponent09 = self.winner_list[1][0]\n opponent10 = self.winner_list[1][1]\n opponent11 = self.winner_list[1][2]\n if len(self.all_opponents) >= 4:\n opponent12 = self.all_opponents[3][0][0]\n if len(self.winner_list) == 4:\n winner = self.winner_list[3][0]\n\n string_size1 = max(len(opponent01), len(opponent02)) - len(opponent01)\n padding1 = \"\".ljust(string_size1, '-')\n string_size2 = max(len(opponent01), len(opponent02)) - len(opponent02)\n padding2 = \"\".ljust(string_size2, '-')\n string_size3 = len(opponent01+padding1)\n padding3 = \"\".ljust(string_size3, ' ')\n string_size4 = len(opponent01+padding1) + 4\n padding4 = \"\".ljust(string_size4, ' ')\n string_size5 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent03)\n padding5 = \"\".ljust(string_size5, '-')\n string_size6 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent04)\n padding6 = \"\".ljust(string_size6, '-')\n string_size7 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent05)\n padding7 = \"\".ljust(string_size7, '-')\n string_size8 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent06)\n padding8 = \"\".ljust(string_size8, '-')\n string_size9 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent07)\n padding9 = \"\".ljust(string_size9, '-')\n string_size10 = max(len(opponent03), len(opponent04), len(opponent05), len(opponent06), len(opponent07), len(opponent08)) - len(opponent08)\n padding10 = \"\".ljust(string_size10, '-')\n string_size11 = len(padding4+opponent08+padding10)\n padding11 = \"\".ljust(string_size11, ' ')\n string_size12 = len(padding4+opponent08+padding10) - len(opponent02+padding2) - 1\n padding12 = \"\".ljust(string_size12, ' ')\n string_size13 = max(len(opponent09), len(opponent10)) - len(opponent09)\n padding13 = \"\".ljust(string_size13, '-')\n string_size14 = max(len(opponent09), len(opponent10)) - len(opponent10)\n padding14 = \"\".ljust(string_size14, '-')\n string_size15 = len(opponent09+padding13) + 3\n padding15 = \"\".ljust(string_size15, ' ')\n string_size16 = len(opponent02+padding2+\"|\"+padding12+\"|---\"+opponent09+padding13)\n padding16 = \"\".ljust(string_size16, ' ')\n string_size17 = len(\"---\"+opponent12)\n padding17 = \"\".ljust(string_size17, ' ')\n string_size18 = len(\"---\"+opponent12) + len(\"---\"+opponent10+padding14) + 1\n padding18 = \"\".ljust(string_size18, ' ')\n string_size19 = len(padding4+opponent06+padding8+\"|\"+padding18)\n padding19 = \"\".ljust(string_size19, ' ')\n string_size20 = len(padding18) - len(opponent11) - 3\n padding20 = \"\".ljust(string_size20, '-')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding3+\"|---\"+opponent03+padding5+\"|\"+\"\\n\"\n display += opponent02+padding2+\"|\"+padding12+\"|---\"+opponent09+padding13+\"|\"+\"\\n\"\n display += padding4+opponent04+padding6+\"|\"+padding15+\"|\"+\"\\n\"\n display += padding16+\"|---\"+opponent12+\"|\"+\"\\n\"\n display += padding4+opponent05+padding7+\"|\"+padding15+\"|\"+padding17+\"|\"+\"\\n\"\n display += 
padding11+\"|---\"+opponent10+padding14+\"|\"+padding17+\"|\"+\"\\n\"\n display += padding4+opponent06+padding8+\"|\"+padding18+\"|---\"+winner+\"\\n\"\n display += padding19+\"|\"+\"\\n\"\n display += padding4+opponent07+padding9+\"|\"+padding18+\"|\"+\"\\n\"\n display += padding11+\"|---\"+opponent11+padding20+\"|\"+\"\\n\"\n display += padding4+opponent08+padding10+\"|\"+\"\\n\"\n\n # Game with 8 players\n if cases == 8:\n opponent01 = self.all_opponents[0][0][0]\n opponent02 = self.all_opponents[0][0][1]\n opponent03 = self.all_opponents[0][1][0]\n opponent04 = self.all_opponents[0][1][1]\n opponent05 = self.all_opponents[0][2][0]\n opponent06 = self.all_opponents[0][2][1]\n opponent07 = self.all_opponents[0][3][0]\n opponent08 = self.all_opponents[0][3][1]\n if len(self.all_opponents) == 1:\n if len(self.winner_list_temp) >= 1:\n opponent09 = self.winner_list_temp[0]\n if len(self.winner_list_temp) >= 2:\n opponent10 = self.winner_list_temp[1]\n if len(self.winner_list_temp) >= 3:\n opponent11 = self.winner_list_temp[2]\n if len(self.all_opponents) >= 2:\n opponent09 = self.all_opponents[1][0][0]\n opponent10 = self.all_opponents[1][0][1]\n opponent11 = self.all_opponents[1][1][0]\n opponent12 = self.all_opponents[1][1][1]\n if len(self.winner_list_temp) >= 1:\n opponent13 = self.winner_list_temp[0]\n if len(self.all_opponents) >= 3:\n opponent13 = self.all_opponents[2][0][0]\n opponent14 = self.all_opponents[2][0][1]\n if len(self.winner_list) == 3:\n winner = self.winner_list[2][0]\n\n first_game = [opponent01, opponent02, opponent03, opponent04, opponent05, opponent06, opponent07, opponent08]\n max_string = len(max(first_game, key=len))\n string_size1 = len(opponent01)\n padding1 = \"\".ljust(int(max_string-string_size1), '-')\n string_size2 = len(opponent02)\n padding2 = \"\".ljust(int(max_string-string_size2), '-')\n string_size3 = len(opponent03)\n padding3 = \"\".ljust(int(max_string-string_size3), '-')\n string_size4 = len(opponent04)\n padding4 = \"\".ljust(int(max_string-string_size4), '-')\n string_size5 = len(opponent05)\n padding5 = \"\".ljust(int(max_string-string_size5), '-')\n string_size6 = len(opponent06)\n padding6 = \"\".ljust(int(max_string-string_size6), '-')\n string_size7 = len(opponent07)\n padding7 = \"\".ljust(int(max_string-string_size7), '-')\n string_size8 = len(opponent08)\n padding8 = \"\".ljust(int(max_string-string_size8), '-')\n string_size9 = len(opponent01+padding1)\n padding9 = \"\".ljust(string_size9, ' ')\n string_size10 = max(len(opponent09), len(opponent10), len(opponent11), len(opponent12)) - len(opponent09)\n padding10 = \"\".ljust(string_size10, '-')\n string_size11 = max(len(opponent09), len(opponent10), len(opponent11), len(opponent12)) - len(opponent10)\n padding11 = \"\".ljust(string_size11, '-')\n string_size12 = max(len(opponent09), len(opponent10), len(opponent11), len(opponent12)) - len(opponent11)\n padding12 = \"\".ljust(string_size12, '-')\n string_size13 = max(len(opponent09), len(opponent10), len(opponent11), len(opponent12)) - len(opponent12)\n padding13 = \"\".ljust(string_size13, '-')\n string_size14 = len(padding9+\"|---\"+opponent09+padding10) - len(opponent02+padding2) - 1\n padding14 = \"\".ljust(string_size14, ' ')\n string_size15 = len(padding9+\"|---\"+opponent09+padding10)\n padding15 = \"\".ljust(string_size15, ' ')\n string_size16 = max(len(opponent13), len(opponent14)) - len(opponent13)\n padding16 = \"\".ljust(string_size16, '-')\n string_size17 = max(len(opponent13), len(opponent14)) - len(opponent14)\n 
padding17 = \"\".ljust(string_size17, '-')\n string_size18 = len(opponent14+padding17) + 3\n padding18 = \"\".ljust(string_size18, ' ')\n string_size19 = len(padding15+\"|---\"+opponent14+padding17) - len(opponent08+padding8) - 1\n padding19 = \"\".ljust(string_size19, ' ')\n string_size20 = len(padding15+\"|---\"+opponent14+padding17)\n padding20 = \"\".ljust(string_size20, ' ')\n\n display += opponent01+padding1+\"|\"+\"\\n\"\n display += padding9+\"|---\"+opponent09+padding10+\"|\"+\"\\n\"\n display += opponent02+padding2+\"|\"+padding14+\"|\"+\"\\n\"\n display += padding15+\"|---\"+opponent13+padding16+\"|\"+\"\\n\"\n display += opponent03+padding3+\"|\"+padding14+\"|\"+padding18+\"|\"+\"\\n\"\n display += padding9+\"|---\"+opponent10+padding11+\"|\"+padding18+\"|\"+\"\\n\"\n display += opponent04+padding4+\"|\"+padding19+\"|\"+\"\\n\"\n display += padding20+\"|---\"+winner+\"\\n\"\n display += opponent05+padding5+\"|\"+padding19+\"|\"+\"\\n\"\n display += padding9+\"|---\"+opponent11+padding12+\"|\"+padding18+\"|\"+\"\\n\"\n display += opponent06+padding6+\"|\"+padding14+\"|\"+padding18+\"|\"+\"\\n\"\n display += padding15+\"|---\"+opponent14+padding17+\"|\"+\"\\n\"\n display += opponent07+padding7+\"|\"+padding14+\"|\"+\"\\n\"\n display += padding9+\"|---\"+opponent12+padding13+\"|\"+\"\\n\"\n display += opponent08+padding8+\"|\"+\"\\n\"\n\n return display", "def score_game(self):\r\n players = self.player_control.get_players()\r\n ###game_control = self.game_control\r\n ###if game_control is not None:\r\n ### game_control.set_vals() # Update any changed game control settings\r\n if len(players) == 0:\r\n return # No players\r\n n_top_score = 0\r\n top_score = players[0].get_score()\r\n for player in players:\r\n if player.get_score() > top_score:\r\n top_score = player.get_score()\r\n for player in players:\r\n player_score = player.get_score()\r\n if player_score == top_score:\r\n n_top_score += 1\r\n \r\n for player in players:\r\n player_score = player.get_score()\r\n player_played = player.get_played()\r\n player_ties = player.get_ties()\r\n player_wins = player.get_wins()\r\n new_played = player_played+1\r\n player.set_played(new_played)\r\n player.set_prop(\"played\")\r\n if player_score == top_score:\r\n if n_top_score > 1:\r\n new_ties = player_ties + 1\r\n player.set_ties(new_ties)\r\n player.set_prop(\"ties\")\r\n else:\r\n new_wins = player_wins + 1\r\n player.set_wins(new_wins)\r\n player.set_prop(\"wins\")\r\n self.update_score_window()", "async def evaluate(self):\n if self.players[1].id == bot.user.id:\n self.p2_move = random.choice((\"Rock\", \"Paper\", \"Scissors\"))\n\n if None in self.moves:\n return\n\n if len(self.moves) == 1:\n tie_embed = discord.Embed(title=\"It's a Draw\")\n await self.channel.send(embed=tie_embed)\n return await self.end()\n\n if self.moves == {\"Rock\", \"Paper\"}:\n winner = \"Paper\"\n elif self.moves == {\"Scissors\", \"Paper\"}:\n winner = \"Scissors\"\n elif self.moves == {\"Rock\", \"Scissors\"}:\n winner = \"Rock\"\n\n # P1 Wins\n if self.p1_move == winner:\n embed = discord.Embed(\n title=f\"{self.players[0].name}'s **{winner}** beats {self.players[1].name}'s **{self.p2_move}**\")\n await self.channel.send(embed=embed)\n await self.end(winner=self.players[0])\n\n # P2 Wins\n elif self.p2_move == winner:\n embed = discord.Embed(\n title=f\"{self.players[1].name}'s **{winner}** beats {self.players[0].name}'s **{self.p1_move}**\")\n await self.channel.send(embed=embed)\n await self.end(winner=self.players[1])" ]
[ "0.74361", "0.6622078", "0.65925574", "0.65638083", "0.6523164", "0.6452009", "0.64427716", "0.6395758", "0.6380628", "0.6225637", "0.6210784", "0.6170021", "0.61530846", "0.614373", "0.61072975", "0.60683894", "0.6060449", "0.6058013", "0.6040808", "0.60289985", "0.602816", "0.5999954", "0.59998786", "0.59929866", "0.5980426", "0.59722275", "0.5953709", "0.59534836", "0.5952332", "0.5939572" ]
0.85560715
0
Method to save the attributes of the current bot and the weights of its neural network under the directory given by the parameter 'prefix'
def save_bot(self, prefix="model_data/"): network_load_command = self.network.save_network(prefix) attributes = {"num_rounds" : self.num_rounds, "c" : self.c, "alpha" : self.alpha, "loss_history" : self.loss_history, "evaluation_history_old" : self.evaluation_history_old, "evaluation_history_ran" : self.evaluation_history_ran, "network_load_command": network_load_command} np.save(prefix + "model_attributes.npy", attributes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, prefix):\n model_file = prefix + \".json\"\n weight_file = prefix + \".h5\"\n json.dump(self.model.to_json(), open(model_file, \"w\"))\n self.model.save_weights(weight_file)\n return self", "def saveWeights(self, basename, generation):\n for i,wt in enumerate(self.weights):\n np.save(\"./data/\"+basename+\"/\"+basename + \"_W\"+str(i)+\"_G\" + str(generation),wt)\n for i,bs in enumerate(self.bias):\n np.save(\"./data/\"+basename+\"/\"+basename + \"_B\"+str(i)+\"_G\" + str(generation),bs)", "def save_networks(self, dir_weights):\n\n path_p = dir_weights + 'p_{}.ckpt'\n path_D = dir_weights + 'D_{}.ckpt'\n path_U = dir_weights + 'U_{}.ckpt'\n\n self.net_p.save_weights(path_p.format(self.idx_save))\n self.net_D.save_weights(path_D.format(self.idx_save))\n self.net_U.save_weights(path_U.format(self.idx_save))", "def load_bot(self, prefix=\"model_data/\"):\n attributes = np.load(prefix + \"model_attributes.npy\",\n allow_pickle='TRUE').item()\n \n self.num_rounds = attributes[\"num_rounds\"]\n self.c = attributes[\"c\"]\n self.alpha = attributes[\"alpha\"]\n if \"loss_history\" in attributes:\n self.loss_history = attributes[\"loss_history\"]\n self.evaluation_history_old = attributes[\"evaluation_history_old\"]\n self.evaluation_history_ran = attributes[\"evaluation_history_ran\"]\n \n network_load_command = attributes[\"network_load_command\"]\n exec(network_load_command)\n self.network.load_network(prefix)", "def save_network(self, epoch):\n path = os.path.join(os.getcwd(), 'models', 'unit')\n\n torch.save(self.generator_A.state_dict(), '%s/generator_A_epoch_%d.pth' % (path, epoch))\n torch.save(self.generator_B.state_dict(), '%s/generator_B_epoch_%d.pth' % (path, epoch))\n torch.save(self.discriminator_A.state_dict(), '%s/discriminator_A_epoch_%d.pth' % (path, epoch))\n torch.save(self.discriminator_B.state_dict(), '%s/discriminator_B_epoch_%d.pth' % (path, epoch))\n torch.save(self.discriminator_optimizer.state_dict(), '%s/discriminator_optimizer_epoch_%d.pth' % (path, epoch))\n torch.save(self.generator_optimizer.state_dict(), '%s/generator_optimizer_epoch_%d.pth' % (path, epoch))", "def save_state(self, path='/home/lukas/weights/'):\r\n stuff_in_path = os.listdir(path)\r\n counter = 0\r\n for i in stuff_in_path:\r\n if 'parameters' in i:\r\n counter += 1\r\n with open(path + 'info.txt', mode='a') as f:\r\n f.write('counter: %i \\taccuracy: %.8f%% \\tloss: %.8f\\n' % (counter, returnList(self.accuracy)[-1] * 100, returnList(self.loss)[-1]))\r\n\r\n parameters = [ self.batchsize_train,\r\n self.iterator,\r\n self.n_hidden_layers,\r\n self.n_hidden_neurons,\r\n self.n_input_neurons,\r\n self.n_output_neurons,\r\n self.hid_transfer.__name__,\r\n self.out_transfer.__name__]\r\n try:\r\n print '[Network] Saving network status ...'\r\n np.save(path + 'parameters' + str(counter), parameters)\r\n np.save(path + 'weights' + str(counter), self.weights)\r\n np.save(path + 'bias' + str(counter), self.bias)\r\n np.save(path + 'weights_gradient' + str(counter), self.weights_gradient)\r\n np.save(path + 'bias_gradient' + str(counter), self.bias_gradient)\r\n np.save(path + 'loss' + str(counter), self.loss)\r\n np.save(path + 'accuracy' + str(counter), self.accuracy)\r\n np.save(path + 'r_weights' + str(counter), self.r_weights)\r\n np.save(path + 'r_bias' + str(counter), self.r_bias)\r\n print '\\033[92m' + '[Network] Network status succesfully saved' + '\\033[0m'\r\n\r\n except Exception as e:\r\n print '\\033[1m' + '\\033[91m' + '[Network] Could not correctly save network status:' + 
'\\033[0m'\r\n print e.message", "def save_utility_network(self,path_save):\n print(\"Save the neural network to : \"+path_save)\n self.nn.save_on_file(path_save)", "def save_model(self, model_dir, model_prefix):\n torch.save(self.policy.state_dict(), os.path.join(model_dir, '{}_policy_{}.model'.format(model_prefix, self.global_step)))\n torch.save(self.policy_optimizer.state_dict(), os.path.join(model_dir, '{}_policy_{}.optimizer'.format(model_prefix, self.global_step)))\n torch.save(self.discrim.state_dict(), os.path.join(model_dir, '{}_discrim_{}.model'.format(model_prefix, self.global_step)))\n torch.save(self.discrim_optimizer.state_dict(), os.path.join(model_dir, '{}_discrim_{}.optimizer'.format(model_prefix, self.global_step)))\n self.logger.info('Model and optimizer saved in {}, with prefix {} and global step {}.'.format(model_dir, model_prefix, self.global_step))", "def save_weights(self, the_path):\n torch.save(self.model.state_dict(), the_path)", "def save_weights(self, location: str):\n\n # import torch\n torch = import_optional_dependency(\"torch\")\n torch.save(self.model.state_dict(), self._set_save_location(location))", "def save(self, directory='saves/'):\n # Create dirpath for temporary dir\n if directory[-1] != '/':\n directory += '/'\n dirpath = directory + self.name + '/'\n\n if not os.path.exists(dirpath): \n os.makedirs(dirpath)\n else:\n raise Exception(f'Path {dirpath} already exists.')\n\n # DQNs & Optimizer\n torch.save(self.policy_net.state_dict(), f'{dirpath}dqn.pth')\n torch.save(self.optimizer.state_dict(), f'{dirpath}optimizer.pth')\n\n # Trainer pamameters\n params = {}\n for p in self.DEFAULT_VALUES.keys():\n params[p] = getattr(self, p)\n\n with open(f'{dirpath}trainer_parameters.pick', 'wb') as file:\n pickle.dump(params, file)\n\n # Zip the saves in one .zip archive\n zippath = f'{directory}{self.name}'\n shutil.make_archive(zippath, 'zip', dirpath)\n\n # Remove the directory dirpath and files inside\n shutil.rmtree(dirpath)\n\n # Display\n print(f'Model saved at {zippath}.zip')", "def save_weights(self, path: str):\n torch.save(self.state_dict(), path)", "def save_weights(self, path: str):\n torch.save(self.state_dict(), path)", "def save_weights(self):\n np.save(\"weights.npy\", self.model.parameters)\n return \"weights.npy\"", "def save_networks(self, epoch):\n for name in self.network_names:\n if isinstance(name, str):\n save_filename = '{0}_net_{1}.pth'.format(epoch, name)\n save_path = os.path.join(self.save_dir, save_filename)\n net = getattr(self, 'net')\n\n if self.use_cuda:\n torch.save(net.cpu().state_dict(), save_path)\n net.to(self.device)\n else:\n torch.save(net.cpu().state_dict(), save_path)", "def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name in [\"encoder\", \"decoder\"]:\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n if model_name == 'encoder':\n to_save = self.encoder.state_dict()\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.opt.height\n to_save['width'] = self.opt.width\n else:\n to_save = self.decoder.state_dict()\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.optimizer.state_dict(), save_path)", "def save(self, folder):\n self.generator.save_weights('%s/generator.h5'%folder)\n self.critic.save_weights('%s/critic.h5'%folder)", "def 
save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def save_network(self, sess, filename):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n dir_path += '/Models/'\n dir_path += filename\n saver = tf.train.Saver()\n saver.save(sess, dir_path)", "def save_nn(self, networkname= 'nn'):\n np.save(f\"{networkname}_data.npy\", self.weights_and_biases)\n print(f\"Data saved to {networkname}_data.npy\")", "def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name, model in self.models.items():\n print(\"MODEL NAME = {}\".format(model_name))\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n to_save = model.state_dict()\n if model_name == 'encoder':\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.height\n to_save['width'] = self.width\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.model_optimizer.state_dict(), save_path)", "def save_networks(self, which_epoch):\n for name in self.model_names:\n if isinstance(name, str):\n save_filename = '%s_net_%s.pth' % (which_epoch, name)\n save_path = os.path.join(self.save_dir, save_filename)\n net = getattr(self, 'net_' + name)\n torch.save(net.cpu().state_dict(), save_path)\n if len(self.gpu_ids) > 0 and torch.cuda.is_available():\n net.cuda()", "def save_weights(model, weights_dir, epoch):\n weights_fname = 'weights-%s.pth' % (epoch)\n weights_fpath = os.path.join(weights_dir, weights_fname)\n torch.save({'state_dict': model.state_dict()}, weights_fpath)\n return weights_fpath", "def save_networks(self, epoch):\n torch.save(self.netG.state_dict(),\n '%s/netG_epoch_%d.pth' % (self.opt.out_dir, epoch))\n torch.save(self.netD.state_dict(),\n '%s/netD_epoch_%d.pth' % (self.opt.out_dir, epoch))\n torch.save(self.netE.state_dict(),\n '%s/netE_epoch_%d.pth' % (self.opt.out_dir, epoch))\n torch.save(self.netD2.state_dict(),\n '%s/netD2_epoch_%d.pth' % (self.opt.out_dir, epoch))", "def save_networks(self, epoch):\n for name in self.model_names:\n if isinstance(name, str):\n save_filename = '%s_net_%s.pth' % (epoch, name)\n save_path = os.path.join(self.save_dir, save_filename)\n net = getattr(self, 'net' + name)\n\n if len(self.gpu_ids) > 0 and torch.cuda.is_available():\n torch.save(net.module.cpu().state_dict(), save_path)\n net.cuda(self.gpu_ids[0])\n else:\n torch.save(net.cpu().state_dict(), save_path)", "def save(self):\n self.save_network_architecture( network_path=self.network_path )\n self.save_network_parameters(\n file_name='net_parameters', file_path=self.network_path )", "def _save_model_weights(model_weights, model_weights_dir, epoch_idx):\n model_weights_path = os.path.join(model_weights_dir,\n f\"model_epoch_{epoch_idx}.pth\")\n torch.save(model_weights, model_weights_path)", "def load_weigths_into_target_network(self):\n logging.debug(\"Transfer Weight!\")\n self.network.save_weights(self._save_path)\n self.target_network.load_weights(self._save_path)", "def save(uNetwork, uDir):\n ## first of all, save the network spec\n out = open(uDir + \"net_spec.py\", \"w\")\n out.write(\"spec = \" + str(uNetwork.spec))\n out.close()\n \n ## then, save the state of each layer\n layers = uNetwork.layers\n\n for l in range(len(layers) - 1):\n if 
layers[l].node_sharing:\n ## save just one node\n layers[l].pipes[0][0].send(\"clone_state\")\n state = layers[l].pipes[0][0].recv()\n \n ## save coincidences and PCG\n np.save(uDir + str(l) + \".0.0.coincidences\", state['coincidences'])\n np.save(uDir + str(l) + \".0.0.PCG\", state['PCG'])\n\n else:\n for i in range(len(layers[l].nodes)): \n for j in range(len(layers[l].nodes[i])):\n layers[l].pipes[i][j].send(\"clone_state\")\n state = layers[l].pipes[i][j].recv()\n\n np.save(uDir + str(l) + \".\" + str(i) + \".\" + str(j) + \".coincidences\", state['coincidences'])\n np.save(uDir + str(l) + \".\" + str(i) + \".\" + str(j) + \".PCG\", state['PCG'])\n\n ## then, save also the output layer\n layers[-1].pipes[0][0].send(\"clone_state\")\n state = layers[-1].pipes[0][0].recv()\n np.save(uDir + str(len(layers) - 1) + \".0.0.coincidences\", state['coincidences'])\n np.save(uDir + str(len(layers) - 1) + \".0.0.cls_prior_prob\", state['cls_prior_prob'])\n np.save(uDir + str(len(layers) - 1) + \".0.0.PCW\", state['PCW'])", "def save(nntagger, args):\n outdir = args.save\n modelname = outdir + \".model\"\n nntagger.model.save(modelname)\n import pickle\n myparams = {\"num_words\": len(nntagger.w2i),\n \"num_chars\": len(nntagger.c2i),\n \"w2i\": nntagger.w2i,\n \"c2i\": nntagger.c2i,\n \"tag2idx\": nntagger.tag2idx,\n \"activation\": nntagger.activation,\n \"in_dim\": nntagger.in_dim,\n \"h_dim\": nntagger.h_dim,\n \"c_in_dim\": nntagger.c_in_dim,\n \"h_layers\": nntagger.h_layers\n }\n pickle.dump(myparams, open( modelname+\".pickle\", \"wb\" ) )\n print(\"model stored: {}\".format(modelname), file=sys.stderr)" ]
[ "0.695643", "0.67557275", "0.6676642", "0.65116364", "0.64193225", "0.64169306", "0.639074", "0.6336839", "0.6331544", "0.6298259", "0.6251713", "0.618245", "0.618245", "0.61786866", "0.61513954", "0.614478", "0.61215967", "0.6093279", "0.60930675", "0.6048004", "0.6039565", "0.60216707", "0.6019363", "0.60066664", "0.60051376", "0.5959991", "0.5932953", "0.5918968", "0.5916914", "0.5895226" ]
0.7971842
0
Method to save the current bot as the 'old_bot' used in bot evaluation.
def save_as_old_bot(self, prefix="model_data/old_bot/"): self.save_bot(prefix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_old_bot(self, prefix=\"model_data/old_bot/\"):\n self.load_bot(prefix)", "def evaluate_against_old_bot(self, num_games,\n num_white_pieces = None, \n num_black_pieces = None,\n prefix=\"model_data/old_bot/\"):\n print('Evaluating against old bot')\n old_bot = ZeroBot(1)\n old_bot.load_old_bot(prefix)\n results = self.evaluate_against_bot(old_bot, num_games,\n num_white_pieces, \n num_black_pieces)\n self.evaluation_history_old.append(results)", "def get_bot_save_state_to_file(self):\n return self.bot_data_file[\"bot_status\"][\"save_to_file\"][\"save_state_to_file\"]", "def changeStrunctureCurrentChatbot(self,sentence):\n existChatbotName = [value for key, value in self.dictChatBots.items() if\n sentence.lower() == str(key).lower()]\n if len(existChatbotName)==0:\n self.output.exec('No existe ese Chatbot.')\n else:\n if not self.currentStructureChatBot is None:\n self.output.exec('Se ha cambiado \"'+ self.currentStructureChatBot.name+ '\" por \"'+ existChatbotName[0].name+ '\".')\n else:\n self.output.exec('Ahora \"'+ sentence+ '\" es el actual Chatbot.')\n self.currentStructureChatBot = existChatbotName[0] # establece el nuevo chatbot", "def get_bot_save_state_to_server(self):\n return self.bot_data_file[\"bot_status\"][\"server_state_saving\"][\"save_state_to_server\"]", "def save_net(self):\n self.saved_net = copy.deepcopy(self.net)", "def save_state(self):\n # add (turn number, active player, player 1, player 2) to game history\n # player 1 and player 2 contain data about active mods\n turn_number = self.turn_number\n player_1 = Class.copy_monster(self.player1)\n player_2 = Class.copy_monster(self.player2)\n # save which player's turn it is\n if self.current_player == self.player1:\n active_player = 'player 1'\n else:\n active_player = 'player 2'\n\n # add this information to history list\n self.history.append((turn_number, active_player, player_1, player_2))", "def save_bgn(self):\n self.savedata = ''", "def save_backup(\n self):\n self.backup = self.data", "def save(self) -> str:\n return self._bettor.save()", "def set_builder_bot(self, builder_bot): \n self.builder_bot = builder_bot # pragma: no cover", "def saveCurrentEditor(self):\n aw = self.activeWindow()\n self.saveEditorEd(aw)", "def get_bot_save_state_file_name(self):\n return self.bot_data_file[\"bot_status\"][\"save_to_file\"][\"file_name\"]", "def _save_changes(self):\n copy2(self._cfg_filename, self._cfg_filename + \".bak\")\n with open(self._cfg_filename, \"w\", encoding=\"utf-8\") as self._cfg_file:\n self.write(self._cfg_file)", "async def set_bot():\n\n self = await LOCAL.APP.get_me()\n LOCAL.bot_id = self.id\n LOCAL.bot_name = self.first_name\n LOCAL.bot_username = self.username", "def _save_best(self, epoch, holdout_loss):\n updated = False\n\n current = holdout_loss\n _, best = self._snapshot\n improvement = (best - current) / best # Notice this is different with the one used in bnn._save_best\n print(\"improvement {} and updates steps {} and current holdout_loss {}, best loss {}\".format(improvement,\n self._epochs_since_update,\n current, best))\n if improvement > 0.01:\n self._snapshot = (epoch, current)\n # save current state\n # saver.save(self.sess_ssm, '')\n updated = True\n\n # early stopping\n if updated:\n self._epochs_since_update = 0\n else:\n self._epochs_since_update += 1\n\n if self._epochs_since_update > self._early_stop_patience:\n return True\n else:\n return False", "def _save_current_as_new(self):\n highest_workfile = self._helper._get_highest_workfile(self._context)\n\n # create new 
workfile based on context, use existing workfile with highest version number as base\n if highest_workfile:\n self.workfile = self._helper._create_workfile_from(\n self._context, highest_workfile\n )\n else:\n self.workfile = self._helper._create_new_workfile(self._context)\n\n # save as new created workfile\n # save as will also change engine context\n self._engine.save_as(self.workfile)\n\n self._update_context()", "def _save_state_as_orig(self):\n self._orig = None\n self._orig = deepcopy(self)", "def get_original_robot(self):\n if isinstance(self.robot, EquivRobot):\n return self.robot.get_original_robot()\n else:\n return self.robot", "def old_post_save(model, os_path, contents_manager):\n os_path.append(\"old_post_save\")", "def save_other(self,\n new_host):\n\n return new_host", "def saveCopyCurrentEditor(self):\n aw = self.activeWindow()\n self.saveCopyEditorEd(aw)", "def save_state(self):\n pass", "def SaveProgramState(self, sess=None, global_step=None):\n pass", "def addbot(self, bot):\n\n if bot:\n for i in range(len(self.bots)-1, -1, -1):\n if self.bots[i].name == bot.name:\n del self.bots[i]\n self.bots.append(bot)", "def save_bot(self, prefix=\"model_data/\"):\n network_load_command = self.network.save_network(prefix)\n \n attributes = {\"num_rounds\" : self.num_rounds,\n \"c\" : self.c,\n \"alpha\" : self.alpha,\n \"loss_history\" : self.loss_history,\n \"evaluation_history_old\" : self.evaluation_history_old,\n \"evaluation_history_ran\" : self.evaluation_history_ran,\n \"network_load_command\": network_load_command}\n \n np.save(prefix + \"model_attributes.npy\", attributes)", "def bot(self):\n return self._bot", "def save(self):\n\n for i in self.bots:\n try:\n i.save()\n except Exception, ex:\n handle_exception()", "def save():\n pass", "def saveGame(self) -> None:\n self.state[\"phase\"] = self._phase\n\n state_as_string = json.dumps(self.state)\n with open(self.save_location, \"w\") as File:\n File.write(state_as_string)" ]
[ "0.62592715", "0.60499334", "0.5963151", "0.57505435", "0.5727542", "0.5663036", "0.55086035", "0.5487756", "0.54278994", "0.53914005", "0.5381366", "0.53616077", "0.5353202", "0.5349034", "0.5345035", "0.53073835", "0.5297173", "0.52896994", "0.5284209", "0.52690077", "0.5246753", "0.5239394", "0.52309704", "0.52288425", "0.52169144", "0.5207556", "0.5165459", "0.51432616", "0.509858", "0.5080923" ]
0.7734785
0
Method to load the old_bot for evaluating the current bot.
def load_old_bot(self, prefix="model_data/old_bot/"): self.load_bot(prefix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_against_old_bot(self, num_games,\n num_white_pieces = None, \n num_black_pieces = None,\n prefix=\"model_data/old_bot/\"):\n print('Evaluating against old bot')\n old_bot = ZeroBot(1)\n old_bot.load_old_bot(prefix)\n results = self.evaluate_against_bot(old_bot, num_games,\n num_white_pieces, \n num_black_pieces)\n self.evaluation_history_old.append(results)", "def save_as_old_bot(self, prefix=\"model_data/old_bot/\"):\n self.save_bot(prefix)", "def reload(bot, event, *args):\n bot.config.load()\n bot.memory.load()", "def loadChatbots(self):\n pathChatbots = os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))) # ruta donde se guardarán los chatbots\n listAllChatbots = os.listdir(pathChatbots) # lista de chatbots en la ruta\n if len(listAllChatbots) == len(self.listNoChatbots): # si son iguales es que no hay más chatbots que los que están por defecto\n self.output.exec('No hay chatbots para cargar.')\n else:\n currentChatbotLoaded = False # variable para establecer que ya hay un chatbot actual\n for nameChatbot in listAllChatbots:\n if not nameChatbot in self.listNoChatbots:\n pathJson = os.path.join(os.path.sep, pathChatbots, nameChatbot,nameChatbot+'.json') # path del json del chatbot\n if os.path.isfile(pathJson):\n chatbot = CStructureChatBot() # objeto chatbot\n\n with open(pathJson, 'r', encoding='utf-8') as json_data:\n dictChatBot = json.load(json_data) # carga el json\n nameWithoutTranform = list(dictChatBot.keys())[0] # se obtiene el nombre del chatbot\n chatbot.setName(nameWithoutTranform)\n intents = dictChatBot[nameWithoutTranform] # guarda las intenciones del chatbot\n chatbot.codeToStructureChatbot(chatbot, intents) # convierte el json en un chatbot\n chatbot.nameTransformed = nameChatbot # guarda el nombre del chatbot sin caracteres especiales\n self.dictChatBots[nameWithoutTranform] = chatbot # se añade el chatbot como esté en el JSON\n\n\n if not currentChatbotLoaded :\n self.currentStructureChatBot =chatbot # se establece el primer chatbot como chatbot actual\n currentChatbotLoaded = True # se cambia el boleano\n self.output.exec('Ahora el chatbot actual es \"'+self.currentStructureChatBot.name+'\".')", "def changeStrunctureCurrentChatbot(self,sentence):\n existChatbotName = [value for key, value in self.dictChatBots.items() if\n sentence.lower() == str(key).lower()]\n if len(existChatbotName)==0:\n self.output.exec('No existe ese Chatbot.')\n else:\n if not self.currentStructureChatBot is None:\n self.output.exec('Se ha cambiado \"'+ self.currentStructureChatBot.name+ '\" por \"'+ existChatbotName[0].name+ '\".')\n else:\n self.output.exec('Ahora \"'+ sentence+ '\" es el actual Chatbot.')\n self.currentStructureChatBot = existChatbotName[0] # establece el nuevo chatbot", "def parse_bot(self) -> None:\n if not self.skip_bot_detection and not self.bot:\n self.bot = Bot(\n self.user_agent,\n self.ua_hash,\n self.ua_spaceless,\n self.VERSION_TRUNCATION,\n ).parse()\n self.all_details['bot'] = self.bot.ua_data", "def set_builder_bot(self, builder_bot): \n self.builder_bot = builder_bot # pragma: no cover", "def get_original_robot(self):\n if isinstance(self.robot, EquivRobot):\n return self.robot.get_original_robot()\n else:\n return self.robot", "def on_load(self, bot):\n self.bot = bot\n self.connection = bot.get_connection()\n self.plugin_manager = bot.get_plugin_manager()\n self.config = bot.get_config_manager()\n self.data_manager = bot.get_data_manager()", "def reload(bot, event, *args):\n\n yield from 
bot.coro_send_message(event.conv, \"<b>reloading config.json</b>\")\n bot.config.load()\n\n yield from bot.coro_send_message(event.conv, \"<b>reloading memory.json</b>\")\n bot.memory.load()", "def get_bot_data(self) -> BD:\n if self.bot_data:\n pass\n elif not self.single_file:\n filename = f\"{self.filename}_bot_data\"\n data = self._load_file(filename)\n if not data:\n data = self.context_types.bot_data()\n self.bot_data = data\n else:\n self._load_singlefile()\n return self.bot_data # type: ignore[return-value]", "def bot(self):\n return self._bot", "def load(self,previous=True):\n\n\t\tincoming = pickle.load(open(self.filename,'rb'))\n\t\t#---reconstitute things that were bootstrapped\n\t\t#---we do not load spots because e.g. paths might have changed slightly in paths.yaml\n\t\tself.post = incoming.post\n\t\tself.groups = incoming.groups\n\t\tself.slices = incoming.slices\n\t\tself.vars = incoming.vars\n\t\tself.meta = incoming.meta\n\t\tself.calc = incoming.calc\n\t\tself.toc = incoming.toc\n\n\t\t#---retain the incoming workspace for comparison\n\t\tif previous: self.previous = incoming", "def __init__(self, bot=BNBot):\n self.bot = bot", "def rehash(self):\n logging.info(\"Rehashing started\")\n modules = self.cmd_plugins.get_modules()\n CommandBot.pause(self)\n PlugBot.stop(self)\n\n logging.info(\"Reloading config file\")\n self.botconfig = self.load_config(self.config_file)\n for module in modules:\n reload(module)\n CommandBot.reset(self)\n\n PlugBot.start(self)\n CommandBot.resume(self)\n self.join_rooms()", "async def set_bot():\n\n self = await LOCAL.APP.get_me()\n LOCAL.bot_id = self.id\n LOCAL.bot_name = self.first_name\n LOCAL.bot_username = self.username", "async def tool_reload(self, ctx, *, cog: str):\n\n try:\n self.bot.unload_extension(cog)\n self.bot.load_extension(cog)\n except Exception as e:\n await zb.bot_errors(ctx,sp.format(e))\n else:\n await ctx.send('**`SUCCESS`**')", "def __init__(self, bot):\n self.bot = bot", "def __init__(self, bot):\n self.bot = bot", "def load_bot(self, prefix=\"model_data/\"):\n attributes = np.load(prefix + \"model_attributes.npy\",\n allow_pickle='TRUE').item()\n \n self.num_rounds = attributes[\"num_rounds\"]\n self.c = attributes[\"c\"]\n self.alpha = attributes[\"alpha\"]\n if \"loss_history\" in attributes:\n self.loss_history = attributes[\"loss_history\"]\n self.evaluation_history_old = attributes[\"evaluation_history_old\"]\n self.evaluation_history_ran = attributes[\"evaluation_history_ran\"]\n \n network_load_command = attributes[\"network_load_command\"]\n exec(network_load_command)\n self.network.load_network(prefix)", "def addbot(self, bot):\n\n if bot:\n for i in range(len(self.bots)-1, -1, -1):\n if self.bots[i].name == bot.name:\n del self.bots[i]\n self.bots.append(bot)", "async def _reload(self, ctx, *, module: str=None):\n if module is None or module == \"all\":\n await ctx.message.add_reaction('\\N{HOURGLASS}')\n try:\n for extension in startup_extensions:\n self.bot.unload_extension(extension)\n self.bot.load_extension(extension)\n except Exception as e:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{CROSS MARK}')\n await ctx.send('{}: {}'.format(type(e).__name__, e))\n traceback.print_exc()\n else:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{WHITE HEAVY CHECK MARK}')\n else:\n await ctx.message.add_reaction('\\N{HOURGLASS}')\n try:\n self.bot.unload_extension(module)\n self.bot.load_extension(module)\n 
except Exception as e:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{CROSS MARK}')\n await ctx.send('{}: {}'.format(type(e).__name__, e))\n traceback.print_exc()\n else:\n await ctx.message.remove_reaction('\\N{HOURGLASS}', ctx.me)\n await ctx.message.add_reaction('\\N{WHITE HEAVY CHECK MARK}')", "def load_real_robot_state(self):\n self.robotModel.setConfig(motion.robot.getKlamptSensedPosition())", "async def giveaway_reload(self, ctx):\n self._load_games()\n await ctx.send(\n f\"Reloaded list of games ({len(self.steam_keys)} games)\")", "def old(self, old):\n\n self._old = old", "def reload(self):\n\n\t\tif self.module is None:\n\t\t\t# Do nothing, as the module will be imported on attribute access.\n\t\t\tpass\n\t\telse:\n\t\t\texec \"reload(\" + self.name + \")\"\n\t\t\t# The module object is still identical, only its code has been\n\t\t\t# replaced. Thus no eval(self.name) is necessary.", "def force_load(self):\n pass", "async def reload(self, ctx, extension_name: str):\n if await ctx.bot.is_owner(ctx.message.author):\n unload = ctx.bot.get_command('unload')\n load = ctx.bot.get_command('load')\n await ctx.invoke(unload, extension_name=extension_name)\n await ctx.invoke(load, extension_name=extension_name)\n else:\n await ctx.send(dis())", "def LoadProgramState(self, restored_checkpoint_path=None, sess=None):\n pass", "def reload(self):\n self.read(self._cfg_path)" ]
[ "0.67853993", "0.63987565", "0.5985643", "0.54981875", "0.5478145", "0.53499985", "0.53470945", "0.5338737", "0.5289171", "0.5090192", "0.5078427", "0.5064622", "0.50458825", "0.50444275", "0.50434035", "0.49430028", "0.4917829", "0.49122745", "0.49122745", "0.4912221", "0.49091083", "0.48567393", "0.48434237", "0.48258126", "0.48097482", "0.47783247", "0.47734213", "0.47463354", "0.47230744", "0.4706276" ]
0.79424274
0
Initialize a BigBiGAN from the given TF Hub module.
def __init__(self, module_path='https://tfhub.dev/deepmind/bigbigan-resnet50/1', allow_growth=True): self._module = hub.Module(module_path) # encode graph self.enc_ph = self.make_encoder_ph() self.z_sample = self.encode_graph(self.enc_ph) self.z_mean = self.encode_graph(self.enc_ph, return_all_features=True)['z_mean'] # decode graph self.gen_ph = self.make_generator_ph() self.gen_samples = self.generate_graph(self.gen_ph, upsample=True) # session init = tf.global_variables_initializer() gpu_options = tf.GPUOptions(allow_growth=allow_growth) self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self.sess.run(init)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, BurnExpFireP, StartNNodes, ForwBurnProb, BackBurnProb, DecayProb, Take2AmbasPrb, OrphanPrb):\n _snap.TFfGGen_swiginit(self, _snap.new_TFfGGen(BurnExpFireP, StartNNodes, ForwBurnProb, BackBurnProb, DecayProb, Take2AmbasPrb, OrphanPrb))", "def create_tokenizer_from_hub_module(self):\n with tf.Graph().as_default():\n bert_module = hub.Module(self.params[\"BERT_MODEL_HUB\"])\n tokenization_info = bert_module(\n signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n return bert.tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)", "def initialize_module():\n global ngram_model\n ngram_model = load_model()", "def create_tokenizer_from_hub_module(bert_path):\n bert_module = hub.Module(bert_path)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n sess = tf.Session()\n vocab_file, do_lower_case = sess.run(\n [tokenization_info[\"vocab_file\"], tokenization_info[\"do_lower_case\"]]\n )\n sess.close()\n return FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)", "def __init__(self):\r\n torch.nn.Module.__init__(self)\r\n # Convolution and pooling layers of VGG-16.\r\n self.features = torchvision.models.vgg19_bn(pretrained=False).features\r\n self.features = torch.nn.Sequential(*list(self.features.children())\r\n [:-1]) # Remove pool5.\r\n # Linear classifier.\r\n self.fc = torch.nn.Linear(512**2, 11)", "def dump_tfhub_to_hdf5(module_path, hdf5_path, redownload=False):\n if os.path.exists(hdf5_path) and (not redownload):\n print('Loading BigGAN hdf5 file from:', hdf5_path)\n return h5py.File(hdf5_path, 'r')\n\n print('Loading BigGAN module from:', module_path)\n tf.reset_default_graph()\n hub.Module(module_path)\n print('Loaded BigGAN module from:', module_path)\n\n initializer = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(initializer)\n\n print('Saving BigGAN weights to :', hdf5_path)\n h5f = h5py.File(hdf5_path, 'w')\n for var in tf.global_variables():\n val = sess.run(var)\n h5f.create_dataset(var.name, data=val)\n print(f'Saving {var.name} with shape {val.shape}')\n h5f.close()\n return h5py.File(hdf5_path, 'r')", "def get_badgr_setup(self):\n\n badgr = BadgrLite(token_filename=self.sample_token_file)\n badgr.load_token()\n return badgr", "def create_tokenizer_from_hub_module(bert_model_hub):\n with tf.Graph().as_default():\n bert_module = hub.Module(bert_model_hub)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]])\n \n return bert.tokenization.FullTokenizer(\n vocab_file=vocab_file, do_lower_case=do_lower_case)", "def __init__(self, grid, grid_bnds, **kwargs):\n super().__init__(dynamic=True, **kwargs)\n self.grid = tf.Variable(initial_value=grid, trainable=True)\n self.grid_bnds = grid_bnds", "def create_tokenizer_from_hub_module(bert_model_hub):\n with tf.Graph().as_default():\n bert_module = hub.Module(bert_model_hub)\n tokenization_info = bert_module(signature=\"tokenization_info\",\n as_dict=True)\n with tf.Session() as sess:\n vocab_file, do_lower_case = sess.run([\n tokenization_info[\"vocab_file\"],\n tokenization_info[\"do_lower_case\"]\n ])\n\n return bert.tokenization.FullTokenizer(vocab_file=vocab_file,\n do_lower_case=do_lower_case)", "def 
create_tokenizer_from_hub_module(bert_path, sess):\n bert_module = hub.Module(bert_path)\n tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n vocab_file, do_lower_case = tf.print(\n [tokenization_info[\"vocab_file\"], tokenization_info[\"do_lower_case\"]]\n )\n\n return FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)", "def init_efficientnet(num_classes: int) -> nn.Module:\n\n return EfficientNet.from_pretrained('efficientnet-b1', num_classes=num_classes)", "def __init__(self, weight_path: str = None):\n self.model = BBBSeg()\n if weight_path is not None:\n self.model.load_weights(weight_path)", "def init_bn(bn):\n \n bn.bias.data.fill_(0.)\n bn.running_mean.data.fill_(0.)\n bn.weight.data.fill_(1.)\n bn.running_var.data.fill_(1.)", "def __init__(self):\n torch.nn.Module.__init__(self)\n # Convolution and pooling layers of VGG-16.\n self.features = torchvision.models.vgg16(pretrained=True).features\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-1]) # Remove pool5.\n # Linear classifier.\n self.fc = torch.nn.Linear(512**2, 36)", "def __init__(self, mb_info, gr_pin):\n if gr_pin not in [PMOD_GROVE_G1,\n PMOD_GROVE_G2,\n PMOD_GROVE_G3,\n PMOD_GROVE_G4]:\n raise ValueError(\"Group number can only be G1 - G4.\")\n\n self.microblaze = Pmod(mb_info, PMOD_GROVE_EAR_HR_PROGRAM)\n self.microblaze.write_mailbox(0, gr_pin[0])\n self.microblaze.write_blocking_command(CONFIG_IOP_SWITCH)", "def initialize(self):\n logging.info(\"Loading model.\")\n\n self._bleurt_graph = tf.Graph()\n with self._bleurt_graph.as_default():\n\n imported = tf.saved_model.load(self.checkpoint)\n bleurt_model_ops = imported.signatures[\"serving_default\"]\n self._bleurt_ops = bleurt_model_ops(\n input_ids=tf.compat.v1.placeholder(tf.int64, name=\"input_ids\"),\n input_mask=tf.compat.v1.placeholder(tf.int64, name=\"input_mask\"),\n segment_ids=tf.compat.v1.placeholder(tf.int64, name=\"segment_ids\"))\n\n init_op = tf.group(tf.compat.v1.global_variables_initializer(),\n tf.compat.v1.tables_initializer())\n\n self.session = tf.compat.v1.Session(graph=self._bleurt_graph)\n self.session.run(init_op)\n\n logging.info(\"Done.\")", "def __init__(self, backbone_name, config):\n\n backbone_config = Schema(\n {\n Required(\"input_shape\"): Schema((int, int, int)),\n Required(\"include_top\"): bool,\n Required(\"weights\"): str,\n Optional(\"alpha\"): float,\n }\n )\n\n config = backbone_config(config)\n\n if backbone_name == \"MobileNetV2\":\n self.model = tf.keras.applications.MobileNetV2(**config)\n elif backbone_name == \"ResNet50\":\n self.model = tf.keras.applications.ResNet50(**config)\n elif backbone_name == \"InceptionV3\":\n self.model = tf.keras.applications.InceptionV3(**config)\n\n # Remove Layers until Conv4\n for i, layer in enumerate(reversed(self.model.layers)):\n if backbone_name == \"ResNet50\" and layer._name == \"conv4_block6_out\":\n break\n elif (\n backbone_name == \"MobileNetV2\" and layer._name == \"block_13_expand_relu\"\n ):\n break\n else:\n self.model._layers.pop()\n\n self.model.layers[-1]._name = \"feature_map\"\n\n self.model = Model(\n self.model.input, self.model.layers[-1].output, name=\"Backbone\"\n )", "def vgg16_bn(pretrained=False, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGGBase(make_layers(), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))\n return model", "def __init__(self, b=0, drop_rate=0.5, pretrained=True):\n\n # Calls the super for the 
nn.Module.\n super(Classifier, self).__init__()\n\n # Sets the drop rate for the dropout layers.\n self.drop_rate = drop_rate\n\n # Loads the EfficientNet encoder.\n if pretrained:\n self.encoder = EfficientNet.from_pretrained(f\"efficientnet-b{str(b)}\")\n else:\n self.encoder = EfficientNet.from_name(f\"efficientnet-b{str(b)}\")\n self.encoder_pool = nn.AdaptiveAvgPool2d(1)\n\n # Defines a hidden layer.\n self.hidden = nn.Linear(2560, 512)\n\n # Defines the output layer of the neural network.\n self.classifier = nn.Linear(512, 2)", "def create_model(bert_model_hub, is_predicting, input_ids, input_mask,\n segment_ids, labels, num_labels):\n\n bert_module = hub.Module(bert_model_hub, trainable=True)\n bert_inputs = dict(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # Use \"pooled_output\" for classification tasks on an entire sentence.\n # Use \"sequence_outputs\" for token-level output.\n output_layer = bert_outputs[\"pooled_output\"]\n\n hidden_size = output_layer.shape[-1].value\n\n # Create our own layer to tune for politeness data.\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\"output_bias\", [num_labels],\n initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n\n # Dropout helps prevent overfitting\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n # Convert labels into one-hot encoding\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n predicted_labels = tf.squeeze(\n tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n # If we're predicting, we want predicted labels and the probabiltiies.\n if is_predicting:\n return (predicted_labels, log_probs)\n\n # If we're train/eval, compute loss between predicted and actual label\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, predicted_labels, log_probs)", "def backbone_generator(params):\n if params.architecture.backbone == 'resnet':\n resnet_params = params.resnet\n backbone_fn = resnet.Resnet(\n resnet_depth=resnet_params.resnet_depth,\n dropblock=dropblock_generator(params.dropblock),\n activation=params.batch_norm_activation.activation,\n batch_norm_activation=batch_norm_activation_generator(\n params.batch_norm_activation),\n init_drop_connect_rate=resnet_params.init_drop_connect_rate,\n space_to_depth_block_size=params.architecture.space_to_depth_block_size)\n elif params.architecture.backbone == 'spinenet':\n spinenet_params = params.spinenet\n backbone_fn = spinenet.spinenet_builder(\n model_id=spinenet_params.model_id,\n min_level=params.architecture.min_level,\n max_level=params.architecture.max_level,\n use_native_resize_op=spinenet_params.use_native_resize_op,\n activation=params.batch_norm_activation.activation,\n batch_norm_activation=batch_norm_activation_generator(\n params.batch_norm_activation),\n init_drop_connect_rate=spinenet_params.init_drop_connect_rate)\n elif params.architecture.backbone == 'spinenet_mbconv':\n spinenet_mbconv_params = params.spinenet_mbconv\n backbone_fn = spinenet_mbconv.spinenet_mbconv_builder(\n model_id=spinenet_mbconv_params.model_id,\n 
min_level=params.architecture.min_level,\n max_level=params.architecture.max_level,\n use_native_resize_op=spinenet_mbconv_params.use_native_resize_op,\n se_ratio=spinenet_mbconv_params.se_ratio,\n activation=params.batch_norm_activation.activation,\n batch_norm_activation=batch_norm_activation_generator(\n params.batch_norm_activation),\n init_drop_connect_rate=spinenet_mbconv_params.init_drop_connect_rate)\n elif 'efficientnet' in params.architecture.backbone:\n backbone_fn = efficientnet.Efficientnet(params.architecture.backbone)\n else:\n raise ValueError(\n 'Backbone model %s is not supported.' % params.architecture.backbone)\n\n return backbone_fn", "def __init__(self, bias_config, headers=None, label=None):\n self.analysis_config = bias_config.get_config()\n if headers is not None:\n self.analysis_config[\"headers\"] = headers\n if label is not None:\n self.analysis_config[\"label\"] = label", "def __init__(self, in_channels, BN, bn_eps=1e-5):\n super(InceptionE, self).__init__()\n self.branch1x1 = omth_blocks.conv_block(in_channels, filters=[320], kernel_sizes=[1], stride=[1],\n padding=[0], batch_norm=BN)\n\n self.branch3x3_1 = omth_blocks.conv_block(in_channels, filters=[384], kernel_sizes=[1], stride=[1],\n padding=[0], batch_norm=BN)\n self.branch3x3_2 = self.sub_inception_module(BN)\n\n self.branch3x3dbl_1 = omth_blocks.conv_block(in_channels, filters=[384, 384], kernel_sizes=[1, 3], stride=[1, 1],\n padding=[0, 1], batch_norm=BN)\n self.branch3x3dbl_2 = self.sub_inception_module(BN)\n\n self.branch_pool = omth_blocks.conv_block(in_channels, filters=[192], kernel_sizes=[1], stride=[1],\n padding=[0], batch_norm=BN)", "def _import_bh_(self):", "def bert_module_fn(is_training):\n\n input_ids = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_ids\")\n input_mask = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_mask\")\n token_type = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"segment_ids\")\n\n config = modeling.BertConfig.from_json_file(config_path)\n model = modeling.BertModel(config=config, is_training=is_training,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type)\n \n seq_output = model.all_encoder_layers[seq_layer]\n tok_output = model.all_encoder_layers[tok_layer]\n pool_output = model.get_pooled_output()\n\n config_file = tf.constant(value=config_path, dtype=tf.string, name=\"config_file\")\n vocab_file = tf.constant(value=vocab_path, dtype=tf.string, name=\"vocab_file\")\n lower_case = tf.constant(do_lower_case)\n\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, config_file)\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, vocab_file)\n \n input_map = {\"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": token_type}\n \n output_map = {\"pooled_output\": pool_output,\n \"sequence_output\": seq_output,\n \"token_output\": tok_output}\n\n output_info_map = {\"vocab_file\": vocab_file,\n \"do_lower_case\": lower_case}\n \n hub.add_signature(name=\"tokens\", inputs=input_map, outputs=output_map)\n hub.add_signature(name=\"tokenization_info\", inputs={}, outputs=output_info_map)", "def __init__(self, backboneNet, projection_head) -> None:\n super(SimCLR, self).__init__()\n self.Net = backboneNet\n self.projection_head = projection_head", "def __init__(self, **kwargs: dict) -> None:\n super(AnimeGAN_v2, self).__init__()\n self.model_name: str = 'animeGAN_v2'\n self.model_version: str = '1.0.0'\n \n 
self.pretrained_model_path: str = kwargs['pretrained_model_path']\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n torch.set_grad_enabled(False)\n\n self.model = Generator().eval().to(self.device)\n ckpt = torch.load(self.pretrained_model_path, map_location=self.device)\n self.model.load_state_dict(ckpt)", "def birealnet34(pretrained=False, **kwargs):\n model = BiRealNet(BasicBlock, [6, 8, 12, 6], **kwargs)\n return model", "def __init__(\n self,\n feature_map: FeatureMap,\n weight: Union[Parameter, Tensor],\n bias_module: Optional[Module] = None,\n input_transform: Optional[TInputTransform] = None,\n output_transform: Optional[TOutputTransform] = None,\n ):\n super().__init__()\n self.feature_map = feature_map\n if not isinstance(weight, Parameter):\n self.register_buffer(\"weight\", weight)\n self.weight = weight\n self.bias_module = bias_module\n self.input_transform = input_transform\n self.output_transform = output_transform" ]
[ "0.5314377", "0.52984065", "0.52972853", "0.5297207", "0.5283063", "0.52782154", "0.5270465", "0.5240721", "0.51944226", "0.51933235", "0.51820195", "0.51775867", "0.516507", "0.51644117", "0.51597863", "0.51499027", "0.5143408", "0.5113054", "0.51060724", "0.50795203", "0.50728816", "0.5050153", "0.5047092", "0.5038773", "0.5038377", "0.5037714", "0.502425", "0.5013792", "0.5011967", "0.5001376" ]
0.6927771
0
Creates a tf.placeholder with the dtype & shape of generator inputs.
def make_generator_ph(self): info = self._module.get_input_info_dict('generate')['z'] return tf.placeholder(dtype=info.dtype, shape=info.get_shape())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_placeholders(self):\n # \"None\" means the batches may have a variable batch size and length.\n self.x = tf.placeholder(tf.int64, shape=[None, None])", "def placeholder_input():\r\n source_placeholder = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 1, S_ENGLISH, T_ENGLISH), name='source')\r\n target_placeholder = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 1, S_FRENCH, T_FRENCH), name='target')\r\n training_placeholder = tf.placeholder(tf.bool, shape=[], name='training')\r\n return source_placeholder, target_placeholder, training_placeholder", "def _build_placeholder(self):\n self.x = tf.placeholder(tf.float32, (None, self.input_size))\n self.y = tf.placeholder(tf.float32, [None, self.nb_targets])\n self.is_training = tf.placeholder(tf.bool, name=\"is_training\")\n self.learning_rate_placholder = tf.placeholder(dtype=tf.float32)\n self.optimizer = self.optimizer(learning_rate=self.learning_rate_placholder)", "def __create_placeholders(self):\n with tf.name_scope(\"data\"):\n self.x = tf.placeholder(tf.float32, [self.batch_size, self.input_dim], 'x')", "def _create_placeholders(self, n_features, n_classes):\n\n self.input_data = tf.placeholder(\"float\", [None, n_features], name='x-input')\n self.input_labels = tf.placeholder(\"float\", [None, n_classes], name='y-input')", "def make_placeholders(data):\r\n\tdata_shape = data.feature_shape()\r\n\twith tf.name_scope(\"input\"):\r\n\t\tinput = tf.placeholder(tf.float32, [None, *data_shape], name=\"image\")\r\n\t\tlabel = tf.placeholder(tf.int32, [None], name=\"label\")\r\n\t\tval_unit = tf.constant(1.0, tf.float32)\r\n\t\trate_retain = tf.placeholder_with_default(val_unit, val_unit.shape, name=\"rate_retain\")\r\n\t\tval_false = tf.constant(False, tf.bool)\r\n\t\tnorm_switch = tf.placeholder_with_default(val_false, val_false.shape, name=\"norm_switch\")\r\n\treturn input, label, rate_retain, norm_switch", "def __init__(self, shape, dtype=tf.float32, name=None):\n super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name))", "def placeholder(self):\n shape = (None,) + self.shape\n return tf.placeholder(tf.float32, shape=shape)", "def placeholder_inputs(sample_size, dim_input):\n\tinput_placeholder = tf.placeholder(tf.float32, [1, dim_input])\n\tobj_placeholder = tf.placeholder(tf.float32, [sample_size, dim_input])\n\n\treturn input_placeholder, obj_placeholder", "def create_placeholders(self):\n # Create Placeholders of shape (n_x, n_y)\n X = tf.placeholder(tf.float32, shape=(None, self.in_seq_length), name=\"X\")\n Y = tf.placeholder(tf.float32, shape=(None, self.out_seq_length), name=\"Y\")\n # Indicator to indicate when training or testing\n is_training = tf.placeholder(tf.bool, shape=(), name='is_training')\n return X, Y, is_training", "def create_placeholder(n_x, n_y):\n X = tf.placeholder(tf.float32,shape=(n_x,None))\n Y = tf.placeholder(tf.float32, shape=(n_y,None))\n return X, Y", "def add_placeholders(self):\n ### YOUR CODE HERE\n #print self.config.num_steps\n self.input_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.num_steps))\n self.labels_placeholder = tf.placeholder(tf.int32, shape=[None, self.config.num_steps])\n self.dropout_placeholder = tf.placeholder(tf.float32)\n ### END YOUR CODE", "def create_placeholders(nx, classes):\n x = tf.placeholder('float', [None, nx], name='x')\n y = tf.placeholder('float', [None, classes], name='y')\n\n return x, y", "def _create_placeholders(self):\n\n\t\twith 
tf.name_scope(\"input_data\"):\n\t\t\tself.input_words=tf.placeholder(shape=(None,self.look_back), dtype=tf.int32,name='input_tokens')\n\t\twith tf.name_scope(\"output_data\"):\t\n\t\t\tself.output_words=tf.placeholder(shape=(None,self.look_back),dtype=tf.int32,name='output_tokens')", "def create_placeholders(nx, classes):\n x1 = tf.placeholder(\"float\", (None, nx), name=\"x\")\n y1 = tf.placeholder(\"float\", (None, classes), name=\"y\")\n\n return x1, y1", "def placeholder_inputs(batch_size, time_step, column_number):\n # Note that the shapes of the placeholders match the shapes of the full\n # unit and label tensors, except the first dimension is now batch_size\n # rather than the full size of the train or test data sets.\n units_placeholder = tf.placeholder(tf.float32, [batch_size, time_step, column_number])\n labels_placeholder = tf.placeholder(tf.float32, [batch_size])\n return units_placeholder, labels_placeholder", "def trainGenerator(self,):\n return tf.data.Dataset.from_generator(self.trainData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )", "def add_placeholders(self):\n ### YOUR CODE HERE\n self.input_placeholder=tf.placeholder(dtype=tf.float32,shape=(None, Config.n_features),name='input_placeholder')\n self.labels_placeholder=tf.placeholder(dtype=tf.int32,shape=(None, Config.n_classes), name='labels_placeholder')\n ### END YOUR CODE", "def create_placeholder(tensor, dtype=None):\r\n if isinstance(tensor, np.ndarray):\r\n if dtype is None:\r\n if tensor.dtype in {np.float32, np.float64, np.float16, np.float}:\r\n placeholder = tf.placeholder(tf.float32, tensor.shape)\r\n elif tensor.dtype in {np.int, np.int32, np.int64}:\r\n placeholder = tf.placeholder(tf.int32, tensor.shape)\r\n else:\r\n raise NotImplementedError('The dtype {} is not implemented.'.format(tensor.dtype))\r\n else:\r\n placeholder = tf.placeholder(dtype, tensor.shape)\r\n elif isinstance(tensor, tf.Tensor):\r\n raise TypeError('The input to placeholder cannot be tf.Tensor.')\r\n elif isinstance(tensor, (list, tuple)):\r\n if isinstance(dtype, (list, tuple)):\r\n placeholder = tuple([create_placeholder(\r\n single_tensor, single_dtype) for single_tensor, single_dtype in zip(tensor, dtype)])\r\n else:\r\n placeholder = tuple([create_placeholder(\r\n single_tensor, dtype) for single_tensor in tensor])\r\n else:\r\n raise NotImplementedError(\r\n 'Placeholder can only be created for numpy array, tf.Tensor, list or tuple')\r\n\r\n return placeholder", "def add_placeholders(self):\n self.input_placeholder = tf.placeholder(\n tf.float32,\n (None, self.max_seq_len, self.embedding_size),\n \"input\"\n )\n self.batch_seq_length_placeholder = tf.placeholder(tf.int32, (None, ),\n \"batch_seq_length\")\n self.batch_unique_count_placeholder = tf.placeholder(tf.float32, (None, ),\n \"batch_unique_count\")\n self.labels_placeholder = tf.placeholder(tf.int32, (None, ), \"labels\")\n\n self.dropout_placeholder = tf.placeholder(tf.float32, (), \"dropout\")\n self.lr_placeholder = tf.placeholder(tf.float32, (), \"dropout\")", "def add_placeholders(self):\n self.input_placeholder = tf.placeholder(\n tf.float32,\n (None, self.max_seq_len, self.embedding_size),\n \"input\"\n )\n self.batch_seq_length_placeholder = tf.placeholder(tf.int32, 
(None, ),\n \"batch_seq_length\")\n self.batch_unique_count_placeholder = tf.placeholder(tf.int32, (None, ),\n \"batch_unique_count\")\n self.labels_placeholder = tf.placeholder(tf.int32, (None, ), \"labels\")\n\n self.dropout_placeholder = tf.placeholder(tf.float32, (), \"dropout\")\n self.lr_placeholder = tf.placeholder(tf.float32, (), \"lr\")", "def placeholder_inputs(batch_size):\n # Note that the shapes of the placeholders match the shapes of the full\n # image and label tensors, except the first dimension is now batch_size\n # rather than the full size of the train or test data sets.\n images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,\n c3d_model.NUM_FRAMES_PER_CLIP,\n c3d_model.CROP_SIZE,\n c3d_model.CROP_SIZE,\n c3d_model.CHANNELS))\n labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))\n return images_placeholder, labels_placeholder", "def build_input(self):\n n_input = tf.placeholder(tf.int32, [None, None], name='n_input')\n t_input = tf.placeholder(tf.int32, [None, None], name='t_input')\n n_target = tf.placeholder(tf.int32, [None, None], name='n_target')\n t_target = tf.placeholder(tf.int32, [None, None], name='t_target')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n return n_input, t_input, n_target, t_target, keep_prob", "def add_placeholders(self):\n self.input_placeholder = tf.placeholder(tf.int32, shape = (self.config.batch_size, None, self.config.n_features))\n self.labels_placeholder = tf.placeholder(tf.int32, shape = (self.config.batch_size, None))\n self.mask_placeholder = tf.placeholder(tf.bool, shape = (self.config.batch_size, None))\n self.dropout_placeholder = tf.placeholder(tf.float32, shape = ())\n self.attribute_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.n_attributes))\n self.init_state_placeholder = tf.placeholder(tf.float32, [2, 2, None, self.config.hidden_size])", "def get_placeholders(batch_size, sequence_length, num_features):\n inputs = tf.placeholder(tf.float32, name='all_inputs',\n shape=[sequence_length,\n batch_size,\n num_features])\n targets = tf.placeholder(tf.float32, name='all_targets',\n shape=[sequence_length,\n batch_size,\n num_features])\n\n return tf.unpack(inputs), tf.unpack(targets)", "def placeholder_inputs(batch_size):\n images_ph = tf.placeholder(tf.float32, shape=(batch_size, FLAGS.img_s, FLAGS.img_s, 3), name='images_placeholder') \n labels_ph = tf.placeholder(tf.float32, shape=(batch_size, FLAGS.n_classes), name='labels_placeholder')\n keep_prob_ph = tf.placeholder(tf.float32, shape=(), name='keep_prob_placeholder')\n\n return images_ph, labels_ph, keep_prob_ph", "def add_placeholders(self):\n ### YOUR CODE HERE (~4-6 lines)\n self.input_placeholder = tf.placeholder(tf.int32, [None, self.max_length, Config.n_features])\n self.labels_placeholder = tf.placeholder(tf.int32, [None, self.max_length])\n self.length_placeholder = tf.placeholder(tf.int32, [None])\n self.mask_placeholder = tf.placeholder(tf.bool, [None, self.max_length])\n self.dropout_placeholder = tf.placeholder(tf.float32)\n ### END YOUR CODE", "def add_placeholders(self):\n self.input_placeholder = tf.placeholder(\n tf.float32, [None, self.config.seq_len, 1])\n self.target_placeholder = tf.placeholder(tf.float32, [None, 1])\n self.dropout_placeholder = tf.placeholder(tf.float32)\n self.Hin_placeholder = tf.placeholder(\n tf.float32, [None, self.config.state_size * self.config.num_layers])", "def _add_placeholders(self):\n\n # 1 = Batch Size\n self.input_placeholder = tf.placeholder( # Batch, Max_length\n 
tf.float32, shape=[None, self.dataset.max_input_size,\n self.config.word_vector_length], name=\"context\")\n self.input_length_placeholder = tf.placeholder(\n tf.int32, shape=[None], name=\"context_length\")\n self.end_of_sentences_placeholder = tf.placeholder(\n tf.int32, shape=[None, self.dataset.max_input_size], name=\"context_mask\")\n\n self.question_placeholder = tf.placeholder(\n tf.float32, shape=[None, self.dataset.max_question_size,\n self.config.word_vector_length], name=\"question\")\n self.question_length_placeholder = tf.placeholder(\n tf.int32, shape=[None], name=\"question_question\")\n\n self.labels_placeholder = tf.placeholder(\n tf.float32, shape=[None, self.dataset.vocab_size],\n name=\"answer\")\n #self.gate_placeholder = tf.placeholder(tf.float32, shape=[self.config.batch_size])", "def regressor_placeholders(input_size=30, output_size=2, num_step=9):\r\n ffs = tf.placeholder(tf.float32, shape=[num_step, None, input_size], name='flow_fields')\r\n labels = tf.placeholder(tf.float32, shape=[None,output_size], name='labels')\r\n return ffs, labels" ]
[ "0.73510855", "0.6947808", "0.68811524", "0.6843197", "0.6786487", "0.6739757", "0.67395514", "0.6737801", "0.6683461", "0.65602267", "0.6556576", "0.65447986", "0.65442115", "0.654158", "0.6479429", "0.64442724", "0.6391434", "0.63816166", "0.63666093", "0.6313228", "0.6293027", "0.629108", "0.62812567", "0.62691474", "0.62656325", "0.62644976", "0.62564075", "0.6240706", "0.62390083", "0.6232324" ]
0.7611539
0
Creates a tf.placeholder with the dtype & shape of encoder inputs.
def make_encoder_ph(self): info = self._module.get_input_info_dict('encode')['x'] return tf.placeholder(dtype=info.dtype, shape=info.get_shape())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_placeholders(self):\n\n\n # encoder part\n self._enc_batch = tf.placeholder(tf.int32, [config.batch_size, None], name='enc_batch')\n self._enc_lens = tf.placeholder(tf.int32, [config.batch_size], name='enc_lens')\n self._enc_padding_mask = tf.placeholder(tf.float32, [config.batch_size, None], name='enc_padding_mask')\n\n\n # decoder part\n self._dec_batch = tf.placeholder(tf.int32, [config.batch_size, None], name='dec_batch')\n self._dec_lens = tf.placeholder(tf.int32, [config.batch_size], name='dec_lens')\n self._dec_padding_mask = tf.placeholder(tf.float32, [config.batch_size, None], name='dec_padding_mask')\n\n #targets\n self._targets = tf.placeholder(tf.float32, [config.batch_size, 2], name='target')", "def _build_placeholder(self):\n self.x = tf.placeholder(tf.float32, (None, self.input_size))\n self.y = tf.placeholder(tf.float32, [None, self.nb_targets])\n self.is_training = tf.placeholder(tf.bool, name=\"is_training\")\n self.learning_rate_placholder = tf.placeholder(dtype=tf.float32)\n self.optimizer = self.optimizer(learning_rate=self.learning_rate_placholder)", "def _create_placeholders(self):\n # \"None\" means the batches may have a variable batch size and length.\n self.x = tf.placeholder(tf.int64, shape=[None, None])", "def _add_placeholders(self):\n hps = self._hps\n\n # encoder part\n self._enc_batch = tf.placeholder(tf.int32, [hps.batch_size, None], name='enc_batch')\n self._enc_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, None], name='enc_padding_mask')\n if self._hps.hier:\n self._enc_batch_sections = tf.placeholder(tf.int32, [hps.batch_size, hps.num_sections, None], name='enc_batch_sections')\n self._doc_sec_lens = tf.placeholder(tf.int32, [hps.batch_size]) # length of doc in num sections\n self._batch_sections_len = tf.placeholder(tf.int32, [hps.batch_size, hps.num_sections])\n self._enc_section_padding_mask = tf.placeholder(tf.int32, [hps.batch_size, hps.num_sections, None], name='enc_section_padding_mask')\n self._enc_lens = tf.placeholder(tf.int32, [hps.batch_size], name='enc_lens')\n if FLAGS.pointer_gen:\n self._enc_batch_extend_vocab = tf.placeholder(tf.int32, [hps.batch_size, None], name='enc_batch_extend_vocab')\n self._max_art_oovs = tf.placeholder(tf.int32, [], name='max_art_oovs')\n\n # decoder part\n self._dec_batch = tf.placeholder(tf.int32, [hps.batch_size, hps.max_dec_steps], name='dec_batch')\n self._target_batch = tf.placeholder(tf.int32, [hps.batch_size, hps.max_dec_steps], name='target_batch')\n self._dec_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, hps.max_dec_steps], name='padding_mask')\n\n if hps.mode==\"decode\" and hps.coverage:\n self.prev_coverage = tf.placeholder(tf.float32, [hps.batch_size, None], name='prev_coverage')", "def autoencoder(dimensions=[784, 512, 256, 64]):\n # %% input to the network\n x = tf.placeholder(tf.float32, [None, dimensions[0]], name='x')\n current_input = x\n\n # %% Build the encoder\n encoder = []\n for layer_i, n_output in enumerate(dimensions[1:]):\n n_input = int(current_input.get_shape()[1])\n W = tf.Variable(\n tf.random_uniform([n_input, n_output],\n -1.0 / math.sqrt(n_input),\n 1.0 / math.sqrt(n_input)))\n b = tf.Variable(tf.zeros([n_output]))\n encoder.append(W)\n output = tf.nn.tanh(tf.matmul(current_input, W) + b)\n current_input = output\n\n # Latent representation (embedding, neural coding)\n z = current_input\n encoder.reverse()\n\n # Build the decoder using the same weights\n for layer_i, n_output in enumerate(dimensions[:-1][::-1]):\n W = 
tf.transpose(encoder[layer_i])\n b = tf.Variable(tf.zeros([n_output]))\n output = tf.nn.tanh(tf.matmul(current_input, W) + b)\n current_input = output\n\n # Now have the reconstruction through the network\n y = current_input\n\n # Cost function measures pixel-wise difference\n cost = tf.reduce_sum(tf.square(y - x))\n return {'x': x, 'z': z, 'y': y, 'cost': cost}", "def _build_encoder(self, hparams, is_training):\n with tf.variable_scope(\"trajectory_encoder\"):\n with tf.name_scope(\"source_placeholder\"):\n input_phs = list_ops.list_placeholder(self.num_gpu, (None, self.input_length, self.input_dims), tf.float32)\n for ph in input_phs:\n tf.add_to_collection('placeholder', ph)\n \n if hparams.encoder_type == \"rnn\":\n net = input_phs\n with tf.variable_scope(\"projection\"):\n net = self._build_input_projection(hparams, net, is_training)\n\n with tf.name_scope(\"batch_time_transpose\"):\n net = list_ops.list_transpose(net, perm=[1, 0, 2])\n\n with tf.variable_scope(\"rnn\"):\n net, state = self._build_rnn_encoder(hparams, net, is_training)\n\n if hparams.relu_reconfiguration:\n with tf.variable_scope(\"reconfiguration\"):\n net = list_ops.list_dense_with_bn(net,\n hparams.cnn_input_projector_filters[-1],\n is_training,\n self.bn_decay,\n seed=self.random_seed)\n\n elif hparams.encoder_type == \"cnn\":\n net = self._build_cnn_encoder(hparams, input_phs, is_training)\n state = None\n \n else:\n raise ValueError(\"Unknown encoder type {:s}.\".format(hparams.encoder_type))\n\n return net, state", "def placeholder_input():\r\n source_placeholder = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 1, S_ENGLISH, T_ENGLISH), name='source')\r\n target_placeholder = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 1, S_FRENCH, T_FRENCH), name='target')\r\n training_placeholder = tf.placeholder(tf.bool, shape=[], name='training')\r\n return source_placeholder, target_placeholder, training_placeholder", "def add_placeholders(self):\n ### YOUR CODE HERE\n self.input_placeholder=tf.placeholder(dtype=tf.float32,shape=(None, Config.n_features),name='input_placeholder')\n self.labels_placeholder=tf.placeholder(dtype=tf.int32,shape=(None, Config.n_classes), name='labels_placeholder')\n ### END YOUR CODE", "def __create_placeholders(self):\n with tf.name_scope(\"data\"):\n self.x = tf.placeholder(tf.float32, [self.batch_size, self.input_dim], 'x')", "def add_placeholders(self):\n ### YOUR CODE HERE\n #print self.config.num_steps\n self.input_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.num_steps))\n self.labels_placeholder = tf.placeholder(tf.int32, shape=[None, self.config.num_steps])\n self.dropout_placeholder = tf.placeholder(tf.float32)\n ### END YOUR CODE", "def add_placeholders(self):\n self.input_placeholder = tf.placeholder(\n tf.float32,\n (None, self.max_seq_len, self.embedding_size),\n \"input\"\n )\n self.batch_seq_length_placeholder = tf.placeholder(tf.int32, (None, ),\n \"batch_seq_length\")\n self.batch_unique_count_placeholder = tf.placeholder(tf.float32, (None, ),\n \"batch_unique_count\")\n self.labels_placeholder = tf.placeholder(tf.int32, (None, ), \"labels\")\n\n self.dropout_placeholder = tf.placeholder(tf.float32, (), \"dropout\")\n self.lr_placeholder = tf.placeholder(tf.float32, (), \"dropout\")", "def _create_placeholders(self, n_features, n_classes):\n\n self.input_data = tf.placeholder(\"float\", [None, n_features], name='x-input')\n self.input_labels = tf.placeholder(\"float\", [None, n_classes], name='y-input')", "def __init__(self, shape, dtype=tf.float32, 
name=None):\n super().__init__(tf.placeholder(dtype, [None] + list(shape), name=name))", "def add_placeholders(self):\n self.input_placeholder = tf.placeholder(\n tf.float32,\n (None, self.max_seq_len, self.embedding_size),\n \"input\"\n )\n self.batch_seq_length_placeholder = tf.placeholder(tf.int32, (None, ),\n \"batch_seq_length\")\n self.batch_unique_count_placeholder = tf.placeholder(tf.int32, (None, ),\n \"batch_unique_count\")\n self.labels_placeholder = tf.placeholder(tf.int32, (None, ), \"labels\")\n\n self.dropout_placeholder = tf.placeholder(tf.float32, (), \"dropout\")\n self.lr_placeholder = tf.placeholder(tf.float32, (), \"lr\")", "def construct(input_placeholder):\n\t\t###############################\n\t\t# MODEL ARCHITECTURE #\n\t\t###############################\n\t\t# First block of convolutions\n\t\twith tf.variable_scope(\"conv_1\"):\n\t\t\tconv_1_1 = conv2d(input_placeholder,\n\t\t\t\tinput_channels=1,\n\t\t\t\toutput_channels=64,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_1_2 = conv2d(conv_1_1,\n\t\t\t\tinput_channels=64,\n\t\t\t\toutput_channels=64,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\t\t\t# TODO batchn\n\t\t\tbn_1 = conv_1_2\n\n\t\t# Second block of convolutions.\n\t\twith tf.variable_scope(\"conv2\"):\n\t\t\tconv_2_1 = conv2d(bn_1,\n\t\t\t\tinput_channels=64,\n\t\t\t\toutput_channels=128,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_2_2 = conv2d(conv_2_1,\n\t\t\t\tinput_channels=128,\n\t\t\t\toutput_channels=128,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\n\t\t\t# TODO batchn\n\t\t\tbn_2 = conv_2_2\n\n\t\twith tf.variable_scope(\"conv3\"):\n\t\t\tconv_3_1 = conv2d(bn_2,\n\t\t\t\tinput_channels=128,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_3_2 = conv2d(conv_3_1,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_3_3 = conv2d(conv_3_2,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\t\t\t# TODO batchn\n\t\t\tbn_3 = conv_3_3\n\n\n\t\t# DILATED LAYERS:\n\t\twith tf.variable_scope(\"conv4\"):\n\t\t\tconv_4_1 = conv2d(bn_3,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_4_2 = conv2d(conv_4_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_4_3 = conv2d(conv_4_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\t# TODO batchn\n\t\t\tbn_4 = conv_4_3\n\n\t\twith tf.variable_scope(\"conv5\"):\n\t\t\tconv_5_1 = conv2d(bn_4,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_5_2 = conv2d(conv_5_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_5_3 = conv2d(conv_5_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\t# TODO batchn\n\t\t\tbn_5 = conv_5_3\n\n\t\twith tf.variable_scope(\"conv6\"):\n\t\t\tconv_6_1 = conv2d(bn_5,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_6_2 = 
conv2d(conv_6_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_6_3 = conv2d(conv_6_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\t# TODO batchn\n\t\t\tbn_6 = conv_6_3\n\n\n\t\twith tf.variable_scope(\"conv7\"):\n\t\t\tconv_7_1 = conv2d(bn_6,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_7_2 = conv2d(conv_7_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_7_3 = conv2d(conv_7_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\t# TODO batchn\n\t\t\tbn_7 = conv_7_3\n\n\n\t\twith tf.variable_scope(\"conv8\"):\n\t\t\tconv_8_1 = deconv2d(bn_7,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_size=[None, 64, 64, 256],\n\t\t\t\tkernel_size=4,\n\t\t\t\tstride=2,\n\t\t\t\tpad=1)\n\t\t\tconv_8_2 = conv2d(conv_8_1,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_8_3 = conv2d(conv_8_2,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=1)\n\t\t\tconv_8_313 = conv2d(conv_8_3,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=313,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=1)\n\n\n\t\treturn conv_8_313", "def build_inputs(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n\n elif self.mode == \"test\":\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n \n else:\n # Prefetch serialized tf.Example protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader,\n self.config.input_file_pattern,\n shuffle=self.config.shuffle_input_data,\n capacity=self.config.input_queue_capacity,\n num_reader_threads=self.config.num_input_reader_threads)\n\n # Deserialize a batch.\n serialized = input_queue.dequeue_many(self.config.batch_size)\n s1, s2, label = input_ops.parse_example_batch(\n serialized)\n\n encode_ids1 = s1.ids\n encode_ids2 = s2.ids\n\n encode_mask1 = s1.mask\n encode_mask2 = s2.mask\n \n\n\n self.encode_ids1 = encode_ids1\n self.encode_ids2 = encode_ids2\n\n self.encode_mask1 = encode_mask1\n self.encode_mask2 = encode_mask2\n\n self.label = label", "def make_placeholders(data):\r\n\tdata_shape = data.feature_shape()\r\n\twith tf.name_scope(\"input\"):\r\n\t\tinput = tf.placeholder(tf.float32, [None, *data_shape], name=\"image\")\r\n\t\tlabel = tf.placeholder(tf.int32, [None], name=\"label\")\r\n\t\tval_unit = tf.constant(1.0, tf.float32)\r\n\t\trate_retain = tf.placeholder_with_default(val_unit, val_unit.shape, name=\"rate_retain\")\r\n\t\tval_false = tf.constant(False, tf.bool)\r\n\t\tnorm_switch = tf.placeholder_with_default(val_false, val_false.shape, name=\"norm_switch\")\r\n\treturn input, label, rate_retain, norm_switch", "def _add_input_encoder(self, inputs, 
seq_len):\n with tf.variable_scope(\"encoder\"):\n cell_fw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(config.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n ((fw_states, bw_states), (final_fw, final_bw)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)\n\n return fw_states, bw_states, final_fw, final_bw", "def build_encoder(self):\n with tf.variable_scope(\"encoder\") as scope:\n length1 = tf.to_int32(tf.reduce_sum(self.encode_mask1, 1), name=\"length1\")\n\n if self.config.bidirectional_encoder:\n if self.config.encoder_dim % 2:\n raise ValueError(\n \"encoder_dim must be even when using a bidirectional encoder.\")\n num_units = self.config.encoder_dim // 2\n cell_fw = self._initialize_gru_cell(num_units) # Forward encoder\n cell_bw = self._initialize_gru_cell(num_units) # Backward encoder\n _, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=self.encode_emb1,\n sequence_length=length1,\n dtype=tf.float32,\n scope=scope)\n thought_vectors1 = tf.concat(states, 1, name=\"thought_vectors1\")\n else:\n cell = self._initialize_gru_cell(self.config.encoder_dim)\n _, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=self.encode_emb1,\n sequence_length=length1,\n dtype=tf.float32,\n scope=scope)\n # Use an identity operation to name the Tensor in the Graph.\n thought_vectors1 = tf.identity(state, name=\"thought_vectors1\")\n \n scope.reuse_variables()\n\n length2 = tf.to_int32(tf.reduce_sum(self.encode_mask2, 1), name=\"length2\")\n\n if self.config.bidirectional_encoder:\n if self.config.encoder_dim % 2:\n raise ValueError(\n \"encoder_dim must be even when using a bidirectional encoder.\")\n num_units = self.config.encoder_dim // 2\n cell_fw = self._initialize_gru_cell(num_units) # Forward encoder\n cell_bw = self._initialize_gru_cell(num_units) # Backward encoder\n _, states = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=self.encode_emb2,\n sequence_length=length2,\n dtype=tf.float32,\n scope=scope)\n thought_vectors2 = tf.concat(states, 1, name=\"thought_vectors2\")\n else:\n cell = self._initialize_gru_cell(self.config.encoder_dim)\n _, state = tf.nn.dynamic_rnn(\n cell=cell,\n inputs=self.encode_emb2,\n sequence_length=length2,\n dtype=tf.float32,\n scope=scope)\n # Use an identity operation to name the Tensor in the Graph.\n thought_vectors2 = tf.identity(state, name=\"thought_vectors2\")\n\n self.thought_vectors1 = thought_vectors1\n self.thought_vectors2 = thought_vectors2", "def _create_placeholders(self):\n\n\t\twith tf.name_scope(\"input_data\"):\n\t\t\tself.input_words=tf.placeholder(shape=(None,self.look_back), dtype=tf.int32,name='input_tokens')\n\t\twith tf.name_scope(\"output_data\"):\t\n\t\t\tself.output_words=tf.placeholder(shape=(None,self.look_back),dtype=tf.int32,name='output_tokens')", "def _build_encoder(self, hparams):\n\t\tnum_layers = self.num_encoder_layers\n\t\tnum_redisual_layers = self.num_encoder_residual_layers\n\n\t\twith tf.variable_scope('encoder') as _:\n\t\t\tself.encoder_emb_inp = tf.nn.embedding_lookup(self.embedding_encoder, self.encoder_input_data)\n\n\t\t\tif hparams.encoder_type == 'uni':\n\t\t\t\t_info('num_layers = {} num_residual_layers = {}'.format(num_layers, num_redisual_layers))\n\t\t\t\t# 1. 
build a list of cells\n\t\t\t\tcell = self._build_encoder_cell(hparams, num_layers, num_redisual_layers)\n\t\t\t\t# 2. forward\n\t\t\t\t# encoder_outputs: [batch, time, hidden]\n\t\t\t\t# encoder_state: ([batch, hidden] for _ in range(layers))\n\t\t\t\tencoder_outputs, encoder_state = tf.nn.dynamic_rnn(\n\t\t\t\t\tcell,\n\t\t\t\t\tself.encoder_emb_inp,\n\t\t\t\t\tdtype=self.dtype,\n\t\t\t\t\tsequence_length=self.seq_length_encoder_input_data,\n\t\t\t\t\tswap_memory=True)\n\t\t\telif hparams.encoder_type == 'bi':\n\t\t\t\tif not num_layers % 2 == 0:\n\t\t\t\t\t_error('Bi-directional requires num_layers={} should be divided by 2'.format(num_layers))\n\t\t\t\t\traise ValueError\n\t\t\t\tnum_bi_layers = int(num_layers / 2)\n\t\t\t\tnum_bi_residual_layers = num_bi_layers - 1\n\t\t\t\t_info(' num_bi_layers={} num_bi_residual_layers={}'.format(num_bi_layers, num_bi_residual_layers))\n\n\t\t\t\tcell_fw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\n\t\t\t\tcell_bw = self._build_encoder_cell(hparams, num_bi_layers, num_bi_residual_layers)\n\n\t\t\t\t# bi_outputs: (fw, bw): fw: [batch, seq, hidden]\n\t\t\t\t# bi_state: (fw, bw): fw : [[batch, hidden] for _ in range(layers)]\n\t\t\t\tbi_outputs, bi_state = tf.nn.bidirectional_dynamic_rnn(\n\t\t\t\t\tcell_fw,\n\t\t\t\t\tcell_bw,\n\t\t\t\t\tself.encoder_emb_inp,\n\t\t\t\t\tdtype=self.dtype,\n\t\t\t\t\tsequence_length=self.seq_length_encoder_input_data,\n\t\t\t\t\tswap_memory=True)\n\n\t\t\t\tif num_bi_layers == 1:\n\t\t\t\t\tencoder_state = bi_state\n\t\t\t\telse:\n\t\t\t\t\tencoder_state = []\n\t\t\t\t\tfor layer_id in range(num_bi_layers):\n\t\t\t\t\t\tencoder_state.append(bi_state[0][layer_id])\t\t# fw state in layer id\n\t\t\t\t\t\tencoder_state.append(bi_state[1][layer_id])\t\t# bw state in layer id\n\t\t\t\t\tencoder_state = tuple(encoder_state)\n\t\t\t\tencoder_outputs = tf.concat(bi_outputs, -1)\t\t# [batch, seq, hidden * 2]\n\t\t\telse:\n\t\t\t\t_error('Unknow encoder type: {}'.format(hparams.encoder_type))\n\t\t\t\traise ValueError\n\t\t\n\t\treturn encoder_outputs, encoder_state", "def test_encoder(device='/gpu:0'):\n tf.reset_default_graph()\n B, H, W, C = 64, 256, 256, 1\n latent_dim = 16\n with tf.device(device):\n gray_imgs = tf.zeros((B, H, W, C))\n latent_samples, latent_mean, latent_sd = encoder(gray_imgs, latent_dim)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n latent_samples_np, latent_mean_np, latent_sd_np = sess.run([latent_samples, latent_mean, latent_sd])\n print('Output shape should be (%d, %d)' % (B, latent_dim))\n print('latent_samples shape: ' + str(latent_samples_np.shape))\n print('latent_mean shape: ' + str(latent_mean_np.shape))\n print('latent_sd shape: ' + str(latent_sd_np.shape))", "def add_placeholders(self):\n ### YOUR CODE HERE (~4-6 lines)\n self.input_placeholder = tf.placeholder(tf.int32, [None, self.max_length, Config.n_features])\n self.labels_placeholder = tf.placeholder(tf.int32, [None, self.max_length])\n self.length_placeholder = tf.placeholder(tf.int32, [None])\n self.mask_placeholder = tf.placeholder(tf.bool, [None, self.max_length])\n self.dropout_placeholder = tf.placeholder(tf.float32)\n ### END YOUR CODE", "def build_graph(self):\n\n\n\n self.inputs.append( #uint8\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='input/lr')) \n\n self.label.append(\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='label/hr'))", "def placeholder(self):\n shape = (None,) + self.shape\n return 
tf.placeholder(tf.float32, shape=shape)", "def add_placeholders(self):\n \n with tf.variable_scope(\"Inputs\"):\n \n self.X_input = tf.placeholder(\"float\", [None, self.dim_input], name='X_input')\n \n self.T = tf.placeholder(\"float\", [None], name='T')\n self.O = tf.placeholder(\"float\", [None], name='O')\n self.At_Risk = tf.placeholder(\"float\", [None], name='At_Risk')\n \n # type conversions\n self.T = tf.cast(self.T, tf.float32)\n self.O = tf.cast(self.O, tf.int32)\n self.At_Risk = tf.cast(self.At_Risk, tf.int32)", "def encode(self):\n with tf.name_scope(\"encode\"):\n self.encoder_inputs = tf.layers.dense(\n inputs=self.encoder_inputs,\n units=self.options['hidden_size'], activation=None, use_bias=True,\n kernel_initializer=tf.keras.initializers.he_normal(seed=None),\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, trainable=True,\n name=None, reuse=None)\n self.encoder_inputs = tf.layers.batch_normalization(self.encoder_inputs,\n axis=-1,\n momentum=0.99,\n epsilon=0.001,\n center=True,\n scale=True,\n beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(),\n training=self.is_training,\n trainable=True,\n renorm=False,\n renorm_momentum=0.99)\n # Prepare inputs to the layer stack by adding positional encodings and\n # applying dropout.\n # embedded_inputs = self.embedding_softmax_layer(inputs)\n #\n inputs_padding = transformer_model_utils.get_padding(tf.cast(\n tf.reduce_max(100*self.encoder_inputs, [-1]),\n dtype=tf.int32))\n\n with tf.name_scope(\"add_pos_encoding\"):\n length = tf.shape(self.encoder_inputs)[1]\n pos_encoding = transformer_model_utils.get_position_encoding(\n length, self.options[\"hidden_size\"])\n encoder_inputs = self.encoder_inputs + pos_encoding\n\n if self.is_training:\n encoder_inputs = tf.nn.dropout(\n encoder_inputs, 1 - self.options[\"layer_postprocess_dropout\"])\n\n return self.encoder_stack(encoder_inputs, self.attention_bias, inputs_padding)", "def generate_encoder(input_shape: Tuple[int]=(100,1), lstm_units:int = 100, latent_dim:int=20)->tf.keras.Model:\n\n input = tf.keras.layers.Input(shape=input_shape , name=\"encoder_input\")\n #create a bi-directional LSTM layer\n encoded = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=lstm_units, return_sequences=True))(input)\n encoded = tf.keras.layers.Flatten()(encoded)\n encoded = tf.keras.layers.Dense(units=latent_dim, name=\"latent_encoding\")(encoded)\n encoded = tf.keras.layers.Reshape(target_shape=(latent_dim, 1) , name=\"output_encoder\")(encoded)\n\n model = tf.keras.Model(inputs=input, outputs=encoded, name=\"encoder\")\n\n return model", "def _add_encoder(self, encoder_inputs, seq_len):\n with tf.variable_scope('encoder'):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n (encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, encoder_inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)\n encoder_outputs = tf.concat(axis=2, values=encoder_outputs) # concatenate the forwards and backwards states\n return encoder_outputs, fw_st, bw_st", "def add_placeholders(self):\n self.input_placeholder = tf.placeholder(\n tf.float32, 
[None, self.config.seq_len, 1])\n self.target_placeholder = tf.placeholder(tf.float32, [None, 1])\n self.dropout_placeholder = tf.placeholder(tf.float32)\n self.Hin_placeholder = tf.placeholder(\n tf.float32, [None, self.config.state_size * self.config.num_layers])" ]
[ "0.67486584", "0.6601982", "0.6598257", "0.6536034", "0.6417243", "0.6407907", "0.6378354", "0.6350807", "0.63072497", "0.6267904", "0.62510544", "0.6248996", "0.62423813", "0.62370676", "0.62248755", "0.62113446", "0.6210726", "0.6207823", "0.62027955", "0.61931306", "0.6176293", "0.61503005", "0.6140225", "0.61391646", "0.6125805", "0.61223197", "0.61080146", "0.60816675", "0.60697323", "0.60565716" ]
0.767189
0
Interface to numba-jitted Stokes Kernels
def Stokes_Kernel_Apply_numba(source, target, forces=None, dipstr=None, dipvec=None, weights=None): weights = 1.0 if weights is None else weights weighted_weights1 = 0.25*weights/np.pi weighted_weights2 = weights/np.pi sx = source[0] sy = source[1] tx = target[0] ty = target[1] velocity = np.zeros([2,target.shape[1]], dtype=float) u = velocity[0] v = velocity[1] ifforces = forces is not None ifdipole = dipstr is not None zero_vec = np.zeros(source.shape[1], dtype=float) fx = zero_vec if forces is None else forces[0]*weighted_weights1 fy = zero_vec if forces is None else forces[1]*weighted_weights1 dipx = zero_vec if dipstr is None else dipstr[0]*weighted_weights2 dipy = zero_vec if dipstr is None else dipstr[1]*weighted_weights2 nx = zero_vec if dipvec is None else dipvec[0] ny = zero_vec if dipvec is None else dipvec[1] _SKANB(sx, sy, tx, ty, fx, fy, dipx, dipy, nx, ny, u, v, ifforces, ifdipole) return velocity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tuto_kernel_overview(optimize=True, plot=True):\r\n ker1 = GPy.kern.rbf(1) # Equivalent to ker1 = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.)\r\n ker2 = GPy.kern.rbf(input_dim=1, variance = .75, lengthscale=2.)\r\n ker3 = GPy.kern.rbf(1, .5, .5)\r\n\r\n print ker2\r\n\r\n if plot:\r\n ker1.plot()\r\n ker2.plot()\r\n ker3.plot()\r\n\r\n k1 = GPy.kern.rbf(1,1.,2.)\r\n k2 = GPy.kern.Matern32(1, 0.5, 0.2)\r\n\r\n # Product of kernels\r\n k_prod = k1.prod(k2) # By default, tensor=False\r\n k_prodtens = k1.prod(k2,tensor=True)\r\n\r\n # Sum of kernels\r\n k_add = k1.add(k2) # By default, tensor=False\r\n k_addtens = k1.add(k2,tensor=True)\r\n\r\n k1 = GPy.kern.rbf(1,1.,2)\r\n k2 = GPy.kern.periodic_Matern52(1,variance=1e3, lengthscale=1, period = 1.5, lower=-5., upper = 5)\r\n\r\n k = k1 * k2 # equivalent to k = k1.prod(k2)\r\n print k\r\n\r\n # Simulate sample paths\r\n X = np.linspace(-5,5,501)[:,None]\r\n Y = np.random.multivariate_normal(np.zeros(501),k.K(X),1)\r\n\r\n k1 = GPy.kern.rbf(1)\r\n k2 = GPy.kern.Matern32(1)\r\n k3 = GPy.kern.white(1)\r\n\r\n k = k1 + k2 + k3\r\n print k\r\n\r\n k.constrain_positive('.*var')\r\n k.constrain_fixed(np.array([1]),1.75)\r\n k.tie_params('.*len')\r\n k.unconstrain('white')\r\n k.constrain_bounded('white',lower=1e-5,upper=.5)\r\n print k\r\n\r\n k_cst = GPy.kern.bias(1,variance=1.)\r\n k_mat = GPy.kern.Matern52(1,variance=1., lengthscale=3)\r\n Kanova = (k_cst + k_mat).prod(k_cst + k_mat,tensor=True)\r\n print Kanova\r\n\r\n # sample inputs and outputs\r\n X = np.random.uniform(-3.,3.,(40,2))\r\n Y = 0.5*X[:,:1] + 0.5*X[:,1:] + 2*np.sin(X[:,:1]) * np.sin(X[:,1:])\r\n\r\n # Create GP regression model\r\n m = GPy.models.GPRegression(X, Y, Kanova)\r\n\r\n if plot:\r\n fig = pb.figure(figsize=(5,5))\r\n ax = fig.add_subplot(111)\r\n m.plot(ax=ax)\r\n\r\n pb.figure(figsize=(20,3))\r\n pb.subplots_adjust(wspace=0.5)\r\n axs = pb.subplot(1,5,1)\r\n m.plot(ax=axs)\r\n pb.subplot(1,5,2)\r\n pb.ylabel(\"= \",rotation='horizontal',fontsize='30')\r\n axs = pb.subplot(1,5,3)\r\n m.plot(ax=axs, which_parts=[False,True,False,False])\r\n pb.ylabel(\"cst +\",rotation='horizontal',fontsize='30')\r\n axs = pb.subplot(1,5,4)\r\n m.plot(ax=axs, which_parts=[False,False,True,False])\r\n pb.ylabel(\"+ \",rotation='horizontal',fontsize='30')\r\n axs = pb.subplot(1,5,5)\r\n pb.ylabel(\"+ \",rotation='horizontal',fontsize='30')\r\n m.plot(ax=axs, which_parts=[False,False,False,True])\r\n\r\n return(m)", "def nlm_fast(data,FS,BS,sigma,dev = None, proc = None):\n\n if dev is None:\n dev = imgtools.__DEFAULT_OPENCL_DEVICE__\n\n if dev is None:\n raise ValueError(\"no OpenCLDevice found...\")\n\n if proc is None:\n proc = OCLProcessor(dev,absPath(\"kernels/nlm_fast.cl\"),options=\"-D FS=%i -D BS=%i\"%(FS,BS))\n\n img = dev.createImage_like(data)\n\n distImg = dev.createImage_like(data)\n\n distImg = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n tmpImg = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n tmpImg2 = dev.createImage_like(data, mem_flags = \"READ_WRITE\")\n\n accBuf = dev.createBuffer(data.size,\n mem_flags = cl.mem_flags.READ_WRITE,\n dtype = np.float32)\n\n weightBuf = dev.createBuffer(data.size,\n mem_flags = cl.mem_flags.READ_WRITE,\n dtype = np.float32)\n\n\n dev.writeImage(img,data);\n dev.writeBuffer(weightBuf,np.zeros_like(data,dtype=np.float32));\n\n for dx in range(BS+1):\n for dy in range(-BS,BS+1):\n proc.runKernel(\"dist\",img.shape,None,img,tmpImg,np.int32(dx),np.int32(dy))\n 
proc.runKernel(\"convolve\",img.shape,None,tmpImg,tmpImg2,np.int32(1))\n proc.runKernel(\"convolve\",img.shape,None,tmpImg2,distImg,np.int32(2))\n\n proc.runKernel(\"computePlus\",img.shape,None,img,distImg,accBuf,weightBuf,\n np.int32(img.shape[0]),np.int32(img.shape[1]),\n np.int32(dx),np.int32(dy),np.float32(sigma))\n\n if any([dx,dy]):\n proc.runKernel(\"computeMinus\",img.shape,None,img,distImg,accBuf,weightBuf,\n np.int32(img.shape[0]),np.int32(img.shape[1]),\n np.int32(dx),np.int32(dy),np.float32(sigma))\n\n acc = dev.readBuffer(accBuf,dtype=np.float32).reshape(data.shape)\n weights = dev.readBuffer(weightBuf,dtype=np.float32).reshape(data.shape)\n\n return acc/weights", "def miniBatchKSVRG(loss,X,C,y,yC,kernel,la,Nepochs,mratio=2,tau = None,Kmax = 1,option = 1,om1 = None,memToUse = None,useGPU = None,cobj = dp.cobjK()):\n cobj.start()\n \n ################################################################################################\n #Creating Kernel matrices and functions\n ################################################################################################\n \n n = X.size(0)\n m = C.size(0)\n d = X.size(1)\n if isinstance(useGPU,type(None)):\n useGPU = torch.cuda.is_available()\n if useGPU :\n torch.cuda.empty_cache()\n if isinstance(memToUse,type(None)):\n memToUse = 0.9*psutil.virtual_memory().available\n print(\"no memory limit specified. At most {} GB of \\\n RAM will be used\".format(memToUse/10**9))\n \n factKnmP, kern, freeDoubles, freeGPU = nm.computeMemory(memToUse, kernel, d, n, m, useGPU)\n \n print(\"there is {} GiB free on the GPU \".format(freeGPU*8/1024**3))\n \n T = nm.createT(kern, C, freeGPU)\n cholT,cholTt = lambda x : nm.tr_solve(x,T,freeGPU),\\\n lambda x: nm.tr_solve(x,T,freeGPU,transpose = True)\n \n KnmP = factKnmP(X,C)\n \n l_fun,l_grad = l_fg(loss,n)\n KnmP_fun,KnmP_grad = lambda u,lobj : KnmP(u,l_fun,lobj), \\\n lambda u,lobj : KnmP(u,l_grad,lobj), \\\n \n \n \n ################################################################################################\n #Setting parameters of the method \n ################################################################################################\n \n #batch size\n if isinstance(tau,type(None)):\n tau = m\n \n #number of iterations (divide by batch size)\n niterBatch = (mratio*n)//tau + 1\n print(\"--- m = {}, tau = {} ---\".format(m,tau))\n \n #Smoothness constant\n if isinstance(loss.Lmax,type(None)):\n Lmax = Kmax\n else:\n Lmax = loss.Lmax*Kmax\n \n #om1 and om2, parameters of Katyusha acceleration\n om2 = 1/(2*tau)\n if isinstance(om1,type(None)):\n if m >= tau:\n om1 = float(min(np.sqrt((8*la*m*tau)/(3*Lmax)),1)*om2)\n else:\n om1 = float(min(np.sqrt((2*la)/(3*Lmax)),1/(2*m)))\n \n #Stepsize \n eta = 1/(3*om1*Lmax)\n \n #Theta\n theta = 1 + min(eta*la,1/(4*m))\n \n cobj.keepInfo(loss,X,C,y,yC,kernel,la,freeDoubles,freeGPU,cholT,KnmP_fun,mratio,om1,om2,niterBatch)\n \n beta_prev = torch.zeros(m,1, dtype = dtype)\n x = torch.zeros(m,1,dtype = dtype)\n z = torch.zeros(m,1, dtype = dtype)\n yy = torch.zeros(m,1, dtype = dtype)\n \n for epoch in range(Nepochs):\n \n cobj.cbIterates(beta_prev,yy)\n \n #Computing big gradient \n lobj = [y,torch.zeros(n,1,dtype = dtype)]\n grad = cholTt(KnmP_grad(cholT(beta_prev),lobj))\n d_stock = lobj[1]\n \n beta = torch.zeros(m,1,dtype = dtype)\n for t in range(niterBatch):\n S = np.random.choice(n,tau,replace = True)\n x = om1*z + om2*beta_prev + (1-om1-om2)*yy\n \n KtaumP = factKnmP(X[S,:],C)\n l_grad_tau = lgtau(loss,tau)\n KtaumP_grad = lambda u,lobj : 
KtaumP(u,l_grad_tau,lobj) \n \n \n lobjS = [y[S,:],d_stock[S,:]]\n grad_proxy = cholTt(KtaumP_grad(cholT(x),lobjS)) + grad\n\n dz = (1/(1 + la*eta))*(z - eta*grad_proxy) - z\n if option == 1:\n yy = (1/(1+la/(3*Lmax)))*(x - (1/(3*Lmax))*grad_proxy)\n if option == 2:\n yy = x + om1*dz\n z = z+dz\n \n beta = (theta - 1)*((theta**t)/(theta**(t+1) - 1))*yy + (theta**t - 1)/(theta**(t+1) - 1) * beta\n \n beta_prev = beta\n \n cobj.cbIterates(beta_prev,yy)\n \n alpha = makeFinal(om1,om2,niterBatch,beta_prev,yy)\n return cholT(alpha)", "def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))", "def get_default_kernel():\n return ConstantKernel() * RBF() + WhiteKernel()", "def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real 
space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn", "def interrupt_kernel(self):", "def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def verhulst(nb_init, t0, tf, eps, methode, gamma, K) :\n f=lambda y,t : gamma*y*(1-y/K)\n Y=meth_epsilon(nb_init, t0, tf, eps, f, methode)\n return Y", "def start_kernel(self, **kw):", "def sbil_kernel(delta, obs_stats, t, ar, s, kernel='Gaussian'):\n #np.random.shuffle(delta)\n print(delta)\n sbil_kernel_estimate = []\n obs_stats = obs_stats[delta > 0]\n\n sim_theta = [select.generate_theta_sv(ar) for i in range(s)]\n sim_theta = np.matrix(sim_theta).T\n\n # Generate out sample of time series.\n sim_y = [sim.sim_sv(t, sim_theta[0, i], sim_theta[1, i], sim_theta[2, i],\n sim_theta[3, i], 1) for i in 
range(s)]\n \n # Generate out sample statistics.\n sim_stats = [sum_stat.sv_stats(delta, sim_y[i]) for i\n in range(s)]\n\n sim_theta_mean = sum(sim_theta.T)/s\n\n # Compute sample variance.\n u = sum([np.square(sim_theta[:, i] - sim_theta_mean.T)\n for i in range(s)])/s\n\n # Standardize parameter vectors.\n sim_theta = np.hstack([(sim_theta[:, i] - sim_theta_mean.T)/np.sqrt(u)\n for i in range(s)])\n\n global theta_sigma\n global theta_mean\n theta_sigma = np.sqrt(u)\n theta_mean = sim_theta_mean\n\n # Standardize observed statistics.\n obs_stats = (obs_stats - np.mean(sim_stats, 0))/np.std(sim_stats, 0)\n\n # Compute sample mean.\n sim_stats_mean = sum(sim_stats)/s\n\n # Compute sample variance.\n u = sum([np.square(sim_stats[i]-sim_stats_mean) for i in range(s)])/s\n\n # Standardize simulated statistics.\n sim_stats = [(sim_stats[i] - sim_stats_mean)/np.sqrt(u) for i in range(s)]\n\n # Identify k nearest neighbors.\n norms = [np.linalg.norm(obs_stats-sim_stats[i]) for i in range(s)]\n closest_index = np.argsort(norms)\n closest_thetas = [sim_theta[:, i] for i in closest_index[0:round(s*0.03)]]\n\n # Compute k-nn estimate.\n estimate_standard = (sum(closest_thetas)/len(closest_thetas))\n\n estimate = np.array(estimate_standard.T)*np.array(\n theta_sigma.T) + np.array(theta_mean)\n\n return estimate", "def ker_class():\n ker = Kernel()\n return ker", "def Kernel(x, y):\n\n Result = (np.dot(x_train[x, :], x_train[y, :])+1)**5 # Polynomial\n #Result = (np.dot(x_train[x, :], x_train[y, :])+1) # Linear\n #Gaussian\n \"\"\"\n sigma = 1\n if np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) == 1:\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2)) ** 2 / (2 * sigma ** 2))\n elif (np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) == 1) or (np.ndim(x_train[x, :]) == 1 and np.ndim(x_train[y, :]) > 1):\n Result = np.exp(- (np.linalg.norm(x_train[x, :] - x_train[y, :], 2, axis=1) ** 2) / (2 * sigma ** 2))\n elif np.ndim(x_train[x, :]) > 1 and np.ndim(x_train[y, :]) > 1:\n Result = np.exp(- (np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=2) ** 2) / (2 * sigma ** 2))\n \"\"\"\n return Result", "def get_kernel(ktype):\n \n kernel = np.zeros(kernsize + 1)\n this_kern = partial(inp_kernel, ktype=ktype)\n\n bins = np.arange(0, 1., 1./kernsize)\n bins = np.append(bins, 1.)\n\n for ii in range(kernsize):\n\n y, yerr = integrate.quad(integral_func(this_kern, bins[ii]), 0, np.sqrt(1.-bins[ii]**2))\n kernel[ii] = y * 2.\n \n return kernel", "def _kernel(self, x, y, t):\n return (self.C / (2 * np.pi * self.sigma_x * self.sigma_y * t)) * \\\n tf.exp(- self.beta * t - (tf.square(x)/tf.square(self.sigma_x) + tf.square(y)/tf.square(self.sigma_y)) / (2*t))", "def njit(func):\n return func", "def c_src_kernel_tiling(self, node, nodename):\r\n\r\n #The kernel is intended to be structured roughly like this:\r\n \"\"\"\r\n static __global__ void kernel()\r\n {\r\n for (int v = blockIdx.y; v < dim0; v += gridDim.x)\r\n {\r\n for (int w = blockIdx.y; w < dim1; w += gridDim.y)\r\n {\r\n for (int x = threadIdx.x; x < dim2; x += blockDim.x)\r\n {\r\n for (int y = threadIdx.y; y < dim3; y += blockDim.y)\r\n {\r\n for (int z = threadIdx.z; z < dim4; z += blockDim.z)\r\n {\r\n out[v * out_stride[0] + ...] 
= f(in1[...], in2[...])\r\n }\r\n }\r\n }\r\n }\r\n }\r\n }\r\n\r\n \"\"\"\r\n\r\n nd = node.outputs[0].type.ndim\r\n sio = StringIO()\r\n #print 'C_SRC_KERNEL', sio.getvalue()\r\n\r\n if nd in (4,):\r\n # print some leading comments to make the code easier to read\r\n for ipos, i in enumerate(node.inputs):\r\n print >> sio, \"// Input \", ipos, str(i.type)\r\n for ipos, i in enumerate(node.outputs):\r\n print >> sio, \"// Output \", ipos, str(i.type)\r\n print >> sio, \"static __global__ void kernel_%s_%s_%s(unsigned int numEls\" %(\r\n self.scalar_op.__class__.__name__,\r\n nodename,\r\n 'tiling%i'%nd)\r\n if (nd):\r\n print >> sio, \"\\t,\", \", \".join(\"const int dim%i\" % i for i in xrange(nd))\r\n #declare inputs\r\n for ipos, i in enumerate(node.inputs):\r\n s = \", \".join([\"const float * i%i_data\" % ipos] + list(\"int i%i_str_%i\" % (ipos, d) for d in xrange(nd)))\r\n print >> sio, \"\\t,\", s\r\n #declare outputs\r\n for ipos, i in enumerate(node.outputs):\r\n s = \", \".join([\"float * o%i_data\" % ipos] + list(\"int o%i_str_%i\" % (ipos, d) for d in xrange(nd)))\r\n print >> sio, \"\\t,\", s\r\n #print >> sio, \"\\t,\", \", \".join(\"int o%i_str_%i\" % (ipos, d) for d in xrange(nd))\r\n #print >> sio, \"\\t,\", \"float * o%i_data\" % ipos\r\n print >> sio, \"\\t)\\n{\"\r\n\r\n # For each input that is a scalar which has been broadcasted to a tensor,\r\n # load it into a local variable\r\n print >> sio, \" __shared__ float value0[%i];\" % len(node.inputs)\r\n print >> sio, \" __shared__ int shared_dims[%(nd)s];\" % locals()\r\n #print >> sio, \" __shared__ int shared_i_str[%(n_in)s][%(nd)s]\"\r\n print >> sio, \" if ((threadIdx.x == 0) && (threadIdx.y == 0)) {\"\r\n for ipos, i in enumerate(node.inputs):\r\n if _logical_scalar(i):\r\n print >> sio, \" value0[%i] = i%i_data[0];\" % (ipos, ipos)\r\n for ipos in xrange(nd):\r\n print >> sio, \" shared_dims[%i] = dim%i;\" % (ipos, ipos)\r\n print >> sio, \" }\"\r\n print >> sio, \" __syncthreads();\"\r\n\r\n\r\n if (nd == 4):\r\n print >> sio, \"\"\"\r\n for (int pos0 = blockIdx.x; pos0 < shared_dims[0]; pos0 += gridDim.x)\r\n {\r\n for (int pos1 = blockIdx.y; pos1 < shared_dims[1]; pos1 += gridDim.y)\r\n {\r\n //for (int pos2 = threadIdx.x; pos2 < shared_dims[2]; pos2 += blockDim.x)\r\n for (int pos2 = threadIdx.y; pos2 < shared_dims[2]; pos2 += blockDim.y)\r\n {\r\n //for (int pos3 = threadIdx.y; pos3 < shared_dims[3]; pos3 += blockDim.y)\r\n for (int pos3 = threadIdx.x; pos3 < shared_dims[3]; pos3 += blockDim.x)\r\n {\r\n \"\"\"\r\n else:\r\n raise NotImplementedError()\r\n\r\n for ipos, i in enumerate(node.inputs):\r\n if not _logical_scalar(i):\r\n print >> sio, \" const float * ii_i%i_data = i%i_data;\" % (ipos, ipos)\r\n for ipos, i in enumerate(node.outputs):\r\n print >> sio, \" float * ii_o%i_data = o%i_data;\" % (ipos, ipos)\r\n for d in xrange(nd):\r\n for ipos, i in enumerate(node.inputs):\r\n if not _logical_scalar(i):\r\n print >> sio, \" ii_i%i_data += pos%i * i%i_str_%i;\" % (ipos, d, ipos, d)\r\n for ipos, i in enumerate(node.outputs):\r\n print >> sio, \" ii_o%i_data += pos%i * o%i_str_%i;\" % (ipos, d, ipos, d)\r\n\r\n # perform the scalar operation on the input and output references\r\n #TODO: What if the scalar_op needs support_code??\r\n task_code = self.scalar_op.c_code(\r\n Apply(self.scalar_op,\r\n [scalar.Scalar(dtype = input.type.dtype)() for input in node.inputs],\r\n [scalar.Scalar(dtype = output.type.dtype)() for output in node.outputs])\r\n , nodename + '_scalar_'\r\n , 
get_str_list_logical_scalar(node, value_str='value0[%i]')\r\n , ['ii_o%i_data[0]'%ipos for ipos, i in enumerate(node.outputs)]\r\n , sub=dict(fail='return;')) #TODO: set a failure code somehow!!!\r\n print >> sio, \" \", task_code\r\n\r\n print >> sio, \" }\" * nd\r\n\r\n #TODO: insert runtime stride checks that select the best loop order either here, or in\r\n # the host code that launched the kernel (host code probably better spot)\r\n\r\n #indent = \" \"*(4*d+7)\r\n #for ipos, i in enumerate(node.inputs):\r\n #print >> sio, indent, \"const float * i%i\" % ipos, '= i%i_data', ''\r\n print >> sio, \"}\"\r\n\r\n print sio.getvalue()\r\n return sio.getvalue()", "def truncated_newton(inputs, output, costs, params, givens, maxiter, ridge,\n precond_type, n_train_batches, *args):\n # There might be different costs of interest, but we only minimize the first one.\n opt_cost = costs[0] if isinstance(costs,(list,tuple)) else costs \n\n def gauss_vect_mult(v):\n \"\"\"\n How to get B in James Marten's paper\n \n Multiply a vector v by the Gauss-Newton matrix JHJ'\n where J is the Jacobian between output and params\n and H is the Hessian between costs and output\n\n H should be diagonal and positive.\n Also add the ridge\n\n return\n -------\n JHJv + lamda*v\n \"\"\"\n Jv = T.Rop(output, params, v)\n HJv = T.Rop(T.grad(opt_cost,output), output, Jv)\n JHJv = T.Lop(output, params, HJv)\n if not isinstance(JHJv,list):\n JHJv = [JHJv]\n JHJv = [a+ridge*b for a,b in zip(JHJv,v)]\n return JHJv\n\n#----------------Ridge\n # damping parameter: lamda\n rho = 1 # The ratio between the actual decrease and the predicted decrease\n ridge = shared(numpy.array(ridge,dtype=theano.config.floatX))\n ridge_update_factor = T.scalar(dtype=theano.config.floatX)\n ridge_update = function(\n [ridge_update_factor],\n [],\n on_unused_input='warn',\n updates = [(ridge, ridge*ridge_update_factor)])\n\n#---------------Preconditioner\n\n ind_block = T.iscalar()\n # Preconditioner computed in blocks.\n # The larger nblock, the smaller the variance, but the larger the computation time.\n nblock = 100 \n preconditioner = [ shared(numpy.ones(p.get_value().shape,dtype=theano.config.floatX)) for p in params ]\n\n def compute_preconditioner_block():\n srng = RandomStreams(seed=1234)\n r = T.sgn(srng.normal(output.shape))\n grad = T.grad(opt_cost, output)\n if precond_type == 'jacobi':\n val = T.sqrt(T.grad(T.sum(grad), output)) * r\n elif precond_type == 'martens':\n val = grad * r\n else:\n raise NotImplementedError(\"Invalid preconditioner specified\")\n\n precond = [T.sqr(v) for v in T.Lop(output[ind_block::nblock], params, val[ind_block::nblock])]\n updates = [(a,a+b) for a,b in zip(preconditioner, precond)]\n\n return function(\n [ind_block]+inputs,\n [],\n givens=givens,\n on_unused_input='warn',\n updates=updates)\n\n if precond_type:\n update_precond_block = compute_preconditioner_block()\n\n init_preconditioner = function(\n [],\n [],\n on_unused_input='warn',\n updates = [(a,ridge*T.ones_like(a)) for a in preconditioner])\n\n def update_preconditioner():\n init_preconditioner()\n if precond_type:\n for i in range(nblock):\n update_precond_block(i,*the_args)\n if precond_type == 'martens':\n function(\n [],\n [],\n on_unused_input='warn',\n updates=[(a,a**0.75) for a in preconditioner])()\n\n\n#-----------------Gradient (on the full dataset)\n # storing gradients \n grhs = [ shared(numpy.zeros(p.get_value().shape,dtype=theano.config.floatX)) for p in params ]\n gparams = T.grad(opt_cost, params)\n init_gradient = function(\n 
[],\n [],\n on_unused_input='warn',\n updates = [(a,T.zeros_like(a)) for a in grhs])\n\n update_gradient_batch = function(\n inputs,\n costs,\n givens=givens,\n on_unused_input='warn',\n updates=[(a,a-b/n_train_batches) for a,b in zip(grhs, gparams)])\n\n def update_grhs():\n \"\"\"\n go through this training batch and update gradients for all params.\n \"\"\"\n init_gradient()\n costs_per_batch = []\n for i in range(n_train_batches):\n c = update_gradient_batch(i,*args)\n costs_per_batch.append(c)\n return numpy.mean(costs_per_batch,axis=0)\n\n#---------------------Linear CG and updates\n\n lcg = linear_conj_grad_r(gauss_vect_mult, grhs, preconditioner, inputs, givens)\n step = lcg[0]\n norm_step = function(\n [],\n ridge*sum([T.sum(T.sqr(a)) for a in step]),\n on_unused_input='warn')\n\n starting_point_lcg = function(\n [],\n [],\n on_unused_input='warn',\n updates = [ (a,T.zeros_like(a)) for a in step ])\n \n update_params = function(\n [],\n [],\n on_unused_input='warn',\n updates = [(p,p+s) for p,s in zip(params,step)])\n \n backtrack_params = function(\n [],\n [],\n on_unused_input='warn',\n updates = [(p,p-s) for p,s in zip(params,step)])\n\n # g gauss_newton(H) (g.T)\n gauss_grad_mult = function(\n inputs,\n sum([T.sum(a*b) for a,b in zip(gauss_vect_mult(grhs),grhs)]),\n on_unused_input='warn',\n givens = givens)\n\n#---------------------Start\n\n cost_values = update_grhs()\n obj_value = cost_values[0]\n costs_log = []\n\n # TN Loop\n for i in range(maxiter):\n \n st = time.time()\n # global pre-training batch\n current_batch = i % n_train_batches\n the_args = (current_batch,) + args\n\n update_preconditioner()\n\n if i == 0:\n exp_monitor.record_train_step(i, obj_value, cost_values[1], rho, ridge.get_value(), 0)\n exp_monitor.record_valid_step(i)\n \n if i > 0:\n \n delta_obj = old_obj_value - obj_value\n rho = delta_obj / newton_decr\n\n if rho > 0.5 and delta_obj < obj_value * 1e-5 and delta_obj > 0:\n return costs_log\n\n if rho < 0.25: ridge_update(2)\n if rho > 0.75: ridge_update(0.5)\n\n costs_log.append(cost_values)\n print 'TN Loop:Iter',i, 'Objective function value =', obj_value, 'other costs = ', cost_values[1:], 'rho =', rho, 'ridge =', ridge.get_value()\n else:\n print 'TN Loop:Iter',i, 'Objective function value =', obj_value, 'other costs = ', cost_values[1:]\n\n old_obj_value = obj_value\n\n #L-CG loop\n while 1:\n \n newton_decr = perform_linear_conj_grad(lcg, tol=0.0, args=the_args)\n newton_decr += 0.5*norm_step()\n if newton_decr > 0 and not isnan(newton_decr):\n update_params()\n cost_values = update_grhs()\n obj_value = cost_values[0]\n gauss_grad_mult_nan = isnan(gauss_grad_mult(*the_args))\n if gauss_grad_mult_nan:\n print 'L-CG Loop:Gauss-Newton gradient multiplication returned nan'\n if obj_value < old_obj_value and not isnan(obj_value) and not isinf(obj_value) and not gauss_grad_mult_nan: break\n backtrack_params()\n update_grhs()\n else:\n cost_values = update_grhs()\n obj_value = cost_values[0]\n\n starting_point_lcg()\n ridge_update(4)\n print 'redo L-CG Loop: Newton decrement =', newton_decr, 'Objective function value =', obj_value, 'Increasing ridge to', ridge.get_value()\n\n et = time.time()\n\n exp_monitor.record_train_step(i, obj_value, cost_values[1], rho, ridge.get_value(), et - st)\n exp_monitor.record_valid_step(i)\n return costs_log", "def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = 
DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, 
meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, 
grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn", "def gpu_kernels(self, node, name):\r\n raise MethodNotDefined, 'gpu_kernels'", "def _kill_kernel(self):", "def _get_kernel(self, x:generic_array, y:generic_array) -> generic_array:\n raise NotImplementedError('Subclass must implement the method _get_kernel.')", "def tune(runner, kernel_options, device_options, tuning_options):\n\n #Bayesian Optimization strategy seems to need some hyper parameter tuning to\n #become better than random sampling for auto-tuning GPU kernels.\n\n #alpha, normalize_y, and n_restarts_optimizer are options to\n #https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessRegressor.html\n #defaults used by Baysian Optimization are:\n # alpha=1e-6, #1e-3 recommended for very noisy or discrete search spaces\n # n_restarts_optimizer=5,\n # normalize_y=True,\n\n #several exploration friendly settings are: (default is acq=\"ucb\", kappa=2.576)\n # acq=\"poi\", xi=1e-1\n # acq=\"ei\", xi=1e-1\n # acq=\"ucb\", kappa=10\n\n if not bayes_opt_present:\n raise ImportError(\"Error: optional dependency Bayesian Optimization not installed\")\n\n #defaults as used by Bayesian Optimization Python package\n acq = tuning_options.strategy_options.get(\"method\", \"poi\")\n kappa = tuning_options.strategy_options.get(\"kappa\", 2.576)\n xi = tuning_options.strategy_options.get(\"xi\", 0.0)\n init_points = tuning_options.strategy_options.get(\"popsize\", 5)\n n_iter = tuning_options.strategy_options.get(\"maxiter\", 25)\n\n tuning_options[\"scaling\"] = True\n\n results = []\n\n #function to pass to the optimizer\n def func(**kwargs):\n args = [kwargs[key] for key in tuning_options.tune_params.keys()]\n return -1.0 * minimize._cost_func(args, kernel_options, tuning_options, runner, results)\n\n bounds, _, _ = minimize.get_bounds_x0_eps(tuning_options)\n pbounds = OrderedDict(zip(tuning_options.tune_params.keys(),bounds))\n\n verbose=0\n if tuning_options.verbose:\n verbose=2\n\n optimizer = BayesianOptimization(f=func, pbounds=pbounds, verbose=verbose)\n\n optimizer.maximize(init_points=init_points, n_iter=n_iter, acq=acq, kappa=kappa, xi=xi)\n\n if tuning_options.verbose:\n print(optimizer.max)\n\n return results, runner.dev.get_environment()", "def build_gufunc_kernel(library, ctx, innerfunc, sig, inner_ndim):\n # Declare types and function\n byte_t = lc.Type.int(8)\n byte_ptr_t = lc.Type.pointer(byte_t)\n\n intp_t = ctx.get_value_type(types.intp)\n\n fnty = lc.Type.function(lc.Type.void(), [lc.Type.pointer(byte_ptr_t),\n lc.Type.pointer(intp_t),\n lc.Type.pointer(intp_t),\n byte_ptr_t])\n\n mod = library.create_ir_module('parallel.gufunc.wrapper')\n lfunc = mod.add_function(fnty, name=\".kernel\")\n innerfunc = mod.add_function(fnty, name=innerfunc.name)\n\n bb_entry = lfunc.append_basic_block('')\n\n # Function body starts\n builder = lc.Builder.new(bb_entry)\n\n args, dimensions, steps, data = lfunc.args\n\n # Distribute work\n total = builder.load(dimensions)\n ncpu = lc.Constant.int(total.type, NUM_CPU)\n\n count = builder.udiv(total, ncpu)\n\n count_list = []\n remain = total\n\n for i in range(NUM_CPU):\n space = cgutils.alloca_once(builder, intp_t, size=inner_ndim + 1)\n cgutils.memcpy(builder, space, dimensions,\n count=lc.Constant.int(intp_t, inner_ndim + 1))\n count_list.append(space)\n\n if i == NUM_CPU - 1:\n 
# Last thread takes all leftover\n builder.store(remain, space)\n else:\n builder.store(count, space)\n remain = builder.sub(remain, count)\n\n # Array count is input signature plus 1 (due to output array)\n array_count = len(sig.args) + 1\n\n # Get the increment step for each array\n steps_list = []\n for i in range(array_count):\n ptr = builder.gep(steps, [lc.Constant.int(lc.Type.int(), i)])\n step = builder.load(ptr)\n steps_list.append(step)\n\n # Get the array argument set for each thread\n args_list = []\n for i in range(NUM_CPU):\n space = builder.alloca(byte_ptr_t,\n size=lc.Constant.int(lc.Type.int(), array_count))\n args_list.append(space)\n\n for j in range(array_count):\n # For each array, compute subarray pointer\n dst = builder.gep(space, [lc.Constant.int(lc.Type.int(), j)])\n src = builder.gep(args, [lc.Constant.int(lc.Type.int(), j)])\n\n baseptr = builder.load(src)\n base = builder.ptrtoint(baseptr, intp_t)\n multiplier = lc.Constant.int(count.type, i)\n offset = builder.mul(steps_list[j], builder.mul(count, multiplier))\n addr = builder.inttoptr(builder.add(base, offset), baseptr.type)\n\n builder.store(addr, dst)\n\n # Declare external functions\n add_task_ty = lc.Type.function(lc.Type.void(), [byte_ptr_t] * 5)\n empty_fnty = lc.Type.function(lc.Type.void(), ())\n add_task = mod.get_or_insert_function(add_task_ty, name='numba_add_task')\n synchronize = mod.get_or_insert_function(empty_fnty,\n name='numba_synchronize')\n ready = mod.get_or_insert_function(empty_fnty, name='numba_ready')\n\n # Add tasks for queue; one per thread\n as_void_ptr = lambda arg: builder.bitcast(arg, byte_ptr_t)\n\n for each_args, each_dims in zip(args_list, count_list):\n innerargs = [as_void_ptr(x) for x\n in [innerfunc, each_args, each_dims, steps, data]]\n builder.call(add_task, innerargs)\n\n # Signal worker that we are ready\n builder.call(ready, ())\n # Wait for workers\n builder.call(synchronize, ())\n\n builder.ret_void()\n\n return lfunc", "def CreateMotionKernel(kernel):\r\n TrajSize = 64\r\n anxiety = 0.2* np.random.rand()\r\n numT = 10\r\n MaxTotalLength =10\r\n TotLength = 0\r\n #term determining, at each sample, the strengh of the component leating towards the previous position\r\n centripetal = 0.7 * np.random.rand()\r\n #term determining, at each sample, the random component of the new direction\r\n gaussianTerm =10 * np.random.rand()\r\n #probability of having a big shake, e.g. 
due to pressing camera button or abrupt hand movements\r\n freqBigShakes = 3 *np.random.rand()\r\n #v is the initial velocity vector, initialized at random direction\r\n init_angle = 360 * np.random.rand()\r\n #initial velocity vector having norm 1\r\n v0 = math.cos(init_angle / 180.0 * math.pi) + 1.0j * math.sin(init_angle/ 180.0 * math.pi)\r\n #the speed of the initial velocity vector\r\n v = v0* MaxTotalLength/(numT-1);\r\n\r\n if anxiety > 0:\r\n v = v0 * anxiety\r\n # initialize the trajectory vector\r\n x = np.zeros(numT,dtype = np.complex);\r\n\r\n abruptShakesCounter = 0\r\n for t in range(numT-1):\r\n # determine if there is an abrupt (impulsive) shake\r\n if np.random.rand() < freqBigShakes * anxiety:\r\n #if yes, determine the next direction which is likely to be opposite to the previous one\r\n nextDirection = 2 * v * (np.exp( 1.0j * (math.pi + (np.random.rand() - 0.5))))\r\n abruptShakesCounter = abruptShakesCounter + 1\r\n else:\r\n nextDirection=0\r\n\r\n #determine the random component motion vector at the next step\r\n dv = nextDirection + anxiety * (gaussianTerm * (np.random.randn()- + 1.0j * np.random.randn()) - centripetal * x[t]) * (MaxTotalLength / (numT - 1))\r\n v = v + dv\r\n # velocity vector normalization\r\n v = (v / np.abs(v)) * MaxTotalLength / (numT - 1)\r\n #print v\r\n x[t + 1] = x[t] + v\r\n # compute total length\r\n #TotLength=TotLength+np.abs(x([t+1]-x[t]))\r\n x_real = []\r\n x_imag = []\r\n for elem in x:\r\n x_real.append(elem.real)\r\n x_imag.append(elem.imag)\r\n x_real = np.round((x_real - np.min(x_real))/(np.max(x_real) - np.min(x_real)) * kernel-0.5)\r\n x_imag = np.round((x_imag - np.min(x_imag))/(np.max(x_imag) - np.min(x_imag)) * kernel-0.5)\r\n for idx in range(len(x_real)):\r\n if x_real[idx] < 0:\r\n x_real[idx] = 0\r\n if x_imag[idx] < 0:\r\n x_imag[idx] = 0\r\n if x_real[idx] > kernel -1:\r\n x_real[idx] = kernel -1\r\n if x_imag[idx] > kernel -1:\r\n x_imag[idx] = kernel -1\r\n\r\n ker = np.zeros((kernel, kernel))\r\n for idx in range(len(x_real)):\r\n ker[np.int(x_real[idx])][np.int(x_imag[idx])] = 1\r\n ker = ker/np.sum(np.sum(ker))\r\n return ker", "def get_model_code():\n\n return \"\"\"\n functions {\n matrix cov_matrix_ard(int N, int D, vector[] x, vector ls, real alpha_sq, int cov_id) {\n matrix[N,N] S;\n real dist_sum;\n real sqrt3;\n real sqrt5;\n sqrt3=sqrt(3.0);\n sqrt5=sqrt(5.0);\n\n // For RBF ARD kernel\n if (cov_id == 1) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_sum = 0;\n for(d in 1:D) {\n dist_sum = dist_sum + square(x[i][d] - x[j][d]) / square(ls[d]);\n }\n S[i,j] = alpha_sq * exp( -0.5 * dist_sum);\n }\n }\n }\n\n // Fill upper triangle\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n S[j,i] = S[i,j];\n }\n }\n\n // Create diagonal\n for(i in 1:N) {\n S[i,i] = alpha_sq;\n }\n\n return S;\n }\n\n matrix distance_matrix_on_vectors(int N, vector[] x) {\n matrix[N, N] distmat;\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n distmat[i, j] = square(distance(x[i], x[j]));\n }\n }\n return distmat;\n }\n\n matrix cov_matrix_matern(int N, matrix dist, real ls, real alpha_sq, int cov_id) {\n matrix[N,N] S;\n real dist_ls;\n real sqrt3;\n real sqrt5;\n sqrt3=sqrt(3.0);\n sqrt5=sqrt(5.0);\n\n // For Matern kernel with parameter nu=1/2 (i.e. 
absolute exponential kernel)\n if (cov_id == 2) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/square(ls);\n S[i,j] = alpha_sq * exp(-1 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu=3/2\n else if (cov_id == 3) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * (1 + sqrt3 * dist_ls) * exp(-sqrt3 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu=5/2\n else if (cov_id == 4) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * (1 + sqrt5 * dist_ls + 5 * pow(dist_ls,2)/3) * exp(-sqrt5 * dist_ls);\n }\n }\n }\n\n // For Matern kernel with parameter nu tending to infinity (i.e. RBF kernel)\n else if (cov_id == 1) {\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n dist_ls = fabs(dist[i,j])/ls;\n S[i,j] = alpha_sq * exp( -0.5 * pow(dist_ls, 2) );\n }\n }\n }\n\n // Fill upper triangle\n for(i in 1:(N-1)) {\n for(j in (i+1):N) {\n S[j,i] = S[i,j];\n }\n }\n\n // Create diagonal\n for(i in 1:N) {\n S[i,i] = alpha_sq;\n }\n\n return S;\n }\n\n }\n\n data {\n int<lower=1> D;\n int<lower=1> N;\n vector[D] x[N];\n vector[N] y;\n real<lower=0> ig1;\n real<lower=0> ig2;\n real<lower=0> n1;\n real<lower=0> n2;\n real<lower=0> sigma;\n int kernel_id;\n }\n\n parameters {\n real<lower=0> rho;\n vector<lower=0>[D] rhovec;\n real<lower=0> alpha;\n }\n\n model {\n int cov_id;\n matrix[N, N] cov;\n matrix[N, N] L_cov;\n matrix[N, N] distmat;\n\n // RBF kernel single lengthscale\n if (kernel_id == 1) {\n cov = cov_exp_quad(x, alpha, rho) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n rho ~ inv_gamma(ig1, ig2);\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n // Matern kernel single lengthscale\n else if (kernel_id >= 2 && kernel_id <= 4) {\n if (kernel_id == 2) { cov_id = 2; }\n if (kernel_id == 3) { cov_id = 3; }\n if (kernel_id == 4) { cov_id = 4; }\n\n distmat = distance_matrix_on_vectors(N, x);\n cov = cov_matrix_matern(N, distmat, rho, square(alpha), cov_id) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n rho ~ inv_gamma(ig1, ig2);\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n // RBF kernel with ARD (D-dimensional) lengthscale\n else if (kernel_id == 5) {\n cov_id = 1;\n cov = cov_matrix_ard(N, D, x, rhovec, square(alpha), cov_id) + diag_matrix(rep_vector(square(sigma), N));\n L_cov = cholesky_decompose(cov);\n for(d in 1:D) {\n rhovec[d] ~ inv_gamma(ig1, ig2);\n }\n alpha ~ normal(n1, n2);\n y ~ multi_normal_cholesky(rep_vector(0, N), L_cov);\n }\n\n }\n \"\"\"", "def inp_kernel(r, ktype):\n \n if ktype == 'uniform':\n \n if r < 1.:\n return 1./((4./3.)*pi)\n else:\n return 0.\n \n elif ktype == 'sph-anarchy':\n \n if r <= 1.: return (21./(2.*pi)) * ((1. - r)*(1. - r)*(1. - r)*(1. - r)*(1. + 4.*r)) \n else: return 0. \n \n elif ktype == 'gadget-2':\n \n if r < 0.5: return (8./pi) * (1. - 6*(r*r) + 6*(r*r*r))\n elif r < 1.: return (8./pi) * 2 * ((1. - r)*(1. - r)*(1. 
- r))\n else: return 0.\n \n elif ktype == 'cubic':\n \n if r < 0.5: return (2.546479089470 + 15.278874536822 * (r - 1.0) * r * r)\n elif r < 1: return 5.092958178941 * (1.0 - r) * (1.0 - r) * (1.0 - r)\n else: return 0\n \n elif ktype == 'quintic':\n \n if r < 0.333333333: return 27.0*(6.4457752*r*r*r*r*(1.0-r) -1.4323945*r*r +0.17507044)\n elif r < 0.666666667: return 27.0*(3.2228876*r*r*r*r*(r-3.0) +10.7429587*r*r*r -5.01338071*r*r +0.5968310366*r +0.1352817016)\n elif r < 1: return 27.0*0.64457752*(-r*r*r*r*r +5.0*r*r*r*r -10.0*r*r*r +10.0*r*r -5.0*r +1.0)\n else: return 0\n \n else:\n \n print (\"Doesn't recognize the kernel. Input your own kernel in `inp_kernel`\")\n exit()", "def __init__(\n self,\n XS,\n YS,\n XE,\n YE,\n IE,\n Var,\n d,\n l0,\n l1,\n DT,\n G,\n GAMMA,\n LAMBDA,\n TAU,\n QUAD_REG,\n QUAD_REG_MEAN,\n pos_only=True,\n SMIN=0.,\n SMAX=1.\n ):\n self.l_i = XS.shape[0]\n self.n_l = d.shape[0]\n self.n_n = XE.shape[0]\n\n # Define Theano Variables Common to Generation and Inference\n self.t_XS = theano.shared(XS, 'XS')\n self.t_YS = theano.shared(YS, 'YS')\n self.t_XE = theano.shared(XE, 'XE')\n self.t_YE = theano.shared(YE, 'YE')\n self.t_IE = theano.shared(IE, 'IE')\n self.t_Var = theano.shared(Var, 'Var')\n\n self.t_XR = T.matrix('XR')\n self.t_YR = T.matrix('YR')\n\n # Parameters\n self.t_L0 = theano.shared(np.float32(l0), 'L0')\n self.t_L1 = theano.shared(np.float32(l1), 'L1')\n self.t_DT = theano.shared(np.float32(DT), 'DT')\n self.t_G = theano.shared(np.float32(G), 'G')\n self.t_TAU = theano.shared(np.float32(TAU), 'TAU')\n self.t_SMIN = theano.shared(np.float32(SMIN), 'SMIN')\n self.t_SMAX = theano.shared(np.float32(SMAX), 'SMAX')\n\n ##############################\n # Simulated Spike Generation #\n ##############################\n\n self.t_S_gen = T.matrix('S_gen') # Image dims are i2, i1\n self.t_Ips_gen, _ = inner_products(self.t_S_gen, self.t_Var,\n self.t_XS, self.t_YS,\n self.t_XE, self.t_YE,\n self.t_XR, self.t_YR)\n self.t_FP_gen = firing_prob(self.t_Ips_gen, self.t_G, self.t_IE,\n self.t_L0, self.t_L1, self.t_SMIN,\n self.t_SMAX, self.t_DT)\n\n # Computes image-RF inner products and the resulting firing\n # probabilities\n self.RFS = theano.function(\n inputs=[self.t_S_gen, self.t_XR, self.t_YR],\n outputs=[self.t_Ips_gen, self.t_FP_gen])\n\n self.rng = T.shared_randomstreams.RandomStreams(seed=10)\n self.t_R_gen = (self.rng.uniform(size=self.t_FP_gen.shape) <\n self.t_FP_gen).astype('float32')\n\n self.spikes = theano.function(\n inputs=[self.t_S_gen, self.t_XR, self.t_YR],\n outputs=self.t_R_gen)\n\n ##############################\n # Latent Variable Estimation #\n ##############################\n\n self.t_R = T.matrix('R')\n\n # Current value of A\n self.t_A = theano.shared(\n np.zeros((self.n_l,)).astype('float32'), 'A')\n # Previous value of A\n self.t_Ap = theano.shared(\n np.zeros((self.n_l,)).astype('float32'), 'Ap')\n\n self.t_D = theano.shared(d, 'D') # Dictionary\n\n self.t_Wbt = T.matrix('Wbt') # Weights (b,t) from particle filter\n\n # Sum of Hessians\n self.t_H = theano.shared(\n np.zeros((self.n_l, self.n_l)).astype('float32'), 'H')\n self.t_B = theano.shared(\n np.zeros((self.n_l,)).astype('float32'), 'B') # Prior Bias\n\n # Constants\n\n self.t_GAMMA = theano.shared(np.float32(GAMMA), 'GAMMA')\n self.t_LAMBDA = theano.shared(np.float32(LAMBDA), 'LAMBDA')\n self.QUAD_REG = QUAD_REG.astype('float32')\n # self.t_QUAD_REG = theano.shared(np.float32(QUAD_REG), 'QUAD_REG')\n self.t_QUAD_REG_MEAN = theano.shared(\n np.float32(QUAD_REG_MEAN), 
'QUAD_REG_MEAN')\n\n # Calculate Firing rate\n self.t_S = T.dot(self.t_A, self.t_D).reshape((self.l_i, self.l_i))\n self.image_est = theano.function(inputs=[], outputs=self.t_S)\n\n self.t_Ips, t_PixRFCoupling = inner_products(\n self.t_S, self.t_Var, self.t_XS, self.t_YS,\n self.t_XE, self.t_YE, self.t_XR, self.t_YR)\n\n self.t_FP = firing_prob(self.t_Ips, self.t_G, self.t_IE,\n self.t_L0, self.t_L1,\n self.t_SMIN, self.t_SMAX, self.t_DT)\n\n # Define Hessian\n # Reshape dictionary for computing derivative: k, i2, i1\n t_Dp = self.t_D.reshape((self.n_l, self.l_i, self.l_i))\n\n # Compute dc/dA = dc/dS * ds/dA\n # b, i2, i1, j, t -> b, _, i2, i1, j, t\n # k, i2, i1 -> _, k, i2, i1, _, _\n # b, k, j, t\n\n # t_SpRFCoupling1 = (\n # t_PixRFCoupling.dimshuffle(0, 'x', 1, 2, 3, 4) *\n # t_Dp.dimshuffle('x', 0, 1, 2, 'x', 'x')).sum(axis=(2, 3))\n\n def pix_rf_to_sp_rf(t_PixRFCoupling, t_Dp):\n \"\"\"\n b i2 i1 j t\n k i2 i1\n b k j t\n \"\"\"\n n_n = t_PixRFCoupling.shape[3]\n tmp1 = t_PixRFCoupling.dimshuffle(1, 2, 0, 3, 4).reshape(\n (self.l_i ** 2, -1))\n # i2i1 bjt\n\n tmp2 = t_Dp.reshape((self.n_l, -1)) # k i2i1\n\n tmp3 = T.dot(tmp2, tmp1) # k bjt\n n_b, n_t = self.t_Wbt.shape\n return tmp3.reshape(\n (self.n_l, n_b, n_n, n_t)).dimshuffle(\n 1, 0, 2, 3)\n\n t_SpRFCoupling = pix_rf_to_sp_rf(t_PixRFCoupling, t_Dp)\n\n # self.sp_rf_test= theano.function(\n # inputs=[self.t_XR, self.t_YR, self.t_Wbt],\n # outputs=[t_SpRFCoupling, t_SpRFCoupling1])\n\n # Get RGC Sparse Coeff couplings\n # bkjt,bt-> kj\n t_SpRGCCoupling = (self.t_Wbt.dimshuffle(0, 'x', 'x', 1) *\n t_SpRFCoupling).sum(axis=(0, 3))\n\n self.get_sp_rf_coupling = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_Wbt],\n outputs=t_SpRGCCoupling)\n\n # b, k, j, t\n t_dlogFPdA = dlogfp_dA(\n t_SpRFCoupling, self.t_G, self.t_IE, self.t_L0, self.t_L1,\n self.t_SMIN, self.t_SMAX)\n\n # b, k, k', j, t -> k, k'\n t_dE_R_dAA1 = (\n self.t_Wbt.dimshuffle(0, 'x', 'x', 'x', 1) *\n t_dlogFPdA.dimshuffle(0, 'x', 1, 2, 3) *\n t_dlogFPdA.dimshuffle(0, 1, 'x', 2, 3) *\n self.t_FP.dimshuffle(0, 'x', 'x', 1, 2)\n ).sum(axis=(0, 3, 4))\n\n def calc_hessian(t_Wbt, t_dlogFPdA, t_FP):\n \"\"\"\n Calculate the hessian given the following\n\n Parameters\n ----------\n t_Wbt : theano.tensor, shape (b, t)\n t_dlogFPdA : theano.tensor, shape (b,k,j,t)\n t_FP : theano.tensor, shape (b, j, t)\n\n Returns\n -------\n t_dE_R_dAA : theano.tensor, shape (k, k')\n \"\"\"\n\n tmp = t_Wbt.dimshuffle(0, 'x', 1) * t_FP # b, j, t\n tmp1 = tmp.dimshuffle(0, 'x', 1, 2) * t_dlogFPdA\n\n return T.dot(\n tmp1.dimshuffle(1, 0, 2, 3).reshape((self.n_l, -1)),\n t_dlogFPdA.dimshuffle(1, 0, 2, 3).reshape((self.n_l, -1)).T\n )\n\n t_dE_R_dAA = calc_hessian(self.t_Wbt, t_dlogFPdA, self.t_FP)\n\n self.sp_rf_test = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_Wbt],\n outputs=[t_dE_R_dAA, t_dE_R_dAA1])\n\n self.t_dlogFPdA = t_dlogFPdA\n self.t_dE_R_dAA = t_dE_R_dAA\n\n # Compute Energy Functions (negative log-likelihood) to minimize\n\n # Spiking cost separated by b, j, t\n self.t_E_R_f = spiking_cost(self.t_R, self.t_FP)\n\n self.t_E_R = T.sum(T.sum(self.t_E_R_f, axis=1) * self.t_Wbt)\n self.t_E_R.name = 'E_R'\n\n self.t_E_bound = self.t_GAMMA * (\n T.sum(T.switch(self.t_S < self.t_SMIN,\n -(self.t_S - self.t_SMIN), 0.)) +\n T.sum(T.switch(self.t_S > self.t_SMAX,\n self.t_S - self.t_SMAX, 0.)))\n self.t_E_bound.name = 'E_bound'\n\n self.t_E_sp = self.t_LAMBDA * T.sum(T.abs_(self.t_A))\n self.t_E_sp.name = 'E_sp'\n\n # self.t_E_quad = 0.5 * T.sum(self.t_QUAD_REG *\n # 
((self.t_A - self.t_QUAD_REG_MEAN) ** 2))\n # self.t_E_quad.name = 'E_quad'\n\n # Define bias term\n t_dPrior = T.grad(self.t_E_sp, self.t_A)\n\n self.t_E_prev = (\n (self.t_A - self.t_Ap).dimshuffle('x', 0) *\n self.t_H *\n (self.t_A - self.t_Ap).dimshuffle(0, 'x')\n ).sum() * 0.5\n\n self.t_E_lin_prior = ((self.t_A - self.t_Ap) * self.t_B).sum()\n\n # Split off terms that will go into fista (i.e. not icluding E_sp)\n self.t_E_rec = (\n self.t_E_prev + self.t_E_R +\n self.t_E_lin_prior + self.t_E_bound\n # + self.t_E_quad\n )\n self.t_E_rec.name = 'E_rec'\n\n self.t_E = self.t_E_rec + self.t_E_sp\n self.t_E.name = 'E'\n\n # Cost from poisson terms separated by batches for particle filter log\n # probability\n self.t_E_R_pf = T.sum(self.t_E_R_f, axis=(1, 2))\n self.spike_energy = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_R],\n outputs=self.t_E_R_pf)\n\n # Generate costs given a path, spikes, and time-batch weights\n energy_outputs = [\n self.t_E,\n self.t_E_prev,\n self.t_E_R,\n self.t_E_bound,\n self.t_E_sp,\n self.t_E_lin_prior,\n # self.t_E_quad,\n ]\n\n self.costs = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_R, self.t_Wbt],\n outputs=energy_outputs)\n\n self.image_costs = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_R,\n self.t_Wbt, self.t_S],\n outputs=self.t_E_R)\n\n # Define variables for FISTA minimization\n self.t_L = T.scalar('L')\n\n self.grad_updates = fista_updates(\n self.t_A, self.t_E_rec, self.t_LAMBDA,\n self.t_L, pos_only=pos_only)\n\n _, self.t_fista_X, self.t_T = self.grad_updates.keys()\n\n # Initialize t_A, and extra variables\n\n inputs = [self.t_XR, self.t_YR, self.t_R, self.t_Wbt, self.t_L]\n self.run_fista_step = theano.function(\n inputs=inputs, outputs=energy_outputs,\n updates=self.grad_updates)\n\n # Define functions for online learning #\n\n self.hessian_func = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_Wbt],\n outputs=t_dE_R_dAA)\n\n # After each iteration, replace value of Ap with A\n self.update_Ap = theano.function(\n inputs=[], updates=[(self.t_Ap, self.t_A)])\n\n t_decay = T.exp(- self.t_DT / self.t_TAU *\n self.t_XR.shape[1].astype('float32'))\n\n self.update_HB = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_Wbt],\n updates=[\n (self.t_H, t_decay * self.t_H + t_dE_R_dAA),\n (self.t_B, t_dPrior)])\n\n # Code for no motion optimizer\n self.t_E_R_no_mo = T.sum(spiking_cost(self.t_R, self.t_FP))\n self.t_E_R_no_mo.name = 'E_R_no_mo'\n\n t_E_no_mo = self.t_E_R_no_mo + self.t_E_bound\n t_E_no_mo.name = 'E_no_mo'\n\n t_Rho = T.scalar('Rho')\n t_Eps = T.scalar('Eps')\n ada_updates = ada_delta(t_E_no_mo, self.t_A, *(t_Rho, t_Eps))\n t_ada_Eg2, t_ada_dA2, _ = ada_updates.keys()\n\n def reset_adadelta_variables(t_A=self.t_A):\n \"\"\"\n Resets ADA Delta auxillary variables\n \"\"\"\n A0 = np.zeros_like(t_A.get_value()).astype(theano.config.floatX)\n t_ada_Eg2.set_value(A0)\n t_ada_dA2.set_value(A0)\n t_A.set_value(A0)\n\n self.reset_adadelta_variables = reset_adadelta_variables\n\n self.run_image_max_step = theano.function(\n inputs=[self.t_XR, self.t_YR, self.t_R, t_Rho, t_Eps],\n updates=ada_updates,\n outputs=[t_E_no_mo]\n )", "def aGMKernel(Ni,Nj,alpha,gamma):\n \n #Dimension of data\n d = Ni.mu.size\n I = sp.eye(d)\n\n ##Normalisation\n deltaMean = (Ni.mu-Nj.mu).reshape(d,)\n SigmaSum = alpha * (Ni.Sigma+Nj.Sigma) + I/gamma\n Kij = (linalg.det(2*gamma*alpha * Ni.Sigma + I) * linalg.det(2*gamma*alpha * Nj.Sigma + I))**0.25\n Kij *= sp.exp(-0.5*sp.dot(deltaMean.T,linalg.solve(SigmaSum,deltaMean)))\n 
Kij /= sp.sqrt(linalg.det(SigmaSum*gamma)) \n \n return Kij", "def ghosal_edge(img,Ks,thr=1,thrmax=0.995,lmin = 0.5,phimin=1.4,thresholding=True, debug=False):\n\ttotaltime = time.time()\n\tkerneltime = time.time()\n\t# Ks must be odd\n\tif Ks%2 != 1:\n\t\tprint(\"Ks must be odd! Continuing with Ks = Ks-1\")\n\t\tKs = Ks-1\n\t# define the rectangular kernels\n\t#Vc00 = np.zeros((Ks,Ks),dtype=complex)\n\tVc11 = np.zeros((Ks,Ks),dtype=complex)\n\tVc20 = np.zeros((Ks,Ks),dtype=complex)\n\tofs = 1 *(1-1/Ks) # offset for centering kernel around 0,0\n\tfor i in range(Ks):\n\t\tfor j in range(Ks):\n\t\t\tKx = 2*j/Ks-ofs # limits of integration between -1 and 1\n\t\t\tKy = 2*i/Ks-ofs\n\t\t\tif Kx**2+Ky**2 <= 1: # only a circle\n\t\t\t\t#Vc00[i,j] = 1 # the conjugate of V00\n\t\t\t\tVc11[i,j] = Kx-Ky*1j # ...\n\t\t\t\tVc20[i,j] = 2*Kx**2+2*Ky**2-1\n\tkerneltime = time.time() - kerneltime\n\t\n\t# Kernel Plots\n\t#\tVCplot = Vc00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Vc20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag w K Vc20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t# do the convolution with the images to get the zernike moments\n\tAnorm = lambda n : (n+1)/np.pi\t# a normalization value\n\tconvolvetime = time.time()\n\t#A00 = scig.convolve2d(img,Vc00,mode='same')\n\t#\tA11 = Anorm(1)*scig.convolve2d(img,Vc11,mode='same')\n\t#\tA20 = Anorm(2)*scig.convolve2d(img,Vc20,mode='same')\n\tA11 = Anorm(1)*scig.oaconvolve(img,Vc11,mode='same')\n\tA20 = Anorm(2)*scig.oaconvolve(img,Vc20,mode='same')\n\tconvolvetime = time.time() - convolvetime\n\t# Plot Zernike moments\n\t#\tVCplot = A00\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A00\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = A20\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A20\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\tparamstime = time.time()\n\t# calculate the edge paramters\n\t#\ttanphi = np.imag(A11)/np.real(A11)\n\t#\tphi = np.arctan(tanphi)\n\t#\tcosphi = np.cos(phi)\n\t#\tsinphi = cosphi*tanphi\n\t#\tAl11 = np.real(A11)*cosphi+np.imag(A11)*sinphi\n\t\n\tphi = np.arctan(np.imag(A11)/np.real(A11))\n\tAl11 = np.real(A11)*np.cos(phi)+np.imag(A11)*np.sin(phi)\n\t\n\t#\tAl11 = A11*np.exp(-phi*1j)\n\tl = A20/Al11 # A20 has no imaginary component so A20 = A'20\n\n\tk = 3*Al11/(2*(1-l**2)**(3/2))\n\tparamstime = time.time() - paramstime\n\t\n\t# Plot edge paramters\n\t#\tVCplot = phi\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real 
phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag phi\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = Al11\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag A\\'11\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = l\n\t#\tplt.pcolormesh(np.real(VCplot))#,vmin=-5,vmax=5\n\t#\tplt.title(\"real l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot)) # ,vmin=-5,vmax=5\n\t#\tplt.title(\"imag l\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tVCplot = k\n\t#\tplt.pcolormesh(np.real(VCplot))\n\t#\tplt.title(\"real k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t#\tplt.pcolormesh(np.imag(VCplot))\n\t#\tplt.title(\"imag k\")\n\t#\tplt.colorbar()\n\t#\tplt.show()\n\t\n\t\n\ttreattime = time.time()\n\tif thresholding==True:\n\t\t# do the thresholding\n\t\tif (thrmax<0)&(thr>0):\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])\n\t\telif thrmax>0:\n\t\t\tknorm = np.sort(k.flatten())[[int(thr*np.size(k)),int(thrmax*np.size(k))]]\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)&(abs(k)>knorm[0])&(abs(k)<knorm[1])\n\t\telif thr<0:\n\t\t\tidx = (abs(l)<lmin)&(abs(phi)>phimin)\n\t\t\tknorm = np.sort(k[idx].flatten())[int(thr)]\n\t\t\tidx = idx&(abs(k)>abs(knorm))\n\t\tne = np.sum(idx)\n\telif thresholding==False:\n\t\traise ValueError(\"this option is not still uncer development\")\n\t\t# no thresholding\n\t\tidx = np.ones(np.shape(l),dtype=bool)\n\t\tne =np.sum(idx)\n\telse:\n\t\traise ValueError(\"thresholding should be boolean\")\n\t\n\t# put all detected points in a vector of (x,y) values\n\tedg = np.zeros((ne,2))\n\torg = np.zeros((ne,2))\n\tnx,ny = np.shape(img)\n\te = 0\n\tfor i in range(nx):\n\t\tfor j in range(ny):\n\t\t\tif idx[i,j]:\n\t\t\t\tedg[e]=np.array([i,j]) + l[i,j]*Ks/2*np.array(\n\t\t\t\t\t[np.sin(phi[i,j]),-np.cos(phi[i,j])])\n\t\t\t\torg[e]=np.array([i,j])\n\t\t\t\te +=1\n\ttreattime = time.time() - treattime\n\ttotaltime = time.time() - totaltime\n\tprint(\"total %0.5f\tconvolution %0.5f\tthresholding %0.5f\tparamters %0.5f\tkernel %0.5f\"%(totaltime,convolvetime,treattime,paramstime,kerneltime))\n\t\n\tif debug==True:\n\t\treturn edg, org, k, l, phi\n\telse:\n\t\treturn edg, org" ]
[ "0.61699444", "0.5723231", "0.5606339", "0.55607325", "0.5518511", "0.55096704", "0.55096334", "0.5484832", "0.5463777", "0.5462731", "0.54323053", "0.54275984", "0.5419684", "0.5415917", "0.54151106", "0.54038984", "0.53997", "0.5388906", "0.53794676", "0.5318565", "0.5302338", "0.52963775", "0.5294763", "0.5289145", "0.5247436", "0.5223083", "0.5222627", "0.52184147", "0.5217503", "0.5216327" ]
0.63298035
0
Returns an op to increase the eval step for TPU evaluation.
def _increase_eval_step_op(iterations_per_loop):
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator evaluate increases 1 by default. So, we increase the difference.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def TpuEvalStep(self, *args):\n with tf.name_scope('tpu_eval'):\n self._model.ConstructFPropGraph()\n per_step_eval_metrics = self._eval_metrics.PackStepMetricsForAccumulation(\n self._task.eval_metrics, args)\n return [x + y for x, y in zip(per_step_eval_metrics, args)]", "def eval_step(total_loss):\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n\n tpu_estimator_spec = self._call_model_fn(features, labels)\n if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):\n raise RuntimeError(\n 'estimator_spec used by TPU evaluation must have type'\n '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))\n\n loss = tpu_estimator_spec.loss\n captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)\n to_record = {}\n to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics\n if tpu_estimator_spec.host_call is not None:\n # We assume that evaluate won't update global step, so we don't wrap\n # this host_call.\n to_record['host_call'] = tpu_estimator_spec.host_call\n host_calls.record(to_record)\n\n with ops.control_dependencies(host_calls.create_enqueue_op()):\n return math_ops.add(total_loss, loss)", "def op(self) -> Node:\n return self._step_execution_context.op", "def incr_operand(self):\n pass", "def exp_incr_lr():\n maxlr_div_minlr = tf.divide(max_lr, min_lr)\n power_iter = tf.divide(global_step, num_iters)\n pow_div = tf.pow(maxlr_div_minlr, power_iter)\n return tf.multiply(min_lr, pow_div, name=name)", "def add_eval_op(self, pred):\n f1_score, metric_update_op = tf.contrib.metrics.f1_score(\n self.labels_placeholder,\n tf.slice(tf.nn.softmax(self.pred), [0, 1], [-1, 1]),\n name='f1_score'\n )\n\n return f1_score, metric_update_op", "def increment(self) -> global___Expression:", "def TpuTrainStep(self, *args):\n with tf.name_scope('tpu_train'):\n with py_utils.OpportunisticVariableReuseScope(True):\n with contextlib.ExitStack() as stack:\n if py_utils.IsEagerMode():\n stack.enter_context(py_utils.GradientTape(persistent=True))\n self._model.ConstructFPropBPropGraph()\n per_step_eval_metrics = self._eval_metrics.PackStepMetricsForAccumulation(\n self.task.eval_metrics, args)\n outfeed_op = self._OutfeedEnqueue(self.task.per_example_tensors)\n summed_metrics = []\n assert len(per_step_eval_metrics) == len(args)\n with tf.control_dependencies([outfeed_op]):\n for x, y in zip(per_step_eval_metrics, args):\n summed_metrics.append(x + y)\n return summed_metrics + [self.task.train_op]", "def op(self):\n return self.getop(self.pc)", "def compute_step(X):\n return MOVING_STEP", "def eval_step(self, *args, **kwargs):\n raise NotImplementedError", "def run_eval_epoch(sess, cost_op, ops, num_unrolls, step=None, unroll_len=None):\n start = timer()\n # sess.run(reset)\n total_cost = []\n feed_dict = {}\n for i in xrange(num_unrolls):\n if step is not None:\n feed_dict[step] = i * unroll_len + 1\n cost = sess.run([cost_op] + ops, feed_dict=feed_dict)[0]\n total_cost.append(cost)\n return timer() - start, total_cost", "def convert_to_single_tpu_eval_step(self, dequeue_fn):\n host_calls = _OutfeedHostCall(self._ctx)\n captured_scaffold_fn = _CapturedObject()\n\n def eval_step(total_loss):\n \"\"\"Evaluation step function for use inside a while loop.\"\"\"\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n\n tpu_estimator_spec = self._call_model_fn(features, labels)\n if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):\n raise RuntimeError(\n 'estimator_spec used by TPU evaluation must have type'\n '`TPUEstimatorSpec`. 
Got {}'.format(type(tpu_estimator_spec)))\n\n loss = tpu_estimator_spec.loss\n captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)\n to_record = {}\n to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics\n if tpu_estimator_spec.host_call is not None:\n # We assume that evaluate won't update global step, so we don't wrap\n # this host_call.\n to_record['host_call'] = tpu_estimator_spec.host_call\n host_calls.record(to_record)\n\n with ops.control_dependencies(host_calls.create_enqueue_op()):\n return math_ops.add(total_loss, loss)\n\n return eval_step, host_calls, captured_scaffold_fn", "def eval_expr(e, store):\n if e.type == 'IntLit':\n return e.value\n elif e.type == 'IntVar':\n index = eval_expr(e.children[0], store)\n return store.get(e.value, {}).get(index, 0)\n elif e.type == 'Random':\n rg = eval_expr(e.children[0], store)\n return random.randint(0, rg-1)\n elif e.type == 'IntOp':\n lhs = eval_expr(e.children[0], store)\n rhs = eval_expr(e.children[1], store)\n if e.value == '+':\n return lhs + rhs\n elif e.value == '-':\n return lhs - rhs\n elif e.value == '*':\n return lhs * rhs\n elif e.value == '/':\n if rhs == 0:\n return 0\n else:\n return lhs // rhs\n else:\n raise NotImplementedError(e.value)", "def op(self):\n return self.__op", "def op(self):\n return self.__op", "def get_train_op(loss):\n gs = tf.train.get_global_step()\n is_boolq = FLAGS.dataset == \"boolq\"\n lr = tf.train.exponential_decay(\n global_step=gs,\n learning_rate=FLAGS.learning_rate,\n staircase=True,\n decay_steps=50 if is_boolq else 100,\n decay_rate=0.999)\n opt = tf.train.AdamOptimizer(lr)\n grad_and_vars = opt.compute_gradients(loss)\n return opt.apply_gradients(grad_and_vars, tf.train.get_global_step())", "def TpuTrainStep():\n with py_utils.OpportunisticVariableReuseScope(True):\n self._train_model.ConstructFPropBPropGraph()\n return [self._train_task.train_op]", "def _create_training_op(self):\n with tf.device('/cpu:0'):\n self.learning_rate = tf.train.exponential_decay(\n learning_rate=self.start_learning_rate, \n global_step=self.global_step, \n decay_steps=self.decay_steps, \n decay_rate=self.decay_rate,\n staircase = True, name='learning_rate')\n optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n self.training_op = optimizer.minimize(loss=self.loss, \n global_step=self.global_step)", "def make_predict_step(self):\n return self.make_eval_step()", "def training(cost, global_step):\n tf.summary.scalar(\"cost\", cost)\n # using Adam Optimizer \n optimizer = tf.train.AdamOptimizer(learning_rate)\n train_op = optimizer.minimize(cost, global_step=global_step)\n return train_op", "def on_step(self, t, is_update): \n if t >= self.t_learn_start + self.t_start:\n if is_update:\n self.has_updated = True\n \n if t % self.t_save == 0 and self.has_updated:\n # Save model.\n self.save_model(t, self.saver)\n\n # Increment iteration count.\n self.t_add_op.eval(session=self.sess)", "def get_train_op(total_loss, global_step):\n # Variables that affect learning rate.\n decay_steps = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN // FLAGS.batch_size\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,\n global_step,\n decay_steps,\n LEARNING_RATE_DECAY_FACTOR,\n staircase=True)\n tf.summary.scalar('learning_rate', lr)\n\n # Generate moving averages of all losses and associated summaries.\n loss_averages_op = _add_loss_summaries(total_loss)\n\n # Compute gradients.\n with tf.control_dependencies([loss_averages_op]):\n opt = 
tf.train.GradientDescentOptimizer(lr)\n optimize_op = opt.minimize(total_loss, global_step=global_step)\n\n return optimize_op", "def add_training_op(self, loss):\n ### YOUR CODE HERE\n train_op=tf.train.GradientDescentOptimizer(learning_rate=Config.lr).minimize(loss)\n ### END YOUR CODE\n return train_op", "def add_train_op(self, loss):\n optimizer = tf.train.AdamOptimizer(self.lr)\n self.train_op = optimizer.minimize(loss)", "def IncrementLoadUnit(self):\n\t\treturn self._get_attribute('incrementLoadUnit')", "def train_op_a(self):\r\n return self._train_op_a", "def op(self):\n\n return self._op", "def _train_op_fn(loss):\n return tf.contrib.layers.optimize_loss(\n loss=loss,\n global_step=tf.train.get_global_step(),\n learning_rate=params[\"learning_rate\"],\n optimizer=\"Adagrad\")", "def calc_value(exp, t_new):\n t = Symbol('t')\n return exp.evalf(subs={t: t_new})" ]
[ "0.64432496", "0.63263434", "0.6229572", "0.60930693", "0.6043687", "0.5865047", "0.58446443", "0.5816877", "0.55298465", "0.5529144", "0.54990685", "0.54590845", "0.5441091", "0.54245347", "0.5421626", "0.5421626", "0.5403084", "0.5372202", "0.5353022", "0.5350679", "0.53279877", "0.53202724", "0.5318146", "0.5280536", "0.5273475", "0.52501804", "0.5249549", "0.52113235", "0.5202741", "0.51959956" ]
0.7544818
0
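Side note on the record above: the positive document leans on Estimator-internal helpers (evaluation._get_or_create_eval_step, state_ops, math_ops). A minimal sketch of the same increment-by-(iterations_per_loop - 1) pattern, rewritten against public tf.compat.v1 ops and a hypothetical explicit counter variable (the names below are illustrative assumptions, not taken from the record), could look like:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

def increase_eval_step_sketch(step_var, iterations_per_loop):
  # The surrounding eval loop is assumed to bump the counter by 1 itself,
  # so only the remaining (iterations_per_loop - 1) is added here.
  return tf.assign_add(
      step_var,
      tf.cast(iterations_per_loop - 1, dtype=step_var.dtype),
      use_locking=True)

# Hypothetical usage with an explicit, non-trainable step counter.
eval_step = tf.get_variable(
    'eval_step', shape=[], dtype=tf.int64,
    initializer=tf.zeros_initializer(), trainable=False)
increment_op = increase_eval_step_sketch(eval_step, iterations_per_loop=100)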
Creates a validated `TPUEstimatorSpec` instance.
def __new__(cls,
            mode,
            predictions=None,
            loss=None,
            train_op=None,
            eval_metrics=None,
            export_outputs=None,
            scaffold_fn=None,
            host_call=None):
  host_calls = {}
  if eval_metrics is not None:
    host_calls['eval_metrics'] = eval_metrics
  if host_call is not None:
    host_calls['host_call'] = host_call
  _OutfeedHostCall.validate(host_calls)
  return super(TPUEstimatorSpec, cls).__new__(
      cls,
      mode=mode,
      predictions=predictions,
      loss=loss,
      train_op=train_op,
      eval_metrics=eval_metrics,
      export_outputs=export_outputs,
      scaffold_fn=scaffold_fn,
      host_call=host_call)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_estimator_spec(self):\n host_calls = {}\n if self.eval_metrics is not None:\n host_calls['eval_metrics'] = self.eval_metrics\n if self.host_call is not None:\n host_calls['host_call'] = self.host_call\n host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)\n eval_metric_ops = None\n if self.eval_metrics is not None:\n eval_metric_ops = host_call_ret['eval_metrics']\n hooks = None\n if self.host_call is not None:\n hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]\n scaffold = self.scaffold_fn() if self.scaffold_fn else None\n return model_fn_lib.EstimatorSpec(\n mode=self.mode,\n predictions=self.predictions,\n loss=self.loss,\n train_op=self.train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=self.export_outputs,\n scaffold=scaffold,\n training_hooks=hooks,\n evaluation_hooks=hooks,\n prediction_hooks=hooks)", "def test_create_estimator(self):\n # Hyperparameters to create the Estimator\n hparams = tf.contrib.training.HParams(\n job_dir='test_dir',\n save_checkpoints_steps=1,\n keep_checkpoint_max=1,\n num_layers=2,\n dnn_dropout=0.7,\n dnn_optimizer='test_optimizer',\n linear_optimizer='test_optimizer',\n first_layer_size=10)\n estimator = model.create_estimator(hparams)\n self.assertIsInstance(estimator, tf.estimator.Estimator)", "def __new__(cls,\n input_fn,\n max_steps=None,\n hooks=None):\n # Validate input_fn.\n _validate_input_fn(input_fn)\n\n # Validate max_steps.\n if max_steps is not None and max_steps <= 0:\n raise ValueError(\n 'Must specify max_steps > 0, given: {}'.format(max_steps))\n\n # Validate hooks.\n hooks = _validate_hooks(hooks)\n\n return super(TrainSpec, cls).__new__(\n cls,\n input_fn=input_fn,\n max_steps=max_steps,\n hooks=hooks)", "def _tpu_build(self):\n def _define_model(features, labels, mode, params):\n data_source = (features, labels)\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(\n mode=mode, loss=losses, eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(\n mode=mode, predictions=outputs\n )\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error('Implementati Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(\n mode=mode, loss=losses[0], train_op=self.optimize_ops[0]\n )\n\n tpu_name = ['node-1'] # TODO Bring outside\n tpu_iterations = 500 # TODO Bring outside\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n\n run_config = tf.contrib.tpu.RunConfig(\n model_dir=self.output_path,\n cluster=tpu_cluster_resolver,\n session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations),\n )\n\n self.estimator = tpu.TPUEstimator(\n model_fn=_define_model,\n use_tpu=True,\n train_batch_size=32*4, #self.dataset['train'].batch_size,\n eval_batch_size=32*4, #self.dataset['validation'].batch_size,\n config=run_config,\n params={\"data_dir\": self.data_dir}\n )", "def __init__(self, input_spec, transform_or_spec, dtype, shape, name,\n also_track_spec=None):\n self._input_spec = input_spec\n self._transform_or_spec = transform_or_spec\n self._also_track_spec = also_track_spec\n self._dtype = dtype\n self._shape = shape\n self._name = name\n\n 
self._transform_is_composite = isinstance(transform_or_spec, tf.TypeSpec)\n self._unique_id_params = {'dtype': dtype, 'shape': shape}\n\n self._specs = {'input_spec': input_spec}\n if self._transform_is_composite:\n self._specs['transform_or_spec'] = transform_or_spec\n if also_track_spec is not None:\n self._specs['also_track_spec'] = also_track_spec", "def create(self, validated_data):\n new_spec = Specification(key = validated_data.get('key'),\n value = validated_data.get('value'),\n category = validated_data.get('category'),\n car = validated_data.get('car'),)\n new_spec.save()\n\n return new_spec", "def from_spec(cls, spec, prog, **kwargs):\n parser = spec.parser(prog, **kwargs)\n return cls(spec.name, spec.kind, spec.summary, parser, spec.factory)", "def _init_tf_estimator(desc_file, model_dir):\n sess_config = tf.compat.v1.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n config = tf.estimator.RunConfig(\n model_dir=model_dir, session_config=sess_config)\n model = ModelZoo().get_model(desc_file)\n\n def _model_fn(features, labels, mode):\n \"\"\"Model function of gpu evaluator.\"\"\"\n model.training = False\n logits = model(features)\n logits = tf.cast(logits, tf.float32)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=logits)\n else:\n eval_metric_ops = RMSE()(logits, labels)\n return tf.estimator.EstimatorSpec(mode=mode, loss=tf.log(1.0), train_op=None,\n eval_metric_ops=eval_metric_ops)\n\n return tf.estimator.Estimator(model_fn=_model_fn, config=config)", "def getInputSpecification(cls):\n inputSpecification = InputData.parameterInputFactory(cls.__name__, ordered=True, baseNode=None)\n\n StatePartInput = InputData.parameterInputFactory(\"state\", contentType=InputTypes.StringType)\n StatePartInput.addParam(\"outcome\", InputTypes.FloatType, True)\n StatePartInput.addParam(\"index\", InputTypes.IntegerType, True)\n TransitionInput = InputData.parameterInputFactory(\"transition\", contentType=InputTypes.StringType)\n inputSpecification.addSub(StatePartInput, InputData.Quantity.one_to_infinity)\n inputSpecification.addSub(TransitionInput, InputData.Quantity.zero_to_one)\n inputSpecification.addSub(InputData.parameterInputFactory(\"workingDir\", contentType=InputTypes.StringType))\n ## Because we do not inherit from the base class, we need to manually\n ## add the name back in.\n inputSpecification.addParam(\"name\", InputTypes.StringType, True)\n\n return inputSpecification", "def __init__(self, input_spec, transform_or_spec, dtype, name):\n self._input_spec = input_spec\n self._transform_or_spec = transform_or_spec\n self._dtype = dtype\n self._name = name\n\n self._unique_id_params = {'dtype': dtype}\n self._transform_is_composite = isinstance(transform_or_spec, tf.TypeSpec)\n\n self._specs = {'input_spec': input_spec}\n if self._transform_is_composite:\n self._specs['transform_or_spec'] = transform_or_spec", "def getInputSpecification(cls):\n inputSpecification = InputData.parameterInputFactory(cls.__name__, ordered=True, baseNode=None)\n\n StatePartInput = InputData.parameterInputFactory(\"state\", contentType=InputTypes.FloatType)\n StatePartInput.addParam(\"outcome\", InputTypes.FloatOrStringType, True)\n inputSpecification.addSub(StatePartInput, InputData.Quantity.one_to_infinity)\n\n ## Because we do not inherit from the base class, we need to manually\n ## add the name back in.\n inputSpecification.addParam(\"name\", InputTypes.StringType, True)\n\n return inputSpecification", "def model_fn_builder(self, 
bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (subject_logits, property_logits, value_logits) = self.create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n params=params,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions, depth):\n one_hot_positions = tf.one_hot(\n positions, depth=depth, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n # subject, property, value로 나오도록\n subject_label = features[\"subject\"]\n property_label = features[\"property\"]\n value_label = features[\"value\"]\n res_length = params[\"res_length\"]\n ont_length = params[\"ont_length\"]\n\n subject_loss = compute_loss(subject_logits, subject_label, res_length)\n property_loss = compute_loss(property_logits, property_label, ont_length)\n value_loss = compute_loss(value_logits, value_label, res_length)\n\n total_loss = (subject_loss + property_loss + value_loss) / 3.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"subject_logits\": subject_logits,\n \"property_logits\": property_logits,\n \"value_logits\": value_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn", "def parse_spec(inp_file):\n try:\n y_spec = yaml.load(inp_file, Loader=yaml.SafeLoader)\n spec = create_spec(y_spec)\n except jsonschema.exceptions.RefResolutionError:\n logging.error(\"Could not load specification. 
Check your network or try again\")\n raise err.BeaconTestError()\n except openapi_spec_validator.exceptions.OpenAPIValidationError:\n logging.error(\"Could not read specification. Check tat your file is valid\")\n raise err.BeaconTestError()\n return spec", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def getInputSpecification(cls):\n inputSpecification = super(Beta, cls).getInputSpecification()\n inputSpecification.addSub(InputData.parameterInputFactory(\"low\", contentType=InputTypes.FloatType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"alpha\", contentType=InputTypes.FloatType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"beta\", contentType=InputTypes.FloatType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"high\", contentType=InputTypes.FloatType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"peakFactor\", contentType=InputTypes.FloatType))\n\n return inputSpecification", "def load_spec(cls, spec):\n checkpoint_dict = load_checkpoint(os.path.join(spec.dir, 'checkpoint'))\n\n load_var = partial(var, spec, checkpoint_dict)\n\n result = QuantizedCheckpoint(\n q_wi=load_var(\n 'optimizer/target/decoder/decoder/q_wi_fused/qkernel',\n transpose=spec.transpose_scan_axis),\n q_wi_scale=load_var(\n 'optimizer/target/decoder/decoder/q_wi_fused/qscale',\n transpose=spec.transpose_scan_axis,\n ),\n kv=load_var(\n 'optimizer/target/decoder/decoder/kv_fused/qkernel',\n transpose=spec.transpose_scan_axis),\n kv_scale=load_var(\n 'optimizer/target/decoder/decoder/kv_fused/qscale',\n transpose=spec.transpose_scan_axis),\n o_wo=load_var(\n 'optimizer/target/decoder/decoder/o_wo_fused/qkernel',\n transpose=spec.transpose_scan_axis),\n o_wo_scale=load_var(\n 'optimizer/target/decoder/decoder/o_wo_fused/qscale',\n transpose=spec.transpose_scan_axis),\n layernorm_scale=load_var(\n 'optimizer/target/decoder/decoder/layer_norm/scale',\n transpose=spec.transpose_scan_axis),\n embedding=load_var(\n 'optimizer/target/decoder/token_embedder/embedding',\n transpose=False),\n )\n\n jax.tree_util.tree_map(\n check_shape,\n result,\n QuantizedCheckpoint.make_shaped_arrays(spec.hparams),\n is_leaf=lambda v: isinstance(v, tensorstore.Spec))\n return result", "def __init__(self, brain_spec, hparams):\n self._brain_spec = brain_spec\n 
self._validate_spec()\n self._hparams = hparams\n super().__init__(input_tensor_spec=brain_spec.observation_spec.tfa_spec)", "def create_tpu_estimator(model_fn, feature_columns, params):\n\n tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n params[\"tpu\"],\n zone=params[\"tpu_zone\"],\n project=params[\"gcp_project\"],\n coordinator_name=\"coordinator\")\n\n config = tf_estimator.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n model_dir=params[\"model_dir\"],\n tpu_config=tf_estimator.tpu.TPUConfig(\n iterations_per_loop=params[\"iterations_per_loop\"],\n experimental_host_call_every_n_steps=100,\n per_host_input_for_training=tf_estimator.tpu.InputPipelineConfig\n .PER_HOST_V2))\n\n return tf_estimator.tpu.TPUEstimator(\n use_tpu=params[\"use_tpu\"],\n model_fn=model_fn,\n config=config,\n train_batch_size=params[\"global_batch_size\"],\n eval_batch_size=params[\"eval_global_batch_size\"],\n params=params,\n embedding_config_spec=tf_estimator.tpu.experimental.EmbeddingConfigSpec(\n feature_columns=feature_columns,\n pipeline_execution_with_tensor_core=params[\"pipeline_execution\"],\n optimization_parameters=tf.tpu.experimental.AdagradParameters(\n learning_rate=params[\"learning_rate\"],\n use_gradient_accumulation=params[\"use_gradient_accumulation\"])))", "def load_spec(cls, spec):\n checkpoint_dict = load_checkpoint(os.path.join(spec.dir, 'checkpoint'))\n\n load_var = partial(var, spec, checkpoint_dict)\n\n result = Checkpoint(\n q_wi=load_var(\n 'optimizer/target/decoder/decoder/q_wi_fused/kernel',\n transpose=True),\n kv=load_var(\n 'optimizer/target/decoder/decoder/kv_fused/kernel', transpose=True),\n o_wo=load_var(\n 'optimizer/target/decoder/decoder/o_wo_fused/kernel',\n transpose=True),\n layernorm_scale=load_var(\n 'optimizer/target/decoder/decoder/layer_norm/scale',\n transpose=True),\n embedding=load_var(\n 'optimizer/target/decoder/token_embedder/embedding',\n transpose=False),\n )\n\n jax.tree_util.tree_map(\n check_shape,\n result,\n Checkpoint.make_shaped_arrays(spec.hparams),\n is_leaf=lambda v: isinstance(v, tensorstore.Spec))\n return result", "def __init__(self,\n model_fn=None,\n model_dir=None,\n config=None,\n params=None,\n use_tpu=True,\n train_batch_size=None,\n eval_batch_size=None,\n predict_batch_size=None,\n batch_axis=None):\n if config is None or not isinstance(config, tpu_config.RunConfig):\n raise ValueError(\n '`config` must be provided with type `tpu_config.RunConfig`')\n\n if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):\n raise ValueError('{} are reserved keys but existed in params {}.'.format(\n _RESERVED_PARAMS_KEYS, params))\n\n if use_tpu:\n # Perform some very basic validations. More validations will be found in\n # _TPUContext.\n if train_batch_size is None:\n raise ValueError('`train_batch_size` cannot be `None`')\n util_lib.check_positive_integer(train_batch_size, 'train_batch_size')\n\n if (config.tpu_config.per_host_input_for_training is\n tpu_config.InputPipelineConfig.PER_SHARD_V1 and\n config.tpu_config.computation_shape):\n raise ValueError(\n 'Model parallelism only supports per host input for training. 
'\n 'Please adjust TPURunconfig.per_host_input_for_training.')\n\n if eval_batch_size is not None:\n util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')\n\n if predict_batch_size is not None:\n util_lib.check_positive_integer(predict_batch_size,\n 'predict_batch_size')\n\n # Verifies the model_fn signature according to Estimator framework.\n estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access\n # We cannot store config and params in this constructor as parent\n # constructor might change them, such as assigning a temp dir for\n # config.model_dir.\n model_function = self._augment_model_fn(model_fn, batch_axis)\n\n # Passing non-None params as wrapped model_fn has it.\n params = params or {}\n super(TPUEstimator, self).__init__(\n model_fn=model_function,\n model_dir=model_dir,\n config=config,\n params=params)\n self._iterations_per_training_loop = (\n self._config.tpu_config.iterations_per_loop)\n\n # All properties passed to _TPUContext are immutable.\n # pylint: disable=protected-access\n self._ctx = tpu_context._get_tpu_context(\n self._config, train_batch_size,\n eval_batch_size, predict_batch_size,\n use_tpu)\n\n self._is_input_fn_invoked = None", "def test_init_custom_parameters():\n\n tpot_obj = TPOTClassifier(population_size=500, generations=1000,\n mutation_rate=0.05, crossover_rate=0.9,\n scoring='accuracy', num_cv_folds=10,\n verbosity=1, random_state=42,\n disable_update_check=True)\n\n assert tpot_obj.population_size == 500\n assert tpot_obj.generations == 1000\n assert tpot_obj.mutation_rate == 0.05\n assert tpot_obj.crossover_rate == 0.9\n assert tpot_obj.scoring_function == 'accuracy'\n assert tpot_obj.num_cv_folds == 10\n assert tpot_obj.max_time_mins is None\n assert tpot_obj.verbosity == 1\n assert tpot_obj._optimized_pipeline is None\n assert tpot_obj._fitted_pipeline is None\n assert not (tpot_obj._pset is None)\n assert not (tpot_obj._toolbox is None)", "def getInputSpecification(cls):\n specs = super().getInputSpecification()\n specs.description = r\"\"\"The \\xmlNode{OneVsRestClassifier} (\\textit{One-vs-the-rest (OvR) multiclass strategy})\n Also known as one-vs-all, this strategy consists in fitting one classifier per class. For each\n classifier, the class is fitted against all the other classes. In addition to its computational\n efficiency (only n\\_classes classifiers are needed), one advantage of this approach is its\n interpretability. Since each class is represented by one and one classifier only, it is\n possible to gain knowledge about the class by inspecting its corresponding classifier.\n This is the most commonly used strategy for multiclass classification and is a fair default choice.\n \\zNormalizationNotPerformed{OneVsRestClassifier}\n \"\"\"\n estimatorInput = InputData.assemblyInputFactory(\"estimator\", contentType=InputTypes.StringType,\n descr=r\"\"\"name of a ROM that can be used as an estimator\"\"\", default='no-default')\n #TODO: Add more inputspecs for estimator\n specs.addSub(estimatorInput)\n\n specs.addSub(InputData.parameterInputFactory(\"n_jobs\", contentType=InputTypes.IntegerType,\n descr=r\"\"\"TThe number of jobs to use for the computation: the n\\_classes one-vs-rest\n problems are computed in parallel. None means 1 unless in a joblib.parallel\\_backend\n context. 
-1 means using all processors.\"\"\", default=None))\n return specs", "def __init__(__self__, *,\n spec: Optional[pulumi.Input['InstanceSpecArgs']] = None):\n if spec is not None:\n pulumi.set(__self__, \"spec\", spec)", "def create_train_and_eval_specs(train_input_fn,\n eval_input_fn,\n predict_fn,\n train_steps):\n train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,\n max_steps=train_steps)\n eval_spec_name = \"0\"\n exported_name = \"{}_{}\".format('Servo', eval_spec_name)\n exporter = tf.estimator.FinalExporter(name=exported_name, serving_input_receiver_fn=predict_fn)\n eval_spec = tf.estimator.EvalSpec(name=eval_spec_name, input_fn=eval_input_fn, steps=None, exporters=exporter)\n return train_spec, eval_spec", "def get_estimator_spec(features, labels, mode):\n if mode not in {\"train\", \"infer\", \"eval\"}:\n raise ValueError('mode should be in {\"train\", \"infer\", \"eval\"}')\n\n logits = get_logits(features)\n preds = tf.argmax(logits, axis=-1)\n probs = tf.nn.softmax(logits, axis=-1)\n predictions = dict(preds=preds, probs=probs, image=features)\n\n if mode == 'infer':\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\n step = tf.train.get_or_create_global_step()\n train_op = optimizer.minimize(loss, global_step=step)\n\n accuracy = tf.metrics.accuracy(labels, preds)\n\n return tf.estimator.EstimatorSpec(\n mode=mode, predictions=predictions,\n loss=loss, train_op=train_op, eval_metric_ops=dict(accuracy=accuracy))", "def getInputSpecification(cls):\n inputSpecification = super(Triangular, cls).getInputSpecification()\n inputSpecification.addSub(InputData.parameterInputFactory(\"apex\", contentType=InputTypes.FloatType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"min\", contentType=InputTypes.FloatType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"max\", contentType=InputTypes.FloatType))\n\n return inputSpecification", "def _validate_estimator(self):\n\n if self.smote is not None:\n if isinstance(self.smote, SMOTE):\n self.smote_ = self.smote\n else:\n raise ValueError('smote needs to be a SMOTE object.'\n 'Got {} instead.'.format(type(self.smote)))\n else:\n self.smote_ = SMOTE(ratio=self.ratio, k_neighbors=3,\n random_state=self.random_state)\n\n if self.tomek is not None:\n if isinstance(self.tomek, TomekLinks):\n self.tomek_ = self.tomek\n else:\n raise ValueError('tomek needs to be a TomekLinks object.'\n 'Got {} instead.'.format(type(self.tomek)))\n else:\n self.tomek_ = TomekLinks(ratio=\"all\",\n random_state=self.random_state)", "def __init__(self, spec):\n self.spec = spec", "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n 
num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return output_spec\n\n return model_fn", "def validate_task_spec(task_spec: Dict[str, Any], with_inputs: bool) -> None:\n error_prefix = \"Invalid task specification:\"\n assert 'task_name' in task_spec, f\"{error_prefix} missing field 'task_name'\"\n assert isinstance(task_spec['task_name'], str) and re.match(r\"^[A-Za-z0-9\\-_.]+$\", task_spec['task_name']), \\\n f\"{error_prefix} 'task_name' must be a string consisting only of [A-Za-z0-9\\\\-_.]\"\n assert 'labels' in task_spec, f\"{error_prefix} missing field 'labels'\"\n assert isinstance(task_spec['labels'], dict), f\"{error_prefix} 'labels' must be a dictionary\"\n all_labels = task_spec['labels'].keys()\n for label, label_dict in task_spec['labels'].items():\n assert isinstance(label_dict, dict), f\"{error_prefix} label '{label}' is not mapped to a dictionary\"\n assert not label_dict.keys() - {'instruction', 'counter_labels'}, \\\n f\"{error_prefix} invalid keys for label '{label}', only 'instruction' and 'counter_labels' are allowed\"\n assert 'instruction' in label_dict.keys(), f\"{error_prefix} missing field 'instruction' for label '{label}'\"\n assert isinstance(label_dict['instruction'], str), f\"{error_prefix} 'instruction' not a string for label '{label}'\"\n assert label_dict['instruction'][-1] == '\"', \\\n f\"{error_prefix} each instruction should end with an opening quotation mark (\\\") so that the next quotation mark generated \" \\\n f\"by the model can be interpreted as a signal that it is done.\"\n if with_inputs:\n assert label_dict['instruction'].count(PLACEHOLDER_STR) == 1, \\\n f\"{error_prefix} The instruction for label '{label}' does not contain exactly one placeholder token ({PLACEHOLDER_STR}). \" \\\n f\"If an input file is specified, each instruction must contain this placeholder to indicate where the input should be \" \\\n f\"inserted.\"\n else:\n assert label_dict['instruction'].count(PLACEHOLDER_STR) == 0, \\\n f\"{error_prefix} The instruction for label '{label}' contains a placeholder token ({PLACEHOLDER_STR}). If no input file \" \\\n f\"is specified, instructions must not contain this placeholder as there is no input to replace it with.\"\n if 'counter_labels' in label_dict.keys():\n assert isinstance(label_dict['counter_labels'], list), f\"{error_prefix} 'counter_labels' not a list for label '{label}'\"\n for counter_label in label_dict['counter_labels']:\n assert counter_label in all_labels, f\"{error_prefix} counter_label '{counter_label}' for label '{label}' is not a label\"" ]
[ "0.58872473", "0.56756306", "0.5586498", "0.5519904", "0.54025954", "0.5400498", "0.53641117", "0.53511024", "0.5270698", "0.52680284", "0.52115655", "0.51035535", "0.5102771", "0.50725776", "0.50706977", "0.5068895", "0.5065051", "0.50586456", "0.5047727", "0.5010918", "0.50048965", "0.4984379", "0.49771369", "0.49542427", "0.49531347", "0.49206835", "0.490945", "0.48976332", "0.4893371", "0.48776746" ]
0.6168227
0
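For readers skimming this record: the positive document is a namedtuple subclass whose __new__ validates host-call-style fields before delegating to the tuple constructor. A self-contained sketch of that validate-then-construct pattern, with a simplified stand-in validator instead of the real _OutfeedHostCall.validate (the class name, field set, and checks below are assumptions for illustration only), might look like:

import collections

_FIELDS = ('mode', 'loss', 'eval_metrics', 'host_call')


def _validate_host_calls(host_calls):
  # Simplified stand-in check: every entry must be a (fn, tensors) pair.
  for name, call in host_calls.items():
    if not (isinstance(call, (tuple, list)) and len(call) == 2 and callable(call[0])):
      raise ValueError('%s must be a (fn, tensors) pair.' % name)


class MiniSpec(collections.namedtuple('MiniSpec', _FIELDS)):
  """Illustrative namedtuple spec that validates before construction."""

  def __new__(cls, mode, loss=None, eval_metrics=None, host_call=None):
    host_calls = {}
    if eval_metrics is not None:
      host_calls['eval_metrics'] = eval_metrics
    if host_call is not None:
      host_calls['host_call'] = host_call
    _validate_host_calls(host_calls)
    return super(MiniSpec, cls).__new__(
        cls, mode=mode, loss=loss, eval_metrics=eval_metrics, host_call=host_call)


# A well-formed (metric_fn, tensors) pair passes validation; a malformed one
# would raise ValueError before the tuple is ever built.
spec = MiniSpec(mode='eval', eval_metrics=(lambda labels, preds: {}, []))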
Creates an equivalent `EstimatorSpec` used by CPU train/eval.
def as_estimator_spec(self):
  host_calls = {}
  if self.eval_metrics is not None:
    host_calls['eval_metrics'] = self.eval_metrics
  if self.host_call is not None:
    host_calls['host_call'] = self.host_call
  host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
  eval_metric_ops = None
  if self.eval_metrics is not None:
    eval_metric_ops = host_call_ret['eval_metrics']
  hooks = None
  if self.host_call is not None:
    hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
  scaffold = self.scaffold_fn() if self.scaffold_fn else None
  return model_fn_lib.EstimatorSpec(
      mode=self.mode,
      predictions=self.predictions,
      loss=self.loss,
      train_op=self.train_op,
      eval_metric_ops=eval_metric_ops,
      export_outputs=self.export_outputs,
      scaffold=scaffold,
      training_hooks=hooks,
      evaluation_hooks=hooks,
      prediction_hooks=hooks)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_estimator_spec(features, labels, mode):\n if mode not in {\"train\", \"infer\", \"eval\"}:\n raise ValueError('mode should be in {\"train\", \"infer\", \"eval\"}')\n\n logits = get_logits(features)\n preds = tf.argmax(logits, axis=-1)\n probs = tf.nn.softmax(logits, axis=-1)\n predictions = dict(preds=preds, probs=probs, image=features)\n\n if mode == 'infer':\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\n step = tf.train.get_or_create_global_step()\n train_op = optimizer.minimize(loss, global_step=step)\n\n accuracy = tf.metrics.accuracy(labels, preds)\n\n return tf.estimator.EstimatorSpec(\n mode=mode, predictions=predictions,\n loss=loss, train_op=train_op, eval_metric_ops=dict(accuracy=accuracy))", "def _init_tf_estimator(desc_file, model_dir):\n sess_config = tf.compat.v1.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n config = tf.estimator.RunConfig(\n model_dir=model_dir, session_config=sess_config)\n model = ModelZoo().get_model(desc_file)\n\n def _model_fn(features, labels, mode):\n \"\"\"Model function of gpu evaluator.\"\"\"\n model.training = False\n logits = model(features)\n logits = tf.cast(logits, tf.float32)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=logits)\n else:\n eval_metric_ops = RMSE()(logits, labels)\n return tf.estimator.EstimatorSpec(mode=mode, loss=tf.log(1.0), train_op=None,\n eval_metric_ops=eval_metric_ops)\n\n return tf.estimator.Estimator(model_fn=_model_fn, config=config)", "def create_train_and_eval_specs(train_input_fn,\n eval_input_fn,\n predict_fn,\n train_steps):\n train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,\n max_steps=train_steps)\n eval_spec_name = \"0\"\n exported_name = \"{}_{}\".format('Servo', eval_spec_name)\n exporter = tf.estimator.FinalExporter(name=exported_name, serving_input_receiver_fn=predict_fn)\n eval_spec = tf.estimator.EvalSpec(name=eval_spec_name, input_fn=eval_input_fn, steps=None, exporters=exporter)\n return train_spec, eval_spec", "def test_create_estimator(self):\n # Hyperparameters to create the Estimator\n hparams = tf.contrib.training.HParams(\n job_dir='test_dir',\n save_checkpoints_steps=1,\n keep_checkpoint_max=1,\n num_layers=2,\n dnn_dropout=0.7,\n dnn_optimizer='test_optimizer',\n linear_optimizer='test_optimizer',\n first_layer_size=10)\n estimator = model.create_estimator(hparams)\n self.assertIsInstance(estimator, tf.estimator.Estimator)", "def __new__(cls,\n mode,\n predictions=None,\n loss=None,\n train_op=None,\n eval_metrics=None,\n export_outputs=None,\n scaffold_fn=None,\n host_call=None):\n host_calls = {}\n if eval_metrics is not None:\n host_calls['eval_metrics'] = eval_metrics\n if host_call is not None:\n host_calls['host_call'] = host_call\n _OutfeedHostCall.validate(host_calls)\n return super(TPUEstimatorSpec, cls).__new__(\n cls,\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metrics=eval_metrics,\n export_outputs=export_outputs,\n scaffold_fn=scaffold_fn,\n host_call=host_call)", "def create_cpu():\n return CPU()", "def __call__(self, features: Dict, labels: List, mode: str) -> tf.estimator.EstimatorSpec:\n input_length = features[\"input_length\"]\n label_length = features[\"label_length\"]\n features = features[\"features\"]\n if mode == tf.estimator.ModeKeys.PREDICT:\n logits = self.model(features, 
training=False)\n predictions = {\n \"classes\": tf.argmax(logits, axis=2),\n \"probabilities\": tf.nn.softmax(logits),\n \"logits\": logits\n }\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # mode = `train` or `eval`\n # compute ctc loss\n logits = self.model(features, training=True)\n probs = tf.nn.softmax(logits)\n ctc_input_length = self.compute_length_after_conv(\n max_time_steps=tf.shape(features)[1], ctc_time_steps=tf.shape(probs)[1], input_length=input_length)\n\n loss = tf.reduce_mean(\n self.ctc_loss(label_length=label_length, ctc_input_length=ctc_input_length, labels=labels, logits=logits))\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss)\n\n # train_op for `train` mode.\n # train_op won't be used under 'eval' mode\n global_step = tf.train.get_or_create_global_step()\n minimize_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(loss, global_step=global_step)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n train_op = tf.group(minimize_op, update_ops)\n\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)", "def build_estimator(model_dir, model_type):\n set_tfconfig_environ()\n\n wide_columns, deep_columns, embedding_columns, usm_deep_columns = build_model_columns() #build_model_columns()\n global _GLOBAL_FEATURES\n _GLOBAL_FEATURES = wide_columns + deep_columns + embedding_columns\n hidden_units = [300, 100, 80] #[100, 75, 50, 25] #[400, 200, 100] #[300, 100, 80, 80]\n\n # Create a tf.estimator.RunConfig to ensure the model is run on CPU, which\n # trains faster than GPU for this model.\n run_config = tf.estimator.RunConfig().replace(\n session_config=tf.ConfigProto(device_count={'GPU': 0}),\n save_checkpoints_secs = FLAGS.save_checkpoints_steps, #300\n keep_checkpoint_max = 3,\n model_dir=model_dir)\n\n if FLAGS.pretrain == 'no':\n model = tf.estimator.Estimator(\n model_fn=my_model,\n params={\n 'wide_feature': wide_columns,\n 'deep_feature': deep_columns, # if model_type.find('conv') < 0 else usm_deep_columns,\n 'embedding_feature': embedding_columns,\n 'hidden_units': hidden_units, #FLAGS.hidden_units.split(','),\n 'usm_units': 100,\n 'learning_rate': FLAGS.learning_rate,\n 'dropout_rate': FLAGS.dropout_rate,\n 'model_type': model_type\n },\n config=tf.estimator.RunConfig(model_dir=FLAGS.checkpoints_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps)\n )\n else:\n model = tf.estimator.Estimator(\n model_fn=my_model_pretrain,\n params={\n 'embedding_feature': embedding_columns,\n 'learning_rate': FLAGS.learning_rate,\n 'dropout_rate': FLAGS.dropout_rate\n },\n config=tf.estimator.RunConfig(model_dir=FLAGS.checkpoints_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps)\n )\n\n return model", "def train_and_evaluate(estimator, train_spec, eval_spec):\n\n if not isinstance(estimator, estimator_lib.Estimator):\n raise TypeError('`estimator` must have type `tf.estimator.Estimator`, '\n 'given {}'.format(type(estimator)))\n config = estimator.config\n\n executor = _TrainingExecutor(estimator=estimator, train_spec=train_spec,\n eval_spec=eval_spec)\n\n if (not config.cluster_spec and\n config.task_type != run_config_lib.TaskType.EVALUATOR):\n logging.info('Running training and evaluation locally (non-distributed).')\n return executor.run_local()\n\n # Distributed case.\n if not config.task_type:\n # TODO(xiejw): Improve the error message about how to set the TF_CONFIG\n # correctly.\n raise ValueError(\n '`estimator.config` must have task_type 
set. This usually means '\n 'TF_CONFIG environment is not set correctly.')\n\n if config.task_type == 'local':\n raise ValueError(\n '`task.type` in TF_CONFIG cannot be `local`. Leaving `cluster` and '\n '`task` properties in TF_CONFIG absent triggers train and evaluate '\n '`Estimator` locally (non-distributed).')\n\n # For task type foo, call executor.run_foo.\n available_tasks = [x for x in dir(executor) if x.startswith('run_')\n and x != 'run_local'\n and callable(getattr(executor, x))]\n task_to_run = 'run_' + config.task_type\n if task_to_run not in available_tasks:\n raise ValueError(\n 'Task type {} is not supported. Supported task types are {}'.format(\n config.task_type, [x[len('run_'):] for x in available_tasks]))\n return getattr(executor, task_to_run)()", "def generate_estimator(\n mode_feature_cols_map,\n params,\n config):\n model_fn = generate_model_fn(mode_feature_cols_map)\n\n return tf.estimator.Estimator(\n model_fn,\n model_dir=config.model_dir,\n params=params,\n config=config\n )", "def estimator_spec_for_softmax_classification(logits, labels, mode):\n predicted_classes = tf.argmax(logits, axis=1)\n predicted_probs = tf.nn.softmax(logits, name='softmax_tensor')\n\n predictions = {\n # Holds the raw logit values\n 'logits': logits,\n\n # Holds the class id (0,1) representing the model's prediction of the most\n # likely species for this example.\n 'classes': predicted_classes,\n\n # Holds the probabilities for each prediction\n 'probs': predicted_probs,\n }\n\n # Represents an output of a model that can be served.\n export_outputs = {\n 'output': tf.estimator.export.ClassificationOutput(scores=predicted_probs)\n }\n\n # PREDICT Mode\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n export_outputs=export_outputs\n )\n\n # Calculate loss for both TRAIN and EVAL modes\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n eval_metric_ops = {\n 'accuracy': tf.metrics.accuracy(\n labels=labels, predictions=predicted_classes, name='acc_op'),\n 'auc': tf.metrics.auc(\n labels=labels, predictions=predicted_classes, name='auc_op'),\n }\n\n # Add summary ops to the graph. 
These metrics will be tracked graphed\n # on each checkpoint by TensorBoard.\n tf.summary.scalar('accuracy', eval_metric_ops['accuracy'][1])\n tf.summary.scalar('auc', eval_metric_ops['auc'][1])\n\n # TRAIN Mode\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n logging_hook = tf.train.LoggingTensorHook(\n tensors={'loss': loss}, every_n_iter=50)\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op,\n training_hooks=[logging_hook],\n predictions={'loss': loss},\n export_outputs=export_outputs,\n eval_metric_ops=eval_metric_ops\n )\n\n # EVAL Mode\n assert mode == tf.estimator.ModeKeys.EVAL\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n predictions=predictions,\n eval_metric_ops=eval_metric_ops,\n export_outputs=export_outputs\n )", "def build_estimator(\n data_dir,\n num_gpus,\n variable_strategy,\n run_config,\n hparams,\n use_distortion_for_training=True,\n ws=None,\n):\n\n # Create estimator.\n train_input_fn = functools.partial(\n input_fn,\n data_dir,\n subset=\"train\",\n num_shards=num_gpus,\n batch_size=hparams.train_batch_size,\n use_distortion_for_training=use_distortion_for_training,\n )\n\n eval_input_fn = functools.partial(\n input_fn,\n data_dir,\n subset=\"validation\",\n batch_size=hparams.eval_batch_size,\n num_shards=num_gpus,\n )\n\n # validation: 5000, eval:10000\n num_eval_examples = cifar10.Cifar10DataSet.num_examples_per_epoch(\n \"validation\"\n )\n\n if num_eval_examples % hparams.eval_batch_size != 0:\n raise ValueError(\n \"validation set size must be multiple of eval_batch_size\"\n )\n\n classifier = tf.estimator.Estimator(\n model_fn=get_model_fn(\n num_gpus, variable_strategy, run_config.num_worker_replicas or 1\n ),\n config=run_config,\n params=hparams,\n warm_start_from=ws,\n )\n\n return train_input_fn, eval_input_fn, classifier", "def __init__(self, \n n_estimators = 10, \n n_jobs = -1, \n max_features = None, \n bootstrap = True, \n cpu_classifier = skRF): \n assert hasattr(cpu_classifier, \"fit\"),\\\n \"cpu classifier must support fit method.\"\n assert hasattr(cpu_classifier, \"predict_proba\"),\\\n \"cpu classifier must support predict proba method.\"\n \n self.n_estimators = n_estimators\n self.max_features = max_features\n self.bootstrap = bootstrap\n self._cpu_forests = None\n self._cuda_forest = None\n self._cpu_classifier = cpu_classifier\n \n if n_jobs == -1:\n n_jobs = cpu_count()\n self.n_jobs = n_jobs", "def _build_estimator(config, hidden_units=None, warm_start_from=None):\n real_valued_columns = [\n tf.feature_column.numeric_column(key, shape=())\n for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)\n ]\n categorical_columns = [\n tf.feature_column.categorical_column_with_identity(\n key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)\n for key in _transformed_names(_VOCAB_FEATURE_KEYS)\n ]\n categorical_columns += [\n tf.feature_column.categorical_column_with_identity(\n key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)\n for key in _transformed_names(_BUCKET_FEATURE_KEYS)\n ]\n categorical_columns += [\n tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension\n key,\n num_buckets=num_buckets,\n default_value=0) for key, num_buckets in zip(\n _transformed_names(_CATEGORICAL_FEATURE_KEYS),\n _MAX_CATEGORICAL_FEATURE_VALUES)\n ]\n return tf.estimator.DNNLinearCombinedClassifier(\n config=config,\n 
linear_feature_columns=categorical_columns,\n dnn_feature_columns=real_valued_columns,\n dnn_hidden_units=hidden_units or [100, 70, 50, 25],\n warm_start_from=warm_start_from)", "def build_estimator(model_dir, model_type):\n wide_columns, deep_columns = build_model_columns()\n hidden_units = [100, 75, 50, 25]\n\n # Create a tf.estimator.RunConfig to ensure the model is run on CPU, which\n # trains faster than GPU for this model.\n run_config = tf.estimator.RunConfig().replace(\n session_config=tf.ConfigProto(device_count={'GPU': 0}))\n\n if model_type == 'wide':\n return tf.estimator.LinearClassifier(\n model_dir=model_dir,\n feature_columns=wide_columns,\n config=run_config)\n\n else:\n return tf.estimator.DNNLinearCombinedClassifier(\n model_dir=model_dir,\n linear_feature_columns=wide_columns,\n dnn_hidden_units=hidden_units,\n config=run_config)", "def _make_model(self):\n self._model = tf.estimator.Estimator(model_fn=self.model_fn,\n model_dir=self.model_dir,\n config=self._config,\n params=self._params,\n )", "def build_estimator(config, embedding_size=8, hidden_units=None):\n (time, v1, v2, v3, v4,\n v5, v6, v7, v8, v9, \n v10, v11, v12, v13, v14, \n v15,v16,v17,v18,v19,v20,v21,\n v22,v23,v24,v25,v26,v27,v28, amount) = INPUT_COLUMNS\n \"\"\"Build an estimator.\"\"\"\n \n # Reused Transformations.\n # Continuous columns can be converted to categorical via bucketization\n # We use the (bucketized) amount column in the Wide part\n amount_buckets = tf.feature_column.bucketized_column(amount, boundaries=[4,8,12,15,35,75,100, 200, 300, 1000])\n\n # Wide columns and deep columns.\n wide_columns = [amount_buckets]\n\n # All the other CCF features will be used in the deep part\n deep_columns = [\n time, v1, v2, v3, v4,\n v5, v6, v7, v8, v9, \n v10, v11, v12, v13, v14, \n v15,v16,v17,v18,v19,v20,v21,\n v22,v23,v24,v25,v26,v27,v28\n ]\n \n # We hardcode here the models in order to avoid the exponential decaying model which is already implemented\n hidden_units = [20,15]\n\n # We can try either Wide and Deep models or Deep Neural Networks (DNN)\n #\"\"\"\n return tf.contrib.learn.DNNLinearCombinedClassifier(\n config=config,\n linear_feature_columns=wide_columns,\n dnn_feature_columns=deep_columns,\n dnn_hidden_units=hidden_units or [100, 70, 50, 25],\n dnn_optimizer=tf.train.AdamOptimizer(),\n fix_global_step_increment_bug=True\n )\n\n \"\"\"\n deep_columns = deep_columns + [amount]\n return tf.contrib.learn.DNNClassifier(\n config=config,\n feature_columns=deep_columns,\n hidden_units=hidden_units or [100, 70, 50, 25],\n optimizer=tf.train.AdamOptimizer()\n #optimizer=tf.train.ProximalAdagradOptimizer(\n #learning_rate=0.1,\n #l2_regularization_strength=0.001\n\t #)\n )\n #\"\"\"", "def build_centralized_runner_spec(\n self) -> training_specs.RunnerSpecCentralized:\n\n task_spec = self._task_spec\n\n train_preprocess_fn = _create_preprocess_fn(\n num_epochs=1,\n batch_size=task_spec.batch_size,\n shuffle_buffer_size=task_spec.centralized_shuffle_buffer_size)\n\n train_dataset = train_preprocess_fn(\n client_data_utils.interleave_create_tf_dataset_from_all_clients(\n self._part_train_cd_raw, seed=task_spec.shared_random_seed))\n\n (part_train_eval_fn, part_val_fn, unpart_fn,\n _) = trainer_utils.create_centralized_eval_fns(\n tff_model_builder=self._tff_model_builder,\n metrics_builder=functools.partial(\n _metrics_builder_generic, tff_training=False),\n part_train_eval_cd=self._part_train_eval_cd,\n part_val_cd=self._part_val_cd,\n unpart_cd=self._unpart_cd,\n test_cd=None,\n 
stat_fns=eval_metric_distribution.ALL_STAT_FNS,\n part_clients_per_eval=task_spec.part_clients_per_eval,\n unpart_clients_per_eval=task_spec.unpart_clients_per_eval,\n test_clients_for_eval=task_spec.test_clients_for_eval,\n resample_eval_clients=task_spec.resample_eval_clients,\n eval_clients_random_seed=task_spec.shared_random_seed)\n\n keras_model = self._keras_model_builder()\n keras_model.compile(\n loss=_loss_builder(),\n optimizer=task_spec.optimizer,\n metrics=_metrics_builder_generic(tff_training=False))\n\n return training_specs.RunnerSpecCentralized(\n keras_model=keras_model,\n train_dataset=train_dataset,\n part_train_eval_fn=part_train_eval_fn,\n part_val_fn=part_val_fn,\n unpart_fn=unpart_fn,\n test_fn=None)", "def build_centralized_runner_spec(\n self) -> training_specs.RunnerSpecCentralized:\n\n task_spec = self._task_spec\n\n train_preprocess_fn = _create_preprocess_fn(\n num_epochs=1,\n batch_size=task_spec.batch_size,\n merge_case=self._merge_case,\n shuffle_buffer_size=task_spec.centralized_shuffle_buffer_size)\n\n train_dataset = train_preprocess_fn(\n client_data_utils.interleave_create_tf_dataset_from_all_clients(\n self._part_train_cd_raw, seed=task_spec.shared_random_seed))\n\n (part_train_eval_fn, part_val_fn, unpart_fn,\n _) = trainer_utils.create_centralized_eval_fns(\n tff_model_builder=self._tff_model_builder,\n metrics_builder=functools.partial(\n _metrics_builder_generic, tff_training=False),\n part_train_eval_cd=self._part_train_eval_cd,\n part_val_cd=self._part_val_cd,\n unpart_cd=self._unpart_cd,\n test_cd=None,\n stat_fns=eval_metric_distribution.ALL_STAT_FNS,\n part_clients_per_eval=task_spec.part_clients_per_eval,\n unpart_clients_per_eval=task_spec.unpart_clients_per_eval,\n test_clients_for_eval=task_spec.test_clients_for_eval,\n resample_eval_clients=task_spec.resample_eval_clients,\n eval_clients_random_seed=task_spec.shared_random_seed)\n\n keras_model = self._keras_model_builder()\n keras_model.compile(\n loss=_loss_builder(),\n optimizer=task_spec.optimizer,\n metrics=_metrics_builder_generic(tff_training=False))\n\n return training_specs.RunnerSpecCentralized(\n keras_model=keras_model,\n train_dataset=train_dataset,\n part_train_eval_fn=part_train_eval_fn,\n part_val_fn=part_val_fn,\n unpart_fn=unpart_fn,\n test_fn=None)", "def getInputSpecification(cls):\n specs = super().getInputSpecification()\n specs.description = r\"\"\"The \\xmlNode{OneVsRestClassifier} (\\textit{One-vs-the-rest (OvR) multiclass strategy})\n Also known as one-vs-all, this strategy consists in fitting one classifier per class. For each\n classifier, the class is fitted against all the other classes. In addition to its computational\n efficiency (only n\\_classes classifiers are needed), one advantage of this approach is its\n interpretability. 
Since each class is represented by one and one classifier only, it is\n possible to gain knowledge about the class by inspecting its corresponding classifier.\n This is the most commonly used strategy for multiclass classification and is a fair default choice.\n \\zNormalizationNotPerformed{OneVsRestClassifier}\n \"\"\"\n estimatorInput = InputData.assemblyInputFactory(\"estimator\", contentType=InputTypes.StringType,\n descr=r\"\"\"name of a ROM that can be used as an estimator\"\"\", default='no-default')\n #TODO: Add more inputspecs for estimator\n specs.addSub(estimatorInput)\n\n specs.addSub(InputData.parameterInputFactory(\"n_jobs\", contentType=InputTypes.IntegerType,\n descr=r\"\"\"TThe number of jobs to use for the computation: the n\\_classes one-vs-rest\n problems are computed in parallel. None means 1 unless in a joblib.parallel\\_backend\n context. -1 means using all processors.\"\"\", default=None))\n return specs", "def build_estimator(model_dir, model_type):\r\n wide_columns, deep_columns = build_model_columns()\r\n print(wide_columns)\r\n print(deep_columns)\r\n hidden_units = [256, 128, 64]\r\n\r\n if model_type == 'wide':\r\n return tf.estimator.LinearClassifier(model_dir=model_dir, feature_columns=wide_columns)\r\n elif model_type == 'deep':\r\n return tf.estimator.DNNClassifier(model_dir=model_dir, feature_columns=deep_columns,\r\n hidden_units=hidden_units)\r\n else:\r\n return tf.estimator.DNNLinearCombinedClassifier(model_dir=model_dir,\r\n linear_feature_columns=wide_columns,\r\n dnn_feature_columns=deep_columns,\r\n dnn_hidden_units=hidden_units)", "def build_estimator(config,\n hidden_units=None,\n learning_rate=1e-4,\n num_classes=3):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n return tf.estimator.DNNClassifier(\n optimizer=optimizer,\n config=config,\n feature_columns=get_feature_columns(),\n hidden_units=hidden_units,\n n_classes=num_classes)", "def estimator_spec_for_softmax_classification(\n logits, labels, mode):\n predicted_classes = tf.argmax(logits, 1)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n 'class': predicted_classes,\n 'prob': tf.nn.softmax(logits)\n })\n\n onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)\n loss = tf.losses.softmax_cross_entropy(\n onehot_labels=onehot_labels, logits=logits)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.AdamOptimizer(learning_rate=0.01)\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)\n\n eval_metric_ops = {\n 'accuracy': tf.metrics.accuracy(\n labels=labels, predictions=predicted_classes)\n }\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def get_estimator_fn(num_gpus,\n variable_strategy,\n run_config,\n hparams):\n estimator = tf.estimator.Estimator(\n model_fn=get_model_fn(num_gpus, variable_strategy,\n run_config.num_worker_replicas or 1),\n config=run_config,\n params=hparams)\n\n return estimator", "def nizza_model_fn(self, features, mode, params):\n precomputed = self.precompute(features, mode, params)\n loss = self.compute_loss(features, mode, params, precomputed)\n train_op = tf.contrib.layers.optimize_loss(\n loss=loss,\n global_step=tf.train.get_global_step(),\n optimizer=tf.train.AdamOptimizer,\n learning_rate=params.learning_rate\n )\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op)", "def 
build_estimator(model_dir, model_type):\n wide_columns, deep_columns = build_model_columns()\n hidden_units = [100, 75, 50, 25]\n\n # Create a tf.estimator.RunConfig to ensure the model is run on CPU, which\n # trains faster than GPU for this model.\n run_config = tf.estimator.RunConfig().replace(\n session_config=tf.ConfigProto(device_count={'GPU': 0}))\n\n if model_type == 'wide':\n return tf.estimator.LinearClassifier(\n model_dir=model_dir,\n feature_columns=wide_columns,\n config=run_config\n )\n elif model_type == 'deep':\n return tf.estimator.DNNClassifier(\n model_dir=model_dir,\n feature_columns=deep_columns,\n hidden_units=hidden_units,\n config=run_config)\n else:\n return tf.estimator.DNNLinearCombinedClassifier(\n model_dir=model_dir,\n linear_feature_columns=wide_columns,\n dnn_feature_columns=deep_columns,\n dnn_hidden_units=hidden_units,\n config=run_config)", "def __new__(cls,\n input_fn,\n max_steps=None,\n hooks=None):\n # Validate input_fn.\n _validate_input_fn(input_fn)\n\n # Validate max_steps.\n if max_steps is not None and max_steps <= 0:\n raise ValueError(\n 'Must specify max_steps > 0, given: {}'.format(max_steps))\n\n # Validate hooks.\n hooks = _validate_hooks(hooks)\n\n return super(TrainSpec, cls).__new__(\n cls,\n input_fn=input_fn,\n max_steps=max_steps,\n hooks=hooks)", "def build_estimator(model_dir, model_type):\n wide_columns, deep_columns = build_model_columns()\n hidden_units = [100, 75, 50, 25]\n\n # Create a tf.estimator.RunConfig to ensure the model is run on CPU, which\n # trains faster than GPU for this model.\n run_config = tf.estimator.RunConfig().replace(\n session_config=tf.ConfigProto(device_count={'GPU': 0}))\n\n if model_type == 'wide':\n return tf.estimator.LinearClassifier(\n model_dir=model_dir,\n feature_columns=wide_columns,\n config=run_config)\n elif model_type == 'deep':\n return tf.estimator.DNNClassifier(\n model_dir=model_dir,\n feature_columns=deep_columns,\n hidden_units=hidden_units,\n config=run_config)\n else:\n return tf.estimator.DNNLinearCombinedClassifier(\n model_dir=model_dir,\n linear_feature_columns=wide_columns,\n dnn_feature_columns=deep_columns,\n dnn_hidden_units=hidden_units,\n config=run_config)", "def __init__(self, pipeline=PIPELINE, name=\"fake_estimator\"):\n super().__init__(pipeline=pipeline, name=name)", "def _make_estimator(self, append=True, random_state=None):\n estimator = clone(self.base_estimator_)\n estimator.set_params(**dict((p, getattr(self, p))\n for p in self.estimator_params))\n # print estimator.get_params()\n\n if random_state is not None:\n _set_random_states(estimator, random_state)\n\n if append:\n self.estimators_.append(estimator)\n\n return estimator" ]
[ "0.653255", "0.6415198", "0.62002885", "0.61464673", "0.60211784", "0.59734035", "0.5882333", "0.58066183", "0.57011044", "0.5623417", "0.55480283", "0.55394053", "0.5519858", "0.5499919", "0.54941463", "0.54804045", "0.5472486", "0.5453627", "0.5434046", "0.5416209", "0.53727245", "0.5356063", "0.53522116", "0.5349217", "0.53337383", "0.53221256", "0.5317503", "0.5313736", "0.5288922", "0.52157533" ]
0.7923026
0
Log an infeed or outfeed error. This logs a short error message immediately, and schedules a timer to emit the full stack trace and error message after a short period of time. If the main session has terminated by the time the timer triggers, we assume the real source of the error was from the main session and avoid emitting a stack trace for the infeed.
def _log_error(self, session, error): logging.warning( '\n\n' 'Error occurred during infeed/outfeed. This may be due to a compile ' 'error in the main session. Waiting for a short time for the main ' 'session to come back.\n\n%s', error) self._feed_error = traceback.format_exc() # If we've already encountered a feed error, don't schedule another # cancellation op. if self._session_cancel_timer: return def _cancel_session(): # Close the session to avoid the main thread from hanging. If input # pipeline triggers any error, the infeed thread dies but the main thread # for TPU computation waits for the infeed enqueue forever. Close the # Session to cancel the main thread Session.run execution. # # We sleep for a few seconds before closing to give some time # for the TPU compilation error, if any, propagating, from TPU to CPU # host. Compilation errors should be reported by the main thread so that # the program can be interrupted and users can take action. Due to a race # condition, the infeed thread might see an error first. Closing the # session here immediately would result in a session cancellation # exception in the main thread, instead of the expected compile error. # User code that depends on having the proper exception type will # therefore be confused. time.sleep(5) # If the main session is still running, the infeed/outfeed errors are # legitimate, and should be logged. if not self._finished and self._feed_error: logging.error('Feed error: %s', self._feed_error) logging.error('Closing session. A RuntimeError should follow.') session.close() self._session_cancel_timer = threading.Thread(target=_cancel_session) self._session_cancel_timer.daemon = True self._session_cancel_timer.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logError(self, text):\n time = datetime.now().strftime(\"%H:%M:%S \")\n self.log(time + \"(ERR):\\t\", text)", "def error(self, *args):\n\n self.log(\"ERROR:\", args)\n if not self.transport.connected:\n Timer(5, connect(self.host, self.port)).register(self)", "def err(message):\n\n timestamp = format_time(get_time())\n message = '{} - [ERROR] - {}'.format(timestamp, message)\n _log_status(message)", "def _log_err_msg(self, message):\n current_time = time.time()\n if current_time - self._last_warning_time > 600:\n logging.warning(message)\n self._last_warning_time = current_time", "def log_error(self,msg):\r\n t = time.strftime(\"%a %d/%b/%Y %H:%M:%S\", time.gmtime())\r\n logfile = open(self.log_file, \"a\")\r\n logfile.write(\"Error at %s \"% t)\r\n logfile.write(msg)\r\n traceback.print_exc(file=logfile)\r\n logfile.write( \"---------------------\\n\")\r\n logfile.close()", "def log_error(self, fmt, *args):\r\n pass\r\n # log_error\r", "def error(self, *args):\n\n if self.is_on(_Log.ERROR):\n self._write(self._err, *args)", "def error(ctx, flow):\n ctx.log(\"error\")", "def error(self, msg='', context='', severity=logging.INFO, traceback=False):\r\n if traceback:\r\n msg += _cperror.format_exc()\r\n self.error_log.log(severity, ' '.join((self.time(), context, msg)))", "def error_log(self, msg='', level=20, traceback=False):\n cherrypy.engine.log(msg, level, traceback)", "def log_error(err):\n print(err)", "def error():\n logging.error(\"ERROR\")\n print('ERROR')", "def log_error(self, message):\n # log the datetime+message to error_log.txt\n curr_time = datetime.datetime.now().strftime(\"%H:%M:%S \"\n \"%Y-%m-%d\")\n with open(ERROR_FILE_PATH, \"a+\") as error_file:\n error_file.write(\"{} $ {}\\n\".format(curr_time, message))", "def log_poll_problem(self, msg):\n now = time.time()\n if now - self.last_logged_error >= LOG_ERRORS_EVERY:\n self.last_logged_error = now\n logging.exception(msg)", "def log_error(self, message):\n self.logger.error(RED_RESET.format(thing=message))\n return", "def error(msg):\n log('ERROR', msg)", "def log_error(task_request, message):\n _log(logger.error, task_request, message)", "def error_handler(e):\n logging.error('error_handler for socketio. An error has occurred: ' + str(e))", "def log_error(error_message, no_exit=False):\n log(f\"error: \")\n if not no_exit:\n exit()", "def log_new_error(*args, **kwargs):\n logging.error(*args, **kwargs)", "def error(message):\n global LAST_LOG\n LAST_LOG = message\n cprint('\\r[ERR] {0}'.format(message), 'red', file=sys.stderr)", "def _log_error(self, event, err, **kwargs):\n self.context.logger.error(\n f\"step {self.name} got error {err} when processing an event:\\n {event.body}\"\n )\n message = traceback.format_exc()\n self.context.logger.error(message)\n self.context.push_error(\n event, f\"{err}\\n{message}\", source=self.fullname, **kwargs\n )", "def log_error(e):\n\tprint(e)", "def log_error(e):\n\tprint(e)", "def queue_error(action, error_message):\n global ERRORS\n print(\"Error while {}: {}\".format(action, error_message))\n ERRORS[action] = error_message", "def error_traceback():\n Logger.log('ERROR', traceback.format_exc())", "def logError(e):\r\n print(e)", "def error(self, *args):\n self.mylog.error(*args)", "def error(self, tag, message, exc_info=False):\n \n self.log(logging.error,tag, message, exc_info)", "def error(self, *args, **kwargs):\n\n message = self.get_message(*args, **kwargs)\n self.logger.error(message)" ]
[ "0.6038217", "0.5916494", "0.5900259", "0.55589175", "0.5555639", "0.5525812", "0.55219215", "0.5496659", "0.54817575", "0.5462014", "0.5454981", "0.54069924", "0.5402037", "0.53926015", "0.5391754", "0.5385907", "0.5369441", "0.53470963", "0.5342299", "0.5338065", "0.53171074", "0.5285808", "0.5268173", "0.5268173", "0.5256498", "0.525572", "0.5247627", "0.5237166", "0.5230698", "0.5221245" ]
0.736192
0
A fn that returns enqueue_ops.
def enqueue_ops_fn(): num_cores_per_host = ctx.num_of_cores_per_host per_host_sharded_inputs = [] for core_ordinal in range(num_cores_per_host): with ops.name_scope('ordinal_%d' % (core_ordinal)): inputs = _Inputs.from_input_fn(input_fn()) if inputs.is_dataset: raise TypeError( '`input_fn` returning `Dataset` is not yet supported in ' 'per-Core input pipeline deployment yet. Please set ' 'TPUConfig.per_host_input_for_training to True or return ' '`features` and `labels` from `input_fn`') features, labels = inputs.features_and_labels() inputs_structure_recorder.validate_and_record_structure( features, labels) flattened_inputs = ( inputs_structure_recorder.flatten_features_and_labels( features, labels)) per_host_sharded_inputs.append(flattened_inputs) infeed_queue = tpu_feed.InfeedQueue( number_of_tuple_elements=len(per_host_sharded_inputs[0])) captured_infeed_queue.capture(infeed_queue) infeed_queue.set_configuration_from_sharded_input_tensors( per_host_sharded_inputs) per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function) return per_host_enqueue_ops
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enqueue_ops_fn():\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def enqueue(self, func):\n self.queue.put(func)", "def enqueue(tup):", "def create_enqueue_op(self):\n if not self._names:\n return []\n\n tensors = []\n # TODO(jhseu): Consider deduping tensors.\n for name in self._names:\n tensors.extend(self._tensors[name])\n\n with ops.device(tpu.core(0)):\n return [tpu_ops.outfeed_enqueue_tuple(tensors)]", "def get_fn_enqueue_op(tf_fn_phd):\n raw_im_ = tf.read_file(tf_fn_phd)\n im_ = tf.image.decode_jpeg(raw_im_)\n r_im_ = tf.image.resize_images(im_, IM_H, IM_W)\n enq_op = tf_fn_q.enqueue([tf.to_float(r_im_)])\n return enq_op", "def enqueue(self, fn):\n self.queue.put(fn)", "def Enqueue(\r\n funcs: List[Callable[[], None]],\r\n ) -> List[Future]:\r\n raise Exception(\"Abstract method\") # pragma: no cover\r", "def Enqueue(\r\n funcs: List[Callable[[], None]],\r\n ) -> List[Future]:\r\n raise Exception(\"Abstract method\") # pragma: no cover\r", "def enqueue(self,value):\n pass", "def enqueue(self, f, *args, **kwargs):\n if not isinstance(f, string_types) and f.__module__ == '__main__':\n raise ValueError('Functions from the __main__ module cannot be processed '\n 'by workers')\n\n # Detect explicit invocations, i.e. of the form:\n # q.enqueue(foo, args=(1, 2), kwargs={'a': 1}, timeout=30)\n timeout = kwargs.pop('timeout', None)\n description = kwargs.pop('description', None)\n result_ttl = kwargs.pop('result_ttl', None)\n ttl = kwargs.pop('ttl', None)\n depends_on = kwargs.pop('depends_on', None)\n at_front = kwargs.pop('at_front', False)\n meta = kwargs.pop('meta', None)\n\n if 'args' in kwargs or 'kwargs' in kwargs:\n assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs' # noqa\n args = kwargs.pop('args', None)\n kwargs = kwargs.pop('kwargs', None)\n\n return self.enqueue_call(func=f, args=args, kwargs=kwargs,\n timeout=timeout, result_ttl=result_ttl, ttl=ttl,\n description=description, depends_on=depends_on,\n at_front=at_front, meta=meta)", "def enqueue(self, f, *args, **kwargs):\n if not isinstance(f, basestring) and f.__module__ == '__main__':\n raise ValueError(\n 'Functions from the __main__ module cannot be processed '\n 'by workers.')\n\n # Detect explicit invocations, i.e. 
of the form:\n # q.enqueue(foo, args=(1, 2), kwargs={'a': 1}, timeout=30)\n timeout = None\n result_ttl = None\n if 'args' in kwargs or 'kwargs' in kwargs:\n assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs.' # noqa\n timeout = kwargs.pop('timeout', None)\n args = kwargs.pop('args', None)\n result_ttl = kwargs.pop('result_ttl', None)\n kwargs = kwargs.pop('kwargs', None)\n\n job = yield self.enqueue_call(func=f, args=args, kwargs=kwargs,\n timeout=timeout, result_ttl=result_ttl)\n defer.returnValue(job)", "def enqueue(Q, x):\n # Q.append(x)\n Q.put_nowait(x)\n if debug: \n print(\"enqueue\", x, \":\", end=\" \")\n show_queue(Q)\n return Q", "def wrapper(sources, dtypes, capacity, num_threads):\n # enqueue function\n def enqueue_func(sess, op):\n # read data from source queue\n data = func(sess.run(sources))\n # create feeder dict\n feed_dict = {}\n for ph, col in zip(placeholders, data):\n feed_dict[ph] = col\n # run session\n sess.run(op, feed_dict=feed_dict)\n\n # create place holder list\n placeholders = []\n for dtype in dtypes:\n placeholders.append(tf.placeholder(dtype=dtype))\n\n # create FIFO queue\n queue = tf.FIFOQueue(capacity, dtypes=dtypes)\n\n # enqueue operation\n enqueue_op = queue.enqueue(placeholders)\n\n # create queue runner\n runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * num_threads)\n\n # register to global collection\n tf.train.add_queue_runner(runner)\n\n # return de-queue operation\n return queue.dequeue()", "def queue_wrapper(result_queue, wid,\n func, args):\n result_queue.put((wid, func(*args)))", "def enqueue(self,e):", "def generate_infeed_enqueue_ops_and_dequeue_fn(self):\n # While tf.while_loop is called, the body function, which invokes\n # `enqueue_fn` passed in, is called to construct the graph. So, input_fn\n # structure is recorded.\n enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (\n self._invoke_input_fn_and_record_structure())\n\n self._validate_input_pipeline()\n\n def dequeue_fn():\n \"\"\"dequeue_fn is used by TPU to retrieve the tensors.\"\"\"\n # In the model-parallel case, both the host-side and device-side\n # computations must agree on the core on which infeed takes place. We\n # choose to perform infeed on logical core 0 of each replica.\n values = self._infeed_queue.generate_dequeue_op(tpu_device=0)\n # The unflatten process uses the structure information recorded above.\n return self._inputs_structure_recorder.unflatten_features_and_labels(\n values)\n\n return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)", "def enqueue(self, name):\n pass", "def enqueue(self, xyz):\n command = 'enqueue ' + str(xyz)\n self.run_command(command)", "def producer_func(func):\n @wraps(func)\n def wrapper(sources, dtypes, capacity, num_threads):\n r\"\"\"Manages arguments of `tf.sg_.\n\n Args:\n **kwargs:\n source: A source queue list to enqueue\n dtypes: Data types of each tensor\n capacity: Queue capacity. Default is 32.\n num_threads: Number of threads. 
Default is 1.\n \"\"\"\n # enqueue function\n def enqueue_func(sess, op):\n # read data from source queue\n data = func(sess.run(sources))\n # create feeder dict\n feed_dict = {}\n for ph, col in zip(placeholders, data):\n feed_dict[ph] = col\n # run session\n sess.run(op, feed_dict=feed_dict)\n\n # create place holder list\n placeholders = []\n for dtype in dtypes:\n placeholders.append(tf.placeholder(dtype=dtype))\n\n # create FIFO queue\n queue = tf.FIFOQueue(capacity, dtypes=dtypes)\n\n # enqueue operation\n enqueue_op = queue.enqueue(placeholders)\n\n # create queue runner\n runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * num_threads)\n\n # register to global collection\n tf.train.add_queue_runner(runner)\n\n # return de-queue operation\n return queue.dequeue()\n\n return wrapper", "def enqueue(self, x):\r\n self.queue.append(x)\r\n return self.queue", "def runQueueEnqueue(self):\n raise NotImplementedError", "def enqueue(self):\n # TensorFlow Input Pipelines for Large Data Sets\n # ischlag.github.io\n # http://ischlag.github.io/2016/11/07/tensorflow-input-pipeline-for-large-datasets/\n # http://web.stanford.edu/class/cs20si/lectures/slides_09.pdf\n under = 0\n max = len(self.train_x)\n try:\n while not self.coord.should_stop():\n # print(\"starting to write into queue\")\n upper = under + self.capacity\n # print(\"try to enqueue \", under, \" to \", upper)\n if upper <= max:\n curr_x = self.train_x[under:upper]\n curr_t = self.train_t[under:upper]\n curr_e = self.train_e[under:upper]\n under = upper\n else:\n rest = upper - max\n curr_x = np.concatenate((self.train_x[under:max], self.train_x[0:rest]))\n curr_t = np.concatenate((self.train_t[under:max], self.train_t[0:rest]))\n curr_e = np.concatenate((self.train_e[under:max], self.train_e[0:rest]))\n under = rest\n\n self.session.run(self.enqueue_op,\n feed_dict={self.x: curr_x, self.t: curr_t, self.e: curr_e})\n except tf.errors.CancelledError:\n print(\"finished enqueueing\")", "def enqueue(self, value): ################# <-\n self.lst = self.lst +[value]", "def queue(self, func, *args, **kwargs):\n return self.event_queue.put((func, args, kwargs))", "def _process_run(queue: Queue, func: Callable[[Any], Any] = None,\n *args, **kwargs):\n queue.put(func(*args, **kwargs))", "def small_queue():\n queue = Queue()\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n queue.enqueue(4)\n return queue", "def apply(self, external_callable, *args, **kwargs):\n self.work_request_queue.put((external_callable, args, kwargs))\n return self.result_queue.get()", "def putting_on_queue(*args):\n results.put(main_func(*args))", "def rec_from_queue( self, ):\r\n try:\r\n action, function, function_args = self.queue_fr_helper.get_nowait()\r\n except queue.Empty:\r\n action = \"\"\r\n function = None\r\n function_args = None\r\n\r\n return ( action, function, function_args )", "def enqueue(elem: Any, priority: int = 0) -> None:\n\tglobal queue\n\tqueue.append((priority, elem))\n\treturn None" ]
[ "0.6918618", "0.67096776", "0.66286814", "0.6531821", "0.64277637", "0.61345917", "0.6119504", "0.6111642", "0.6108845", "0.61017036", "0.6042078", "0.6023918", "0.60035884", "0.59705424", "0.58645797", "0.58212256", "0.58058554", "0.57611626", "0.56956524", "0.56872046", "0.56363773", "0.5610259", "0.557443", "0.55460435", "0.55205214", "0.5484186", "0.54664946", "0.5440276", "0.54342026", "0.54176265" ]
0.6903641
1
Generates the per_host enqueue ops.
def enqueue_ops_fn(): control_deps = [] per_host_sharded_inputs = [] num_replicas_per_host = ctx.num_of_replicas_per_host with ops.device(device): if not inputs.is_dataset: raise TypeError('`input_fn` must return a `Dataset` for this mode.') for _ in range(num_replicas_per_host): # Use control dependencies to ensure a deterministic ordering. with ops.control_dependencies(control_deps): features, labels = inputs.features_and_labels() # Calls get_next() inputs_structure_recorder.validate_and_record_structure( features, labels) flattened_inputs = ( inputs_structure_recorder.flatten_features_and_labels( features, labels)) control_deps.extend(flattened_inputs) per_host_sharded_inputs.append(flattened_inputs) infeed_queue = tpu_feed.InfeedQueue( number_of_tuple_elements=len(per_host_sharded_inputs[0])) captured_infeed_queue.capture(infeed_queue) infeed_queue.set_configuration_from_sharded_input_tensors( per_host_sharded_inputs) per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function) return per_host_enqueue_ops
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,\n inputs_structure_recorder):\n captured_infeed_queue = _CapturedObject()\n\n def enqueue_ops_fn():\n \"\"\"A fn returns enqueue_ops.\"\"\"\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue", "def enqueue_ops_fn():\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. 
Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def generate_per_host_v2_enqueue_ops_fn_for_host(\n ctx, input_fn, inputs_structure_recorder, device, host_id):\n del host_id # unused\n captured_infeed_queue = _CapturedObject()\n hooks = []\n\n with ops.device(device):\n inputs = _Inputs.from_input_fn(input_fn())\n\n is_dataset = inputs.is_dataset\n if not is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '\n 'input pipeline configuration.')\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n # TODO(b/XXX): Add predict support for PER_HOST_V2\n raise TypeError('Most PREDICT not yet supported in PER_HOST_V2 mode.')\n\n hooks.append(inputs.dataset_initializer_hook())\n\n def enqueue_ops_fn():\n \"\"\"Generates the per_host enqueue ops.\"\"\"\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset", "def generate_per_host_enqueue_ops_fn_for_host(\n ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):\n captured_infeed_queue = _CapturedObject()\n\n hooks = []\n\n with ops.device(device):\n inputs = _Inputs.from_input_fn(input_fn())\n\n is_dataset = inputs.is_dataset\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n if not is_dataset:\n raise TypeError(\n 'For mode PREDICT, `input_fn` must return `Dataset` instead of '\n '`features` and `labels`.')\n if batch_axis is not None:\n raise TypeError('For mode PREDICT, batch_axis is not supported yet.')\n inputs = _InputsWithStoppingSignals(\n dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,\n add_padding=True)\n\n if is_dataset:\n hooks.append(inputs.dataset_initializer_hook())\n\n # TODO(ylc): Refactoring the code to 
merge the tpu ordinal logic here and the\n # _TPUContext.tpu_ordinal_function. We should either introduce another\n # abstraction or a different helper method.\n def _tpu_ordinal_function_impl(shard_index_in_host):\n # We put both enqueue/dequeue op at tpu.core(0) in each replica.\n replica = ctx.device_assignment.lookup_replicas(\n host_id, (0, 0, 0))[shard_index_in_host]\n return ctx.device_assignment.tpu_ordinal(replica=replica)\n\n if ctx.model_parallelism_enabled:\n tpu_ordinal_function = _tpu_ordinal_function_impl\n else:\n tpu_ordinal_function = None\n\n def enqueue_ops_fn():\n with ops.device(device):\n num_of_replicas_per_host = ctx.num_of_replicas_per_host\n # Convert user input to features and labels. If the user returns a\n # dataset, it is initialized and the features and labels extracted via\n # `dataset.iterator.get_next()`\n features, labels = inputs.features_and_labels()\n signals = inputs.signals()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels, signals)\n unsharded_tensor_list = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels, signals))\n\n infeed_queue = tpu_feed.InfeedQueue(\n tuple_types=[t.dtype for t in unsharded_tensor_list],\n tuple_shapes=[t.shape for t in unsharded_tensor_list],\n shard_dimensions=batch_axis)\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_number_of_shards(num_of_replicas_per_host)\n per_host_enqueue_ops = (\n infeed_queue.split_inputs_and_generate_enqueue_ops(\n unsharded_tensor_list,\n placement_function=lambda x: device,\n tpu_ordinal_function=tpu_ordinal_function))\n if signals is None:\n return per_host_enqueue_ops\n else:\n return {\n 'ops': per_host_enqueue_ops,\n 'signals': signals,\n }\n\n return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset", "def create_tpu_hostcall(self):\n if not self._names:\n return []\n\n ret = {}\n # For each i, dequeue_ops[i] is a list containing the tensors from all\n # shards. This list is concatenated later.\n dequeue_ops = []\n tensor_dtypes = []\n tensor_shapes = []\n for name in self._names:\n for _ in self._tensors[name]:\n dequeue_ops.append([])\n for dtype in self._tensor_dtypes[name]:\n tensor_dtypes.append(dtype)\n for shape in self._tensor_shapes[name]:\n tensor_shapes.append(shape)\n\n # Outfeed ops execute on each replica's first logical core. Note: we must\n # constraint it such that we have at most one outfeed dequeue and enqueue\n # per replica.\n tpu_device_placement_fn = self._ctx.tpu_device_placement_function\n for i in xrange(self._ctx.num_replicas):\n with ops.device(tpu_device_placement_fn(i)):\n outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(\n dtypes=tensor_dtypes, shapes=tensor_shapes)\n for j, item in enumerate(outfeed_tensors):\n dequeue_ops[j].append(item)\n\n # Deconstruct dequeue ops.\n dequeue_ops_by_name = {}\n pos = 0\n for name in self._names:\n dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])]\n pos += len(self._tensors[name])\n\n # It is assumed evaluation always happens on single host TPU system. 
So,\n # place all ops on tpu host if possible.\n #\n # TODO(jhseu): Evaluate whether this is right for summaries.\n with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):\n for name in self._names:\n dequeue_ops = dequeue_ops_by_name[name]\n for i, item in enumerate(dequeue_ops):\n if dequeue_ops[i][0].shape.ndims == 0:\n raise RuntimeError(\n 'All tensors outfed from TPU should preserve batch size '\n 'dimension, but got scalar {}'.format(dequeue_ops[i][0]))\n # TODO(xiejw): Allow users to specify the axis for batch size\n # dimension.\n dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)\n\n if self._tensor_keys[name] is not None:\n # The user-provided eval_metrics[1] is a dict.\n dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))\n try:\n ret[name] = self._host_fns[name](**dequeue_ops)\n except TypeError as e:\n logging.warning(\n 'Exception while calling %s: %s. It is likely the tensors '\n '(%s[1]) do not match the '\n 'function\\'s arguments', name, e, name)\n raise e\n else:\n ret[name] = self._host_fns[name](*dequeue_ops)\n\n return ret", "def create_enqueue_op(self):\n if not self._names:\n return []\n\n tensors = []\n # TODO(jhseu): Consider deduping tensors.\n for name in self._names:\n tensors.extend(self._tensors[name])\n\n with ops.device(tpu.core(0)):\n return [tpu_ops.outfeed_enqueue_tuple(tensors)]", "def thread_main(self, sess):\n for x_h, x_m, y in self.data_iterator():\n sess.run(self.enqueue_op, feed_dict={self.x_h:x_h, self.x_m:x_m})", "def thread_main(self, sess):\n for x_h, x_m, y in self.data_iterator():\n sess.run(self.enqueue_op, feed_dict={self.x_h:x_h, self.x_m:x_m, self.y:y})", "def enqueue(self, xyz):\n command = 'enqueue ' + str(xyz)\n self.run_command(command)", "def enqueue(self):\n # TensorFlow Input Pipelines for Large Data Sets\n # ischlag.github.io\n # http://ischlag.github.io/2016/11/07/tensorflow-input-pipeline-for-large-datasets/\n # http://web.stanford.edu/class/cs20si/lectures/slides_09.pdf\n under = 0\n max = len(self.train_x)\n try:\n while not self.coord.should_stop():\n # print(\"starting to write into queue\")\n upper = under + self.capacity\n # print(\"try to enqueue \", under, \" to \", upper)\n if upper <= max:\n curr_x = self.train_x[under:upper]\n curr_t = self.train_t[under:upper]\n curr_e = self.train_e[under:upper]\n under = upper\n else:\n rest = upper - max\n curr_x = np.concatenate((self.train_x[under:max], self.train_x[0:rest]))\n curr_t = np.concatenate((self.train_t[under:max], self.train_t[0:rest]))\n curr_e = np.concatenate((self.train_e[under:max], self.train_e[0:rest]))\n under = rest\n\n self.session.run(self.enqueue_op,\n feed_dict={self.x: curr_x, self.t: curr_t, self.e: curr_e})\n except tf.errors.CancelledError:\n print(\"finished enqueueing\")", "def producer(networks, q):\n host_count = 0\n for network in networks:\n LOG.debug(f\"Producer: working on network {network} queue[current size = {q.qsize()}] {time.strftime('%H:%M:%S')}\")\n num_hosts = len(list(network.hosts()))\n # Select first 5 hosts add to queue if num_hosts > 10 else add them all\n if num_hosts > 10:\n hosts = list(network.hosts())[:5]\n for host in hosts:\n q.put(host)\n host_count += 1\n else:\n hosts = list(network.hosts())\n for host in hosts:\n q.put(host)\n host_count += 1\n # Select last 5 hosts add to queue\n if num_hosts > 10:\n hosts = list(network.hosts())[-5:]\n for host in hosts:\n q.put(host)\n host_count += 1\n # Select 10% of the rest of the hosts add to queue\n if num_hosts > 10:\n sample_hosts_len = 
network.size() // 10\n hosts = random.sample(list(network.hosts())[5:-5], sample_hosts_len)\n for host in hosts:\n q.put(host)\n host_count += 1\n return host_count", "async def dispatch_auto_starts(self, ctx):\n for operation in self.config.dataflow.operations.values():\n if operation.inputs or not await self.ictx.check_conditions(\n operation, self.config.dataflow, ctx\n ):\n continue\n parameter_set = MemoryParameterSet(\n MemoryParameterSetConfig(ctx=ctx, parameters=[])\n )\n task = await self.nctx.dispatch(self, operation, parameter_set)\n task.operation = operation\n task.parameter_set = parameter_set\n yield task", "def enqueue(tup):", "def worker(self):\n while True: # Feed forever. Enqueue will block when queue is full.\n while len(self.memory) < self.min_memory:\n time.sleep(1)\n batch = self.memory.sample(self.batchsize)\n states, actions, rewards, terminals = zip(*batch)\n self.session.run(self.enqueue_op, {\n self.states: states, self.actions: actions,\n self.rewards: rewards, self.terminals: terminals,\n })", "def _GetHostTrainLoop(\n self, strategy: tf.distribute.TPUStrategy\n ) -> Callable[..., Any]:\n replicas_per_host = strategy.extended.num_replicas_per_host\n\n def Split(batch, replicas_per_host, axis=0):\n \"\"\"Splits a NestedMap into replicas_per_host pieces.\"\"\"\n def _SplitFn(t):\n return tf.sparse.split if isinstance(t, tf.SparseTensor) else tf.split\n\n split = batch.Transform(lambda t: _SplitFn(t)(t, replicas_per_host, axis))\n return [\n nest.map_structure_up_to(batch, lambda t: t[i], split) # pylint: disable=cell-var-from-loop\n for i in range(replicas_per_host)\n ]\n\n def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n \"\"\"Fetch and shard one batch per attached device.\"\"\"\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )\n\n def _Step(batch: py_utils.NestedMap):\n \"\"\"A single forward/backward step.\n\n Processes the given input batch and updates the distributed metrics\n accumulator. 
We use FProp (instead of FPropDefaultTheta) and\n _BPropForVariables (instead of BProp) in order to permit the tf.distribute\n library to handle threading values across devices.\n\n Args:\n batch: NestedMap of input batch data.\n \"\"\"\n with tf.name_scope('tpu_train'):\n with py_utils.GradientTape(persistent=True):\n batch.Update(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Dequeue(batch)\n )\n metrics_dict, _ = self.task.FPropDefaultTheta(batch)\n # py_utils.ComputeGradientsSimple() needs to access the tape, so BProp\n # needs to be within the GradientTape context.\n self.task.BProp()\n\n self._metrics_dict_structure = metrics_dict\n self._metrics_mgr.AccumulateStepMetrics(metrics_dict)\n\n @tf.function\n def _TpuFunction():\n \"\"\"Runs several training steps and returns a flattened metrics list.\"\"\"\n self._metrics_mgr.ResetState()\n\n for _ in tf.range(self._steps_per_loop):\n batch = _GetShardedBatch()\n # Note: running the enqueue in strategy.run() could potentially cause\n # deadlock and cause the job to hang. Here we run it outside.\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Enqueue(batch)\n strategy.run(_Step, args=(batch,))\n\n return self._metrics_mgr.FinalizeMetricsWithStructure(\n self._metrics_dict_structure\n )\n\n # Trace the train function so it can create the optimizer slot vars and save\n # them at step 0.\n return _TpuFunction.get_concrete_function()", "def enqueue(self,value):\n pass", "def run(self, host):\n\n # Trying to connect to given host.\n while self.csrftoken == None:\n try:\n self.client.get(self.url)\n self.csrftoken = self.client.cookies['csrftoken']\n self.cookies = dict(self.client.cookies)\n except:\n pass\n\n while True:\n if self.queue.empty():\n self.sending = False\n time.sleep(1)\n else:\n self.sending = True\n\n # sending operations when queue reached batching size\n # after about 5 seconds send anyway...\n qsize = self.queue.qsize()\n if qsize < BATCH_SIZE and qsize != self.old_qsize:\n time.sleep(1)\n self.old_qsize = qsize\n elif qsize < BATCH_SIZE and qsize == self.old_qsize and self.count < 5:\n time.sleep(1)\n self.count += 1\n else:\n data_list = []\n while not self.queue.empty():\n op = self.queue.get()\n data_list.append(op)\n # just for testing\n # shuffle(data_list)\n data_dict = dict(list=json.dumps(data_list)) \n\n while True:\n try:\n self.send_post(data_dict)\n break\n except requests.exceptions.RequestException:\n time.sleep(2)\n print \"[THREAD \" + str(host['id']) + \"] Can't reach host \" + str(host['port'])\n continue\n\n self.count = 0", "def enqueue(self, name):\n pass", "def enqueue(self, cmd) -> None:\n self.general_queue.append(cmd)", "def runQueueEnqueue(self):\n raise NotImplementedError", "def wrapper(sources, dtypes, capacity, num_threads):\n # enqueue function\n def enqueue_func(sess, op):\n # read data from source queue\n data = func(sess.run(sources))\n # create feeder dict\n feed_dict = {}\n for ph, col in zip(placeholders, data):\n feed_dict[ph] = col\n # run session\n sess.run(op, feed_dict=feed_dict)\n\n # create place holder list\n placeholders = []\n for dtype in dtypes:\n placeholders.append(tf.placeholder(dtype=dtype))\n\n # create FIFO queue\n queue = tf.FIFOQueue(capacity, dtypes=dtypes)\n\n # enqueue operation\n enqueue_op = queue.enqueue(placeholders)\n\n # create queue runner\n runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * num_threads)\n\n # register to global collection\n tf.train.add_queue_runner(runner)\n\n # return de-queue operation\n return queue.dequeue()", "def 
push(host):\n dispatcher = Dispatch(host)\n\n post(host)\n\n context = zmq.Context()\n zmq_socket = context.socket(zmq.PUSH)\n zmq_socket.bind('tcp://127.0.0.1:5560')\n\n for record in dispatcher:\n zmq_socket.send_pyobj((int(time.time()),record.raw))", "def toQueue(data):\n\n for host in settings.OTHER_HOSTS:\n settings.SENDER[host['id']].queue.put(dict(**data))", "def restructure_host_cpu_data(host):\n init_cpu_counts(host)\n host.sockets = len(host.nodes or [])\n host.hyperthreading = False\n host.physical_cores = 0\n if not host.cpus:\n return\n host.cpu_model = host.cpus[0].cpu_model\n cpu_list = sorted(host.cpus, key=_sort_by_coreid)\n for cpu in cpu_list:\n inode = pecan.request.dbapi.inode_get(inode_id=cpu.forinodeid)\n cpu.numa_node = inode.numa_node\n if cpu.thread == 0:\n host.physical_cores += 1\n elif cpu.thread > 0:\n host.hyperthreading = True\n function = cpu.allocated_function or get_default_function(host)\n host.cpu_functions[cpu.numa_node][function].append(int(cpu.cpu))\n host.cpu_lists[cpu.numa_node].append(int(cpu.cpu))", "def produce_query_batches(self):\n pass", "def update_core_allocations(host, cpu_counts):\n # Remove any previous assignments\n for s in range(0, len(host.nodes)):\n for f in CORE_FUNCTIONS:\n host.cpu_functions[s][f] = []\n # Set new assignments\n for s in range(0, len(host.nodes)):\n cpu_list = host.cpu_lists[s] if s in host.cpu_lists else []\n # Reserve for the platform first\n for i in range(0, cpu_counts[s][constants.PLATFORM_FUNCTION]):\n host.cpu_functions[s][constants.PLATFORM_FUNCTION].append(\n cpu_list.pop(0))\n # Reserve for the vswitch next\n for i in range(0, cpu_counts[s][constants.VSWITCH_FUNCTION]):\n host.cpu_functions[s][constants.VSWITCH_FUNCTION].append(\n cpu_list.pop(0))\n # Reserve for the shared next\n for i in range(0, cpu_counts[s][constants.SHARED_FUNCTION]):\n host.cpu_functions[s][constants.SHARED_FUNCTION].append(\n cpu_list.pop(0))\n for i in range(0, cpu_counts[s][constants.ISOLATED_FUNCTION]):\n host.cpu_functions[s][constants.ISOLATED_FUNCTION].append(\n cpu_list.pop(0))\n # Assign the remaining cpus to the default function for this host\n host.cpu_functions[s][get_default_function(host)] += cpu_list\n return", "def _Add_sync_queues_and_barrier(enqueue_after_list):\n sync_queues = [\n data_flow_ops.FIFOQueue(\n self._num_worker, [dtypes.bool],\n shapes=[[]],\n shared_name='%s%s' % ('variable_init_sync_queue', i))\n for i in range(self._num_worker)\n ]\n queue_ops = []\n # For each other worker, add an entry in a queue\n token = constant_op.constant(False)\n with ops.control_dependencies(enqueue_after_list):\n for i, q in enumerate(sync_queues):\n if i == task_index:\n queue_ops.append(control_flow_ops.no_op())\n else:\n queue_ops.append(q.enqueue(token))\n queue_ops.append(\n sync_queues[task_index].dequeue_many(len(sync_queues) - 1))\n return control_flow_ops.group(*queue_ops)", "def InfeedTFFunc(self, inp_instance):\n inp_instance.DeviceLoopSetupEager()\n inp_instance.CreateTpuEnqueueOps()\n # `CreateTpuEnqueueOps` and `CreateCpuPassthroughEnqueueOps` must be in the\n # same place, because the former enqueues `_per_host_passthrough_batches`,\n # while the latter consumes it.\n inp_instance.CreateCpuPassthroughEnqueueOps()\n # `CreateCpuPassthroughEnqueueOps` and `DequeueCpuPassthrough` must be in\n # the same place, because the former enqueues `_host_queues`,\n # while the latter consumes it.\n cpu_pt = inp_instance.DequeueCpuPassthrough()\n return cpu_pt", "def _invoke_input_fn_and_record_structure(self):\n 
enqueue_ops = []\n infeed_queues = []\n all_hooks = []\n num_hosts = self._ctx.num_hosts\n tpu_host_placement_fn = self._ctx.tpu_host_placement_function\n\n run_infeed_loop_on_coordinator = True\n\n if self._sharded_per_core:\n # Per-Core input pipeline deployment.\n # Invoke input pipeline for each core and placed on the corresponding\n # host.\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n enqueue_ops_fn, captured_infeed_queue = (\n generate_per_core_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn, self._inputs_structure_recorder))\n\n if _WRAP_INPUT_FN_INTO_WHILE_LOOP:\n run_infeed_loop_on_coordinator = False\n enqueue_ops.append(\n _wrap_computation_in_while_loop(\n device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n # Infeed_queue_getter must be called after enqueue_ops_fn is called.\n infeed_queues.append(captured_infeed_queue.get())\n\n else:\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n if self._ctx.is_input_per_host_with_iterators():\n enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (\n generate_per_host_v2_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, host_device, host_id))\n else:\n enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (\n generate_per_host_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, self._batch_axis,\n host_device, host_id))\n all_hooks.extend(hooks)\n\n # NOTE(xiejw): We dispatch here based on the return type of the\n # users `input_fn`.\n #\n # 1. If input_fn returns a Dataset instance, we initialize the\n # iterator outside of tf.while_loop, and call the iterator.get_next\n # inside tf.while_loop. This should be always safe.\n #\n # 2. If input_fn returns (features, labels), it is too late to wrap\n # them inside tf.while_loop, as resource initialization cannot be\n # handled in TF control flow properly. In this case, we will use\n # python loop to enqueue the data into TPU system. This may be\n # slow compared to the previous case.\n if is_dataset:\n run_infeed_loop_on_coordinator = False\n wrap_fn = (\n _wrap_computation_in_while_loop\n if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else\n _wrap_computation_in_while_loop_with_stopping_signals)\n enqueue_ops.append(\n wrap_fn(device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n infeed_queues.append(captured_infeed_queue.get())\n # infeed_queue is used to generate dequeue ops. The only thing it uses for\n # dequeue is dtypes and types. So, any one can be used. Here, grab the\n # first one.\n self._infeed_queue = infeed_queues[0]\n return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator", "def generate_infeed_enqueue_ops_and_dequeue_fn(self):\n # While tf.while_loop is called, the body function, which invokes\n # `enqueue_fn` passed in, is called to construct the graph. 
So, input_fn\n # structure is recorded.\n enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (\n self._invoke_input_fn_and_record_structure())\n\n self._validate_input_pipeline()\n\n def dequeue_fn():\n \"\"\"dequeue_fn is used by TPU to retrieve the tensors.\"\"\"\n # In the model-parallel case, both the host-side and device-side\n # computations must agree on the core on which infeed takes place. We\n # choose to perform infeed on logical core 0 of each replica.\n values = self._infeed_queue.generate_dequeue_op(tpu_device=0)\n # The unflatten process uses the structure information recorded above.\n return self._inputs_structure_recorder.unflatten_features_and_labels(\n values)\n\n return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)" ]
[ "0.65835243", "0.6533586", "0.6291134", "0.62328285", "0.6117832", "0.57363117", "0.5343912", "0.53310704", "0.53279525", "0.5282919", "0.5259834", "0.5183685", "0.5105475", "0.50972563", "0.50761807", "0.5038841", "0.5036618", "0.5018514", "0.5013788", "0.49980277", "0.49547938", "0.49277472", "0.48854586", "0.48526448", "0.48468488", "0.48374093", "0.48330715", "0.4832542", "0.47983855", "0.47900814" ]
0.6555443
1
Validates and records the structure of `features` and `labels`.
def validate_and_record_structure(self, features, labels, signals=None): def _extract_key_names(tensor_or_dict): if tensor_or_dict is None: return [] return sorted(tensor_or_dict.keys()) if isinstance( tensor_or_dict, dict) else [] # Extract structure. has_labels = labels is not None feature_names = _extract_key_names(features) label_names = _extract_key_names(labels) if signals is not None and self._signals_helper is None: # Record signals helper. self._signals_helper = _SignalsHelper(signals) if self._initialized: # Verify the structure is same. The following should never happen. assert feature_names == self._feature_names, 'feature keys mismatched' assert label_names == self._label_names, 'label keys mismatched' assert has_labels == self._has_labels, 'label presence mismatched' else: # Record structure. self._initialized = True self._feature_names = feature_names self._label_names = label_names self._has_labels = has_labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _loadValid(self, features, labels):\n\t\tself.validX_, self.validY_, self.validLabel_ = self.__load(features, labels)", "def _check_labels_features_exist(\n labels_example: List[\"Message\"], attribute: Text\n ) -> bool:\n\n for label_example in labels_example:\n if (\n label_example.get(SPARSE_FEATURE_NAMES[attribute]) is None\n and label_example.get(DENSE_FEATURE_NAMES[attribute]) is None\n ):\n return False\n return True", "def _validate_length_features_and_labels(\n model_endpoint: mlrun.common.schemas.ModelEndpoint,\n ):\n\n # Getting the length of label names, feature_names and feature_stats\n len_of_label_names = (\n 0\n if not model_endpoint.spec.label_names\n else len(model_endpoint.spec.label_names)\n )\n len_of_feature_names = len(model_endpoint.spec.feature_names)\n len_of_feature_stats = len(model_endpoint.status.feature_stats)\n\n if len_of_feature_stats != len_of_feature_names + len_of_label_names:\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"The length of model endpoint feature_stats is not equal to the \"\n f\"length of model endpoint feature names and labels \"\n f\"feature_stats({len_of_feature_stats}), \"\n f\"feature_names({len_of_feature_names}),\"\n f\"label_names({len_of_label_names}\"\n )", "def __init__(self, features, labels, bigdl_type=\"float\"):\n self.feature = features[0]\n self.features = features\n self.label = labels[0]\n self.bigdl_type = bigdl_type\n self.labels = labels", "def load_data(self, features=None, labels=None):\n if features is None or labels is None:\n self._features = None\n self._labels = None\n return\n if len(features) != len(labels):\n raise DataMismatchError('Features and labels lists are different lengths')\n try:\n self._features = np.array(features, dtype=float)\n self._labels = np.array(labels, dtype=float)\n except ValueError:\n self._features = None\n self._labels = None\n raise ValueError('Label and feature lists must be homogeneous (same data type)'\n 'and numeric (i.e integers and floats) list of lists')", "def train_with_validation_provided(self, features, labels, validation_features, validation_labels):\n pass", "def _check_for_labels(self):\n check = True\n if 'labels' not in self.mapper:\n check = False\n return check", "def features_and_labels(self):\n if self.is_dataset:\n if self._iterator is None:\n raise RuntimeError('Internal error: Must call dataset_initializer_hook '\n 'before calling features_and_labels(). 
Please file '\n 'a bug!')\n return _Inputs._parse_inputs(self._iterator.get_next())\n\n return (self._features, self._labels)", "def train(self, features, labels):\n pass", "def feature_label(self, train, val):\n self.train_features = {name: np.array(value) for name, value in train.items()}\n self.train_labels = {name: self.train_features.pop(name) for name in self.label_names}\n\n self.val_features = {name: np.array(value) for name, value in val.items()}\n self.val_labels = {name: self.val_features.pop(name) for name in self.label_names}\n\n return \"feature and label for training has been created\"", "def _validate_features_in_predict_input(self, result):\n pass", "def assert_labels (labels):\n assert labels is not None\n assert (type (labels) is np.ndarray) or (type (labels) is list)", "def test_basic_labeling(self):\n # data with only 1 feature\n data = array([[-1], [1], [0.5], [0.25], [-0.33], [0]])\n # give 1 if feature value >= 0; otherwise 0\n labels = array([0, 1, 1, 1, 0, 1])\n cdata = LabeledCData(data, labels)\n\n # ensure that labelling is correct\n assert array_equal(cdata.labels, labels)", "def __test_input_fn(self):\n ## Test labels\n labels = self.labels_test\n ## Recast spectra into dictionary for estimator\n features = {'flux': self.spectra_test}\n ## Convert labels to integers\n ilabels = [self.label_index_lookup[l] for l in labels]\n return features, ilabels", "def test_labels(self):\n self.compliance_tester.test_labels(self.oi)", "def test__validate_features__0():\n for input_value, expected_output in (\n (None, None),\n ([], None),\n ([GuildFeature.animated_banner], (GuildFeature.animated_banner, )),\n ([GuildFeature.animated_banner.value], (GuildFeature.animated_banner, )),\n (\n [GuildFeature.animated_banner, GuildFeature.animated_icon],\n (GuildFeature.animated_banner, GuildFeature.animated_icon,),\n ),\n ):\n output = validate_features(input_value)\n vampytest.assert_eq(output, expected_output)", "def create_feature_and_label(inputs, feature_size: int):\n input_token_ids = inputs['features']\n labels = inputs['labels']\n num_tokens = inputs['num_tokens']\n\n input_mask = tf.sequence_mask(num_tokens, feature_size, dtype=tf.int32)\n type_id = tf.sequence_mask(num_tokens, feature_size, dtype=tf.int32)\n features = [input_token_ids, input_mask, type_id]\n\n return features, labels", "def check_sanity(self):\n # ensure numeric labels\n try:\n list(map(int, flatten(self.labels[:1])))\n except ValueError as ve:\n error(\"Non-numeric label encountered: {}\".format(ve))\n except TypeError as ve:\n warning(\"Non-collection labelitem encountered: {}\".format(ve))", "def input_features_labels(device, signal, subject_ID):\n\n directory = f'data/feature_label_tables/feature_{device}_{signal}/feature_S{subject_ID}_all_axis_{device}_{signal}'\n data = pd.read_csv(directory)\n data = data.dropna()\n\n # since all zero_crossing and mean_crossing metrics are zero and 200, respectively,\n # regardless of the signal and the activity, we ignore this feature.\n features = data.drop(columns=[f'x_{signal}_zero_crossing', f'x_{signal}_mean_crossing',\n f'y_{signal}_zero_crossing', f'y_{signal}_mean_crossing',\n f'z_{signal}_zero_crossing', f'z_{signal}_mean_crossing',\n 'Activity_ID'])\n\n all_labels = data[['Activity_ID']]\n\n feature_train, feature_test, label_train, label_test = train_test_split(\n features, all_labels, test_size=0.2, shuffle=True)\n # feature normalization\n scalar = StandardScaler().fit(feature_train)\n normalized_feature_train = scalar.transform(feature_train)\n 
normalized_feature_test = scalar.transform(feature_test)\n normalized_all_feature = scalar.transform(features)\n # convert 'numpy.ndarray' to pandas dataframe\n normalized_feature_train = pd.DataFrame(normalized_feature_train)\n normalized_feature_test = pd.DataFrame(normalized_feature_test)\n normalized_all_feature = pd.DataFrame(normalized_all_feature)\n\n return normalized_feature_train, normalized_feature_test, label_train, label_test, normalized_all_feature, all_labels", "def get_features(self, para, label_list, tokenizer, max_seq_length):\n\t\tlabel_map = {label : i for i, label in enumerate(label_list)}\n# self.reverse_label_map = {v: k for k, v in label_map.items()}\n\t\tguid = \"%s-%s\" % (\"test\", 1)\n\t\ttext_a = para[\"model_answer\"]\n\t\ttext_b = para[\"candidate_answer\"]\n\t\tlabel = label_list[0]\n\t\texample = InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)\n\t\t\n\t\ttokens_a = tokenizer.tokenize(example.text_a)\n\n\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\tif example.text_b:\n\t\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\t\t# Modifies `tokens_a` and `tokens_b` in place so that the total\n\t\t\t# length is less than the specified length.\n\t\t\t# Account for [CLS], [SEP], [SEP] with \"- 3\"\n\t\t\tself._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n\t\telse:\n\t\t\t# Account for [CLS] and [SEP] with \"- 2\"\n\t\t\tif len(tokens_a) > max_seq_length - 2:\n\t\t\t\ttokens_a = tokens_a[:(max_seq_length - 2)]\n\n\t\t# The convention in BERT is:\n\t\t# (a) For sequence pairs:\n\t\t# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n\t\t# (b) For single sequences:\n\t\t# tokens: [CLS] the dog is hairy . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0\n\t\t#\n\t\t# Where \"type_ids\" are used to indicate whether this is the first\n\t\t# sequence or the second sequence. The embedding vectors for `type=0` and\n\t\t# `type=1` were learned during pre-training and are added to the wordpiece\n\t\t# embedding vector (and position vector). This is not *strictly* necessary\n\t\t# since the [SEP] token unambigiously separates the sequences, but it makes\n\t\t# it easier for the model to learn the concept of sequences.\n\t\t#\n\t\t# For classification tasks, the first vector (corresponding to [CLS]) is\n\t\t# used as as the \"sentence vector\". Note that this only makes sense because\n\t\t# the entire model is fine-tuned.\n\t\ttokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n\t\tsegment_ids = [0] * len(tokens)\n\n\t\tif tokens_b:\n\t\t\ttokens += tokens_b + [\"[SEP]\"]\n\t\t\tsegment_ids += [1] * (len(tokens_b) + 1)\n\n\t\tinput_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n\t\t# The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n\t\t# tokens are attended to.\n\t\tinput_mask = [1] * len(input_ids)\n\n\t\t# Zero-pad up to the sequence length.\n\t\tpadding = [0] * (max_seq_length - len(input_ids))\n\t\tinput_ids += padding\n\t\tinput_mask += padding\n\t\tsegment_ids += padding\n\n\t\tassert len(input_ids) == max_seq_length\n\t\tassert len(input_mask) == max_seq_length\n\t\tassert len(segment_ids) == max_seq_length\n\t\tlabel_id = label_map[example.label]\n# print(\"*** Example ***\")\n# print(\"guid: %s\" % (example.guid))\n# print(\"tokens: %s\" % \" \".join(\n# [str(x) for x in tokens]))\n# print(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n# print(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n# print(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\n\t\t\n\t\treturn InputFeatures(input_ids=input_ids,\n\t\t\t\t\t\t\t input_mask=input_mask,\n\t\t\t\t\t\t\t segment_ids=segment_ids,\n\t\t\t\t\t\t\t label_id=label_id)", "def _preprocess(self, features, labels):\n with tf.variable_scope('preprocess'):\n with tf.variable_scope('image'):\n features['image_orig'] = features['image']\n image = tf.image.convert_image_dtype(features['image_orig'],\n dtype=tf.float32)\n if self.mode == ModeKeys.TRAIN:\n images = tf.unstack(image)\n images = [augment_image(img) for img in images]\n image = tf.stack(images)\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n features['image'] = image\n\n if labels is None:\n return features, None\n\n with tf.variable_scope('label'):\n # TODO(Shancheng): use start token and end token rather constant 0\n # labels for decoder input\n labels['label_input'] = tf.concat([labels['label'][:, -1:],\n labels['label'][:, 0:-1]], axis=1)\n # from text length to training label length\n labels['length'] = tf.reshape(labels['length'], [-1])\n labels['length'] = labels['length'] + 1\n\n return features, labels", "def read_data(feature_file, label_file):", "def _label_encoder_features_checker(func):\n\n @wraps(func)\n def wrapper_checker(database, features_to_encode):\n _CheckInput._check_database_input(database)\n for column in features_to_encode:\n _CheckInput._check_column_in_database(column,database)\n return func(database, features_to_encode)\n return wrapper_checker", "def check_inputs(x_unlabeled, x_labeled, y_labeled, y_true):\n if x_unlabeled is None:\n if x_labeled is None:\n raise Exception(\"No data, labeled or unlabeled, passed to check_inputs!\")\n x_unlabeled = x_labeled[0:0]\n if x_labeled is not None and y_labeled is not None:\n pass\n elif x_labeled is None and y_labeled is None:\n x_labeled = x_unlabeled[0:0]\n y_shape = y_true.get_shape()[1 : K.ndim(y_true)].as_list()\n y_labeled = np.empty([0] + y_shape)\n else:\n raise Exception(\"x_labeled and y_labeled must both be None or have a value\")\n return x_unlabeled, x_labeled, y_labeled", "def _parse(serialized_example):\n\n feature_map = {\n 'dayofweek': tf.io.FixedLenFeature([], tf.int64),\n 'dropofflat': tf.io.FixedLenFeature([], tf.float32),\n 'dropofflon': tf.io.FixedLenFeature([], tf.float32),\n 'fare_amount': tf.io.FixedLenFeature([], tf.float32),\n 'hourofday': tf.io.FixedLenFeature([], tf.int64),\n 'passengers': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplat': tf.io.FixedLenFeature([], tf.float32),\n 'pickuplon': tf.io.FixedLenFeature([], tf.float32)\n }\n\n # Parse the serialized data into a dictionary.\n parsed_example = tf.io.parse_single_example(\n serialized=serialized_example,\n features=feature_map)\n\n features = 
add_engineered(parsed_example)\n label = features.pop(\"fare_amount\")\n\n return features, label", "def evaluate(self, features, labels):\n raise NotImplementedError('Not implemented')", "def validate_labels(labels, path):\n for labels_ in labels.values():\n for label in labels_:\n for ann in label['annotations']:\n assert len(ann['segmentation']) == 1\n assert len(ann['segmentation'][0]) % 2 == 0\n\n label['annotations'] = [\n ann\n for ann in label['annotations']\n if len(ann['segmentation'][0]) >= 6\n ]\n assert len(label['annotations']) > 0\n label['file_name'] = path + '/' + label['file_name']\n\n for k in labels:\n labels[k] = [\n label for label in labels[k]\n if os.path.exists(label['file_name'])\n ]\n return labels", "def _process_features(self, limit):\n\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n raw = '/'.join((self.rawdir, 'feature'))\n logger.info(\"building labels for features\")\n\n line_counter = 0\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (feature_id, dbxref_id, organism_id, name, uniquename,\n residues, seqlen, md5checksum, type_id, is_analysis,\n timeaccessioned, timelastmodified) = line\n\n feature_key = feature_id\n if re.search(r'[\\|\\s\\[\\]\\{\\}\\\\<\\>]', uniquename):\n # some uniquenames have pipes or other nasty chars!\n # for example: FB||||FBrf0133242|Hugh-u1\n feature_id = self._makeInternalIdentifier(\n 'feature', feature_key)\n else:\n feature_id = 'FlyBase:'+uniquename\n self.idhash['feature'][feature_key] = feature_id\n self.feature_types[feature_key] = type_id\n self.label_hash[feature_id] = name\n\n if feature_key not in self.feature_to_organism_hash:\n self.feature_to_organism_hash[feature_key] = set()\n self.feature_to_organism_hash[feature_key].add(organism_id)\n\n # HACK - FBgn are genes, and therefore classes,\n # all else be individuals\n is_gene = False\n if re.search(r'(FBgn|FBog)', feature_id):\n self.idhash['gene'][feature_key] = feature_id\n is_gene = True\n elif re.search(r'FBa[lb]', feature_id):\n self.idhash['allele'][feature_key] = feature_id\n elif re.search(r'FBt[ip]', feature_id):\n self.idhash['feature'][feature_key] = feature_id\n\n if self.testMode and \\\n int(feature_key) not in self.test_keys['gene'] + \\\n self.test_keys['allele'] + self.test_keys['feature']:\n continue\n\n # now do something with it!\n # switch on type_id\n if name.strip() == '':\n name = uniquename\n\n type_key = type_id\n type_id = self.idhash['cvterm'][type_key]\n\n # skip some features by type\n types_to_skip = [\n 'SO:0000316', # CDS\n 'SO:0000696', # oligos\n 'SO:0000358', # polypeptide\n 'SO:0000234', # transcripts\n ]\n\n type_keys_to_skip = [\n 596, # pcr_product\n 57096, # mature peptide\n 57097, # signal_peptide\n 57270, # repeat masker\n 58210, # alignment\n 59643, # cDNA_clone\n 60006, # uncharacterized_change_in_nucleotide_sequence\n 61351, # oligo\n 61467, # polypeptide_domain\n 257, # exon\n 286, # intron\n ]\n\n organisms_to_skip = [\n 2 # computational result\n ]\n\n if type_id in types_to_skip \\\n or int(type_key) in type_keys_to_skip\\\n or int(organism_id) in organisms_to_skip:\n continue\n\n line_counter += 1\n\n if int(type_key) == 604: # RNAi_reagent\n # TODO add other reagents?\n self.idhash['reagent'][feature_key] = feature_id\n\n # deal with the taxonomy\n # only get taxa for features that are actually used in our set\n tax_internal_id = self._makeInternalIdentifier(\n 
'organism', organism_id)\n if organism_id not in self.checked_organisms:\n # will get the NCBITax if necessary\n tax_id = self._get_organism_id(organism_id)\n self.checked_organisms.add(organism_id)\n else:\n tax_id = self.idhash['organism'][organism_id]\n\n tax_label = self.label_hash.get(tax_id)\n if not re.search(r'FBog', feature_id) \\\n and re.search(r'Drosophila', tax_label):\n # make only fly things leaders\n model.makeLeader(feature_id)\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if is_gene:\n model.addClassToGraph(\n feature_id, name, type_id)\n g.addTriple(\n feature_id, model.object_properties['in_taxon'],\n tax_id)\n else:\n if re.search('FBa[lb]', feature_id):\n type_id = Genotype.genoparts['allele']\n model.addIndividualToGraph(feature_id, name, type_id)\n\n # stop adding what we do not appreciate\n # if is_obsolete == 't':\n # if is_gene:\n # model.addDeprecatedClass(feature_id)\n # else:\n # model.addDeprecatedIndividual(feature_id)\n # self.deprecated_features.add(feature_key)\n\n model.addClassToGraph(tax_id)\n if tax_id != tax_internal_id:\n model.addEquivalentClass(tax_id, tax_internal_id)\n\n model.addComment(\n feature_id,\n self._makeInternalIdentifier('feature', feature_key))\n\n # TODO save checked_organisms fbid to ncbitax mapping to\n # a local file to speed up subsequent searches\n\n return", "def test_var_added_labels(self):\n\n for column in [\"feature_name\", \"feature_reference\"]:\n expected_column = self.adata_with_labels.var[column]\n obtained_column = self.label_writer.adata.var[column]\n\n for i, j in zip(expected_column.tolist(), obtained_column.tolist()):\n with self.subTest(i=i, j=j):\n self.assertEqual(i, j)", "def test_data(self):\n\n return self.__valid_data, self.__valid_labels" ]
[ "0.6781636", "0.62255114", "0.6186637", "0.61535", "0.6147909", "0.6134168", "0.61082655", "0.610818", "0.61062473", "0.6101338", "0.60780364", "0.5975347", "0.5925683", "0.5920382", "0.5888969", "0.587738", "0.5873737", "0.5835565", "0.57987905", "0.5770602", "0.5758468", "0.57459545", "0.5735716", "0.57289696", "0.5723889", "0.5718124", "0.57017726", "0.566973", "0.56482613", "0.56341887" ]
0.72594035
0
Flattens the `features` and `labels` to a single tensor list.
def flatten_features_and_labels(self, features, labels, signals=None):
  flattened_inputs = []
  if self._feature_names:
    # We need a fixed ordering for enqueueing and dequeueing.
    flattened_inputs.extend(
        [features[name] for name in self._feature_names])
  else:
    flattened_inputs.append(features)

  if labels is not None:
    if self._label_names:
      # We need a fixed ordering for enqueueing and dequeueing.
      flattened_inputs.extend([labels[name] for name in self._label_names])
    else:
      flattened_inputs.append(labels)

  if signals is not None:
    flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))
  return flattened_inputs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pack_features_vector(features, labels):\n features = tf.stack(list(features), axis=1)\n return features, labels", "def pack_features_vector(features, labels):\n features = tf.stack(list(features.values()), axis=1)\n return features, labels", "def pack_features_vector(features, labels):\n features = tf.stack(list(features.values()), axis=1)\n return features, labels", "def tf_flatten(x):\n return tf.contrib.layers.flatten(x)", "def flatten(x_tensor):\n # TODO: Implement Function\n return tf.contrib.layers.flatten(x_tensor)", "def labeledTensors(self):\n return self.__normalizeData__(self.__tensors__)", "def image_to_feature_vector(raw_tensor):\n result = []\n for tensor in raw_tensor:\n result.append(tensor.flatten())\n return result", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def get_one_hot_labels_list(labels_list=None):\n one_hot_labels = np.array([label_to_one_hot(label=label) for label in labels_list])\n return one_hot_labels", "def batch_features_labels(features, labels, batch_size):\r\n for start in range(0, len(features), batch_size):\r\n end = min(start + batch_size, len(features))\r\n yield features[start:end], labels[start:end]", "def unflatten_features_and_labels(self, flattened_inputs):\n expected_num_features = (\n len(self._feature_names) if self._feature_names else 1)\n if self._has_labels:\n expected_num_labels = (\n len(self._label_names) if self._label_names else 1)\n else:\n expected_num_labels = 0\n\n expected_num_signals = (\n self._signals_helper.num_signals if self._signals_helper else 0)\n\n expected_num_tensors = (\n expected_num_features + expected_num_labels + expected_num_signals)\n\n if expected_num_tensors != len(flattened_inputs):\n raise ValueError(\n 'The number of flattened tensors mismatches expected num. 
'\n 'Expected {}, got {}'.format(expected_num_tensors,\n len(flattened_inputs)))\n if self._feature_names:\n unflattened_features = dict(\n zip(self._feature_names, flattened_inputs[:expected_num_features]))\n else:\n # Single tensor case\n unflattened_features = flattened_inputs[0]\n\n if expected_num_labels == 0:\n unflattened_label = None\n elif self._label_names:\n label_list = flattened_inputs[\n expected_num_features:expected_num_features + expected_num_labels]\n unflattened_label = dict(zip(self._label_names, label_list))\n else:\n # Single tensor case.\n unflattened_label = flattened_inputs[expected_num_features]\n\n signals = None\n if expected_num_signals != 0:\n tensor_list_for_signals = flattened_inputs[\n expected_num_features + expected_num_labels:]\n signals = self._signals_helper.unflatten(tensor_list_for_signals)\n\n return _Inputs(unflattened_features, unflattened_label, signals=signals)", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n #print(labels[start:end])\n yield features[start:end], labels[start:end]", "def flatten(x):\n all_dims_exc_first = np.prod([v.value for v in x.get_shape()[1:]])\n o = tf.reshape(x, [-1, all_dims_exc_first])\n return o", "def get_labels_decomposed(self) -> List[List[str]]:\n return [list(label) for label in self.labels]", "def flatten(self, x_tensor):\n shape = x_tensor.get_shape().as_list()\n assert len(shape) >= 4, 'shape of image is not correct'\n single_image_dimension = shape[1] * shape[2] * shape[3]\n x_tensor = tf.reshape(x_tensor, [-1, single_image_dimension])\n return x_tensor\n # return tf.contrib.layers.flatten(x_tensor)", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(this,x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def batch_flatten(x):\n shape = x.get_shape().as_list()[1:]\n if None not in shape:\n return tf.reshape(x, [-1, int(np.prod(shape))])\n return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))", "def flatten(x_tensor):\n # TODO: Implement Function\n b, w, h, d = x_tensor.get_shape().as_list()\n img_size = w * h * d\n return tf.reshape(x_tensor, [-1, img_size])", "def labels_to_labels(class_labels, num_classes =4):\n levels = []\n for label in class_labels:\n levels_from_label = label_to_levels(int(label), num_classes=num_classes)\n levels.append(levels_from_label)\n return torch.stack(levels).cuda()", "def batch_features_labels(features, labels, batch_size):\n # 用 yield迭代器。\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def expand_features_and_labels(x_feat, y_labels):\n x_expanded = []\n y_expanded = []\n for x, y in zip(x_feat, y_labels):\n for segment in x:\n x_expanded.append(segment)\n y_expanded.append(y)\n return x_expanded, y_expanded", "def _to_vectors(labels):\n res = [[int(s) for s in label] for label in labels]\n max_len = 0\n for ele in res:\n if len(ele) > max_len:\n max_len = len(ele)\n return [ele + [0] * (max_len 
- len(ele)) for ele in res]", "def flatten(x_tensor):\n import numpy as np\n #print(x_tensor)\n\n shape = x_tensor.get_shape().as_list() # a list: [None, height, width, channels]\n dim = np.prod(shape[1:]) # dim = prod(height,width,channels) \n flattened_tensor = tf.reshape(x_tensor, [-1, dim]) # -1 means \"all\"\n #print(flattened_tensor)\n return flattened_tensor", "def flatten(input, name):\n with tf.name_scope(name):\n l = tf.layers.flatten(input)\n return l", "def _flatten(self, inputT, size):\n return tf.reshape(inputT, (-1, size))", "def flatten(x_tensor):\n old_shape = x_tensor.get_shape().as_list()\n new_shape = [-1, old_shape[1] * old_shape[2] * old_shape[3]]\n return tf.reshape(x_tensor, new_shape)" ]
[ "0.74422634", "0.73549277", "0.73549277", "0.6901035", "0.65079063", "0.648174", "0.629152", "0.6247123", "0.6247123", "0.6247123", "0.6202904", "0.6201719", "0.6159463", "0.6156918", "0.61519694", "0.6068398", "0.5983221", "0.5978041", "0.5978041", "0.5969851", "0.5969851", "0.5963862", "0.5925594", "0.5924297", "0.5838597", "0.5822644", "0.5803338", "0.5800743", "0.57946146", "0.578459" ]
0.7374709
1
Restores the flattened inputs to original features and labels form.
def unflatten_features_and_labels(self, flattened_inputs):
  expected_num_features = (
      len(self._feature_names) if self._feature_names else 1)
  if self._has_labels:
    expected_num_labels = (
        len(self._label_names) if self._label_names else 1)
  else:
    expected_num_labels = 0

  expected_num_signals = (
      self._signals_helper.num_signals if self._signals_helper else 0)

  expected_num_tensors = (
      expected_num_features + expected_num_labels + expected_num_signals)

  if expected_num_tensors != len(flattened_inputs):
    raise ValueError(
        'The number of flattened tensors mismatches expected num. '
        'Expected {}, got {}'.format(expected_num_tensors,
                                     len(flattened_inputs)))
  if self._feature_names:
    unflattened_features = dict(
        zip(self._feature_names, flattened_inputs[:expected_num_features]))
  else:
    # Single tensor case
    unflattened_features = flattened_inputs[0]

  if expected_num_labels == 0:
    unflattened_label = None
  elif self._label_names:
    label_list = flattened_inputs[
        expected_num_features:expected_num_features + expected_num_labels]
    unflattened_label = dict(zip(self._label_names, label_list))
  else:
    # Single tensor case.
    unflattened_label = flattened_inputs[expected_num_features]

  signals = None
  if expected_num_signals != 0:
    tensor_list_for_signals = flattened_inputs[
        expected_num_features + expected_num_labels:]
    signals = self._signals_helper.unflatten(tensor_list_for_signals)

  return _Inputs(unflattened_features, unflattened_label, signals=signals)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _restore_data_inputs(self):\n super()._restore_data_inputs()\n self.training_data = (\n self._data.training_data.data if self._data.training_data and self._data.training_data.data else None\n )\n self.validation_data = (\n self._data.validation_data.data if self._data.validation_data and self._data.validation_data.data else None\n )", "def flatten_features_and_labels(self, features, labels, signals=None):\n flattened_inputs = []\n if self._feature_names:\n # We need a fixed ordering for enqueueing and dequeueing.\n flattened_inputs.extend(\n [features[name] for name in self._feature_names])\n else:\n flattened_inputs.append(features)\n\n if labels is not None:\n if self._label_names:\n # We need a fixed ordering for enqueueing and dequeueing.\n flattened_inputs.extend([labels[name] for name in self._label_names])\n else:\n flattened_inputs.append(labels)\n\n if signals is not None:\n flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))\n return flattened_inputs", "def restore(self):\n pert_params = list(self.net.parameters())\n saved_params = list(self.saved_net.parameters())\n for perturbed, saved in zip(pert_params, saved_params):\n perturbed_shape = perturbed.shape\n saved_shape = saved.shape\n perturbed = perturbed.flatten()\n saved = saved.flatten()\n for i, _ in enumerate(perturbed.data):\n perturbed.data[i] = saved.data[i]\n perturbed = perturbed.view(perturbed_shape)\n saved = saved.view(saved_shape)", "def _transform_inputs(self) -> None:\n self.inputs = None if self.inputs == {} else self.inputs", "def restore(self):\n self.nodes.restore()", "def restore(self):\n self.igate.restore()\n self.fgate.restore()\n self.ogate.restore()\n super(LSTM, self).restore()", "def restore(self):\n self.u = self.ub.copy()\n self.w = self.wb.copy()\n self.v = self.vb.copy()\n if self.en_bias: self.b = self.bb.copy()", "def restore_coefs(self, coefs):\n self._weights = coefs['weights']\n self._bias = coefs['bias']", "def _reset(self):\n self._set(\"_n_init_features\", None)\n self._set(\"_n_output_features\", None)\n self._set(\"_n_intervals\", None)\n self._set(\"_mapper\", {})\n self._set(\"_cpp_preprocessor\", None)\n self._set(\"_fitted\", False)", "def unflatten(self): \n self.assign(self.get_unflattened_circuit())\n self._expr_map = None", "def flatten():", "def _revert(self):\n self.kwargs[\"collect\"].reset_scan_form_data()", "def restore(self):\n\n self.dispersion = self.raw_dispersion\n self.flux = self.raw_flux\n self.flux_err = self.raw_flux_err\n self.reset_mask()", "def reset_original(self):\n self._original = [] # Empty out self._originals", "def flatten_inputs(self, inputs):\n ndim = inputs.ndim\n if ndim == 2:\n return inputs\n elif ndim == 4:\n # Maybe add a check\n inputs_reshaped = inputs.ravel().reshape((self._learning_batch_size,\n self._input_size)).T\n return inputs_reshaped\n else:\n raise Exception('Wrong inputs dimension : it should be a matrix or a 4D tensor')", "def _Restore(self) -> None:\n self._SetNodes(self._nodes)", "def resetTransformations():\n dislin.trfres()", "def reset(self):\n inv_perm = np.argsort(self._current_order)\n self._current_order = self._current_order[inv_perm]\n self.inputs = self.inputs[inv_perm]\n self.targets = self.targets[inv_perm]\n self.new_epoch()", "def reset(self):\n inv_perm = np.argsort(self._current_order)\n self._current_order = self._current_order[inv_perm]\n self.inputs = self.inputs[inv_perm]\n self.targets = self.targets[inv_perm]\n self.target_ids = self.target_ids[inv_perm]\n self.new_epoch()", "def 
transform(self, dataset, labels):\n print(f\"Dropping {len(self.deficient)} deficient features...\")\n dataset.drop(columns=self.deficient, inplace=True)\n print(f\"Scanning {len(dataset)} samples for duplicates...\")\n duplicates = dataset.duplicated()\n print(f\"Dropping {sum(duplicates)} duplicate samples...\")\n dataset.drop(index=dataset.index[duplicates], inplace=True)\n dataset.reset_index(drop=True, inplace=True)\n labels.drop(labels=labels.index[duplicates], inplace=True)\n labels.reset_index(drop=True, inplace=True)\n return dataset, labels", "def reconstruct_input_ext(self, model_in):", "def tf_flatten(x):\n return tf.contrib.layers.flatten(x)", "def flatten(self):\n pass", "def flatten(input, name):\n with tf.name_scope(name):\n l = tf.layers.flatten(input)\n return l", "def switch_to_tuned_inputs(self):\n \n self.h_e=self.inputs_flat.T\n self.h=np.vstack([self.h_e,self.h_i])", "def convert_full_features_to_input_features(raw_features):\n data_features = mx.gluon.data.SimpleDataset(list(itertools.chain.from_iterable(raw_features)))\n data_features = data_features.transform(lambda *example: (\n example[0], # example_id\n example[7], # inputs_id\n example[9], # segment_ids\n example[2], # valid_length,\n example[8], # p_mask\n example[10], # start_position,\n example[11], # end_position\n example[14])) # is_impossible\n return data_features", "def normalize(self):\n d = learning_utils.convert_data_to_2d(self._data)\n d = learning_utils.normalize_2d(d)\n self._data = learning_utils.convert_data_to_1d(d)", "def _maybe_reset_state(self, state: NestedMap, inputs: NestedMap) -> JTensor:\n if self.params.reset_cell_state:\n state_modified = self._reset_state(state.DeepCopy(), inputs)\n else:\n state_modified = state\n return state_modified # pytype: disable=bad-return-type # jax-ndarray", "def _flatten(prev_layer):\n\n with tf.name_scope('flatten'):\n shape = int(np.prod(prev_layer.get_shape()[1:]))\n return tf.reshape(prev_layer, [-1, shape])", "def _flatten(self, inputT, size):\n return tf.reshape(inputT, (-1, size))" ]
[ "0.6197647", "0.6104975", "0.6052526", "0.6033052", "0.6020405", "0.5886211", "0.5778616", "0.57276195", "0.5723627", "0.5686435", "0.5685843", "0.56825626", "0.56755847", "0.56697255", "0.5663335", "0.5650272", "0.5640793", "0.5625429", "0.5615306", "0.5614015", "0.5612984", "0.56088424", "0.55944836", "0.557473", "0.55695474", "0.5518273", "0.5510842", "0.54972804", "0.5489098", "0.54811" ]
0.69521254
0
Generates infeed enqueue ops and dequeue_fn.
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
  # While tf.while_loop is called, the body function, which invokes
  # `enqueue_fn` passed in, is called to construct the graph. So, input_fn
  # structure is recorded.
  enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
      self._invoke_input_fn_and_record_structure())

  self._validate_input_pipeline()

  def dequeue_fn():
    """dequeue_fn is used by TPU to retrieve the tensors."""
    # In the model-parallel case, both the host-side and device-side
    # computations must agree on the core on which infeed takes place. We
    # choose to perform infeed on logical core 0 of each replica.
    values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
    # The unflatten process uses the structure information recorded above.
    return self._inputs_structure_recorder.unflatten_features_and_labels(
        values)

  return (enqueue_ops, dequeue_fn, all_hooks,
          run_infeed_loop_on_coordinator)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enqueue_ops_fn():\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def enqueue_ops_fn():\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def InfeedTFFunc(self):\n self._task.input.DeviceLoopSetupEager()\n\n def InfeedBody(i):\n self._task.input.CreateTpuEnqueueOps()\n return i + 1\n\n tf.while_loop(\n cond=lambda i: i < self._steps_per_loop,\n body=InfeedBody,\n loop_vars=[tf.constant(0)])", "def dequeue_fn():\n # In the model-parallel case, both the host-side and device-side\n # computations must agree on the core on which infeed takes place. 
We\n # choose to perform infeed on logical core 0 of each replica.\n values = self._infeed_queue.generate_dequeue_op(tpu_device=0)\n # The unflatten process uses the structure information recorded above.\n return self._inputs_structure_recorder.unflatten_features_and_labels(\n values)", "def InfeedTFFunc(self):\n self.task.input.DeviceLoopSetupEager()\n\n def InfeedBody(i):\n self.task.input.CreateTpuEnqueueOps()\n return i + 1\n\n tf.while_loop(\n cond=lambda i: i < self._steps_per_loop,\n body=InfeedBody,\n loop_vars=[tf.constant(0)])", "def _invoke_input_fn_and_record_structure(self):\n enqueue_ops = []\n infeed_queues = []\n all_hooks = []\n num_hosts = self._ctx.num_hosts\n tpu_host_placement_fn = self._ctx.tpu_host_placement_function\n\n run_infeed_loop_on_coordinator = True\n\n if self._sharded_per_core:\n # Per-Core input pipeline deployment.\n # Invoke input pipeline for each core and placed on the corresponding\n # host.\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n enqueue_ops_fn, captured_infeed_queue = (\n generate_per_core_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn, self._inputs_structure_recorder))\n\n if _WRAP_INPUT_FN_INTO_WHILE_LOOP:\n run_infeed_loop_on_coordinator = False\n enqueue_ops.append(\n _wrap_computation_in_while_loop(\n device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n # Infeed_queue_getter must be called after enqueue_ops_fn is called.\n infeed_queues.append(captured_infeed_queue.get())\n\n else:\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n if self._ctx.is_input_per_host_with_iterators():\n enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (\n generate_per_host_v2_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, host_device, host_id))\n else:\n enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (\n generate_per_host_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, self._batch_axis,\n host_device, host_id))\n all_hooks.extend(hooks)\n\n # NOTE(xiejw): We dispatch here based on the return type of the\n # users `input_fn`.\n #\n # 1. If input_fn returns a Dataset instance, we initialize the\n # iterator outside of tf.while_loop, and call the iterator.get_next\n # inside tf.while_loop. This should be always safe.\n #\n # 2. If input_fn returns (features, labels), it is too late to wrap\n # them inside tf.while_loop, as resource initialization cannot be\n # handled in TF control flow properly. In this case, we will use\n # python loop to enqueue the data into TPU system. This may be\n # slow compared to the previous case.\n if is_dataset:\n run_infeed_loop_on_coordinator = False\n wrap_fn = (\n _wrap_computation_in_while_loop\n if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else\n _wrap_computation_in_while_loop_with_stopping_signals)\n enqueue_ops.append(\n wrap_fn(device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n infeed_queues.append(captured_infeed_queue.get())\n # infeed_queue is used to generate dequeue ops. The only thing it uses for\n # dequeue is dtypes and types. So, any one can be used. 
Here, grab the\n # first one.\n self._infeed_queue = infeed_queues[0]\n return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator", "def thread_main(self, sess):\n for x_h, x_m, y in self.data_iterator():\n sess.run(self.enqueue_op, feed_dict={self.x_h:x_h, self.x_m:x_m, self.y:y})", "def wrapper(sources, dtypes, capacity, num_threads):\n # enqueue function\n def enqueue_func(sess, op):\n # read data from source queue\n data = func(sess.run(sources))\n # create feeder dict\n feed_dict = {}\n for ph, col in zip(placeholders, data):\n feed_dict[ph] = col\n # run session\n sess.run(op, feed_dict=feed_dict)\n\n # create place holder list\n placeholders = []\n for dtype in dtypes:\n placeholders.append(tf.placeholder(dtype=dtype))\n\n # create FIFO queue\n queue = tf.FIFOQueue(capacity, dtypes=dtypes)\n\n # enqueue operation\n enqueue_op = queue.enqueue(placeholders)\n\n # create queue runner\n runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * num_threads)\n\n # register to global collection\n tf.train.add_queue_runner(runner)\n\n # return de-queue operation\n return queue.dequeue()", "def create_enqueue_op(self):\n if not self._names:\n return []\n\n tensors = []\n # TODO(jhseu): Consider deduping tensors.\n for name in self._names:\n tensors.extend(self._tensors[name])\n\n with ops.device(tpu.core(0)):\n return [tpu_ops.outfeed_enqueue_tuple(tensors)]", "def build_input_fns(data_dir, batch_size):\n\n # Build an iterator over training batches.\n training_dataset = static_mnist_dataset(data_dir, \"train\")\n training_dataset = training_dataset.shuffle(50000).repeat().batch(batch_size)\n train_input_fn = lambda: training_dataset.make_one_shot_iterator().get_next()\n\n # Build an iterator over the heldout set.\n eval_dataset = static_mnist_dataset(data_dir, \"valid\")\n eval_dataset = eval_dataset.batch(batch_size)\n eval_input_fn = lambda: eval_dataset.make_one_shot_iterator().get_next()\n\n return train_input_fn, eval_input_fn", "def InfeedTFFunc(self, inp_instance):\n inp_instance.DeviceLoopSetupEager()\n inp_instance.CreateTpuEnqueueOps()\n # `CreateTpuEnqueueOps` and `CreateCpuPassthroughEnqueueOps` must be in the\n # same place, because the former enqueues `_per_host_passthrough_batches`,\n # while the latter consumes it.\n inp_instance.CreateCpuPassthroughEnqueueOps()\n # `CreateCpuPassthroughEnqueueOps` and `DequeueCpuPassthrough` must be in\n # the same place, because the former enqueues `_host_queues`,\n # while the latter consumes it.\n cpu_pt = inp_instance.DequeueCpuPassthrough()\n return cpu_pt", "def thread_main(self, sess):\n for x_h, x_m, y in self.data_iterator():\n sess.run(self.enqueue_op, feed_dict={self.x_h:x_h, self.x_m:x_m})", "def _input_fn():\n # TODO(seominjoon): There is bottleneck in data feeding, slow for N >= 128.\n filename_queue = tf.train.string_input_producer(\n filenames, shuffle=shuffle_files, num_epochs=num_epochs)\n reader = tf.TFRecordReader()\n _, se = reader.read(filename_queue)\n # TODO(seominjoon): Consider moving data filtering to here.\n features_op = tf.parse_single_example(se, features=features)\n\n names = list(features_op.keys())\n dtypes = [features_op[name].dtype for name in names]\n shapes = [features_op[name].shape for name in names]\n\n if shuffle_examples:\n # Data shuffling.\n rq = tf.RandomShuffleQueue(\n queue_capacity, min_after_dequeue, dtypes, names=names)\n else:\n rq = tf.FIFOQueue(queue_capacity, dtypes, names=names)\n enqueue_op = rq.enqueue(features_op)\n dequeue_op = rq.dequeue()\n dequeue_op = [dequeue_op[name] 
for name in names]\n qr = tf.train.QueueRunner(rq, [enqueue_op])\n tf.train.add_queue_runner(qr)\n\n batch = tf.train.batch(\n dequeue_op,\n batch_size,\n capacity=queue_capacity,\n dynamic_pad=True,\n shapes=shapes,\n allow_smaller_final_batch=True,\n num_threads=5)\n batch = {name: each for name, each in zip(names, batch)}\n target_keys = [\n 'word_answer_starts', 'word_answer_ends', 'answers', 'num_answers'\n ]\n # TODO(seominjoon) For cheating-safe, comment out #.\n features_batch = {\n key: val\n for key, val in batch.items() # if key not in target_keys\n }\n\n # `metadata['emb_mat`]` contains GloVe embedding, and `xv` in\n # `features_batch` index into the vectors.\n features_batch['emb_mat'] = tf.constant(emb_mat)\n targets_batch = {key: batch[key] for key in target_keys}\n\n # Postprocessing for character data.\n # Due to the limitation of the python wrapper for prototxt,\n # the characters (by index) need to be flattened when saving on prototxt.\n # The following 'unflattens' the character tensor.\n actual_batch_size = tf.shape(batch['indexed_context_chars'])[0]\n features_batch['indexed_context_chars'] = tf.reshape(\n features_batch['indexed_context_chars'],\n [actual_batch_size, -1, metadata['num_chars_per_word']])\n features_batch['indexed_question_chars'] = tf.reshape(\n features_batch['indexed_question_chars'],\n [actual_batch_size, -1, metadata['num_chars_per_word']])\n\n # Make sure answer start and end positions are less than sequence lengths.\n # TODO(seominjoon) This will need to move to a separate test.\n with tf.control_dependencies([\n tf.assert_less(\n tf.reduce_max(targets_batch['word_answer_starts'], 1),\n features_batch['context_num_words'])\n ]):\n targets_batch['word_answer_starts'] = tf.identity(\n targets_batch['word_answer_starts'])\n with tf.control_dependencies([\n tf.assert_less(\n tf.reduce_max(targets_batch['word_answer_ends'], 1),\n features_batch['context_num_words'])\n ]):\n targets_batch['word_answer_ends'] = tf.identity(\n targets_batch['word_answer_ends'])\n\n # Stress test to ensure no OOM for GPU occurs.\n if oom_test:\n features_batch['indexed_context_words'] = tf.constant(\n np.ones(\n [batch_size, exp_metadata['max_context_size']], dtype='int64'))\n features_batch['glove_indexed_context_words'] = tf.constant(\n np.ones(\n [batch_size, exp_metadata['max_context_size']], dtype='int64'))\n features_batch['indexed_context_chars'] = tf.constant(\n np.ones(\n [\n batch_size, exp_metadata['max_context_size'], exp_metadata[\n 'num_chars_per_word']\n ],\n dtype='int64'))\n features_batch['indexed_question_words'] = tf.constant(\n np.ones([batch_size, exp_metadata['max_ques_size']], dtype='int64'))\n features_batch['glove_indexed_question_words'] = tf.constant(\n np.ones([batch_size, exp_metadata['max_ques_size']], dtype='int64'))\n features_batch['indexed_question_chars'] = tf.constant(\n np.ones(\n [\n batch_size, exp_metadata['max_ques_size'], exp_metadata[\n 'num_chars_per_word']\n ],\n dtype='int64'))\n features_batch['question_num_words'] = tf.constant(\n np.ones([batch_size], dtype='int64') * exp_metadata['max_ques_size'])\n features_batch['context_num_words'] = tf.constant(\n np.ones([batch_size], dtype='int64') *\n exp_metadata['max_context_size'])\n\n return features_batch, targets_batch", "def enqueue(tup):", "def get_input_fn_queue(pattern, flags, batch_size):\n\n def input_fn(params=None):\n \"\"\"Input function using queues for GPU.\"\"\"\n del params\n filenames = gfile.Glob(os.path.join(flags.data_dir, pattern))\n if not filenames:\n 
raise RuntimeError('No data files found.')\n filename_queue = tf.train.string_input_producer(filenames, shuffle=True)\n reader = tf.TFRecordReader()\n\n _, val = reader.read(filename_queue)\n serialized_input = tf.reshape(val, shape=[1])\n\n image_seq = None\n\n for i in range(0, flags.sequence_length, flags.skip_num):\n image_name = 'image_' + str(i)\n\n if flags.dataset_type == 'robot':\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n features = {\n pose_name:\n tf.FixedLenFeature([flags.pose_dim], tf.float32),\n image_name:\n tf.FixedLenFeature([1], tf.string),\n action_name:\n tf.FixedLenFeature([flags.pose_dim], tf.float32),\n joint_pos_name:\n tf.FixedLenFeature([flags.joint_pos_dim], tf.float32)\n }\n else:\n features = {\n image_name: tf.FixedLenFeature([1], tf.string),\n }\n\n parsed_input = tf.parse_example(serialized_input, features)\n\n # Process image\n image_buffer = tf.reshape(parsed_input[image_name], shape=[])\n image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)\n image = tf.image.resize_images(\n image, (IMG_HEIGHT, IMG_WIDTH), method=tf.image.ResizeMethod.BICUBIC)\n image = tf.cast(tf.expand_dims(image, 0), tf.float32) / 255.0\n\n if flags.dataset_type == 'robot':\n pose = tf.reshape(parsed_input[pose_name], shape=[flags.pose_dim])\n pose = tf.expand_dims(pose, 0)\n action = tf.reshape(parsed_input[action_name], shape=[flags.pose_dim])\n action = tf.expand_dims(action, 0)\n joint_pos = tf.reshape(\n parsed_input[joint_pos_name], shape=[flags.joint_pos_dim])\n joint_pos = tf.expand_dims(joint_pos, 0)\n else:\n pose = tf.zeros([1, flags.pose_dim])\n action = tf.zeros([1, flags.pose_dim])\n joint_pos = tf.zeros([1, flags.joint_pos_dim])\n\n if i == 0:\n image_seq = image\n action_seq, pose_seq, joint_pos_seq = action, pose, joint_pos\n else:\n image_seq = tf.concat([image_seq, image], 0)\n action_seq = tf.concat([action_seq, action], 0)\n pose_seq = tf.concat([pose_seq, pose], 0)\n joint_pos_seq = tf.concat([joint_pos_seq, joint_pos], 0)\n\n [images, actions, poses, joint_pos] = tf.train.shuffle_batch(\n [image_seq, action_seq, pose_seq, joint_pos_seq],\n batch_size,\n num_threads=4,\n capacity=200 * batch_size,\n min_after_dequeue=batch_size * 10,\n )\n\n joint_poses = tf.concat([joint_pos, poses], 2)\n\n output_features = {\n IMAGE_FEATURE_NAME: images,\n JOINT_POSE_FEATURE_NAME: joint_poses,\n ACTION_FEATURE_NAME: actions\n }\n\n return output_features, None\n\n return input_fn", "def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,\n inputs_structure_recorder):\n captured_infeed_queue = _CapturedObject()\n\n def enqueue_ops_fn():\n \"\"\"A fn returns enqueue_ops.\"\"\"\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. 
Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue", "def _OutfeedDequeue(self, decode_nm):\n num_decode_tensors = len(decode_nm.Flatten())\n outfeed_ops = [[]] * num_decode_tensors\n device_assignment = py_utils.GetTpuDeviceAssignment()\n assert device_assignment\n num_cores_per_replica = (1 if self.spmd else\n (device_assignment.num_cores_per_replica))\n for replica in range(device_assignment.num_replicas):\n for core in range(num_cores_per_replica):\n with tf.device(device_assignment.host_device(replica, core)):\n outfeeds_per_core = tpu_ops.outfeed_dequeue_tuple(\n dtypes=[x.dtype for x in decode_nm.Flatten()],\n shapes=[x.shape for x in decode_nm.Flatten()],\n device_ordinal=device_assignment.tpu_ordinal(replica, core))\n for idx_outfeed, out_feed in enumerate(outfeeds_per_core):\n outfeed_ops[idx_outfeed] = outfeed_ops[idx_outfeed] + [out_feed]\n return [tf.concat(per_outfeed, axis=0) for per_outfeed in outfeed_ops]", "def _InfeedLoop(self, sess=None):\n self._InfeedLoopForInput(self.params.dataset_name, self._task.input, sess)", "def input_fn():\n bos_id = tf.constant(BOS_ID, tf.int32)\n eos_id = tf.constant(EOS_ID, tf.int32)\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_files)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.map(lambda record: _decode_record(record, name_to_features))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([[bos_id], src_ids, [eos_id]], 0),\n tf.concat([tgt_ids, [eos_id]], 0),\n label))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n src_ids[:FLAGS.max_sequence_length],\n tgt_ids[:FLAGS.max_sequence_length],\n label\n ))\n\n d = d.map(lambda src_ids, tgt_ids, label: (\n tf.concat([src_ids, tgt_ids], 0),\n tf.concat([tf.zeros_like(src_ids), tf.ones_like(tgt_ids)], 0),\n label\n ))\n\n d = d.map(lambda input_ids, segment_ids, label_ids: (\n input_ids,\n segment_ids,\n tf.ones_like(input_ids),\n label_ids\n ))\n\n def batching_func(x):\n return x.padded_batch(\n batch_size,\n # The entry is the source line rows;\n # this has unknown-length vectors. 
The last entry is\n # the source row size; this is a scalar.\n padded_shapes=(\n tf.TensorShape([None]), # src\n tf.TensorShape([None]), # tgt\n tf.TensorShape([None]),\n tf.TensorShape([])), # src_len\n # Pad the source sequences with eos tokens.\n # (Though notice we don't generally need to do this since\n # later on we will be masking out calculations past the true sequence.\n padding_values=(\n PAD_ID, # src\n PAD_ID,\n PAD_ID,\n 0)) # src_len -- unused\n\n batched_dataset = batching_func(d)\n features = batched_dataset.map(lambda input_ids, segment_ids, input_mask, label:\n {\n \"input_ids\": input_ids,\n \"segment_ids\": segment_ids,\n \"input_mask\": input_mask,\n \"label_ids\": label\n\n })\n\n return features", "def generate_per_host_enqueue_ops_fn_for_host(\n ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):\n captured_infeed_queue = _CapturedObject()\n\n hooks = []\n\n with ops.device(device):\n inputs = _Inputs.from_input_fn(input_fn())\n\n is_dataset = inputs.is_dataset\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n if not is_dataset:\n raise TypeError(\n 'For mode PREDICT, `input_fn` must return `Dataset` instead of '\n '`features` and `labels`.')\n if batch_axis is not None:\n raise TypeError('For mode PREDICT, batch_axis is not supported yet.')\n inputs = _InputsWithStoppingSignals(\n dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,\n add_padding=True)\n\n if is_dataset:\n hooks.append(inputs.dataset_initializer_hook())\n\n # TODO(ylc): Refactoring the code to merge the tpu ordinal logic here and the\n # _TPUContext.tpu_ordinal_function. We should either introduce another\n # abstraction or a different helper method.\n def _tpu_ordinal_function_impl(shard_index_in_host):\n # We put both enqueue/dequeue op at tpu.core(0) in each replica.\n replica = ctx.device_assignment.lookup_replicas(\n host_id, (0, 0, 0))[shard_index_in_host]\n return ctx.device_assignment.tpu_ordinal(replica=replica)\n\n if ctx.model_parallelism_enabled:\n tpu_ordinal_function = _tpu_ordinal_function_impl\n else:\n tpu_ordinal_function = None\n\n def enqueue_ops_fn():\n with ops.device(device):\n num_of_replicas_per_host = ctx.num_of_replicas_per_host\n # Convert user input to features and labels. 
If the user returns a\n # dataset, it is initialized and the features and labels extracted via\n # `dataset.iterator.get_next()`\n features, labels = inputs.features_and_labels()\n signals = inputs.signals()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels, signals)\n unsharded_tensor_list = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels, signals))\n\n infeed_queue = tpu_feed.InfeedQueue(\n tuple_types=[t.dtype for t in unsharded_tensor_list],\n tuple_shapes=[t.shape for t in unsharded_tensor_list],\n shard_dimensions=batch_axis)\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_number_of_shards(num_of_replicas_per_host)\n per_host_enqueue_ops = (\n infeed_queue.split_inputs_and_generate_enqueue_ops(\n unsharded_tensor_list,\n placement_function=lambda x: device,\n tpu_ordinal_function=tpu_ordinal_function))\n if signals is None:\n return per_host_enqueue_ops\n else:\n return {\n 'ops': per_host_enqueue_ops,\n 'signals': signals,\n }\n\n return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset", "def generate_per_host_v2_enqueue_ops_fn_for_host(\n ctx, input_fn, inputs_structure_recorder, device, host_id):\n del host_id # unused\n captured_infeed_queue = _CapturedObject()\n hooks = []\n\n with ops.device(device):\n inputs = _Inputs.from_input_fn(input_fn())\n\n is_dataset = inputs.is_dataset\n if not is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '\n 'input pipeline configuration.')\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n # TODO(b/XXX): Add predict support for PER_HOST_V2\n raise TypeError('Most PREDICT not yet supported in PER_HOST_V2 mode.')\n\n hooks.append(inputs.dataset_initializer_hook())\n\n def enqueue_ops_fn():\n \"\"\"Generates the per_host enqueue ops.\"\"\"\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset", "def get_fn_enqueue_op(tf_fn_phd):\n raw_im_ = tf.read_file(tf_fn_phd)\n im_ = tf.image.decode_jpeg(raw_im_)\n r_im_ = tf.image.resize_images(im_, IM_H, IM_W)\n enq_op = tf_fn_q.enqueue([tf.to_float(r_im_)])\n return enq_op", "def get_eval_inputs(features, labels, batch_size):\n\titerator_initializer_hook = IteratorInitializerHook()\n\n\tdef _eval_input_fn():\n\t\tprint('\\n\\nRunning _eval_input_fn\\n\\n')\n\t\t\"\"\"Defines eval input fn.\"\"\"\n\t\tfeatures_placeholder = {\n\t\t\t#'query_unigrams' : 
tf.placeholder(tf.string, (len(features['query_unigrams']), features['query_unigrams'][0].shape[0])),\n\t\t\t#'doc_unigrams' : tf.placeholder(tf.string, (len(features['doc_unigrams']), len(features['doc_unigrams'][0]), features['doc_unigrams'][0][0].shape[0]))\n\t\t\t#'query_unigrams' : tf.placeholder(tf.string, (len(features['query_unigrams']), len(features['query_unigrams'][0]))),\n\t\t\t#'doc_unigrams' : tf.placeholder(tf.string, (len(features['doc_unigrams']), len(features['doc_unigrams'][0]), len(features['doc_unigrams'][0][0])))\n\t\t\t#k: tf.placeholder(tf.string, get_shape(v)) for k, v in six.iteritems(features)\n\t\t\tk: tf.placeholder(v.dtype, v.shape) for k, v in six.iteritems(features)\n\t\t\t#k: tf.sparse.placeholder(tf.string, [len(v)] + v[0].shape.as_list(), name=k) for k, v in six.iteritems(features)\n\t\t\t#k: tf.placeholder(tf.string, (len(v), len(v[0]), len(v[0][0]))) for k, v in six.iteritems(features)\n\t\t}\n\t\tlabels_placeholder = tf.placeholder(labels.dtype, labels.shape, name='label')\n\t\tdataset = tf.data.Dataset.from_tensor_slices((features_placeholder,\n\t\t\t\t\t\t\t\t\t\t\t\tlabels_placeholder))\n\t\tdataset = dataset.batch(batch_size)\n\t\titerator = dataset.make_initializable_iterator()\n\t\tfeed_dict = {labels_placeholder: labels}\n\t\tfeed_dict.update(\n\t\t\t\t{features_placeholder[k]: features[k] for k in features_placeholder})\n\t\tprint('feed_dict')\n\t\tfor k,v in six.iteritems(feed_dict):\n\t\t\tprint(k.shape)\n\t\t#\tprint(v.shape)\n\t\trun_options = tf.RunOptions(report_tensor_allocations_upon_oom = True)\n\t\titerator_initializer_hook.iterator_initializer_fn = (\n\t\t\t\tlambda sess: sess.run(iterator.initializer, feed_dict=feed_dict, options=run_options))\n\t\treturn iterator.get_next()\n\n\treturn _eval_input_fn, iterator_initializer_hook", "def _eval_input_fn():\n features_placeholder = {\n k: tf.compat.v1.placeholder(v.dtype, v.shape)\n for k, v in six.iteritems(features)\n }\n if use_multi_head:\n placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)\n labels_placeholder = {\n _PRIMARY_HEAD: placeholder,\n _SECONDARY_HEAD: placeholder,\n }\n else:\n labels_placeholder = tf.compat.v1.placeholder(labels.dtype, labels.shape)\n dataset = tf.data.Dataset.from_tensors(\n (features_placeholder, labels_placeholder))\n iterator = tf.compat.v1.data.make_initializable_iterator(dataset)\n if use_multi_head:\n feed_dict = {\n labels_placeholder[head_name]: labels\n for head_name in labels_placeholder\n }\n else:\n feed_dict = {labels_placeholder: labels}\n\n feed_dict.update(\n {features_placeholder[k]: features[k] for k in features_placeholder})\n iterator_initializer_hook.iterator_initializer_fn = (\n lambda sess: sess.run(iterator.initializer, feed_dict=feed_dict))\n return iterator.get_next()", "def _build_fetches(self, global_step, all_logits, losses, device_grads,\n enqueue_ops, update_ops, all_top_1_ops, all_top_5_ops,\n phase_train):\n fetches = {'enqueue_ops': enqueue_ops}\n \n apply_gradient_devices, gradient_state = (\n self.variable_mgr.preprocess_device_grads(device_grads))\n\n training_ops = []\n for d, device in enumerate(apply_gradient_devices):\n with tf.device(device):\n total_loss = tf.reduce_mean(losses)\n avg_grads = self.variable_mgr.get_gradients_to_apply(d, gradient_state)\n\n gradient_clip = self.params.gradient_clip\n learning_rate = (\n self.params.learning_rate or\n self.model.get_learning_rate(global_step, self.batch_size))\n \n clipped_grads = avg_grads\n\n learning_rate = tf.identity(learning_rate, 
name='learning_rate')\n opt = tf.train.GradientDescentOptimizer(learning_rate)\n \n\n loss_scale_params = variable_mgr_util.AutoLossScaleParams(\n enable_auto_loss_scale=self.enable_auto_loss_scale,\n loss_scale=self.loss_scale,\n loss_scale_normal_steps=self.loss_scale_normal_steps,\n inc_loss_scale_every_n=self.params.fp16_inc_loss_scale_every_n,\n is_chief=not self.job_name or self.task_index == 0)\n\n self.variable_mgr.append_apply_gradients_ops(\n gradient_state, opt, clipped_grads, training_ops, loss_scale_params)\n train_op = tf.group(*(training_ops + update_ops))\n\n fetches['train_op'] = train_op\n fetches['total_loss'] = total_loss\n return fetches", "def build_inputs(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n\n elif self.mode == \"test\":\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n \n else:\n # Prefetch serialized tf.Example protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader,\n self.config.input_file_pattern,\n shuffle=self.config.shuffle_input_data,\n capacity=self.config.input_queue_capacity,\n num_reader_threads=self.config.num_input_reader_threads)\n\n # Deserialize a batch.\n serialized = input_queue.dequeue_many(self.config.batch_size)\n s1, s2, label = input_ops.parse_example_batch(\n serialized)\n\n encode_ids1 = s1.ids\n encode_ids2 = s2.ids\n\n encode_mask1 = s1.mask\n encode_mask2 = s2.mask\n \n\n\n self.encode_ids1 = encode_ids1\n self.encode_ids2 = encode_ids2\n\n self.encode_mask1 = encode_mask1\n self.encode_mask2 = encode_mask2\n\n self.label = label", "def enqueue(self, func):\n self.queue.put(func)", "def _InfeedLoopForInput(self, dataset_name, inp_instance, sess=None):\n tf.logging.info(f'_InfeedLoop start {self._program_name} '\n f'on dataset {dataset_name}')\n try:\n for i in range(self._steps_per_loop):\n tf.logging.vlog(1, '_InfeedLoop %d', i)\n sess.run(inp_instance.tpu_infeed_op)\n self._WriteInputDataStats(\n sess, dataset_name=dataset_name, inp_instance=inp_instance)\n tf.logging.info('_InfeedLoop done')\n except Exception:\n tf.logging.exception('_InfeedLoop exception')\n raise", "def dequeue(self):", "def enqueue(self):\n # TensorFlow Input Pipelines for Large Data Sets\n # ischlag.github.io\n # http://ischlag.github.io/2016/11/07/tensorflow-input-pipeline-for-large-datasets/\n # http://web.stanford.edu/class/cs20si/lectures/slides_09.pdf\n under = 0\n max = len(self.train_x)\n try:\n while not self.coord.should_stop():\n # print(\"starting to write into queue\")\n upper = under + self.capacity\n # print(\"try to enqueue \", under, \" to \", upper)\n if upper <= max:\n curr_x = self.train_x[under:upper]\n curr_t = self.train_t[under:upper]\n curr_e = self.train_e[under:upper]\n under = upper\n else:\n rest = upper - max\n curr_x = np.concatenate((self.train_x[under:max], self.train_x[0:rest]))\n curr_t = np.concatenate((self.train_t[under:max], self.train_t[0:rest]))\n curr_e = np.concatenate((self.train_e[under:max], self.train_e[0:rest]))\n under = rest\n\n self.session.run(self.enqueue_op,\n feed_dict={self.x: curr_x, self.t: curr_t, 
self.e: curr_e})\n except tf.errors.CancelledError:\n print(\"finished enqueueing\")" ]
[ "0.67614406", "0.66683185", "0.5815572", "0.58151203", "0.5797748", "0.54712415", "0.538349", "0.5365341", "0.5356222", "0.53494734", "0.53477305", "0.53301996", "0.524572", "0.52256685", "0.52183527", "0.51829576", "0.51430917", "0.50617474", "0.50612164", "0.50002146", "0.49927875", "0.48938766", "0.48639557", "0.4862376", "0.4844225", "0.47816557", "0.47654092", "0.4749827", "0.4736977", "0.47353503" ]
0.83329666
0
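The enqueue snippets above batch variable-length sequences with a PAD_ID before the data heads toward the TPU infeed. A minimal, hypothetical tf.data sketch of that padded-batch step (the sequence values, shapes, and batch size are invented for illustration and are not taken from any row):

import tensorflow as tf

PAD_ID = 0  # hypothetical pad id, mirroring the PAD_ID constant in the snippet above

# Toy variable-length token-id sequences, each paired with a scalar label.
def gen():
  yield [1, 2, 3], 0
  yield [4, 5], 1

dataset = tf.data.Dataset.from_generator(
    gen,
    output_types=(tf.int32, tf.int32),
    output_shapes=(tf.TensorShape([None]), tf.TensorShape([])))

# Pad every batch to its longest sequence so each batch has a fixed shape,
# which is what a TPU infeed queue ultimately requires.
batched = dataset.padded_batch(
    batch_size=2,
    padded_shapes=(tf.TensorShape([None]), tf.TensorShape([])),
    padding_values=(PAD_ID, 0))

As the comment in the snippet above notes, the padded positions are masked out later, so the padding itself does not affect the computation.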
dequeue_fn is used by TPU to retrieve the tensors.
def dequeue_fn(): # In the model-parallel case, both the host-side and device-side # computations must agree on the core on which infeed takes place. We # choose to perform infeed on logical core 0 of each replica. values = self._infeed_queue.generate_dequeue_op(tpu_device=0) # The unflatten process uses the structure information recorded above. return self._inputs_structure_recorder.unflatten_features_and_labels( values)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _OutfeedDequeue(self, decode_nm):\n num_decode_tensors = len(decode_nm.Flatten())\n outfeed_ops = [[]] * num_decode_tensors\n device_assignment = py_utils.GetTpuDeviceAssignment()\n assert device_assignment\n num_cores_per_replica = (1 if self.spmd else\n (device_assignment.num_cores_per_replica))\n for replica in range(device_assignment.num_replicas):\n for core in range(num_cores_per_replica):\n with tf.device(device_assignment.host_device(replica, core)):\n outfeeds_per_core = tpu_ops.outfeed_dequeue_tuple(\n dtypes=[x.dtype for x in decode_nm.Flatten()],\n shapes=[x.shape for x in decode_nm.Flatten()],\n device_ordinal=device_assignment.tpu_ordinal(replica, core))\n for idx_outfeed, out_feed in enumerate(outfeeds_per_core):\n outfeed_ops[idx_outfeed] = outfeed_ops[idx_outfeed] + [out_feed]\n return [tf.concat(per_outfeed, axis=0) for per_outfeed in outfeed_ops]", "def dequeue(self):", "def _dequeue(self):\n return self._queue.popleft()", "def get_fn_enqueue_op(tf_fn_phd):\n raw_im_ = tf.read_file(tf_fn_phd)\n im_ = tf.image.decode_jpeg(raw_im_)\n r_im_ = tf.image.resize_images(im_, IM_H, IM_W)\n enq_op = tf_fn_q.enqueue([tf.to_float(r_im_)])\n return enq_op", "def test_dequeue(self):\r\n from numpy import random\r\n queue = Queue(shape=(11, 2, 3, 4), dtype='int16')\r\n for i in range(100):\r\n arr_in = random.randint(4096,size = (2,2,3,4))\r\n queue.enqueue(arr_in)\r\n arr_out = queue.dequeue(2)\r\n self.assertEqual((arr_in==arr_out).all(), True)\r\n self.assertEqual(queue.length,0)\r\n self.assertEqual(queue.global_rear,(i+1)*2)\r\n self.assertEqual(queue.rear,2*(i+1)-int(2*(i+1)/11)*11)\r\n\r\n from numpy import random\r\n queue = Queue(shape=(32, 2, 3, 4), dtype='int16')\r\n for i in range(100):\r\n arr_in = random.randint(4096,size = (1,2,3,4))\r\n queue.enqueue(arr_in)\r\n self.assertEqual(queue.length,1)\r\n arr_out = queue.dequeue(1)\r\n self.assertEqual((arr_in==arr_out).all(), True)\r\n self.assertEqual(queue.length,0)\r\n self.assertEqual(queue.global_rear,(i+1)*1)\r\n self.assertEqual(queue.rear,1*(i+1)-int(1*(i+1)/queue.shape[0])*queue.shape[0])", "def dequeue(Q):\n # x = Q.pop(0) # default is to pop from end (LIFO stack), param 0 indicates FIFO queue\n x = Q.get_nowait() # default is to pop from end (LIFO stack), param 0 indicates FIFO queue\n if debug: \n print(\"dequeue :\", end=\" \")\n show_queue(Q)\n return(Q, x)", "def dequeue(self):\n pass", "def dequeue(self):\n pass", "def dequeue(self):\n temp = self.front\n self.front = self.front.getPtr()\n return temp.getData()", "def convert_to_single_tpu_train_step(self, dequeue_fn):\n\n host_call = _OutfeedHostCall(self._ctx)\n captured_scaffold_fn = _CapturedObject()\n\n def train_step(loss):\n \"\"\"Training step function for use inside a while loop.\"\"\"\n del loss # unused; required in function signature.\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n\n estimator_spec = self._verify_estimator_spec(\n self._call_model_fn(features, labels))\n loss, train_op = estimator_spec.loss, estimator_spec.train_op\n\n if isinstance(estimator_spec, TPUEstimatorSpec):\n captured_scaffold_fn.capture(estimator_spec.scaffold_fn)\n else:\n captured_scaffold_fn.capture(None)\n\n # We must run train_op to update the variables prior to running the\n # outfeed.\n with ops.control_dependencies([train_op]):\n host_call_outfeed_ops = []\n if (isinstance(estimator_spec, TPUEstimatorSpec) and\n estimator_spec.host_call is not None):\n host_call.record({'host_call': estimator_spec.host_call})\n 
host_call_outfeed_ops = host_call.create_enqueue_op()\n with ops.control_dependencies(host_call_outfeed_ops):\n return array_ops.identity(loss)\n\n return train_step, host_call, captured_scaffold_fn", "def enqueue_ops_fn():\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def wrapper(sources, dtypes, capacity, num_threads):\n # enqueue function\n def enqueue_func(sess, op):\n # read data from source queue\n data = func(sess.run(sources))\n # create feeder dict\n feed_dict = {}\n for ph, col in zip(placeholders, data):\n feed_dict[ph] = col\n # run session\n sess.run(op, feed_dict=feed_dict)\n\n # create place holder list\n placeholders = []\n for dtype in dtypes:\n placeholders.append(tf.placeholder(dtype=dtype))\n\n # create FIFO queue\n queue = tf.FIFOQueue(capacity, dtypes=dtypes)\n\n # enqueue operation\n enqueue_op = queue.enqueue(placeholders)\n\n # create queue runner\n runner = _FuncQueueRunner(enqueue_func, queue, [enqueue_op] * num_threads)\n\n # register to global collection\n tf.train.add_queue_runner(runner)\n\n # return de-queue operation\n return queue.dequeue()", "def generate_infeed_enqueue_ops_and_dequeue_fn(self):\n # While tf.while_loop is called, the body function, which invokes\n # `enqueue_fn` passed in, is called to construct the graph. So, input_fn\n # structure is recorded.\n enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (\n self._invoke_input_fn_and_record_structure())\n\n self._validate_input_pipeline()\n\n def dequeue_fn():\n \"\"\"dequeue_fn is used by TPU to retrieve the tensors.\"\"\"\n # In the model-parallel case, both the host-side and device-side\n # computations must agree on the core on which infeed takes place. 
We\n # choose to perform infeed on logical core 0 of each replica.\n values = self._infeed_queue.generate_dequeue_op(tpu_device=0)\n # The unflatten process uses the structure information recorded above.\n return self._inputs_structure_recorder.unflatten_features_and_labels(\n values)\n\n return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)", "def _dequeue(self):\n node = self.head.next\n self._remove_node(node)\n return node", "def test_dequeue_removes_value():\n queue = Queue()\n queue.enqueue('a')\n queue.dequeue()\n assert queue._queue.last_node is None\n assert queue._queue.first_node is None", "def convert_to_single_tpu_eval_step(self, dequeue_fn):\n host_calls = _OutfeedHostCall(self._ctx)\n captured_scaffold_fn = _CapturedObject()\n\n def eval_step(total_loss):\n \"\"\"Evaluation step function for use inside a while loop.\"\"\"\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n\n tpu_estimator_spec = self._call_model_fn(features, labels)\n if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):\n raise RuntimeError(\n 'estimator_spec used by TPU evaluation must have type'\n '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))\n\n loss = tpu_estimator_spec.loss\n captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)\n to_record = {}\n to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics\n if tpu_estimator_spec.host_call is not None:\n # We assume that evaluate won't update global step, so we don't wrap\n # this host_call.\n to_record['host_call'] = tpu_estimator_spec.host_call\n host_calls.record(to_record)\n\n with ops.control_dependencies(host_calls.create_enqueue_op()):\n return math_ops.add(total_loss, loss)\n\n return eval_step, host_calls, captured_scaffold_fn", "def test_dequeue_2(self):\r\n from numpy import random\r\n queue = Queue(shape=(100,10), dtype='int16')\r\n for i in range(5): queue.enqueue( random.randint(0,4096,(16,10)) )\r\n for i in range(1000):\r\n self.assertEqual(queue.dequeue(16).shape,(16,10))\r\n queue.enqueue(random.randint(0,4096,(16,10)) )", "def _dequeue(self) -> Optional[torch.cuda.Event]:\n if self._queue:\n event = self._queue.popleft()\n return event\n return None", "def dequeue(self):\n raise NotImplementedError(\"dequeue: You should have implemented this method!\")", "def test_the_queue_dequeue_multi_values_phase_two(the_queue):\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n the_queue.enqueue(4)\n the_queue.enqueue(5)\n the_queue.dequeue()\n assert (the_queue.dequeue(),\n the_queue._new_dll.tail.data) == (3, 4)", "def get_input_fn_queue(pattern, flags, batch_size):\n\n def input_fn(params=None):\n \"\"\"Input function using queues for GPU.\"\"\"\n del params\n filenames = gfile.Glob(os.path.join(flags.data_dir, pattern))\n if not filenames:\n raise RuntimeError('No data files found.')\n filename_queue = tf.train.string_input_producer(filenames, shuffle=True)\n reader = tf.TFRecordReader()\n\n _, val = reader.read(filename_queue)\n serialized_input = tf.reshape(val, shape=[1])\n\n image_seq = None\n\n for i in range(0, flags.sequence_length, flags.skip_num):\n image_name = 'image_' + str(i)\n\n if flags.dataset_type == 'robot':\n pose_name = 'state_' + str(i)\n action_name = 'action_' + str(i)\n joint_pos_name = 'joint_positions_' + str(i)\n features = {\n pose_name:\n tf.FixedLenFeature([flags.pose_dim], tf.float32),\n image_name:\n tf.FixedLenFeature([1], tf.string),\n action_name:\n tf.FixedLenFeature([flags.pose_dim], tf.float32),\n joint_pos_name:\n 
tf.FixedLenFeature([flags.joint_pos_dim], tf.float32)\n }\n else:\n features = {\n image_name: tf.FixedLenFeature([1], tf.string),\n }\n\n parsed_input = tf.parse_example(serialized_input, features)\n\n # Process image\n image_buffer = tf.reshape(parsed_input[image_name], shape=[])\n image = tf.image.decode_jpeg(image_buffer, channels=COLOR_CHAN)\n image = tf.image.resize_images(\n image, (IMG_HEIGHT, IMG_WIDTH), method=tf.image.ResizeMethod.BICUBIC)\n image = tf.cast(tf.expand_dims(image, 0), tf.float32) / 255.0\n\n if flags.dataset_type == 'robot':\n pose = tf.reshape(parsed_input[pose_name], shape=[flags.pose_dim])\n pose = tf.expand_dims(pose, 0)\n action = tf.reshape(parsed_input[action_name], shape=[flags.pose_dim])\n action = tf.expand_dims(action, 0)\n joint_pos = tf.reshape(\n parsed_input[joint_pos_name], shape=[flags.joint_pos_dim])\n joint_pos = tf.expand_dims(joint_pos, 0)\n else:\n pose = tf.zeros([1, flags.pose_dim])\n action = tf.zeros([1, flags.pose_dim])\n joint_pos = tf.zeros([1, flags.joint_pos_dim])\n\n if i == 0:\n image_seq = image\n action_seq, pose_seq, joint_pos_seq = action, pose, joint_pos\n else:\n image_seq = tf.concat([image_seq, image], 0)\n action_seq = tf.concat([action_seq, action], 0)\n pose_seq = tf.concat([pose_seq, pose], 0)\n joint_pos_seq = tf.concat([joint_pos_seq, joint_pos], 0)\n\n [images, actions, poses, joint_pos] = tf.train.shuffle_batch(\n [image_seq, action_seq, pose_seq, joint_pos_seq],\n batch_size,\n num_threads=4,\n capacity=200 * batch_size,\n min_after_dequeue=batch_size * 10,\n )\n\n joint_poses = tf.concat([joint_pos, poses], 2)\n\n output_features = {\n IMAGE_FEATURE_NAME: images,\n JOINT_POSE_FEATURE_NAME: joint_poses,\n ACTION_FEATURE_NAME: actions\n }\n\n return output_features, None\n\n return input_fn", "def test_the_queue_dequeue_multi_values_phase_one(the_queue):\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n the_queue.enqueue(4)\n the_queue.enqueue(5)\n the_queue.dequeue()\n assert the_queue._new_dll.tail.data == 3", "def test_the_queue_dequeue(the_queue):\n the_queue.enqueue(2)\n assert the_queue.dequeue() == 2", "def _safe_dequeue(self):\n @retry(\n stop_max_attempt_number=self.max_sequential_errors,\n # Wait 2^n * 1 seconds between retries, up to 10 seconds.\n wait_exponential_multiplier=1000, wait_exponential_max=10000,\n retry_on_exception=lambda e: not isinstance(e, KeyboardInterrupt))\n def inner():\n return self.queue.dequeue()\n return inner()", "def dequeue(queue):\n item = front(queue)\n queue.front = queue.front.next\n if empty_queue(queue):\n queue.back = None\n\n queue.size = queue.size - 1\n\n return item", "def convert_to_single_tpu_predict_step(self, dequeue_fn):\n host_calls = _OutfeedHostCall(self._ctx)\n captured_scaffold_fn = _CapturedObject()\n\n def predict_step(unused_scalar_stopping_signal):\n \"\"\"Evaluation step function for use inside a while loop.\"\"\"\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n stopping_signals = inputs.signals()\n\n assert stopping_signals is not None, (\n 'Internal Error: `signals` is missing.')\n\n tpu_estimator_spec = self._call_model_fn(\n features, labels, is_export_mode=False)\n if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):\n raise RuntimeError(\n 'estimator_spec used by TPU prediction must have type'\n '`TPUEstimatorSpec`. 
Got {}'.format(type(tpu_estimator_spec)))\n\n captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)\n to_record = {}\n identity_fn = lambda **kwargs: kwargs\n # TODO(xiejw): Adds validation for prediction dictionrary.\n # TODO(xiejw): Adds support for single tensor as predictions.\n if not isinstance(tpu_estimator_spec.predictions, dict):\n raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')\n to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]\n to_record['signals'] = [identity_fn, stopping_signals]\n if tpu_estimator_spec.host_call is not None:\n to_record['host_call'] = tpu_estimator_spec.host_call\n host_calls.record(to_record)\n\n with ops.control_dependencies(host_calls.create_enqueue_op()):\n return _StopSignals.as_scalar_stopping_signal(stopping_signals)\n\n return predict_step, host_calls, captured_scaffold_fn", "def dequeue(self):\n\n temp = self.front\n self.front = self.front.next\n return temp.data", "def test_dequeue_returns_value():\n queue = Queue()\n queue.enqueue('a')\n assert queue.dequeue() is 'a'", "def dequeue(self): ##################### <-\n value = self.lst[0]\n self.lst = self.lst[1:]\n return value", "def enqueue_ops_fn():\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops" ]
[ "0.6000878", "0.5903571", "0.58939743", "0.58114827", "0.5764046", "0.5732872", "0.5658689", "0.5658689", "0.5578449", "0.5576985", "0.5566643", "0.5562891", "0.5546443", "0.55329204", "0.5524676", "0.5519212", "0.5513018", "0.55024093", "0.5486132", "0.54662687", "0.54569936", "0.54174584", "0.5413765", "0.53943807", "0.53309405", "0.5298198", "0.5297616", "0.5289376", "0.5284201", "0.5263972" ]
0.72722965
0
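The dequeue_fn row above relies on flattening structured (features, labels) into a plain tensor list for the infeed queue and restoring that structure after dequeue. A small sketch of the same flatten/unflatten round trip using tf.nest from recent TensorFlow releases (the feature names and values here are made up; the real code replays the structure through its own recorder rather than tf.nest directly):

import tensorflow as tf

# Hypothetical structured inputs, roughly what an input_fn might yield.
features = {"input_ids": tf.constant([[1, 2, 3]]),
            "input_mask": tf.constant([[1, 1, 0]])}
labels = tf.constant([0])
structure = (features, labels)

# The infeed queue transports only a flat list of tensors ...
flat_tensors = tf.nest.flatten(structure)

# ... and the dequeue side rebuilds the original (features, labels) layout.
restored_features, restored_labels = tf.nest.pack_sequence_as(structure, flat_tensors)

Only the flat tensors cross the infeed boundary; the host-side structure information is what lets pack_sequence_as (or the recorder in the code above) restore the nested layout.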
Deploys the input pipeline and records the input structure.
def _invoke_input_fn_and_record_structure(self): enqueue_ops = [] infeed_queues = [] all_hooks = [] num_hosts = self._ctx.num_hosts tpu_host_placement_fn = self._ctx.tpu_host_placement_function run_infeed_loop_on_coordinator = True if self._sharded_per_core: # Per-Core input pipeline deployment. # Invoke input pipeline for each core and placed on the corresponding # host. for host_id in range(num_hosts): host_device = tpu_host_placement_fn(host_id=host_id) with ops.device(host_device): with ops.name_scope('input_pipeline_task%d' % (host_id)): enqueue_ops_fn, captured_infeed_queue = ( generate_per_core_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder)) if _WRAP_INPUT_FN_INTO_WHILE_LOOP: run_infeed_loop_on_coordinator = False enqueue_ops.append( _wrap_computation_in_while_loop( device=host_device, op_fn=enqueue_ops_fn)) else: enqueue_ops.append(enqueue_ops_fn()) # Infeed_queue_getter must be called after enqueue_ops_fn is called. infeed_queues.append(captured_infeed_queue.get()) else: for host_id in range(num_hosts): host_device = tpu_host_placement_fn(host_id=host_id) with ops.device(host_device): with ops.name_scope('input_pipeline_task%d' % (host_id)): if self._ctx.is_input_per_host_with_iterators(): enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = ( generate_per_host_v2_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder, host_device, host_id)) else: enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = ( generate_per_host_enqueue_ops_fn_for_host( self._ctx, self._input_fn, self._inputs_structure_recorder, self._batch_axis, host_device, host_id)) all_hooks.extend(hooks) # NOTE(xiejw): We dispatch here based on the return type of the # users `input_fn`. # # 1. If input_fn returns a Dataset instance, we initialize the # iterator outside of tf.while_loop, and call the iterator.get_next # inside tf.while_loop. This should be always safe. # # 2. If input_fn returns (features, labels), it is too late to wrap # them inside tf.while_loop, as resource initialization cannot be # handled in TF control flow properly. In this case, we will use # python loop to enqueue the data into TPU system. This may be # slow compared to the previous case. if is_dataset: run_infeed_loop_on_coordinator = False wrap_fn = ( _wrap_computation_in_while_loop if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else _wrap_computation_in_while_loop_with_stopping_signals) enqueue_ops.append( wrap_fn(device=host_device, op_fn=enqueue_ops_fn)) else: enqueue_ops.append(enqueue_ops_fn()) infeed_queues.append(captured_infeed_queue.get()) # infeed_queue is used to generate dequeue ops. The only thing it uses for # dequeue is dtypes and types. So, any one can be used. Here, grab the # first one. self._infeed_queue = infeed_queues[0] return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform(self, inputs: list, stage: str) -> datapack.DataPack:", "def dataflow():\n print 'Building',TRAINER_NAME,'package.'\n subprocess.check_call(['python', 'setup.py', 'sdist', '--format=gztar'])\n subprocess.check_call(['gsutil', '-q', 'cp',\n os.path.join('dist', TRAINER_NAME),\n TRAINER_URI])\n opts = None\n if args.cloud:\n options = {\n 'staging_location': os.path.join(args.output_dir, 'tmp', 'staging'),\n 'temp_location': os.path.join(args.output_dir, 'tmp'),\n 'job_name': ('cloud-ml-sample-iris' + '-'\n + datetime.datetime.now().strftime('%Y%m%d%H%M%S')),\n 'project': args.project_id,\n # Dataflow needs a copy of the version of the cloud ml sdk that\n # is being used.\n 'extra_packages': [ml.sdk_location, TRAINER_URI],\n 'teardown_policy': 'TEARDOWN_ALWAYS',\n 'no_save_main_session': True\n }\n opts = beam.pipeline.PipelineOptions(flags=[], **options)\n else:\n # For local runs, the trainer must be installed as a module.\n subprocess.check_call(['pip', 'install', '--upgrade', '--force-reinstall',\n '--user', os.path.join('dist', TRAINER_NAME)])\n\n p = beam.Pipeline(get_pipeline_name(), options=opts)\n\n # Every function below writes its ouput to a file. The inputs to these\n # functions are also optional; if they are missing, the input values are read\n # from a file. Therefore if running this script multiple times, some steps can\n # be removed to prevent recomputing values.\n metadata, train_features, eval_features, predict_features = preprocess(p)\n\n trained_model, results = train(p, train_features, eval_features, metadata)\n\n evaluations = evaluate(p, trained_model, eval_features)\n\n confusion_matrix, precision_recall, logloss = (\n model_analysis(p, evaluations, metadata))\n\n if args.cloud:\n deployed = deploy_model(p, args.deploy_model_name,\n args.deploy_model_version, trained_model)\n # Use our deployed model to run a batch prediction.\n output_uri = os.path.join(args.output_dir, 'batch_prediction_results')\n deployed | \"Batch Predict\" >> ml.Predict([args.predict_data], output_uri,\n region='us-central1',\n data_format='TEXT')\n\n print 'Deploying %s version: %s' % (args.deploy_model_name,\n args.deploy_model_version)\n\n p.run()\n\n if args.cloud:\n print 'Deployed %s version: %s' % (args.deploy_model_name,\n args.deploy_model_version)", "def process_inputs(self, inputs):", "def pipeline(self):\n\n self._get_data()\n self._upload_to_raw()", "def __call__(self, inputs):\n self.inputs = inputs\n self.process_inputs()\n self.init_mesh()\n mesh_modified = self.run_tasks()\n self.write_output_mesh(mesh_modified)", "def setup(self):\n self.ctx.current_structure = self.inputs.structure", "def processInputs(self):", "def bundle_inputs(self):\n pass", "def __prepare_input_files_locally(self, job_wrapper):\n prepare_input_files_cmds = getattr(job_wrapper, 'prepare_input_files_cmds', None)\n if prepare_input_files_cmds is not None:\n for cmd in prepare_input_files_cmds: # run the commands to stage the input files\n if 0 != os.system(cmd):\n raise Exception('Error running file staging command: %s' % cmd)\n job_wrapper.prepare_input_files_cmds = None # prevent them from being used in-line", "def begin(self, pipeline: osbuild.Pipeline):", "def _stage_inputs(stage, phase):\n\n def arrayify(martian_io_field):\n \"\"\"Convert the type of a Martian input field to an array of that type.\n\n This is necessary for the join phase.\n \"\"\"\n return mro_parser.MartianIOField(\n martian_io_field.modifier,\n martian_io_field.type + '[]',\n martian_io_field.name,\n 
martian_io_field.help)\n\n def add_tag_to_name(martian_io_field, tag):\n return mro_parser.MartianIOField(\n martian_io_field.modifier,\n martian_io_field.type,\n martian_io_field.name + '_' + tag,\n martian_io_field.help)\n\n if phase == 'split':\n return stage.inputs\n elif phase == 'main':\n return stage.inputs + stage.splits\n elif phase == 'join':\n # The inputs to join are arrays of the split and output fields since it's pulling\n # together outputs of multiple main steps.\n # Also, \"split\" and \"output\" need to be added to the field names or there are collisions\n return stage.inputs + \\\n [add_tag_to_name(arrayify(s), \"split\") for s in stage.splits] + \\\n [add_tag_to_name(arrayify(s), \"output\") for s in stage.outputs]", "def pipeline(root):\n _ = (\n root | \"Create test files\" >> beam.Create(test_files)\n | \"Read test files\" >> beam.FlatMap(read_file)\n | \"test Shuffle\" >> beam.Reshuffle()\n | \"Preproc test docs\" >> beam.FlatMap(preproc_doc)\n | \"record test Shuffle\" >> beam.Reshuffle()\n | \"Write to test tfrecord\" >> beam.io.WriteToTFRecord(\n FLAGS.output_file + \".\" + FLAGS.format + \".test.tfrecord\",\n num_shards=100))\n _ = (\n root | \"Create dev files\" >> beam.Create(dev_files)\n | \"Read dev files\" >> beam.FlatMap(read_file)\n | \"dev Shuffle\" >> beam.Reshuffle()\n | \"Preproc dev docs\" >> beam.FlatMap(preproc_doc)\n | \"record dev Shuffle\" >> beam.Reshuffle()\n | \"Write to dev tfrecord\" >> beam.io.WriteToTFRecord(\n FLAGS.output_file + \".\" + FLAGS.format + \".dev.tfrecord\",\n num_shards=100))\n _ = (\n root | \"Create train files\" >> beam.Create(train_files)\n | \"Read train files\" >> beam.FlatMap(read_file)\n | \"train Shuffle\" >> beam.Reshuffle()\n | \"Preproc train docs\" >> beam.FlatMap(preproc_doc)\n | \"record train Shuffle\" >> beam.Reshuffle()\n | \"Write to train tfrecord\" >> beam.io.WriteToTFRecord(\n FLAGS.output_file + \".\" + FLAGS.format + \".train.tfrecord\",\n num_shards=500))\n return", "def pipeline(root):\n _ = (\n root | \"Create test files\" >> beam.Create(test_files)\n | \"Read test files\" >> beam.FlatMap(read_file)\n | \"test Shuffle\" >> beam.Reshuffle()\n | \"Preproc test docs\" >> beam.FlatMap(preproc_doc)\n | \"record test Shuffle\" >> beam.Reshuffle()\n | \"Write to test tfrecord\" >> beam.io.WriteToTFRecord(\n FLAGS.output_file + \".cc_cpc.test.tfrecord\", num_shards=50))\n _ = (\n root | \"Create train files\" >> beam.Create(train_files)\n | \"Read train files\" >> beam.FlatMap(read_file)\n | \"train Shuffle\" >> beam.Reshuffle()\n | \"Preproc train docs\" >> beam.FlatMap(preproc_doc)\n | \"record train Shuffle\" >> beam.Reshuffle()\n | \"Write to train tfrecord\" >> beam.io.WriteToTFRecord(\n FLAGS.output_file + \".cc_cpc.train.tfrecord\", num_shards=450))\n return", "def write_model_inputs(\n scenario_directory, scenario_id, subscenarios, subproblem, stage, conn\n):\n # Load in the required capacity type modules\n required_capacity_type_modules = get_required_capacity_type_modules(\n scenario_id, subscenarios, conn\n )\n imported_capacity_type_modules = load_tx_capacity_type_modules(\n required_capacity_type_modules\n )\n\n # Write module-specific inputs\n for op_m in required_capacity_type_modules:\n if hasattr(imported_capacity_type_modules[op_m], \"write_model_inputs\"):\n imported_capacity_type_modules[op_m].write_model_inputs(\n scenario_directory, scenario_id, subscenarios, subproblem, stage, conn\n )\n else:\n pass", "def create_inputs_recipe():\n module_name, _ = 
os.path.splitext(os.path.basename(__file__))\n path = os.path.join(CREATED_INPUTS_PATH_FOR_TESTS, module_name)\n os.makedirs(path, exist_ok=True)\n os.chdir(path)\n os.makedirs(\"inputs/\", exist_ok=True)\n print('Current working directory:\\n {:s}'.format(os.getcwd()))\n\n for filename, _ in input_pars:\n print('Downloading files...')\n basename = filename.split(\"_\")[0] + \".fits\"\n sci_path = download_from_archive(basename)\n sci_ad = astrodata.open(sci_path)\n data_label = sci_ad.data_label()\n\n print('Reducing pre-processed data:')\n logutils.config(file_name='log_{}.txt'.format(data_label))\n p = GNIRSLongslit([sci_ad])\n p.prepare(bad_wcs=\"fix\")\n p.addDQ()\n p.addVAR(read_noise=True)\n p.ADUToElectrons()\n p.addVAR(poisson_noise=True)\n # p.flatCorrect()\n p.makeIRAFCompatible()\n\n os.chdir(\"inputs/\")\n processed_ad = p.writeOutputs().pop()\n os.chdir(\"../\")\n print('Wrote pre-processed file to:\\n'\n ' {:s}'.format(processed_ad.filename))", "def on_incoming_records(self, connection: ConnectionInterface) -> None:\n self.generate_metadata()\n\n df = connection.record_containers[0].build_dataframe()\n df[\"optional_value\"] = self.workflow_config[\"Value\"]\n\n self.output_anchor.push_records(\n generate_records_from_df(df, self.output_anchor.record_info)\n )\n\n connection.clear_records()", "def impulse_to_input(stages, pipeline_context):\n # type: (Iterable[Stage], TransformContext) -> Iterator[Stage]\n for stage in stages:\n for transform in list(stage.transforms):\n if transform.spec.urn == common_urns.primitives.IMPULSE.urn:\n stage.transforms.remove(transform)\n stage.transforms.append(\n beam_runner_api_pb2.PTransform(\n unique_name=transform.unique_name,\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=bundle_processor.DATA_INPUT_URN,\n payload=IMPULSE_BUFFER),\n outputs=transform.outputs))\n yield stage", "def main():\n\n args = _parse_arguments(sys.argv)\n config_path = os.path.abspath(\n os.path.join(__file__, os.pardir, 'preprocessing_config.ini'))\n config = _parse_config('CLOUD' if args.cloud else 'LOCAL',\n config_path)\n ml_project = args.project_id\n options = {'project': ml_project}\n\n if args.cloud:\n if not args.job_name:\n raise ValueError('Job name must be specified for cloud runs.')\n options.update({\n 'job_name': args.job_name,\n 'num_workers': int(config.get('num_workers')),\n 'max_num_workers': int(config.get('max_num_workers')),\n 'staging_location': os.path.join(args.job_dir, 'staging'),\n 'temp_location': os.path.join(args.job_dir, 'tmp'),\n 'region': config.get('region'),\n 'setup_file': os.path.abspath(\n os.path.join(__file__, '../..', 'dataflow_setup.py')),\n })\n pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)\n _set_logging(config.get('log_level'))\n\n with beam.Pipeline(config.get('runner'), options=pipeline_options) as p:\n preprocess.run(p, args.input_data, args.job_dir)", "def run(argv=None):\n program_conf = get_program_conf()\n\n # Test usage of utils... 
do something with ddl\n print(get_ddl_list('dev')[2])\n\n # Parse args\n program_args = program_conf[PROGRAM_ARGS_CONF]\n pipeline_args = program_conf[PIPELINE_ARGS_CONF]\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = True\n\n # Get schema path key for validation\n gcs_client = storage.Client()\n etl_config_dict = get_gcs_json_as_dict(program_args[ETL_CONFIG_PATH_CONF], gcs_client)\n schema_dict = get_gcs_json_as_dict(program_args[SCHEMA_PATH_KEY], gcs_client)\n input_file_path = program_args[INPUT_CONF]\n\n with beam.Pipeline(options=pipeline_options) as pipeline:\n validated_records = (\n pipeline \n | 'read' >> ReadFromText(input_file_path)\n | 'validate' >> beam.ParDo(ValidateRecord(schema_dict, input_file_path, etl_config_dict))\n | 'filter_data' >> beam.Filter(lambda x: x[1] and x[2] and x[3])\n | 'recover_data' >> beam.Map(lambda x: x[0])\n # | beam.Map(print)\n )\n if IS_VALID_FILE:\n validated_records | 'write_success' >> WriteToText(program_args[OUTPUT_CONF]) \n else:\n validated_records | 'write_reject' >> WriteToText(program_args[REJECT_CONF])", "def Do(self, input_dict: Dict[Text, List[types.Artifact]],\n output_dict: Dict[Text, List[types.Artifact]],\n exec_properties: Dict[Text, Any]) -> None:\n self._log_startup(input_dict, output_dict, exec_properties)\n model_push = artifact_utils.get_single_instance(\n output_dict[PUSHED_MODEL_KEY])\n\n model_push_uri = model_push.uri\n model_export = artifact_utils.get_single_instance(input_dict[MODEL_KEY])\n model_export_uri = model_export.uri\n logging.info('Model pushing.')\n # Copy the model to pushing uri.\n model_path = path_utils.serving_model_path(model_export_uri)\n model_version = path_utils.get_serving_model_version(model_export_uri)\n logging.info('Model version is %s', model_version)\n io_utils.copy_dir(model_path, os.path.join(model_push_uri, model_version))\n logging.info('Model written to %s.', model_push_uri)\n\n push_destination = pusher_pb2.PushDestination()\n json_format.Parse(exec_properties['push_destination'], push_destination)\n serving_path = os.path.join(push_destination.filesystem.base_directory,\n model_version)\n if tf.io.gfile.exists(serving_path):\n logging.info(\n 'Destination directory %s already exists, skipping current push.',\n serving_path)\n else:\n # tf.serving won't load partial model, it will retry until fully copied.\n io_utils.copy_dir(model_path, serving_path)\n logging.info('Model written to serving path %s.', serving_path)\n\n model_push.set_int_custom_property('pushed', 1)\n model_push.set_string_custom_property('pushed_model', model_export_uri)\n model_push.set_int_custom_property('pushed_model_id', model_export.id)\n logging.info('Model pushed to %s.', serving_path)", "def writeInput(self):\n\n #self.collect.writeInput()", "def _create_input_data(self):\n SCHEMA = parse_table_schema_from_json(\n '{\"fields\": [{\"name\": \"data\", \"type\": \"BYTES\"}]}')\n\n def format_record(record):\n # Since Synthetic Source returns data as a dictionary, we should skip one\n # of the part\n import base64\n return {'data': base64.b64encode(record[1])}\n\n with TestPipeline() as p:\n ( # pylint: disable=expression-not-assigned\n p\n | 'Produce rows' >> Read(\n SyntheticSource(self.parse_synthetic_source_options()))\n | 'Format' >> Map(format_record)\n | 'Write to BigQuery' >> WriteToBigQuery(\n dataset=self.input_dataset,\n table=self.input_table,\n schema=SCHEMA,\n create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,\n 
write_disposition=BigQueryDisposition.WRITE_EMPTY))", "def run(self):\r\n for pipe in self.inputs:\r\n for row in pipe.rows():\r\n self.put(row)", "def _buildInput (self):\n\n\t\tindata = self.config['input']\n\t\tif not isinstance (indata, dict):\n\t\t\tindata = ','.join(utils.alwaysList (indata))\t\t\t\n\t\t\tdepdchan = channel.fromChannels (*[d.channel for d in self.depends])\n\t\t\tindata = {indata: depdchan if self.depends else channel.fromArgv()}\n\t\t\t\n\t\t# expand to one key-channel pairs\n\t\tfor inkeys, invals in indata.iteritems():\n\t\t\tkeys = utils.split(inkeys, ',')\n\t\t\tif callable (invals):\n\t\t\t\tvals = invals (*[d.channel.copy() for d in self.depends] if self.depends else channel.fromArgv())\n\t\t\t\tvals = vals.split()\n\t\t\telif isinstance (invals, basestring): # only for files: \"/a/b/*.txt, /a/c/*.txt\"\n\t\t\t\tvals = utils.split(invals, ',')\n\t\t\telif isinstance (invals, channel):\n\t\t\t\tvals = invals.split()\n\t\t\telif isinstance (invals, list):\n\t\t\t\tvals = channel.create(invals).split()\n\t\t\telse:\n\t\t\t\traise ValueError (\"%s: Unexpected values for input. Expect dict, list, str, channel, callable.\" % self._name())\n\t\t\t\n\t\t\twidth = len (vals)\n\t\t\tif len (keys) > width:\n\t\t\t\traise ValueError ('%s: Not enough data for input variables.\\nVarialbes: %s\\nData: %s' % (self._name(), keys, vals))\n\t\t\t\n\t\t\tfor i, key in enumerate(keys):\n\t\t\t\tintype = key.split(':')[-1]\n\t\t\t\tthekey = key.split(':')[0]\n\t\t\t\tval = vals[i].toList() #if isinstance(vals[i], channel) else vals[i]\n\n\t\t\t\tif intype not in proc.IN_VARTYPE + proc.IN_FILESTYPE + proc.IN_FILETYPE:\n\t\t\t\t\tintype = proc.IN_VARTYPE[0]\n\t\t\t\t\n\t\t\t\tif intype in proc.IN_FILESTYPE:\n\t\t\t\t\tfor x, v in enumerate(val):\n\t\t\t\t\t\tif isinstance (v, basestring):\n\t\t\t\t\t\t\tval[x] = channel.fromPath (v).toList()\n\t\t\t\t\n\t\t\t\tif self.length == 0: \n\t\t\t\t\tself.props['length'] = len (val)\n\t\t\t\tif self.length != len (val):\n\t\t\t\t\traise ValueError ('%s: Expect same lengths for input channels, but got %s and %s (keys: %s).' 
% (self._name(), self.length, len (val), key))\n\t\t\t\tself.props['indata'][thekey] = {\n\t\t\t\t\t'type': intype,\n\t\t\t\t\t'data': val\n\t\t\t\t}\n\t\t\tself.props['jobs'] = [None] * self.length", "def flow(self):\n if not self.__input_ready():\n return\n inputs = self.__get_input_df()\n output_df = self.__call__(inputs)\n if self.clear_input:\n self.input_df = {}\n for o in self.outputs:\n o.__set_input_df(self, output_df)\n o.flow()", "def read_input():\n\n filenames = sorted(glob.glob(\"%s/openflow_input/*\" % root_dir))\n\n for filename in filenames:\n log(\"Processing struct file: \" + filename)\n ofinput = process_input_file(filename)\n\n # Populate global state\n for wire_version in ofinput.wire_versions:\n version_name = of_g.of_version_wire2name[wire_version]\n versions[version_name]['classes'].update(copy.deepcopy(ofinput.classes))\n of_g.ordered_classes[wire_version].extend(ofinput.ordered_classes)", "def main():\n try:\n name = sys.argv[1]\n asset_id = sys.argv[2]\n is_new = int(sys.argv[3]) != 0\n\n work_item = {\n 'Process-Name' : name,\n 'Asset-ID' : asset_id,\n 'Is-New' : is_new,\n }\n\n module = driver.init_module(name)\n processor = driver.init_processor(module)\n\n\n try:\n work_item.update(operations.instantiate_asset(asset_id))\n except models.Asset.DoesNotExist:\n logging.error('Asset no longer exists: %s' % asset_id)\n except S3ResponseError, error:\n if error.status == 404:\n logging.error('Could not find asset in S3: %s' % asset_id)\n else:\n logging.exception('Unexpected error!')\n raise\n else:\n new_items = handle_work_item(module, processor, work_item)\n operations.publish_work_item( *new_items )\n\n except NotReadyException, e:\n logging.info(e)\n sys.exit(1)\n\n except:\n logging.exception('Failed to run processor')\n sys.exit(1)\n\n finally:\n local_path = work_item.get('Local-Path')\n if local_path and os.path.exists(local_path):\n shutil.rmtree(os.path.dirname(local_path))", "def _prepare_for_submission(self, tempfolder, inputdict):\n input_validator = self._get_input_validator(inputdict=inputdict)\n local_input_folder = input_validator(\n name='local_input_folder', valid_types=FolderData, required=False\n )\n remote_input_folder = input_validator(\n name='remote_input_folder', valid_types=RemoteData, required=False\n )\n\n parameters = input_validator(\n name='parameters', valid_types=ParameterData\n )\n param_dict = self._get_validated_parameters_dict(parameters)\n\n projections = input_validator(\n name='projections',\n valid_types=(OrbitalData, List),\n required=False\n )\n kpoints = input_validator(name='kpoints', valid_types=KpointsData)\n kpoint_path = input_validator(\n name='kpoint_path', valid_types=ParameterData, required=False\n )\n structure = input_validator(\n name='structure', valid_types=StructureData\n )\n\n settings = input_validator(\n name='settings', valid_types=ParameterData, required=False\n )\n if settings is None:\n settings_dict = {}\n else:\n settings_dict_raw = settings.get_dict()\n settings_dict = {\n key.lower(): val\n for key, val in settings_dict_raw.items()\n }\n if len(settings_dict_raw) != len(settings_dict):\n raise InputValidationError(\n 'Input settings contain duplicate keys.'\n )\n pp_setup = settings_dict.pop('postproc_setup', False)\n if pp_setup:\n param_dict.update({'postproc_setup': True})\n\n if local_input_folder is None and remote_input_folder is None and pp_setup is False:\n raise InputValidationError(\n 'Either local_input_folder or remote_input_folder must be set.'\n )\n\n code = 
input_validator(name='code', valid_types=Code)\n\n ############################################################\n # End basic check on inputs\n ############################################################\n random_projections = settings_dict.pop('random_projections', False)\n\n write_win(\n filename=tempfolder.get_abs_path(self._INPUT_FILE),\n parameters=param_dict,\n structure=structure,\n kpoints=kpoints,\n kpoint_path=kpoint_path,\n projections=projections,\n random_projections=random_projections,\n )\n\n if remote_input_folder is not None:\n remote_input_folder_uuid = remote_input_folder.get_computer().uuid\n remote_input_folder_path = remote_input_folder.get_remote_path()\n\n t_dest = get_authinfo(\n computer=remote_input_folder.get_computer(),\n aiidauser=remote_input_folder.get_user()\n ).get_transport()\n with t_dest:\n remote_folder_content = t_dest.listdir(\n path=remote_input_folder_path\n )\n\n if local_input_folder is not None:\n local_folder_content = local_input_folder.get_folder_list()\n if pp_setup:\n required_files = []\n else:\n required_files = [\n self._SEEDNAME + suffix for suffix in ['.mmn', '.amn']\n ]\n optional_files = [\n self._SEEDNAME + suffix for suffix in ['.eig', '.chk', '.spn']\n ]\n input_files = required_files + optional_files\n wavefunctions_files = ['UNK*']\n\n def files_finder(file_list, exact_patterns, glob_patterns):\n result = [f for f in exact_patterns if (f in file_list)]\n import fnmatch\n for glob_p in glob_patterns:\n result += fnmatch.filter(file_list, glob_p)\n return result\n\n # Local FolderData has precedence over RemoteData\n if local_input_folder is not None:\n found_in_local = files_finder(\n local_folder_content, input_files, wavefunctions_files\n )\n else:\n found_in_local = []\n if remote_input_folder is not None:\n found_in_remote = files_finder(\n remote_folder_content, input_files, wavefunctions_files\n )\n found_in_remote = [\n f for f in found_in_remote if f not in found_in_local\n ]\n else:\n found_in_remote = []\n\n not_found = [\n f for f in required_files\n if f not in found_in_remote + found_in_local\n ]\n if len(not_found) != 0:\n raise InputValidationError(\n \"{} necessary input files were not found: {} \".format(\n len(not_found), ', '.join(str(nf) for nf in not_found)\n )\n )\n\n remote_copy_list = []\n remote_symlink_list = []\n local_copy_list = []\n #Here we enforce that everything except checkpoints are symlinked\n #because in W90 you never modify input files on the run\n ALWAYS_COPY_FILES = [self._CHK_FILE]\n for f in found_in_remote:\n file_info = (\n remote_input_folder_uuid,\n os.path.join(remote_input_folder_path, f), os.path.basename(f)\n )\n if f in ALWAYS_COPY_FILES:\n remote_copy_list.append(file_info)\n else:\n remote_symlink_list.append(file_info)\n for f in found_in_local:\n local_copy_list.append(\n (local_input_folder.get_abs_path(f), os.path.basename(f))\n )\n\n # Add any custom copy/sym links\n remote_symlink_list += settings_dict.pop(\n \"additional_remote_symlink_list\", []\n )\n remote_copy_list += settings_dict.pop(\n \"additional_remote_copy_list\", []\n )\n local_copy_list += settings_dict.pop(\"additional_local_copy_list\", [])\n\n #######################################################################\n\n calcinfo = CalcInfo()\n calcinfo.uuid = self.uuid\n calcinfo.local_copy_list = local_copy_list\n calcinfo.remote_copy_list = remote_copy_list\n calcinfo.remote_symlink_list = remote_symlink_list\n\n codeinfo = CodeInfo()\n codeinfo.code_uuid = code.uuid\n #codeinfo.withmpi = True # Current 
version of W90 can be run in parallel\n codeinfo.cmdline_params = [self._INPUT_FILE]\n\n calcinfo.codes_info = [codeinfo]\n calcinfo.codes_run_mode = code_run_modes.SERIAL\n\n # Retrieve files\n calcinfo.retrieve_list = []\n calcinfo.retrieve_list.append(self._OUTPUT_FILE)\n calcinfo.retrieve_list.append(self._ERROR_FILE)\n if pp_setup:\n calcinfo.retrieve_list.append(self._NNKP_FILE)\n calcinfo.retrieve_singlefile_list = [\n ('output_nnkp', 'singlefile', self._NNKP_FILE)\n ]\n\n calcinfo.retrieve_list += [\n '{}_band.dat'.format(self._SEEDNAME),\n '{}_band.kpt'.format(self._SEEDNAME)\n ]\n\n if settings_dict.pop('retrieve_hoppings', False):\n calcinfo.retrieve_list += [\n '{}_wsvec.dat'.format(self._SEEDNAME),\n '{}_hr.dat'.format(self._SEEDNAME),\n '{}_centres.xyz'.format(self._SEEDNAME),\n ]\n\n # Retrieves bands automatically, if they are calculated\n\n calcinfo.retrieve_list += settings_dict.pop(\n \"additional_retrieve_list\", []\n )\n\n # pop input keys not used here\n settings_dict.pop('seedname', None)\n if settings_dict:\n raise InputValidationError(\n \"The following keys in settings are unrecognized: {}\".format(\n settings_dict.keys()\n )\n )\n\n return calcinfo", "def process(self):\n if len(self.inputs):\n self._process_input()\n while len(self.servers) > 0:\n self._process_input()\n self._write_file()", "def _preparation_workflow(self):\n self._validate_environment()\n self._validate_parameters()\n self._update_verbosity()" ]
[ "0.62789917", "0.5918441", "0.5827906", "0.5817481", "0.56151706", "0.5610592", "0.55843145", "0.5573068", "0.5568397", "0.5530364", "0.5478247", "0.5455659", "0.5426491", "0.54174775", "0.53775316", "0.53011864", "0.52758986", "0.52615386", "0.5242524", "0.52380687", "0.52357167", "0.5230712", "0.5228013", "0.5194446", "0.5181124", "0.51735497", "0.5166121", "0.51545674", "0.5146935", "0.514432" ]
0.59553194
1
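The deployment row above repeatedly wraps enqueue ops in a while loop so that a single session.run drives many infeed enqueues instead of a Python-side loop. A bare-bones, TF1-style sketch of that wrapping idea, with an ordinary FIFOQueue standing in for the real TPU infeed enqueue (the helper name and iteration count are illustrative, not from the code above):

import tensorflow as tf  # TF1-style graph mode, matching the snippets in this section

def wrap_in_while_loop(op_fn, iterations):
  # Repeat op_fn `iterations` times inside one tf.while_loop so the whole
  # enqueue loop runs in the host graph rather than in Python.
  def cond(i):
    return i < iterations

  def body(i):
    with tf.control_dependencies([op_fn()]):
      return i + 1

  return tf.while_loop(cond, body, [tf.constant(0)])

# Stand-in for an infeed enqueue op: push a constant into a host-side queue.
q = tf.FIFOQueue(capacity=100, dtypes=[tf.int32])
loop_op = wrap_in_while_loop(lambda: q.enqueue([1]), iterations=10)

with tf.Session() as sess:
  sess.run(loop_op)
  print(sess.run(q.size()))  # expected: 10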
Converts the user-provided `model_fn` into a single train step on TPU. The user-provided `model_fn` takes the input tuple (features, labels) and produces the EstimatorSpec with train_op and loss for the train `mode`. This usually represents a single train computation on CPU. For TPU training, a train (computation) step is first wrapped in a tf.while_loop control flow so it repeats many times, and is then replicated to all TPU shards. In addition, the input should be taken from the TPU infeed rather than from the input pipeline (input_fn) directly. To fit the TPU loop-and-replicate pattern, the original train computation is reformed into the returned `train_step`.
def convert_to_single_tpu_train_step(self, dequeue_fn): host_call = _OutfeedHostCall(self._ctx) captured_scaffold_fn = _CapturedObject() def train_step(loss): """Training step function for use inside a while loop.""" del loss # unused; required in function signature. inputs = dequeue_fn() features, labels = inputs.features_and_labels() estimator_spec = self._verify_estimator_spec( self._call_model_fn(features, labels)) loss, train_op = estimator_spec.loss, estimator_spec.train_op if isinstance(estimator_spec, TPUEstimatorSpec): captured_scaffold_fn.capture(estimator_spec.scaffold_fn) else: captured_scaffold_fn.capture(None) # We must run train_op to update the variables prior to running the # outfeed. with ops.control_dependencies([train_op]): host_call_outfeed_ops = [] if (isinstance(estimator_spec, TPUEstimatorSpec) and estimator_spec.host_call is not None): host_call.record({'host_call': estimator_spec.host_call}) host_call_outfeed_ops = host_call.create_enqueue_op() with ops.control_dependencies(host_call_outfeed_ops): return array_ops.identity(loss) return train_step, host_call, captured_scaffold_fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _model_fn(features, labels, mode, config, params):\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. 
The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. 
They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)", "def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n single_tpu_train_step, host_call, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))\n\n def multi_tpu_train_steps_on_single_shard():\n return training_loop.repeat(\n iterations_per_loop_var,\n single_tpu_train_step, [_INITIAL_LOSS])\n\n (loss,) = tpu.shard(\n multi_tpu_train_steps_on_single_shard,\n inputs=[],\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n scaffold = _get_scaffold(captured_scaffold_fn)\n return loss, host_call, scaffold", "def _augment_model_fn(self, model_fn, batch_axis):\n\n def _model_fn(features, labels, mode, config, params):\n \"\"\"A Estimator `model_fn` for TPUEstimator.\"\"\"\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. 
So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. 
The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. 
They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)\n\n return _model_fn", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n constants.TRAIN_BATCH_SIZE,\n is_train=True\n )\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n constants.EVAL_BATCH_SIZE,\n is_train=False\n )\n\n # # check for availabe tpu and gpu units\n # try:\n # tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n # tf.config.experimental_connect_to_cluster(tpu)\n # tf.tpu.experimental.initialize_tpu_system(tpu)\n # strategy = tf.distribute.experimental.TPUStrategy(tpu)\n # except ValueError:\n # strategy = tf.distribute.MirroredStrategy()\n\n # with strategy.scope():\n model = get_model(fn_args)\n\n try:\n log_dir = fn_args.model_run_dir\n except KeyError:\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), \"logs\")\n\n absl.logging.info('Tensorboard logging to {}'.format(log_dir))\n\n callbacks = [\n # tf.keras.callbacks.ModelCheckpoint(\"DeepLabV3plus.ckpt\", verbose=1, save_weights_only=True, save_best_only=True),\n tf.keras.callbacks.ReduceLROnPlateau(monitor=\"iou_score\", factor=0.2, patience=6, verbose=1, mode=\"max\"),\n tf.keras.callbacks.EarlyStopping(monitor=\"iou_score\", patience=16, mode=\"max\", verbose=1, restore_best_weights=True),\n tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq=\"batch\")\n ]\n\n absl.logging.info('Start training the top classifier')\n \n model.fit(\n train_dataset,\n epochs=constants.EPOCHS,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=callbacks\n )\n\n signatures = {\n 'serving_default':\n _get_serve_image_fn(model).get_concrete_function(\n tf.TensorSpec(\n shape=[None, constants.HEIGHT, constants.WIDTH, 3],\n dtype=tf.float32,\n name=_transformed_name(constants.IMAGE_KEY)\n )\n )\n }\n\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def _call_input_fn(self, input_fn, mode):\n input_fn_args = util.fn_args(input_fn)\n config = self.config # a deep copy.\n kwargs = {}\n if 'params' in input_fn_args:\n kwargs['params'] = self.params # a deep copy.\n else:\n raise ValueError('input_fn ({}) does not include params argument, '\n 'required by TPUEstimator to pass batch size as '\n 'params[\"batch_size\"]'.format(input_fn))\n if 'config' in input_fn_args:\n kwargs['config'] = 
config\n\n if 'mode' in input_fn_args:\n kwargs['mode'] = mode\n\n # Records the fact input_fn has been invoked.\n self._is_input_fn_invoked = True\n\n with self._ctx.with_mode(mode) as ctx:\n # Setting the batch size in params first. This helps user to have same\n # input_fn for use_tpu=True/False.\n batch_size_for_input_fn = ctx.batch_size_for_input_fn\n if batch_size_for_input_fn is not None:\n if isinstance(kwargs['params'], hparam.HParams):\n kwargs['params'].add_hparam(_BATCH_SIZE_KEY, batch_size_for_input_fn)\n else:\n kwargs['params'][_BATCH_SIZE_KEY] = batch_size_for_input_fn\n\n # For export_savedmodel, input_fn is never passed to Estimator. So,\n # `is_export_mode` must be False.\n if ctx.is_running_on_cpu(is_export_mode=False):\n with ops.device('/device:CPU:0'):\n return input_fn(**kwargs)\n\n # For TPU computation, input_fn should be invoked in a tf.while_loop for\n # performance. While constructing the tf.while_loop, the structure of\n # inputs returned by the `input_fn` needs to be recorded. The structure\n # includes whether features or labels is dict or single Tensor, dict keys,\n # tensor shapes, and dtypes. The recorded structure is used to create the\n # infeed dequeue ops, which must be wrapped and passed as a Fn, called\n # inside the TPU computation, as the TPU computation is wrapped inside a\n # tf.while_loop also. So, we either pass input_fn to model_fn or pass\n # dequeue_fn to model_fn. Here, `input_fn` is passed directly as\n # `features` in `model_fn` signature.\n def _input_fn():\n return input_fn(**kwargs)\n\n return _input_fn", "def run_fn(fn_args: TrainerFnArgs):\n\n # Training set size\n TRAIN_SIZE = get_dataset_size(fn_args.train_files)\n NUM_STEPS = TRAIN_SIZE / BATCH_SIZE # number of steps per epoch for which to train model\n \n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n \n train_dataset = _input_fn(fn_args.train_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n eval_dataset = _input_fn(fn_args.eval_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n\n model = _build_keras_model(\n tf_transform_output,\n hidden_units=[HIDDEN_UNITS_1, HIDDEN_UNITS_2, HIDDEN_UNITS_3],\n learning_rate=LEARNING_RATE)\n\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=log_dir, update_freq='batch')\n \n model.fit(\n train_dataset,\n epochs=NUM_EPOCHS, \n steps_per_epoch=NUM_STEPS,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default':\n _get_serve_tf_examples_fn(model,\n LABEL_COLUMN,\n tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def train_step(input, target, model, loss_fn, optimizer, **unused):\r\n model.train()\r\n output = model(input)\r\n loss = loss_fn(output, target)\r\n optimizer.backward(loss)\r\n optimizer.step()", "def train_step(loss):\n del loss # unused; required in function signature.\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n\n estimator_spec = self._verify_estimator_spec(\n self._call_model_fn(features, labels))\n loss, train_op = estimator_spec.loss, estimator_spec.train_op\n\n if isinstance(estimator_spec, TPUEstimatorSpec):\n captured_scaffold_fn.capture(estimator_spec.scaffold_fn)\n else:\n captured_scaffold_fn.capture(None)\n\n # We must run 
train_op to update the variables prior to running the\n # outfeed.\n with ops.control_dependencies([train_op]):\n host_call_outfeed_ops = []\n if (isinstance(estimator_spec, TPUEstimatorSpec) and\n estimator_spec.host_call is not None):\n host_call.record({'host_call': estimator_spec.host_call})\n host_call_outfeed_ops = host_call.create_enqueue_op()\n with ops.control_dependencies(host_call_outfeed_ops):\n return array_ops.identity(loss)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = model_function.create(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu,\n scope=(\"loss\" if model_function.freeze else None))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n if model_function.task_type == TaskType.CLASSIFICATION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n elif model_function.task_type == TaskType.REGRESSION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n ground_truth = tf.log1p(tf.clip_by_value(tf.cast(label_ids, tf.float32), 1e-8, 1e+30))\n predictions = tf.log1p(tf.clip_by_value(logits, 1e-8, 1e+30))\n return {\n \"eval_loss\": tf.metrics.mean(per_example_loss),\n \"another_loss\": tf.metrics.mean_squared_error(ground_truth, predictions)\n }\n else:\n raise NotImplementedError()\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n predictions = {\n \"result\": probabilities\n }\n print(probabilities.shape)\n print(type(probabilities))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec", "def create_predict_step(model_fn = gin.REQUIRED):\n def 
predict_step_v2(variables, batch, rng):\n features, _ = batch if isinstance(batch, tuple) else (batch, {})\n rng, _ = jax.random.split(rng)\n pred_model_fn = model_fn(mode=ExecutionMode.EVAL)\n model_outputs = pred_model_fn.apply(\n variables,\n **features,\n mutable=False,\n _do_remap=True,\n rngs=generate_rng_dict(rng))\n return model_outputs\n\n return predict_step_v2", "def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps):\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)\n #f1_score = tf.contrib.metrics.f1_score(\n # label_ids,\n # predicted_labels)\n #auc = tf.metrics.auc(\n # label_ids,\n # predicted_labels)\n #recall = tf.metrics.recall(\n # label_ids,\n # predicted_labels)\n #precision = tf.metrics.precision(\n # label_ids,\n # predicted_labels)\n #true_pos = tf.metrics.true_positives(\n # label_ids,\n # predicted_labels)\n #true_neg = tf.metrics.true_negatives(\n # label_ids,\n # predicted_labels)\n #false_pos = tf.metrics.false_positives(\n # label_ids,\n # predicted_labels)\n #false_neg = tf.metrics.false_negatives(\n # label_ids,\n # predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n # \"f1_score\": f1_score,\n #\"auc\": auc,\n # \"precision\": precision,\n # \"recall\": recall,\n # \"true_positives\": true_pos,\n # \"true_negatives\": true_neg,\n # \"false_positives\": false_pos,\n # \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn", "def model_fn_builder(model_config,\n train_params):\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = getattr(models, model_config.model_name)(config=model_config,\n is_training=is_training)\n _ = model(input_ids, input_mask=input_mask, 
token_type_ids=segment_ids)\n\n # TODO (@zhaoshenjian.01): check conditional_jit_scope\n # split loss calculation across batch\n batch_splits = train_params.get(\"batch_splits\", 1)\n if batch_splits == 1:\n # sparse_softmax_cross_entropy_with_logits\n masked_lm_output_dict = get_masked_lm_output(model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights)\n else:\n # use for large vocab\n masked_lm_output_dict = get_masked_lm_output_split_batch(\n model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n batch_splits=batch_splits)\n\n masked_lm_loss = masked_lm_output_dict[\"loss\"]\n\n use_nsp = train_params.get(\"use_nsp\", True)\n if use_nsp:\n next_sentence_labels = features[\"next_sentence_labels\"]\n next_sentence_output_dict = get_next_sentence_output(\n model_config, model.get_pooled_output(), next_sentence_labels)\n next_sentence_loss = next_sentence_output_dict[\"loss\"]\n else:\n next_sentence_loss = 0\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.compat.v1.trainable_variables()\n # run init\n init_checkpoint = train_params.get(\"init_checkpoint\")\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map,\n initialized_variable_names) = get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint,\n assignment_map)\n return tf.train.Scaffold()\n scaffold_fn = tpu_scaffold\n logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n logging.info(\" name = {}, shape = {} {}\".format(var.name, var.shape,\n init_string))\n\n # default `bert_decay` lr_scheduler\n lr_params = train_params.get(\n 'lr_scheduler', {\n 'name': 'bert_decay',\n 'learning_rate': 1e-4,\n 'warmup_steps': 10000,\n 'num_train_steps': 1000000\n })\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op, _ = optimizers.create_optimizer(\n loss=total_loss,\n init_lr=lr_params['learning_rate'],\n num_train_steps=lr_params['num_train_steps'],\n num_warmup_steps=lr_params['warmup_steps'])\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n return output_spec\n raise NotImplementedError\n\n return model_fn", "def run_from_generator(\n model, input_func=None, input_func_dict=None,\n eval_func_dict=None, nb_epochs=10, optimizer=None, model_dir=None):\n\n # 1. Create optimizer and compile model if optimizer is None\n if (optimizer is None):\n optimizer = tf.keras.optimizers.SGD(\n lr=1e-3, decay=1e-5, momentum=0.9, nesterov=True)\n\n # 2. compile the model\n model.compile(\n optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n # 3. create estimator\n dir_path = os.path.join(os.getcwd(), model_dir)\n print(\"Model path chosen : \", dir_path)\n if (not os.path.exists(dir_path)):\n os.mkdir(dir_path)\n\n print(\"Creating estimator...\")\n est = tf.keras.estimator.model_to_estimator(\n keras_model=model, model_dir=dir_path)\n\n # 4. 
Train and Evaluate the model\n print(\"Training...\")\n\n # training spec\n train_spec = tf.estimator.TrainSpec(input_fn=lambda: input_func(input_func_dict),\n max_steps=500)\n\n # evaluation spec\n eval_spec = tf.estimator.EvalSpec(input_fn=lambda: input_func(eval_func_dict))\n\n # Run the training\n model_est = tf.estimator.train_and_evaluate(est, train_spec, eval_spec)\n #est.train(input_fn=lambda: input_func(input_func_dict),\n # steps=None)\n #\n #est.evalute(input_fn=lambda: input_func(eval_func_dict))\n\n return est", "def _call_model_fn(self, features, labels, is_export_mode=False):\n model_fn_args = util.fn_args(self._model_fn)\n kwargs = {}\n\n # Makes deep copy with `config` and params` in case user mutates them.\n config = copy.deepcopy(self._config)\n params = copy.deepcopy(self._params)\n\n if 'labels' in model_fn_args:\n kwargs['labels'] = labels\n elif labels is not None:\n raise ValueError(\n 'model_fn does not take labels, but input_fn returns labels.')\n if 'mode' in model_fn_args:\n kwargs['mode'] = self._ctx.mode\n if 'config' in model_fn_args:\n kwargs['config'] = config\n if 'params' in model_fn_args:\n kwargs['params'] = params\n\n if 'params' not in model_fn_args:\n raise ValueError('model_fn ({}) does not include params argument, '\n 'required by TPUEstimator to pass batch size as '\n 'params[\\'batch_size\\']'.format(self._model_fn))\n\n if is_export_mode:\n batch_size_for_model_fn = None\n else:\n batch_size_for_model_fn = self._ctx.batch_size_for_model_fn\n\n if batch_size_for_model_fn is not None:\n if isinstance(params, hparam.HParams):\n params.add_hparam(_BATCH_SIZE_KEY, batch_size_for_model_fn)\n else:\n params[_BATCH_SIZE_KEY] = batch_size_for_model_fn\n\n estimator_spec = self._model_fn(features=features, **kwargs)\n if (self._ctx.is_running_on_cpu(is_export_mode) and\n isinstance(estimator_spec, TPUEstimatorSpec)):\n # The estimator_spec will be passed to `Estimator` directly, which expects\n # type `EstimatorSpec`.\n return estimator_spec.as_estimator_spec()\n else:\n return estimator_spec", "def model_fn(features, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"input_type_ids\"]\n # label_ids = features[\"label_ids\"]\n vocab = vocab_list\n vocab_size = len(vocab_list)\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n\n\n \n # TRAIN\n if not is_predicting:\n\n (loss, predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=FLAGS.use_tpu)\n\n # if mode == tf.estimator.ModeKeys.TRAIN:\n 
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n ## else:\n # return tf.estimator.EstimatorSpec(mode=mode,\n # loss=loss,\n # eval_metric_ops=eval_metrics)\n else:\n (predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n predictions = {\n 'probabilities': log_probs,\n 'predictions': predictions\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions, scaffold_fn=scaffold_fn)\n\n return output_spec if use_tpu else output_spec.as_estimator_spec()", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n (total_loss, per_example_loss, log_probs) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n scaffold_fn = None\n initialized_variable_names = []\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n\n elif mode == tf.estimator.ModeKeys.PREDICT:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\n \"log_probs\": log_probs,\n \"label_ids\": label_ids,\n },\n scaffold_fn=scaffold_fn)\n\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return 
output_spec", "def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)", "def model_fn(self, features, labels, mode, params):\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n output_shape = labels.get_shape().as_list()[1:] # labels are true images in this case\n reconstructions = self.forward_pass(features, output_shape, is_training=is_training)\n per_sample_loss = losses.make_reconstruction_loss(labels, reconstructions)\n reconstruction_loss = tf.reduce_mean(per_sample_loss)\n \n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = optimizers.make_decoder_optimizer()\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n train_op = optimizer.minimize(\n loss=reconstruction_loss, global_step=tf.train.get_global_step())\n train_op = tf.group([train_op, update_ops])\n tf.summary.scalar(\"reconstruction_loss\", reconstruction_loss)\n\n logging_hook = tf.train.LoggingTensorHook({\n \"reconstruction_loss\": reconstruction_loss,\n },\n every_n_iter=100)\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=reconstruction_loss,\n train_op=train_op,\n training_hooks=[logging_hook])\n elif mode == tf.estimator.ModeKeys.EVAL:\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=reconstruction_loss,\n eval_metrics=(make_metric_fn(\"reconstruction_loss\"), \n [reconstruction_loss]))\n else:\n raise NotImplementedError(\"Eval mode not supported.\")", "def model_fn(features, labels, mode): # pylint: disable=unused-argument\n # You can also use the Flatten layer if you want to test a model without any\n # weights.\n layer = tf.layers.Dense(1, use_bias=True)\n logits = tf.reduce_mean(layer(tf.cast(features[\"input_ids\"], tf.float32)))/1000\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\"logits\": logits}\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n def loss_fn():\n y = tf.reshape(logits, []) - tf.constant(1.)\n return y * y\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(mode, loss=loss_fn())\n\n assert mode == tf.estimator.ModeKeys.TRAIN\n\n global_step = tf.train.get_global_step()\n train_op = optimizer.minimize(loss_fn(), global_step=global_step)\n return tf.estimator.EstimatorSpec(mode, loss=loss_fn(), train_op=train_op)", "def tff_model_fn():\n keras_model = load_model(FLAGS.batch_size)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n return simple_fedavg_tf.KerasModelWrapper(keras_model,\n test_data.element_spec, loss)", "def create_model_fn(feature_columns):\n def _model_fn(features, mode, params):\n \"\"\"Model Function.\"\"\"\n logits = logits_fn(features, feature_columns, params)\n labels = tf.squeeze(features[\"label\"])\n\n if mode == tf_estimator.ModeKeys.EVAL:\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits\n ))\n\n def metric_fn(labels, logits):\n labels = tf.cast(labels, tf.int64)\n return {\n \"recall@1\": 
tf.metrics.recall_at_k(labels, logits, 1),\n \"recall@5\": tf.metrics.recall_at_k(labels, logits, 5)\n }\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=(metric_fn, [labels, logits]))\n\n elif mode == tf_estimator.ModeKeys.TRAIN:\n\n optimizer = tf.train.AdamOptimizer(\n learning_rate=params[\"learning_rate\"], beta1=params[\"beta1\"],\n beta2=params[\"beta2\"], epsilon=params[\"epsilon\"])\n optimizer = tf.tpu.CrossShardOptimizer(optimizer)\n\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits,\n ))\n\n train_op = optimizer.minimize(loss, tf.train.get_global_step())\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode, loss=loss, train_op=train_op)\n\n else:\n raise NotImplementedError\n return _model_fn", "def _create_train_algorithm(\n model_fn: Callable[[], tff.learning.Model]\n) -> tff.learning.templates.LearningProcess:\n client_optimizer = optimizer_flag_utils.create_optimizer_from_flags('client')\n server_optimizer = optimizer_flag_utils.create_optimizer_from_flags('server')\n # Need to set `no_nan_division=True` to avoid NaNs in the learned model, which\n # can happen when a model is not selected by any client in a round.\n model_aggregator = tff.aggregators.MeanFactory(no_nan_division=True)\n initial_model_weights_list = None\n if _PATH_TO_INITIAL_MODEL_WEIGHTS_LIST.value is not None:\n initial_model_weights_list = []\n for path_to_saved_model in _PATH_TO_INITIAL_MODEL_WEIGHTS_LIST.value:\n saved_keras_model = tf.keras.models.load_model(path_to_saved_model)\n initial_model_weights_list.append(\n tff.learning.ModelWeights.from_model(saved_keras_model))\n return hypcluster_train.build_hypcluster_train(\n model_fn=model_fn,\n num_clusters=_NUM_CLUSTERS.value,\n client_optimizer=client_optimizer,\n server_optimizer=server_optimizer,\n model_aggregator=model_aggregator,\n initial_model_weights_list=initial_model_weights_list)", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n print(f\"Parameters {fn_args}\")\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n batch_size=fn_args.train_batches)\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n batch_size=fn_args.eval_batches)\n\n # mirrored_strategy = tf.distribute.MirroredStrategy()\n # with mirrored_strategy.scope():\n model = encoder_decoder_model.build_keras_model(\n timesteps=fn_args.timesteps,\n number_features=fn_args.number_features,\n outer_units=fn_args.outer_units,\n inner_units=fn_args.inner_units)\n\n steps_per_epoch = fn_args.training_example_count / fn_args.train_batches\n\n tensorboard_callback = tf.keras.callbacks.TensorBoard()\n\n model.fit(\n train_dataset,\n epochs=int(fn_args.train_steps / steps_per_epoch),\n steps_per_epoch=steps_per_epoch,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default': _get_serve_tf_examples_fn(\n model, tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n\n model.save(\n fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def get_model_train_step_function(model, optimizer, vars_to_fine_tune):\n\n # Use tf.function for a bit of speed.\n # Comment out the tf.function decorator if you want the inside of the\n # function to run eagerly.\n @tf.function\n def train_step_fn(image_tensors,\n 
groundtruth_boxes_list,\n groundtruth_classes_list):\n \"\"\"A single training iteration.\n\n Args:\n image_tensors: A list of [1, height, width, 3] Tensor of type tf.float32.\n Note that the height and width can vary across images, as they are\n reshaped within this function to be 640x640.\n groundtruth_boxes_list: A list of Tensors of shape [N_i, 4] with type\n tf.float32 representing groundtruth boxes for each image in the batch.\n groundtruth_classes_list: A list of Tensors of shape [N_i, num_classes]\n with type tf.float32 representing groundtruth boxes for each image in\n the batch.\n\n Returns:\n A scalar tensor representing the total loss for the input batch.\n \"\"\"\n shapes = tf.constant(batch_size * [[640, 640, 3]], dtype=tf.int32)\n model.provide_groundtruth(\n groundtruth_boxes_list=groundtruth_boxes_list,\n groundtruth_classes_list=groundtruth_classes_list)\n with tf.GradientTape() as tape:\n preprocessed_images = tf.concat(\n [detection_model.preprocess(image_tensor)[0]\n for image_tensor in image_tensors], axis=0)\n prediction_dict = model.predict(preprocessed_images, shapes)\n losses_dict = model.loss(prediction_dict, shapes)\n total_loss = losses_dict['Loss/localization_loss'] + losses_dict['Loss/classification_loss']\n gradients = tape.gradient(total_loss, vars_to_fine_tune)\n optimizer.apply_gradients(zip(gradients, vars_to_fine_tune))\n return total_loss\n\n return train_step_fn", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)\n #f1_score = tf.contrib.metrics.f1_score(\n # label_ids,\n # predicted_labels)\n #auc = tf.metrics.auc(\n # label_ids,\n # predicted_labels)\n #recall = tf.metrics.recall(\n # label_ids,\n # predicted_labels)\n #precision = tf.metrics.precision(\n # label_ids,\n # predicted_labels)\n #true_pos = tf.metrics.true_positives(\n # label_ids,\n # predicted_labels)\n #true_neg = tf.metrics.true_negatives(\n # label_ids,\n # predicted_labels)\n #false_pos = tf.metrics.false_positives(\n # label_ids,\n # predicted_labels)\n #false_neg = tf.metrics.false_negatives(\n # label_ids,\n # predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n # \"f1_score\": f1_score,\n #\"auc\": auc,\n # \"precision\": precision,\n # \"recall\": recall,\n # \"true_positives\": true_pos,\n # \"true_negatives\": true_neg,\n # \"false_positives\": false_pos,\n # \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return 
tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def model_fn(features, labels, mode, params):\n utils.log(\"Building model...\")\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = FinetuningModel(\n config, tasks, is_training, features, num_train_steps)\n\n if pretraining_config is not None:\n # init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir)\n init_checkpoint = pretraining_config['checkpoint']\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, _ = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n # Build model for training or prediction\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.loss, config.learning_rate, num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_proportion=config.warmup_proportion,\n layerwise_lr_decay_power=config.layerwise_lr_decay,\n n_transformer_layers=model.bert_config.num_hidden_layers\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.loss),\n num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])\n else:\n assert mode == tf.estimator.ModeKeys.PREDICT\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=utils.flatten_dict(model.outputs),\n scaffold_fn=scaffold_fn)\n\n utils.log(\"Building complete\")\n return output_spec", "def tf_train_flow(train_once_fn, \n model_dir=None,\n log_dir=None, \n max_models_keep=1, \n save_interval_seconds=600, \n save_interval_steps=1000, \n num_epochs=None,\n num_steps=None, \n save_model=True,\n save_interval_epochs=None, \n freeze_graph=False,\n num_steps_per_epoch=0,\n restore_from_latest=True,\n metric_eval_fn=None,\n valid_interval_epochs=0,\n first_interval_epoch=-1.,\n inference_fn=None, \n inference_interval_epochs=0,\n init_fn=None,\n restore_fn=None,\n restore_include=None,\n restore_exclude=None,\n save_all_scope=False, # TODO save load from restore scope only but svae all\n variables_to_restore=None,\n variables_to_save=None, # by default will be the same as variables_to_restore\n output_collection_names=None, \n output_node_names=None,\n learning_rate=None, # not use yet, just use in train_once\n learning_rate_patience=None,\n learning_rate_decay_factor=None,\n write_during_train=True,\n model=None,\n callbacks=[],\n sess=None):\n use_horovod = 'OMPI_COMM_WORLD_RANK' in os.environ\n if use_horovod:\n if FLAGS.torch:\n import horovod.torch as hvd\n else:\n import horovod.tensorflow as hvd\n rank = 0 \n if use_horovod:\n rank = hvd.rank()\n\n model_dir_ = model_dir\n if rank != 0:\n model_dir = None\n\n if not FLAGS.metric_eval:\n metric_eval_fn = None\n\n if FLAGS.ps_strategy and FLAGS.round == 0:\n server = gezi.get('server')\n sess = tf.compat.v1.train.MonitoredTrainingSession(master=server.target,\n is_chief=(FLAGS.task_index == 0),\n #checkpoint_dir=FLAGS.model_dir\n )\n sess.graph._unsafe_unfinalize()\n save_model = False\n \n if sess is None:\n #TODO melt.get_session is global session but may cause non close at last\n sess = melt.get_session()\n\n if FLAGS.use_tpu:\n sess.run(tpu.initialize_system())\n\n if model_dir:\n if model:\n # here have not saved optimizer checkpoint = 
tf.train.Checkpoint(optimizer=optimizer, model=model)\n checkpoint = tf.train.Checkpoint(model=model)\n ckpt_dir = model_dir + '/ckpt'\n checkpoint_prefix = os.path.join(ckpt_dir, 'ckpt')\n \n assert tf.__version__ < '2', 'tf 2+ not support slim, TODO remove slim dependency'\n ## be caefull slim.get_variables_to_restore include and exlcude means top scope actually you need to pass wide_deep/deep/doc_emb instead of only pass doc_emb\n if not variables_to_restore:\n # variables_to_restore = slim.get_variables_to_restore(include=restore_include, exclude=restore_exclude)\n variables_to_restore = slim.get_variables_to_restore(include=None, exclude=None)\n \n # logging.debug('restore_include', restore_include, 'restore_exclude', restore_exclude)\n # logging.debug('variables_to_restore', variables_to_restore)\n\n # TODO need to set variables to save otherwise will save dataset and fail FIXME\n if not variables_to_save:\n variables_to_save = variables_to_restore\n if save_all_scope:\n variables_to_save = None\n \n #load all var in checkpoint try to save all var(might more then original checkpoint) if not specifiy variables_to_save\n varnames_in_checkpoint = melt.get_checkpoint_varnames(model_dir)\n #logging.info('varnames_in_checkpoint: {}'.format(varnames_in_checkpoint))\n \n # TODO has someproblem say tf.Variable 'r_net/text_encoder/cudnn_rnn/cu_dnngru/recurrent_kernel/adam_v:0' even though in checkpoint I have renated it as ignore/rnet\n # TODO tf2 graph mode but could not use slim \n variables_to_restore_from_model = slim.get_variables_to_restore(include=varnames_in_checkpoint)\n #logging.info('variables_to_restore_from_model: {}'.format(variables_to_restore_from_model))\n if not variables_to_restore:\n variables_to_restore = variables_to_restore_from_model\n else:\n variables_to_restore = [v for v in variables_to_restore if v in variables_to_restore_from_model]\n\n # TODO add regex patter exlucde include\n if restore_exclude:\n def _exclude_ok(name, restore_exclude):\n for excl in restore_exclude:\n if excl in name:\n return False\n return True\n variables_to_restore = [v for v in variables_to_restore if _exclude_ok(v.name, restore_exclude)]\n if restore_include:\n def _include_ok(name, restore_include):\n for incl in restore_include:\n if incl in name:\n return True\n return False\n variables_to_restore = [v for v in variables_to_restore if _include_ok(v.name, restore_include)]\n \n #--tf 1.6 adadelta will have same vars... 
\n variables_to_restore = list(set(variables_to_restore))\n #logging.info('variables_to_restore', variables_to_restore[:100])\n logging.debug('variables_to_restore(not show Optimize):\\n', '\\n'.join([f'{x}' for x in variables_to_restore if not 'OptimizeLoss' in x.name][:100]))\n\n global_step = tf.compat.v1.train.get_or_create_global_step()\n\n loader = tf.compat.v1.train.Saver(var_list=variables_to_restore) \n\n logging.debug('max models to keep {}, keep every {} hours'.format(max_models_keep, save_interval_seconds / 3600.0))\n saver = tf.compat.v1.train.Saver(\n max_to_keep=max_models_keep, \n keep_checkpoint_every_n_hours=save_interval_seconds / 3600.0,\n var_list=variables_to_save) \n epoch_saver = tf.compat.v1.train.Saver(var_list=variables_to_save, max_to_keep=max_models_keep)\n best_epoch_saver = tf.compat.v1.train.Saver(var_list=variables_to_save) \n dist_saver = tf.compat.v1.train.Saver(var_list=variables_to_save, sharded=True, max_to_keep=1)\n\n init_op = tf.group(tf.compat.v1.global_variables_initializer(), #variables_initializer(global_variables())\n tf.compat.v1.local_variables_initializer()) #variables_initializer(local_variables())\n\n timer = gezi.Timer('sess run init_op in melt.tf_train_flow')\n #model.save('./weights')\n # notice \n \n init_iters(sess)\n if FLAGS.round == 0:\n sess.run(init_op)\n timer.print_elapsed()\n\n if model is not None and FLAGS.round == 0:\n if hasattr(model, 'init'):\n model.init()\n if init_fn:\n try:\n init_fn(model)\n except Exception:\n pass\n if hasattr(model, 'restore'):\n model.restore() \n if restore_fn:\n try:\n restore_fn(model)\n except Exception:\n pass\n \n print_fn = logging.info if FLAGS.round == 0 and FLAGS.work_mode == 'train' else logging.debug\n melt.print_model(model, print_fn=print_fn, depth=FLAGS.print_depth)\n\n #pre_step means the step last saved, train without pretrained,then -1\n pre_step = -1\n fixed_pre_step = -1 #fixed pre step is for epoch num to be correct if you change batch size\n #print(model_dir)\n pre_epoch = None\n del_model_path = None\n if model_dir:\n # TODO refactor\n model_path = _get_model_path(model_dir, save_model)\n\n model_dir = gezi.get_dir(model_dir) #incase you pass ./model/model-ckpt1000 -> ./model\n\n # for train_loop only load model from round 0\n if model_path is not None:\n if not restore_from_latest:\n logging.info('using recent but not latest model')\n model_path = melt.recent_checkpoint(model_dir)\n model_name = os.path.basename(model_path)\n \n if FLAGS.model_path:\n model_path = FLAGS.model_path\n if FLAGS.work_mode == 'train':\n FLAGS.del_model_path = model_path\n\n if FLAGS.round == 0:\n timer = gezi.Timer('Loading from existing model [%s]' % model_path, print_fn=logging.info)\n if restore_fn is not None:\n restore_fn(sess)\n loader.restore(sess, model_path)\n timer.print()\n\n # pre_step = melt.get_model_step(model_path) - 1 if FLAGS.global_step is None else FLAGS.global_step -1\n pre_step = sess.run(tf.compat.v1.train.get_global_step()) - 1 if FLAGS.global_step is None else -1\n pre_epoch = melt.get_model_epoch(model_path) if FLAGS.global_epoch is None else FLAGS.global_epoch\n fixed_pre_step = pre_step\n del_model_path = model_path\n else:\n latest_checkpoint = None\n if not use_horovod: # now will hang\n try:\n latest_checkpoint = tf.train.latest_checkpoint(ckpt_dir)\n if FLAGS.model_path:\n latest_checkpoint = FLAGS.model_path\n if latest_checkpoint:\n if FLAGS.round == 0:\n logging.info('Try start from eager trained mode, latest checkpoint:', latest_checkpoint)\n 
checkpoint.restore(latest_checkpoint).run_restore_ops(session=sess)\n\n pre_epoch = int(latest_checkpoint.split('-')[-1])\n\n pre_step = sess.run(tf.compat.v1.train.get_global_step()) - 1\n fixed_pre_step = pre_step\n logging.info('Start step is:', pre_step)\n except Exception:\n logging.info('Something wrong with restore from eager trained model')\n if latest_checkpoint is None:\n logging.info('Train all start step 0')\n \n if FLAGS.round == 0:\n if init_fn is not None:\n init_fn(sess)\n del_model_path = latest_checkpoint\n\n if FLAGS.local_rank != 0:\n model_dir = None\n del_model_path = None\n save_model = False\n\n try:\n learning_rate = tf.compat.v1.get_collection('learning_rate')[-1]\n learning_rate_weight = tf.compat.v1.get_collection('learning_rate_weight')[-1]\n sess.run(tf.compat.v1.assign(learning_rate, learning_rate * learning_rate_weight))\n except Exception:\n # if not using weight_decay but using optimizer decay then will go here as learning rate is a tensor can not assign\n pass\n \n try:\n logging.info('Actual start global step:', sess.run(global_step), 'learning rate:', sess.run(learning_rate), 'learning_rate_weight:', sess.run(learning_rate_weight))\n except Exception:\n pass\n \n if FLAGS.work_mode == 'train' and FLAGS.metric_eval and FLAGS.monitor_l2:\n # l2 consuming 0.75 s\n total_params = sess.run(tf.reduce_sum(input_tensor=[tf.reduce_prod(input_tensor=v.shape) for v in tf.compat.v1.trainable_variables()]))\n l2 = sess.run(tf.add_n([tf.nn.l2_loss(v) for v in tf.compat.v1.trainable_variables()])) / total_params\n # total_params = 1\n # l2 = 0.\n logging.debug('Model total training parameters is:', total_params, 'with initial l2:', l2)\n FLAGS.l2_ = l2\n FLAGS.params_ = total_params\n\n if FLAGS.round == 0:\n if use_horovod:\n bcast = hvd.broadcast_global_variables(0)\n sess.run(bcast) \n\n if model_dir_:\n #if save_interval_epochs and num_steps_per_epoch and num_steps >= 0:\n epoch_dir = os.path.join(model_dir_, 'epoch')\n if rank == 0:\n gezi.try_mkdir(epoch_dir)\n checkpoint_path = os.path.join(model_dir_, 'model.ckpt')\n\n #tf.train.write_graph(sess.graph_def, model_dir, 'train.pbtxt')\n only_one_step = False\n \n if use_horovod:\n comm = gezi.get_global('dist').comm\n ## TODO FIXME why bcast here not work ? 
simple test work see tests/bcast.py\n #comm.bcast(pre_step, root=0)\n temp = np.array([pre_step, fixed_pre_step])\n comm.Bcast(temp, root=0)\n pre_step, fixed_pre_step = temp[0], temp[1]\n\n step = start = pre_step + 1\n fixed_step = fixed_pre_step + 1 \n\n #hack just for save one model after load\n if num_steps < 0 or (num_steps and num_steps < step):\n logging.info('just load and resave then exit, -1 means save model and pb, -2 means only save model, -3 means only save pb')\n model_path_ = _get_checkpoint_path(checkpoint_path, step, num_steps_per_epoch, epoch=pre_epoch)\n if num_steps != -3:\n saver.save(sess, model_path_, global_step=step + 1)\n # if freeze_graph:\n # melt.freeze_graph(sess, model_path_, step + 1, output_collection_names, output_node_names)\n if num_steps != -2:\n melt.freeze_graph(sess, os.path.join(model_dir, 'model'), None, output_collection_names, output_node_names)\n sess.close()\n exit(0)\n \n if num_epochs < 0:\n only_one_step = True\n logging.info('just run one step')\n\n _try_eval(model_dir_, log_dir, metric_eval_fn, inference_fn)\n\n #early_stop = True #TODO allow config\n num_bad_epochs = 0\n pre_epoch_eval_loss = 1e20\n best_epoch_eval_loss = 1e20\n num_allowed_bad_epochs = 4 #allow 5 non decrease eval loss epochs before stop\n epoch_saved_step = 0\n num_epochs = num_epochs if num_epochs else 1024\n\n for callback in callbacks:\n if hasattr(callback, 'set_model'):\n callback.set_model(model)\n\n #-------------------------------main loop\n timer_, FLAGS.total_time, FLAGS.train_time, FLAGS.valid_time = gezi.Timer(reset=False), None, None, None\n try:\n # num_epochs + 1 safe, since we need one more step to do final evaluation, and we can escape for loop, when step all done\n start_epoch = FLAGS.start_epoch or int(step / num_steps_per_epoch) if not FLAGS.train_loop else 0\n if FLAGS.round > 0:\n step = 0\n fixed_step = 0\n if not FLAGS.train_loop and (num_epochs > int(num_epochs)):\n end_epoch = start_epoch + int(num_epochs) + 1\n else:\n end_epoch = start_epoch + int(num_epochs)\n # epoch单独一个loop train 内部loop 这样 train和 async eval 的 valid loop的屏幕打印能分两行显示不干扰\n for epoch in tqdm(range(start_epoch, end_epoch), desc='Training', ascii=True):\n logging.debug('------------------------epoch:', epoch)\n for callback in callbacks:\n if hasattr(callback, 'on_epoch_begin'):\n kwargs = {}\n if 'lr' in inspect.getargspec(callback.on_epoch_begin).args:\n kwargs['lr'] = learning_rate\n callback.on_epoch_begin(epoch, **kwargs)\n\n\n train_hour = FLAGS.train_hour if FLAGS.loop_train else None\n desc = 'Epoch:%2d/%d' % (epoch + 1, int(num_epochs)) if not train_hour else '%s-%d/%d Epoch:%2d/%d' % (train_hour, FLAGS.round + 1, FLAGS.num_rounds, epoch + 1, int(num_epochs))\n t = tqdm(range(num_steps_per_epoch), total=num_steps_per_epoch, desc=desc, ascii=True)\n for i in t:\n gstep = sess.run(global_step)\n step = int(gstep)\n # if i % 10 == 0:\n # print(step, i, FLAGS.task_index)\n if step >= num_steps_per_epoch * (epoch + 1):\n break\n \n postfix = {}\n if gezi.get('loss'):\n postfix['loss'] = gezi.get('loss')\n if gezi.get('valid_loss'):\n postfix['val_loss'] = gezi.get('valid_loss')\n t.set_postfix(postfix)\n model_step_path = None\n if model_dir_:\n model_path_ = os.path.join(epoch_dir,'model.ckpt-%.2f'%((fixed_step + 1) / float(num_steps_per_epoch)))\n model_step_path_ = model_path_ + '-' + str(step + 1)\n if (write_during_train and metric_eval_fn is not None and valid_interval_epochs > 0 \\\n and (fixed_step + 1) % int(num_steps_per_epoch * valid_interval_epochs) == 0 \\\n or 
first_interval_epoch > 0 \\\n and (fixed_step + 1) == int(num_steps_per_epoch * first_interval_epoch)):\n # and (fixed_step == int(num_steps_per_epoch * first_interval_epoch)) or \\\n # fixed_step == int(num_steps_per_epoch * (1 + first_interval_epoch))):\n model_step_path = model_step_path_\n else:\n model_step_path = None\n\n # if step == 0:\n # model_step_path = None\n\n for callback in callbacks:\n if hasattr(callback, 'on_batch_begin'):\n kwargs = {}\n if 'lr' in inspect.getargspec(callback.on_batch_begin).args:\n kwargs['lr'] = learning_rate\n callback.on_batch_begin(step, **kwargs)\n\n #print('--------------------step', step)\n stop = train_once_fn(sess, \n step, \n is_start=(step==start), \n fixed_step=fixed_step,\n num_epochs=num_epochs,\n model_path=model_step_path,\n use_horovod=use_horovod,\n valid_interval_epochs=valid_interval_epochs,\n timer_=timer_,\n ## TODO FIXME this line will cause tensorflow.python.framework.errors_impl.NotFoundError: Resource localhost/save_counter/N10tensorflow3VarE does not exist. \n )\n\n if only_one_step:\n stop = True\n\n step += 1\n fixed_step += 1\n\n for callback in callbacks:\n if hasattr(callback, 'on_batch_end'):\n kwargs = {}\n if 'lr' in inspect.getargspec(callback.on_batch_end).args:\n kwargs['lr'] = learning_rate\n callback.on_batch_end(step, **kwargs)\n\n # Already inited in melt.apps.train\n #if step == 1 and model is not None and hasattr(model, 'init_predict'):\n # model.init_predict()\n\n if save_model and step and model_dir:\n #step 0 is also saved! actually train one step and save\n is_step_save = step % save_interval_steps == 0\n is_epoch_save = FLAGS.save_interval_epochs and FLAGS.save_interval_epochs > 0 \\\n and save_interval_steps and num_steps_per_epoch and fixed_step % int(num_steps_per_epoch * save_interval_epochs) == 0 \\\n and not (num_steps_per_epoch * num_epochs - step < int(num_steps_per_epoch * save_interval_epochs))\n\n is_step_save = is_step_save or is_epoch_save \n\n if is_step_save:\n model_path_ = _get_checkpoint_path(checkpoint_path, fixed_step, num_steps_per_epoch)\n timer = gezi.Timer('save model step %d to %s'%(step, checkpoint_path), False)\n if rank == 0:\n saver.save(sess, model_path_, global_step=step)\n if freeze_graph:\n melt.freeze_graph(sess, model_path_, step, output_collection_names, output_node_names)\n \n # if FLAGS.local_mark in log_dir and FLAGS.sync_hdfs and (rank == 0):\n # command = f\"rsync -a --update --exclude 'model*' --exclude 'ckpt*' %s %s &\" % (log_dir, os.path.dirname(FLAGS.ori_log_dir))\n # gezi.system(command)\n \n timer.print_elapsed()\n \n if is_epoch_save:\n epoch_saved_step = step\n if rank == 0:\n if FLAGS.async_valid and FLAGS.valid_input:\n FLAGS.total_time = (time.time() - gezi.get_global('start_time')) / 60\n _async_valid()\n #logging.info(timer.elapsed())\n timer.print_elapsed()\n \n if freeze_graph:\n melt.freeze_graph(sess, model_path_, step, output_collection_names, output_node_names)\n\n # TODO FIXME if add keras save below wil hang, might due to rank 0 save so slower then others(conflict with evaluate)\n # if not use evaluate just train + save ok... 
sitll not find reason...\n # Add comm.barrier below but still might hang, though not hang on first time, so not save using horovod\n # [1,0]<stderr>:Stalled ranks:\n # [1,0]<stderr>:0: [HorovodAllreduce_Const_9_0]\n # seems ok move it after freeze_graph\n if model and not use_horovod and FLAGS.save_eager_ckpt:\n #if model: \n #model.save_weights(epoch_dir + '/ckpt-%.2f' % (fixed_step / float(num_steps_per_epoch)))\n # TODO FIXME if restart will save from 1... again..\n timer = gezi.Timer('keras epoch save to {}'.format(checkpoint_prefix))\n checkpoint.save(checkpoint_prefix, session=sess)\n #print(sess.run(checkpoint.save_counter))\n #logging.info(timer.elapsed())\n timer.print_elapsed()\n\n # if write_during_train:\n # if inference_fn is not None and inference_interval_epochs and fixed_step % int(num_steps_per_epoch * inference_interval_epochs) == 0:\n # model_path_ = os.path.join(epoch_dir,'model.ckpt-%.2f' % (fixed_step / float(num_steps_per_epoch)))\n # model_step_path = model_path_ + '-' + str(step)\n # try:\n # inference_fn(model_path=model_step_path)\n # except Exception:\n # logging.warning(traceback.format_exc()) \n \n if stop is True:\n print('Early stop running %d stpes'%(step), file=sys.stderr)\n raise tf.errors.OutOfRangeError(None, None,'Early stop running %d stpes'%(step))\n if num_steps and (step + 1) == start + num_steps:\n raise tf.errors.OutOfRangeError(None, None,'Reached max num steps')\n #max_num_epochs = 1000\n max_num_epochs = num_epochs\n #if max_num_epochs and num_steps_per_epoch and fixed_step // num_steps_per_epoch >= max_num_epochs:\n if max_num_epochs and num_steps_per_epoch and fixed_step / num_steps_per_epoch > max_num_epochs:\n raise tf.errors.OutOfRangeError(None, None,'Reached max num epochs of %d'%max_num_epochs)\n\n # TODO might change learning rate here ?\n for callback in callbacks: \n if hasattr(callback, 'on_epoch_end'):\n kwargs = {}\n if 'lr' in inspect.getargspec(callback.on_epoch_end).args:\n kwargs['lr'] = learning_rate\n callback.on_epoch_end(epoch, **kwargs)\n if FLAGS.ps_strategy and FLAGS.local_rank == 0:\n model_path_ = _get_checkpoint_path(checkpoint_path, step, num_steps_per_epoch)\n # if you want to evaluate at last just set valid_interval_epochs=1 or 0.5 0.25 0.2 0.1\n dist_saver.save(get_session(sess), model_path_, global_step=step)\n raise tf.errors.OutOfRangeError(None, None, 'Reached max num epochs of %d' % max_num_epochs)\n except tf.errors.OutOfRangeError:\n if rank == 0:\n melt.inc_total_step(int(num_steps_per_epoch * num_epochs))\n if (step - epoch_saved_step > 1) and not (step==start) and save_model and step % save_interval_steps != 0 and model_dir:\n model_path_ = _get_checkpoint_path(checkpoint_path, step, num_steps_per_epoch)\n # if you want to evaluate at last just set valid_interval_epochs=1 or 0.5 0.25 0.2 0.1\n saver.save(sess, model_path_, global_step=step)\n if FLAGS.async_valid and FLAGS.valid_input and FLAGS.ev_last:\n FLAGS.total_time = (time.time() - gezi.get_global('start_time')) / 60\n _async_valid()\n if not use_horovod and FLAGS.save_eager_ckpt:\n checkpoint_prefix = os.path.join(model_dir, 'ckpt', 'ckpt.final')\n checkpoint.save(checkpoint_prefix, session=sess)\n\n #if freeze_graph:\n if save_model and FLAGS.freeze_graph_final:\n if (FLAGS.round == FLAGS.num_rounds - 1):\n melt.freeze_graph(sess, os.path.join(model_dir, 'model'), None, output_collection_names, output_node_names)\n \n # hack for hvd we store the last keras checkpoint\n if use_horovod and hvd.rank() == 0 and FLAGS.save_eager_ckpt:\n 
checkpoint_prefix = os.path.join(model_dir, 'ckpt', 'ckpt.final')\n checkpoint.save(checkpoint_prefix, session=sess)\n \n if only_one_step:\n # TODO strange logging.info will not show to screen if using horovod\n logging.info('Done one step')\n exit(0)\n \n if (num_epochs and fixed_step / num_steps_per_epoch >= num_epochs) or (num_steps and step == start + num_steps) :\n logging.info('Done training for %.3f epochs, %d steps.' % (fixed_step / num_steps_per_epoch, step))\n #FIXME becase coord.join seems not work, RuntimeError: Coordinator stopped with threads still running: Thread-9\n # exit(0)\n else:\n logging.info('Should not stop, but stopped at epoch: %.3f'%(fixed_step / num_steps_per_epoch))\n logging.info(traceback.format_exc())\n #raise e\n\n FLAGS.total_time = (time.time() - gezi.get_global('start_time')) / 60\n logging.info(f'Round:{FLAGS.round} Train:{FLAGS.train_hour} Valid:{FLAGS.valid_hour}', 'TotalTime:{:.1f}m'.format(FLAGS.total_time))\n _on_epoch_end(model_dir, log_dir, save_model, del_model_path)\n\n if inference_fn is not None:\n inference_fn(FLAGS.model_dir)\n\n if not FLAGS.train_loop:\n if FLAGS.use_tpu:\n sess.run(tpu.shutdown_system())\n sess.close()\n else:\n logging.info(f'Done for {FLAGS.train_input} round:{FLAGS.round}')", "def train_step(model, features, labels):\n with tf.GradientTape() as tape:\n predictions = model(features, training=True)\n loss = loss_func(labels, predictions)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n train_loss.update_state(loss)\n train_metric.update_state(labels, predictions)", "def model_fn_builder(config: electra_files.configure_finetuning.FinetuningConfig, tasks,\n num_train_steps, pretraining_config=None):\n\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n utils.log(\"Building model...\")\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = FinetuningModel(\n config, tasks, is_training, features, num_train_steps)\n\n if pretraining_config is not None:\n # init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir)\n init_checkpoint = pretraining_config['checkpoint']\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, _ = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n # Build model for training or prediction\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.loss, config.learning_rate, num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_proportion=config.warmup_proportion,\n layerwise_lr_decay_power=config.layerwise_lr_decay,\n n_transformer_layers=model.bert_config.num_hidden_layers\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.loss),\n num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])\n else:\n assert mode == tf.estimator.ModeKeys.PREDICT\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=utils.flatten_dict(model.outputs),\n scaffold_fn=scaffold_fn)\n\n utils.log(\"Building complete\")\n return output_spec\n\n return model_fn" ]
[ "0.72253805", "0.68108726", "0.6774271", "0.6719611", "0.65392244", "0.649606", "0.64581513", "0.64381677", "0.6420198", "0.6401497", "0.6346824", "0.6328738", "0.6324738", "0.6315999", "0.6287701", "0.6285973", "0.62623984", "0.6247418", "0.61984706", "0.61869395", "0.61794764", "0.6178008", "0.61779636", "0.61779505", "0.6138724", "0.61334664", "0.61189675", "0.61052275", "0.6079146", "0.60706097" ]
0.6929875
1
Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.
def validate(host_calls):
  for name, host_call in host_calls.items():
    if not isinstance(host_call, (tuple, list)):
      raise ValueError('{} should be tuple or list'.format(name))
    if len(host_call) != 2:
      raise ValueError('{} should have two elements.'.format(name))
    if not callable(host_call[0]):
      raise TypeError('{}[0] should be callable.'.format(name))
    if not isinstance(host_call[1], (tuple, list, dict)):
      raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))

    if isinstance(host_call[1], (tuple, list)):
      fullargspec = tf_inspect.getfullargspec(host_call[0])
      fn_args = util.fn_args(host_call[0])
      # wrapped_hostcall_with_global_step uses varargs, so we allow that.
      if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
        raise RuntimeError(
            'In TPUEstimatorSpec.{}, length of tensors {} does not match '
            'method args of the function, which takes {}.'.format(
                name, len(host_call[1]), len(fn_args)))
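For orientation, a minimal sketch of a `host_calls` dictionary that satisfies the contract checked above; the names `host_fn`, `global_step_t` and `loss_t` are hypothetical placeholders (assumptions, not part of the original API) — only the `(callable, tensors)` tuple shape and the matching arity are what `validate` actually enforces.

import tensorflow as tf

# Hypothetical stand-ins for tensors a model_fn might outfeed.
global_step_t = tf.constant([0], dtype=tf.int64)
loss_t = tf.constant([0.5], dtype=tf.float32)

def host_fn(global_step, loss):
  # Runs on the host CPU with the dequeued tensors; kept trivial here.
  return {'global_step': global_step, 'loss': loss}

# Well-formed entry: a 2-tuple of (callable, list of tensors) whose length
# matches host_fn's two positional arguments, so validate() would accept it.
host_calls = {'host_call': (host_fn, [global_step_t, loss_t])}
# Passing (host_fn, [loss_t]) instead would trigger the RuntimeError above,
# since one tensor does not match the two arguments host_fn takes.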
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_parameters(self):\n self.target_metric = get_formatted_target_metric(\n self.target_metric, G.Env.metrics, default_dataset=\"oof\"\n )", "def validate(self):\n AcceleratorType.validate(self.accelerator_type)\n gcp.validate_machine_configuration(self.cpu_cores,\n self.memory,\n self.accelerator_type,\n self.accelerator_count)", "def validate(self):\n errors = []\n app = errors.append\n\n if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:\n app(\"self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied\")\n\n if self.omp_threads > self.hw.cores_per_node:\n app(\"omp_threads > hw.cores_per_node\")\n\n if self.mem_per_proc > self.hw.mem_per_node:\n app(\"mem_mb >= self.hw.mem_per_node\")\n\n if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:\n app(\"self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied\")\n\n if self.priority <= 0:\n app(\"priority must be > 0\")\n\n if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores):\n app(\"1 <= min_cores <= hardware num_cores >= hint_cores not satisfied\")\n\n if errors:\n raise self.Error(str(self) + \"\\n\".join(errors))", "def check_all_user_inputs_valid(self):\n self.check_RNN_layers_valid()\n self.check_activations_valid()\n self.check_embedding_dimensions_valid()\n self.check_initialiser_valid()\n self.check_y_range_values_valid()\n self.check_return_final_seq_only_valid()", "def _data_params_validation(self) -> None:\n extra_regressor_names = set(self.params._reqd_regressor_names)\n # univariate case\n if self.data.is_univariate():\n if len(extra_regressor_names) != 0:\n msg = (\n f\"Missing data for extra regressors: {self.params._reqd_regressor_names}! \"\n \"Please include the missing regressors in `data`.\"\n )\n raise ValueError(msg)\n # multivariate case\n else:\n value_cols = set(self.data.value.columns)\n if \"y\" not in value_cols:\n msg = \"`data` should contain a column called `y` representing the responsive value.\"\n raise ValueError(msg)\n if not extra_regressor_names.issubset(value_cols):\n msg = f\"`data` should contain all columns listed in {extra_regressor_names}.\"\n raise ValueError(msg)\n # validate cap\n if (self.params.cap is True) and (\"cap\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `cap` representing the cap when `cap = True`.\"\n _error_msg(msg)\n # validate floor\n if (self.params.floor is True) and (\"floor\" not in self.data.value.columns):\n msg = \"`data` should contain a column called `floor` representing the floor when `floor = True`.\"\n _error_msg(msg)", "def testValid(self):\n validate = timing_util.ValidateMeasurementsFlag\n self.assertIs(validate([]), True)\n self.assertIs(validate(['none']), True)\n self.assertIs(validate(['end_to_end_runtime']), True)\n self.assertIs(validate(['runtimes']), True)\n self.assertIs(validate(['timestamps']), True)\n self.assertIs(validate(['end_to_end_runtime', 'runtimes']), True)\n self.assertIs(validate(['end_to_end_runtime', 'timestamps']), True)\n self.assertIs(validate(['runtimes', 'timestamps']), True)\n self.assertIs(\n validate(['end_to_end_runtime', 'runtimes', 'timestamps']), True)", "def validate_parameters(self):\n #################### metrics_params/metrics ####################\n if (self.metrics is not None) and (\"metrics\" in self.metrics_params.keys()):\n raise ValueError(\n \"`metrics` may be provided as a kwarg, or as a `metrics_params` key, but NOT BOTH. 
Received: \"\n + f\"\\n `metrics`={self.metrics}\\n `metrics_params`={self.metrics_params}\"\n )\n else:\n _metrics_alias = \"metrics\"\n if self.metrics is None:\n try:\n self.metrics = self.metrics_params[\"metrics\"]\n except KeyError:\n self.metrics = self.metrics_params[\"metrics_map\"]\n _metrics_alias = \"metrics_map\"\n self.metrics = format_metrics(self.metrics)\n self.metrics_params = {**{_metrics_alias: self.metrics}, **self.metrics_params}", "def validate(self):\n\n # start validate\n self.model.eval()\n preds, labels = [], []\n for batch_idx, data in enumerate(self.valid_dataloader):\n # calculate and log losses\n losses_report, valid_preds, valid_labels = self.forward_one_batch(\n data)\n self._update_losses(losses_report, train=False)\n\n preds.append(valid_preds)\n labels.append(valid_labels)\n\n preds = np.concatenate(preds, axis=0)\n labels = np.concatenate(labels, axis=0)\n if IS_REG:\n preds = disc(preds)\n # calculate and log metrics\n metrics_report = self.evaluate_metrics(preds, labels)\n self._update_metrics(metrics_report, train=False)\n\n # TODO: lr scheduler step setting\n self.lr_scheduler.step(self.valid_loss_meters['CrossEntropyLoss'].avg)\n\n # end validate\n self.model.train()", "def validate(self, inputs, labels):\n # Set the phase to test.\n tf.keras.backend.set_learning_phase(0)\n accuracy = self.sess.run([self.accuracy_eval],\n feed_dict={\n self.inputs_eval: inputs,\n self.labels_eval: labels\n })\n costs = self.sess.run(self.cost_eval,\n feed_dict={\n self.inputs_eval: inputs,\n self.labels_eval: labels\n })\n return accuracy, costs", "def validate(*tf_records):\n if FLAGS.use_tpu:\n def _input_fn(params):\n return preprocessing.get_tpu_input_tensors(\n params['train_batch_size'], params['input_layout'], tf_records,\n filter_amount=1.0)\n else:\n def _input_fn():\n return preprocessing.get_input_tensors(\n FLAGS.train_batch_size, FLAGS.input_layout, tf_records,\n filter_amount=1.0, shuffle_examples=False)\n\n steps = FLAGS.examples_to_validate // FLAGS.train_batch_size\n if FLAGS.use_tpu:\n steps //= FLAGS.num_tpu_cores\n\n estimator = dual_net.get_estimator()\n with utils.logged_timer(\"Validating\"):\n estimator.evaluate(_input_fn, steps=steps, name=FLAGS.validate_name)", "def validate(self):\n with torch.no_grad():\n val_loss, val_acc = self.run_epoch(self.validate_dataloader, train=False)\n self.log_performance(self.summary_writer,\n {'loss': val_loss, 'acc': val_acc},\n self.epoch,\n self.total_steps,\n summary_group='validate')\n return val_loss, val_acc", "def validate(self):\n X_orig = make_X_from_features(self._conf)\n train_sz = len(load_array(self._conf, 'task.dataset.id_train'))\n X = X_orig[:train_sz, :]\n y = load_array(self._conf, 'task.dataset.y_train')\n y = y.reshape(y.size)\n\n cv_method_name = self._conf['task']['params']['validation']['class']\n cv_params_name = self._conf['task']['params']['validation'].get(\n 'params', {})\n cv_params_name = _to_str_value(cv_params_name)\n\n cv_method = dynamic_load(cv_method_name)\n mean_cv_score = cv_method(X, y, self, **cv_params_name)\n\n task_metrics = self._conf['task']['params']['metrics']\n task_method = task_metrics['method']\n\n ume.db.add_validation_score(\n os.path.basename(self._jn),\n ume.__version__,\n task_method,\n mean_cv_score)", "def validate(self):\n self.set_model_mode('eval')\n self.evaluator.reset()\n losses = MetricMeter()\n\n print('Do evaluation on {} set'.format('valid set'))\n data_loader = self.val_loader\n assert data_loader is not None\n for batch_idx, batch in 
enumerate(data_loader):\n input, label = self.parse_batch_test(batch)\n loss = self.forward_backward(batch, backprob=False)\n losses.update(loss)\n # total_loss += loss['loss']\n output = self.model_inference(input)\n self.evaluator.process(output, label)\n\n results = self.evaluator.evaluate()\n total_loss = losses.meters['loss_x'].avg\n\n for k, v in results.items():\n tag = '{}/{}'.format('validation', k)\n self.write_scalar(tag, v, self.epoch)\n # if full_results:\n return [total_loss,losses.dict_results(),results]\n # return total_loss", "def _validate(\n self,\n point: tensorboard_data.TimeSeriesDataPoint,\n event: tf.compat.v1.Event,\n value: tf.compat.v1.Summary.Value,\n ):\n return True", "def _check_evaluate_implementation(self) -> None:\n logging.debug(f\"Evaluate_batch_defined: {self._evaluate_batch_defined()}.\")\n logging.debug(f\"Evaluate full dataset defined: {self._evaluate_full_dataset_defined()}.\")\n check.not_eq(\n self._evaluate_batch_defined(),\n self._evaluate_full_dataset_defined(),\n \"Please define exactly one of: `evaluate_batch()` or `evaluate_full_dataset()`. \"\n \"For most use cases `evaluate_batch()` is recommended is recommended because \"\n \"it can be parallelized across all devices.\",\n )", "def check_test_case_validity(test_case_dataset):\n for i, test_case in enumerate(test_case_dataset):\n assert \"NAME\" in test_case, f\"Test case #{i} Invalid NAME\"\n\n assert (\n \"N_STATES\" in test_case\n and isinstance(test_case[\"N_STATES\"], int)\n and 0 < test_case[\"N_STATES\"] <= 64\n ), f\"Test case #{i} Invalid N_STATES\"\n\n assert (\n \"N_SYMBOLS\" in test_case\n and isinstance(test_case[\"N_SYMBOLS\"], int)\n and 0 < test_case[\"N_SYMBOLS\"] <= 64\n ), f\"Test case #{i} Invalid N_SYMBOLS\"\n\n assert (\n \"PLAYER_INPUT_SIZES\" in test_case\n and isinstance(test_case[\"PLAYER_INPUT_SIZES\"], list)\n and len(test_case[\"PLAYER_INPUT_SIZES\"]) > 1\n and all(\n (isinstance(x, int) and x > 0) for x in test_case[\"PLAYER_INPUT_SIZES\"]\n )\n ), f\"Test case #{i} Invalid PLAYER_INPUT_SIZES\"\n\n assert \"REPETITIONS\" not in test_case or (\n isinstance(test_case[\"REPETITIONS\"], int) and 0 < test_case[\"REPETITIONS\"]\n ), f\"Test case #{i} Invalid REPETITIONS\"\n\n assert \"DEBUG\" not in test_case or isinstance(\n test_case[\"DEBUG\"], bool\n ), f\"Test case #{i} Invalid DEBUG\"\n\n assert \"VIRTUAL_MACHINE\" not in test_case or (\n isinstance(test_case[\"VIRTUAL_MACHINE\"], str)\n and test_case[\"VIRTUAL_MACHINE\"] in [\"./spdz2k-party.x\", \"./semi2k-party.x\"]\n ), f\"Test case #{i} Invalid VIRTUAL_MACHINE\"\n\n if \"PLAYER_DATA\" in test_case:\n assert isinstance(\n test_case[\"PLAYER_DATA\"], list\n ), f\"Test case #{i} Invalid PLAYER_DATA - Not a list\"\n for j, size in enumerate(test_case[\"PLAYER_INPUT_SIZES\"]):\n player_data = test_case[\"PLAYER_DATA\"][j]\n max_value = test_case[\"N_SYMBOLS\"]\n assert (\n isinstance(player_data, list)\n and len(player_data) == size\n and all(\n (isinstance(x, int) and 0 <= x <= max_value)\n for x in player_data\n )\n ), f\"Test case #{i} Invalid PLAYER_DATA - User {j} inputs are invalid\"", "def _validate_tpu_training_graph():\n operations = ops.get_default_graph().get_operations()\n\n # Check if there is atleast one CrossReplicaSum operation in the graph\n # This should be introduced by using the CrossShardOptimizer wrapper\n cross_replica_sum_ops = [\n o for o in operations if o.type == _CROSS_REPLICA_SUM_OP\n ]\n if not cross_replica_sum_ops:\n raise ValueError(\n 'CrossShardOptimizer must be used for 
model training on TPUs.')", "def validate(net, val_data, ctx, eval_metric, args):\n clipper = gcv.nn.bbox.BBoxClipToImage()\n eval_metric.reset()\n if not args.disable_hybridization:\n # input format is differnet than training, thus rehybridization is needed.\n net.hybridize(static_alloc=args.static_alloc)\n for i, batch in enumerate(val_data):\n batch = split_and_load(batch, ctx_list=ctx)\n det_bboxes = []\n det_ids = []\n det_scores = []\n gt_bboxes = []\n gt_ids = []\n gt_difficults = []\n for x, y, im_scale in zip(*batch):\n # get prediction results\n ids, scores, bboxes = net(x)\n det_ids.append(ids)\n det_scores.append(scores)\n # clip to image size\n det_bboxes.append(clipper(bboxes, x))\n # rescale to original resolution\n im_scale = im_scale.reshape((-1)).asscalar()\n det_bboxes[-1] *= im_scale\n # split ground truths\n gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))\n gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))\n gt_bboxes[-1] *= im_scale\n gt_difficults.append(\n y.slice_axis(axis=-1, begin=5, end=6)\n if y.shape[-1] > 5\n else None\n )\n\n # update metric\n for det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff in zip(\n det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults\n ):\n eval_metric.update(\n det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff\n )\n return eval_metric.get()", "def validate_kitti(model, args, eval_loader, group, iters=24):\n model.eval()\n epe_list = torch.zeros(2).cuda(device=args.gpu)\n out_list = torch.zeros(2).cuda(device=args.gpu)\n\n for val_id, batch in enumerate(tqdm(eval_loader)):\n image1, image2, flow_gt, valid_gt = batch\n\n image1 = Variable(image1, requires_grad=True)\n image1 = image1.cuda(args.gpu, non_blocking=True)\n\n image2 = Variable(image2, requires_grad=True)\n image2 = image2.cuda(args.gpu, non_blocking=True)\n\n flow_gt = Variable(flow_gt, requires_grad=True)\n flow_gt = flow_gt.cuda(args.gpu, non_blocking=True)\n flow_gt = flow_gt[0]\n\n valid_gt = Variable(valid_gt, requires_grad=True)\n valid_gt = valid_gt.cuda(args.gpu, non_blocking=True)\n valid_gt = valid_gt[0]\n\n padder = InputPadder(image1.shape, mode='kitti')\n image1, image2 = padder.pad(image1, image2)\n\n flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)\n flow = padder.unpad(flow_pr[0])\n\n epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()\n mag = torch.sum(flow_gt**2, dim=0).sqrt()\n\n epe = epe.view(-1)\n mag = mag.view(-1)\n val = valid_gt.view(-1) >= 0.5\n\n out = ((epe > 3.0) & ((epe/mag) > 0.05)).float()\n\n epe_list[0] += epe[val].mean().item()\n epe_list[1] += 1\n\n out_list[0] += out[val].sum()\n out_list[1] += torch.sum(val)\n\n if args.distributed:\n dist.all_reduce(tensor=epe_list, op=dist.ReduceOp.SUM, group=group)\n dist.all_reduce(tensor=out_list, op=dist.ReduceOp.SUM, group=group)\n\n if args.gpu == 0:\n epe = epe_list[0] / epe_list[1]\n f1 = 100 * out_list[0] / out_list[1]\n\n print(\"Validation KITTI: %f, %f\" % (epe, f1))\n return {'kitti-epe': float(epe.detach().cpu().numpy()), 'kitti-f1': float(f1.detach().cpu().numpy())}\n else:\n return None", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert 
(temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def test_metrics_server(self):\n validate_metrics_server()", "def __validate__(self):\n if self.train:\n assert self.random is not None", "def validate_inputs(self):\n self.ctx.inputs = AttributeDict(self.exposed_inputs(FleurCalculation))\n\n self.ctx.max_queue_nodes = self.inputs.add_comp_para['max_queue_nodes']\n self.ctx.max_queue_wallclock_sec = self.inputs.add_comp_para['max_queue_wallclock_sec']\n\n input_options = self.inputs.options.get_dict()\n self.ctx.optimize_resources = input_options.pop('optimize_resources', True)\n self.ctx.inputs.metadata.options = input_options\n\n if 'description' in self.inputs:\n self.ctx.inputs.metadata.description = self.inputs.description\n else:\n self.ctx.inputs.metadata.description = ''\n if 'label' in self.inputs:\n self.ctx.inputs.metadata.label = self.inputs.label\n else:\n self.ctx.inputs.metadata.label = ''\n\n if not self.ctx.optimize_resources:\n self.ctx.can_be_optimised = False # set this for handlers to not change resources\n return\n\n resources_input = self.ctx.inputs.metadata.options['resources']\n try:\n self.ctx.num_machines = int(resources_input['num_machines'])\n self.ctx.num_mpiprocs_per_machine = int(resources_input['num_mpiprocs_per_machine'])\n except KeyError:\n self.ctx.can_be_optimised = False\n self.report('WARNING: Computation resources were not optimised.')\n else:\n try:\n self.ctx.num_cores_per_mpiproc = int(resources_input['num_cores_per_mpiproc'])\n self.ctx.use_omp = True\n self.ctx.suggest_mpi_omp_ratio = self.ctx.num_mpiprocs_per_machine / self.ctx.num_cores_per_mpiproc\n except KeyError:\n self.ctx.num_cores_per_mpiproc = 1\n self.ctx.use_omp = False\n self.ctx.suggest_mpi_omp_ratio = 1\n\n status = self.check_kpts()\n if status is None:\n self.ctx.can_be_optimised = True\n else:\n self.report('ERROR: Not optimal computational resources.')\n return status", "def _validate_training_process(self, sess, epoch):\n logger.info('Epoch %d: validating training process ...' % epoch)\n \n if self.val_cpu_only:\n logger.warn('The option \\'val_cpu_only\\' is enabled, but not ' + \\\n 'supported by this class. Option will be ignored.')\n\n val_handle = sess.run(self._val_iter.string_handle())\n sess.run(self._val_iter.initializer,\n feed_dict={self._t_val_raw_in: self._val_batch[0],\n self._t_val_raw_out: self._val_batch[1],\n self._t_val_batch_size: self._val_batch[0].shape[0]})\n\n mi_estimate, mi_real, summary = sess.run( \\\n [self._t_mi, self._t_real_mi, self._t_summaries],\n feed_dict={self._t_handle: val_handle,\n self._t_mi_known: True,})\n\n logger.info('Real MI: %f' % mi_real)\n logger.info('Estimated MI on validation batch: %f' % mi_estimate)\n\n self._val_summary_writer.add_summary(summary, epoch)\n self._val_summary_writer.flush()\n\n logger.info('Epoch %d: validating training process ... 
Done' % epoch)", "def do_checks(self):\n # ## get valid experiment variables\n all_subexperiments = [1, 2, 3]\n all_plates = list(range(1, 19))\n all_cell_ids = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']\n all_samples = list(self.experiment.design['Sample'])\n all_genes = self.experiment.subexperiments[1].plates[1].samples[all_samples[0]].genes\n all_replicates = list(range(1, 7))\n all_time = [0.5, 1.0, 2.0, 3.0, 4.0, 8.0, 12.0, 24.0, 48.0, 72.0, 96.0]\n\n if self.time is None:\n if self.treatment is 'Baseline':\n self.time = [0.0, 96.0]\n else:\n self.time = all_time\n\n if self.cell_id is None:\n self.cell_id = all_cell_ids\n\n if self.gene is None:\n self.gene = all_genes\n\n if self.replicate is None:\n self.replicate = all_replicates\n\n if self.treatment is None:\n raise ValueError('treatment cannot be None. Specify one of \"TGFb\", \"Control\", \"Baseline\"')\n\n if not isinstance(self.treatment, str):\n raise ValueError('treatment must be a string. Got \"{}\" a \"{}\"'.format(\n self.treatment, type(self.treatment)\n ))\n\n if not isinstance(self.normed, bool):\n raise ValueError('normed argument should be boolean. Got \"{}\"'.format(\n type(self.normed)\n ))\n\n if not isinstance(self.time, list):\n self.time = [self.time]\n\n for time_point in self.time:\n if time_point not in sorted(list(set(self.data.columns.get_level_values(1)))):\n raise ValueError('\"{}\" is invalid time point. Valid time '\n 'points are: {}'.format(\n time_point, list(self.data.columns))\n )", "def _validate_dimensions(config):\n logging.info(\"Checking provided dimensions are valid\")\n for feature in config.get(\"test-suites\").values():\n for test_name, test in feature.items():\n for dimensions_config in test[\"dimensions\"]:\n _validate_schedulers(config, dimensions_config.get(\"schedulers\", []))\n if [] in dimensions_config.values():\n logging.error(\"Values assigned to dimensions in test %s cannot be empty\", test_name)\n raise AssertionError", "def validate(self, epn, num_samples_to_test = 1000):\n batch_size = epn.batch_size\n dataloader = torch.utils.data.DataLoader(dataset = self, batch_size = batch_size, shuffle=True)\n num_samples_evaluated = 0\n num_correct = 0\n for batch_idx, (x_data, y_target) in enumerate(dataloader):\n epn.randomize_initial_state(batch_size = batch_size)\n epn.set_x_state(x_data)\n s = epn.evolve_to_equilbrium(y_target = None, beta = 0)\n compared = s[:,epn.iy].argmax(dim = 1) == y_target[:].argmax(dim = 1)\n num_samples_evaluated += batch_size\n num_correct += torch.sum(compared)\n if num_samples_evaluated > num_samples_to_test:\n break\n error = (1-num_correct.item()/num_samples_evaluated)\n return error", "def validate(self):\n stats = {}\n evaluate_config = {\"verbose\": self.verbose}\n evaluate_config.update(self.config.get(\"evaluate_config\", {}))\n\n results = self.model.evaluate(self.test_dataset, **evaluate_config)\n if results is None:\n # Using local Model since model.evaluate() returns None\n # for MultiWorkerMirroredStrategy\n logger.warning(\"Running a local model to get validation score.\")\n self.local_model = self.model_creator(self.config)\n self.local_model.set_weights(self.model.get_weights())\n results = self.local_model.evaluate(self.test_dataset,\n **evaluate_config)\n\n if isinstance(results, list):\n stats = {\n \"validation_\" + k: v\n for k, v in zip(self.model.metrics_names, results)\n }\n else:\n stats = {\"loss\": results}\n\n return stats", "def _validate_toolbox(tb):\n assert hasattr(tb, 'select'), \"The toolbox must have a selection 
operator 'select'.\"\n\n # check if all operators in 'tb.pbs' are registered\n for op in tb.pbs:\n assert op.startswith('mut') or op.startswith('cx'), \"Mutation operator must begin with 'mut' and \" \\\n \"crossover with 'cx' except selection.\"\n assert hasattr(tb, op), \"The operator '{}' is not registered in the toolbox, but a probability \" \\\n \"value is specified.\".format(op)\n\n # check if all the mutation and crossover operators have their probabilities assigned in 'tb.pbs'\n for op in [attr for attr in dir(tb) if attr.startswith('mut') or attr.startswith('cx')]:\n if op not in tb.pbs:\n warnings.warn('Operator {0} has no probability value assigned. By default, the probability is '\n 'zero and the operator {0} will not be applied.'.format(op), category=UserWarning)", "def validate_dataset(self):\n pass" ]
[ "0.646635", "0.6258575", "0.62169003", "0.6163073", "0.6159874", "0.6156967", "0.6031898", "0.60262036", "0.60199577", "0.6002835", "0.59988576", "0.59860957", "0.5957013", "0.5948449", "0.59424144", "0.59343827", "0.59333295", "0.59328943", "0.5898939", "0.5875248", "0.58541554", "0.5849492", "0.58381534", "0.58227354", "0.579154", "0.5779303", "0.5752314", "0.57428795", "0.5732462", "0.5732206" ]
0.72171295
0
Records the host_call structure.
def record(self, host_calls):
  for name, host_call in host_calls.items():
    host_fn, tensor_list_or_dict = host_call
    self._names.append(name)
    self._host_fns[name] = host_fn

    if isinstance(tensor_list_or_dict, dict):
      for (key, tensor) in six.iteritems(tensor_list_or_dict):
        self._tensor_keys[name].append(key)
        self._tensors[name].append(tensor)
        self._tensor_dtypes[name].append(tensor.dtype)
        self._tensor_shapes[name].append(tensor.shape)
    else:
      # List or tuple.
      self._tensor_keys[name] = None
      for tensor in tensor_list_or_dict:
        self._tensors[name].append(tensor)
        self._tensor_dtypes[name].append(tensor.dtype)
        self._tensor_shapes[name].append(tensor.shape)
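A hedged usage note, assuming `recorder` is an already-constructed instance of the surrounding class (its constructor is not shown here) and reusing the hypothetical `host_calls` and `host_fn` from the earlier sketch: after recording, the per-name bookkeeping holds the callable plus each tensor's dtype and shape.

recorder.record(host_calls)

# One recorded name, with its callable and per-tensor metadata captured.
assert recorder._names == ['host_call']
assert recorder._host_fns['host_call'] is host_fn
assert recorder._tensor_dtypes['host_call'] == [tf.int64, tf.float32]
assert [s.as_list() for s in recorder._tensor_shapes['host_call']] == [[1], [1]]
# The list/tuple form leaves _tensor_keys as None; a dict of tensors would
# record the dict keys here instead.
assert recorder._tensor_keys['host_call'] is None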
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_callbacks(host, typ):\n \n ip = host.replace(\"-\", \".\")\n src = typ\n call_time = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n\n if os.path.isfile(\"/tmp/cc/calls.log\"): # write callback to calls log\n with open(\"/tmp/cc/calls.log\", 'a') as f:\n s = \"{0:<25} {1:<16} {2:<10}\\n\".format(call_time, ip, src)\n f.write(s)\n else:\n with open(\"/tmp/cc/calls.log\", 'w') as f:\n s = \"{0:<25} {1:<16} {2:<10}\\n\".format(call_time, ip, src)\n f.write(s)\n\n com_file = \"/tmp/cc/hosts/\" + ip # read commands from the appropriate file\n if os.path.isfile(com_file):\n with open(com_file, 'r') as f:\n c = f.read()\n os.remove(com_file)\n return c + \"\\n\"\n else:\n return \"#lmao\\n\"", "def record(method, arguments, result):\n recorder[call_to_key(method, arguments)] = result", "def record(self, port_name, t_start=None):", "def _handle_HostEvent (self, event):\n self.host_alive.append(event.entry) \n print type(event.entry).__name__", "def dispatch_host(name, data):\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)", "def create_cpu_hostcall(host_calls):\n\n _OutfeedHostCall.validate(host_calls)\n ret = {}\n for name, host_call in host_calls.items():\n host_fn, tensors = host_call\n if isinstance(tensors, (tuple, list)):\n ret[name] = host_fn(*tensors)\n else:\n # Must be dict.\n try:\n ret[name] = host_fn(**tensors)\n except TypeError as e:\n logging.warning(\n 'Exception while calling %s: %s. It is likely the tensors '\n '(%s[1]) do not match the '\n 'function\\'s arguments', name, e, name)\n raise e\n return ret", "def add_call(self, func, path):\n if path not in self.called_at:\n self.called_at[path] = {func}\n else:\n self.called_at[path].add(func)", "def add(self, host, flow):\n if host not in list(self.__locusts__.keys()):\n self.__locusts__[host] = self.__locust_code(flow)\n else:\n tmp = self.__locusts__[host][:-100]\n tmp += self.__locust_task(flow)\n tmp += '\\n'\n tmp += self.__locusts__[host][-100:]\n self.__locusts__[host] = tmp\n return", "def record(self):\n # TODO: record the data", "def report(self, host, **kwargs):\n kwargs.update({'host': host})\n self.dbreport('host', kwargs)", "def append(self, hostinfo: HostInfo) -> None:\n\n self.hostinfo_list.append(hostinfo)", "def update(call: ServiceCall) -> None:\n called_host = call.data[ATTR_HOST]\n if called_host in hass.data[DOMAIN]:\n hass.data[DOMAIN][called_host].update()\n else:\n for iperf3_host in hass.data[DOMAIN].values():\n iperf3_host.update()", "def test_call_log(self):\n\n bt_contacts_utils.add_call_log(\n self.pse, bt_contacts_utils.INCOMMING_CALL_TYPE,\n bt_contacts_utils.generate_random_phone_number().phone_number,\n int(time.time() * 1000))\n bt_contacts_utils.add_call_log(\n self.pse, bt_contacts_utils.INCOMMING_CALL_TYPE,\n bt_contacts_utils.generate_random_phone_number().phone_number,\n int(time.time()) * 1000 - 4 * CALL_LOG_TIME_OFFSET_IN_MSEC)\n bt_contacts_utils.add_call_log(\n self.pse, bt_contacts_utils.OUTGOING_CALL_TYPE,\n bt_contacts_utils.generate_random_phone_number().phone_number,\n int(time.time()) * 1000 - CALL_LOG_TIME_OFFSET_IN_MSEC)\n bt_contacts_utils.add_call_log(\n self.pse, bt_contacts_utils.MISSED_CALL_TYPE,\n bt_contacts_utils.generate_random_phone_number().phone_number,\n int(time.time()) * 1000 - 2 * CALL_LOG_TIME_OFFSET_IN_MSEC)\n bt_contacts_utils.add_call_log(\n self.pse, bt_contacts_utils.MISSED_CALL_TYPE,\n bt_contacts_utils.generate_random_phone_number().phone_number,\n int(time.time()) * 1000 - 2 * 
CALL_LOG_TIME_OFFSET_IN_MSEC)\n\n self.pce.droid.bluetoothPbapClientDisconnect(\n self.pse.droid.bluetoothGetLocalAddress())\n self.pce.droid.bluetoothPbapClientDisconnect(\n self.pse2.droid.bluetoothGetLocalAddress())\n\n bt_test_utils.connect_pri_to_sec(\n self.pce, self.pse,\n set([BtEnum.BluetoothProfile.PBAP_CLIENT.value]))\n pse_call_log_count = self.pse.droid.callLogGetCount()\n self.log.info(\"Waiting for {} call logs to be transfered\".format(\n pse_call_log_count))\n bt_contacts_utils.wait_for_call_log_update_complete(self.pce,\n pse_call_log_count)\n\n if not bt_contacts_utils.get_and_compare_call_logs(\n self.pse, self.pce, bt_contacts_utils.INCOMMING_CALL_TYPE):\n return False\n if not bt_contacts_utils.get_and_compare_call_logs(\n self.pse, self.pce, bt_contacts_utils.OUTGOING_CALL_TYPE):\n return False\n if not bt_contacts_utils.get_and_compare_call_logs(\n self.pse, self.pce, bt_contacts_utils.MISSED_CALL_TYPE):\n return False\n\n return True", "def _add_to_history(self,\n argnames, argvals,\n varargs,\n explicit_kwargs, defaulted_kwargs, implicit_kwargs,\n retval,\n elapsed_secs, process_secs,\n timestamp_secs,\n prefixed_func_name,\n caller_chain\n ):\n # Convert timestamp_secs to datetime\n timestamp = datetime.datetime.fromtimestamp(timestamp_secs).\\\n strftime('%x %X.%f') # or '%Y-%m-%d %I:%M:%S.%f %p'\n\n ## 0.2.3+ len(argnames) == len(argvals)\n ## assert len(argnames) == len(argvals)\n # n = min(len(argnames), len(argvals))\n # argnames = argnames[:n]\n # argvals = argvals[:n]\n\n self._call_history.append(\n CallRecord(\n self._num_calls_logged,\n argnames, argvals,\n varargs,\n explicit_kwargs, defaulted_kwargs, implicit_kwargs,\n retval,\n elapsed_secs, process_secs,\n timestamp,\n prefixed_func_name=prefixed_func_name,\n caller_chain=caller_chain)\n )", "def __call__(self, this):\n if self.logging is True:\n self.trace += '{Now} {Host} {Proc} {Event}\\n'.format(\n Now=time.strftime('%c', time.localtime()),\n Host=node(),\n Proc=self.tag,\n Event=this,\n )", "def apply_call(self, call_hash: str) -> \"Handle\":\n return self.__handle__.apply_call(self, call_hash)", "def record_ports(self, xrep_port, pub_port, req_port, hb_port):\n self._recorded_ports = {\n 'xrep_port' : xrep_port,\n 'pub_port' : pub_port,\n 'req_port' : req_port,\n 'hb_port' : hb_port\n }", "def add(self, datarecord):\n if datarecord[\"linetype\"] == \"print_ccInstance\":\n instanceId = datarecord[\"instanceId\"]\n ownerId = datarecord[\"ownerId\"]\n timestamp = datarecord[\"ts\"]\n status = datarecord[\"state\"].lower()\n t = datarecord[\"date\"]\n\n id = instanceId + \" \" + ownerId + \" \" + str(timestamp)\n\n instance = self.data\n\n try:\n current = instance[id]\n # if were wereto do a data base this line needs to be replaced\n except:\n current = datarecord\n\n try:\n current[\"t_end\"]\n except:\n # if not (\"t_end\" in current):\n # time in the future\n f = self.in_the_future\n\n current[\"trace\"] = {\n \"pending\": {\"start\": f, \"stop\": t},\n \"teardown\": {\"start\": f, \"stop\": t},\n \"extant\": {\"start\": f, \"stop\": t}\n }\n current[\"t_end\"] = current[\"date\"]\n current[\"t_start\"] = current[\"ts\"] # for naming consitency\n current[\"duration\"] = 0.0\n\n current[\"t_end\"] = max(current[\"t_end\"], t)\n current[\"trace\"][status][\"start\"] = min(\n current[\"trace\"][status][\"start\"], t)\n current[\"trace\"][status][\"stop\"] = max(\n current[\"trace\"][status][\"stop\"], t)\n\n instance[id] = current", "def append(self, callb):\n self.callbacks.append(callb)\n 
self.monitor = self.monitor | callb.bit\n notify_begin(self.handle, self.monitor)", "def append(self, signature, fields=(), response=None):\n if signature == RUN:\n if self._supports_statement_reuse:\n statement = fields[0]\n if statement.upper() not in (\"BEGIN\", \"COMMIT\", \"ROLLBACK\"):\n if statement == self._last_run_statement:\n fields = (\"\",) + fields[1:]\n else:\n self._last_run_statement = statement\n log_debug(\"C: RUN %r\", fields)\n elif signature == PULL_ALL:\n log_debug(\"C: PULL_ALL %r\", fields)\n elif signature == DISCARD_ALL:\n log_debug(\"C: DISCARD_ALL %r\", fields)\n elif signature == RESET:\n log_debug(\"C: RESET %r\", fields)\n elif signature == ACK_FAILURE:\n log_debug(\"C: ACK_FAILURE %r\", fields)\n elif signature == INIT:\n log_debug(\"C: INIT (%r, {...})\", fields[0])\n else:\n raise ValueError(\"Unknown message signature\")\n self.packer.pack_struct(signature, fields)\n self.output_buffer.chunk()\n self.output_buffer.chunk()\n self.responses.append(response)", "def callee(calls):\n calls.append(1)", "def CALL_addr(self, addr):\n\t\tself.stack[self.SP] = self.IP\n\t\tself.SP += 1\n\t\tself.IP = addr", "def usage(self, host):", "def save_user_defined_calls(*args):\n return _ida_hexrays.save_user_defined_calls(*args)", "def push(host):\n dispatcher = Dispatch(host)\n\n post(host)\n\n context = zmq.Context()\n zmq_socket = context.socket(zmq.PUSH)\n zmq_socket.bind('tcp://127.0.0.1:5560')\n\n for record in dispatcher:\n zmq_socket.send_pyobj((int(time.time()),record.raw))", "def add(self, host, **kwargs):\n if host in self.hosts_:\n raise ValueError(\"Host %s: exists (use update).\" % host)\n self.hosts_.add(host)\n self.lines_.append(ConfigLine(line=\"\", host=None))\n self.lines_.append(ConfigLine(line=\"Host %s\" % host, host=host, key=\"Host\", value=host))\n for k, v in kwargs.items():\n if type(v) not in [list, tuple]:\n v = [v]\n mapped_k = _remap_key(k)\n for value in v:\n new_line = self._new_line(mapped_k, value)\n self.lines_.append(ConfigLine(line=new_line, host=host, key=mapped_k, value=value))\n self.lines_.append(ConfigLine(line=\"\", host=None))", "def record(*args, **kwargs):\n LOG.info(\"args={}, kwargs={}\".format(args, kwargs))", "def show_hosts():\n host_str = \"\"\n data = parse(\"/tmp/cc/calls.log\")\n for ip in data:\n ln = \"{}: {}\".format(ip, data[ip]) + \"\\n\"\n host_str += ln\n return host_str", "def put_record(self, obj):\r\n for output in self.outputs:\r\n output.put_record(obj)", "def _update_call(self):\n for entry in self._entry_nodes:\n self._update_call_visit(entry.get_func_first_node(), {})" ]
[ "0.5483742", "0.5457527", "0.5410701", "0.52416027", "0.5203142", "0.517999", "0.50630707", "0.5054913", "0.5040014", "0.5017073", "0.5015763", "0.5003606", "0.499811", "0.4976404", "0.4963154", "0.49531364", "0.49517584", "0.4951451", "0.49355006", "0.4930302", "0.49170408", "0.4890773", "0.48342973", "0.48020828", "0.47822925", "0.47817737", "0.47749308", "0.47730726", "0.47694013", "0.4767938" ]
0.7327148
0
Create the op to enqueue the recorded host_calls.
def create_enqueue_op(self):
  if not self._names:
    return []

  tensors = []
  # TODO(jhseu): Consider deduping tensors.
  for name in self._names:
    tensors.extend(self._tensors[name])

  with ops.device(tpu.core(0)):
    return [tpu_ops.outfeed_enqueue_tuple(tensors)]
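A short sketch of the ordering invariant this method establishes, under the same assumption of a populated `recorder` instance; actually building the op is only meaningful inside TPU graph construction, so that call is left as a comment. Tensors are flattened per name in recording order, which is the order the dequeue side (`create_tpu_hostcall`) later uses to slice them back out.

# enqueue_ops = recorder.create_enqueue_op()  # build inside the TPU graph;
# it returns [] when nothing was recorded, else one outfeed_enqueue_tuple op.

# The enqueued tuple is simply every recorded tensor, flattened per name in
# recording order.
flat_tensors = []
for name in recorder._names:
  flat_tensors.extend(recorder._tensors[name])
# flat_tensors is exactly the list handed to tpu_ops.outfeed_enqueue_tuple
# on tpu.core(0) of each replica.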
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def record(self, host_calls):\n\n for name, host_call in host_calls.items():\n host_fn, tensor_list_or_dict = host_call\n self._names.append(name)\n self._host_fns[name] = host_fn\n\n if isinstance(tensor_list_or_dict, dict):\n for (key, tensor) in six.iteritems(tensor_list_or_dict):\n self._tensor_keys[name].append(key)\n self._tensors[name].append(tensor)\n self._tensor_dtypes[name].append(tensor.dtype)\n self._tensor_shapes[name].append(tensor.shape)\n else:\n # List or tuple.\n self._tensor_keys[name] = None\n for tensor in tensor_list_or_dict:\n self._tensors[name].append(tensor)\n self._tensor_dtypes[name].append(tensor.dtype)\n self._tensor_shapes[name].append(tensor.shape)", "def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,\n inputs_structure_recorder):\n captured_infeed_queue = _CapturedObject()\n\n def enqueue_ops_fn():\n \"\"\"A fn returns enqueue_ops.\"\"\"\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue", "def enqueue_ops_fn():\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def generate_per_host_enqueue_ops_fn_for_host(\n ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):\n captured_infeed_queue = _CapturedObject()\n\n hooks = []\n\n with ops.device(device):\n inputs = _Inputs.from_input_fn(input_fn())\n\n is_dataset = 
inputs.is_dataset\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n if not is_dataset:\n raise TypeError(\n 'For mode PREDICT, `input_fn` must return `Dataset` instead of '\n '`features` and `labels`.')\n if batch_axis is not None:\n raise TypeError('For mode PREDICT, batch_axis is not supported yet.')\n inputs = _InputsWithStoppingSignals(\n dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,\n add_padding=True)\n\n if is_dataset:\n hooks.append(inputs.dataset_initializer_hook())\n\n # TODO(ylc): Refactoring the code to merge the tpu ordinal logic here and the\n # _TPUContext.tpu_ordinal_function. We should either introduce another\n # abstraction or a different helper method.\n def _tpu_ordinal_function_impl(shard_index_in_host):\n # We put both enqueue/dequeue op at tpu.core(0) in each replica.\n replica = ctx.device_assignment.lookup_replicas(\n host_id, (0, 0, 0))[shard_index_in_host]\n return ctx.device_assignment.tpu_ordinal(replica=replica)\n\n if ctx.model_parallelism_enabled:\n tpu_ordinal_function = _tpu_ordinal_function_impl\n else:\n tpu_ordinal_function = None\n\n def enqueue_ops_fn():\n with ops.device(device):\n num_of_replicas_per_host = ctx.num_of_replicas_per_host\n # Convert user input to features and labels. If the user returns a\n # dataset, it is initialized and the features and labels extracted via\n # `dataset.iterator.get_next()`\n features, labels = inputs.features_and_labels()\n signals = inputs.signals()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels, signals)\n unsharded_tensor_list = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels, signals))\n\n infeed_queue = tpu_feed.InfeedQueue(\n tuple_types=[t.dtype for t in unsharded_tensor_list],\n tuple_shapes=[t.shape for t in unsharded_tensor_list],\n shard_dimensions=batch_axis)\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_number_of_shards(num_of_replicas_per_host)\n per_host_enqueue_ops = (\n infeed_queue.split_inputs_and_generate_enqueue_ops(\n unsharded_tensor_list,\n placement_function=lambda x: device,\n tpu_ordinal_function=tpu_ordinal_function))\n if signals is None:\n return per_host_enqueue_ops\n else:\n return {\n 'ops': per_host_enqueue_ops,\n 'signals': signals,\n }\n\n return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset", "def generate_per_host_v2_enqueue_ops_fn_for_host(\n ctx, input_fn, inputs_structure_recorder, device, host_id):\n del host_id # unused\n captured_infeed_queue = _CapturedObject()\n hooks = []\n\n with ops.device(device):\n inputs = _Inputs.from_input_fn(input_fn())\n\n is_dataset = inputs.is_dataset\n if not is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '\n 'input pipeline configuration.')\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n # TODO(b/XXX): Add predict support for PER_HOST_V2\n raise TypeError('Most PREDICT not yet supported in PER_HOST_V2 mode.')\n\n hooks.append(inputs.dataset_initializer_hook())\n\n def enqueue_ops_fn():\n \"\"\"Generates the per_host enqueue ops.\"\"\"\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n 
inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset", "def enqueue_ops_fn():\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def create_tpu_hostcall(self):\n if not self._names:\n return []\n\n ret = {}\n # For each i, dequeue_ops[i] is a list containing the tensors from all\n # shards. This list is concatenated later.\n dequeue_ops = []\n tensor_dtypes = []\n tensor_shapes = []\n for name in self._names:\n for _ in self._tensors[name]:\n dequeue_ops.append([])\n for dtype in self._tensor_dtypes[name]:\n tensor_dtypes.append(dtype)\n for shape in self._tensor_shapes[name]:\n tensor_shapes.append(shape)\n\n # Outfeed ops execute on each replica's first logical core. Note: we must\n # constraint it such that we have at most one outfeed dequeue and enqueue\n # per replica.\n tpu_device_placement_fn = self._ctx.tpu_device_placement_function\n for i in xrange(self._ctx.num_replicas):\n with ops.device(tpu_device_placement_fn(i)):\n outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(\n dtypes=tensor_dtypes, shapes=tensor_shapes)\n for j, item in enumerate(outfeed_tensors):\n dequeue_ops[j].append(item)\n\n # Deconstruct dequeue ops.\n dequeue_ops_by_name = {}\n pos = 0\n for name in self._names:\n dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])]\n pos += len(self._tensors[name])\n\n # It is assumed evaluation always happens on single host TPU system. 
So,\n # place all ops on tpu host if possible.\n #\n # TODO(jhseu): Evaluate whether this is right for summaries.\n with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):\n for name in self._names:\n dequeue_ops = dequeue_ops_by_name[name]\n for i, item in enumerate(dequeue_ops):\n if dequeue_ops[i][0].shape.ndims == 0:\n raise RuntimeError(\n 'All tensors outfed from TPU should preserve batch size '\n 'dimension, but got scalar {}'.format(dequeue_ops[i][0]))\n # TODO(xiejw): Allow users to specify the axis for batch size\n # dimension.\n dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)\n\n if self._tensor_keys[name] is not None:\n # The user-provided eval_metrics[1] is a dict.\n dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))\n try:\n ret[name] = self._host_fns[name](**dequeue_ops)\n except TypeError as e:\n logging.warning(\n 'Exception while calling %s: %s. It is likely the tensors '\n '(%s[1]) do not match the '\n 'function\\'s arguments', name, e, name)\n raise e\n else:\n ret[name] = self._host_fns[name](*dequeue_ops)\n\n return ret", "def push(host):\n dispatcher = Dispatch(host)\n\n post(host)\n\n context = zmq.Context()\n zmq_socket = context.socket(zmq.PUSH)\n zmq_socket.bind('tcp://127.0.0.1:5560')\n\n for record in dispatcher:\n zmq_socket.send_pyobj((int(time.time()),record.raw))", "def create_cpu_hostcall(host_calls):\n\n _OutfeedHostCall.validate(host_calls)\n ret = {}\n for name, host_call in host_calls.items():\n host_fn, tensors = host_call\n if isinstance(tensors, (tuple, list)):\n ret[name] = host_fn(*tensors)\n else:\n # Must be dict.\n try:\n ret[name] = host_fn(**tensors)\n except TypeError as e:\n logging.warning(\n 'Exception while calling %s: %s. It is likely the tensors '\n '(%s[1]) do not match the '\n 'function\\'s arguments', name, e, name)\n raise e\n return ret", "def add_socket_switch_operation_to_queue(self, address, unit, state):\n self.__operations.append([int(address), int(unit), int(state)])", "def add(self, host, flow):\n if host not in list(self.__locusts__.keys()):\n self.__locusts__[host] = self.__locust_code(flow)\n else:\n tmp = self.__locusts__[host][:-100]\n tmp += self.__locust_task(flow)\n tmp += '\\n'\n tmp += self.__locusts__[host][-100:]\n self.__locusts__[host] = tmp\n return", "def enqueue(self, func):\n self.queue.put(func)", "def _prepare_host_call_fn(self, processed_t_fetches,\n op_fetches, graph, graph_summary_tag):\n if self._parameters.trace_dir is None:\n raise ValueError('Provide a trace_dir for tensor tracer in summary mode. '\n '--trace_dir=/model/dir')\n\n def _write_cache(step, event_file_suffix=None, **kwargs):\n \"\"\"Writes the given caches as tensor summary.\n\n Args:\n step: Step tensor with dimension [num_cores].\n event_file_suffix: Event filename suffix tensor.\n **kwargs: The dictionary of tensors that needs to be written as\n summaries. Key and value pairs within kwargs correspond to the tag\n name, and tensor content that will be written using summary.write.\n The trace_modes that use this function are:\n - summary: In summary mode, kwargs includes a single (tag, content)\n pair which are, _TT_SUMMARY_TAG and a tf.float32 signature_cache\n variable. The dimension of the signature_cache is:\n num_cores x num_traced_tensors x num_signatures.\n - full_tensor_summary: kwargs will include all traced tensors. 
Tag\n and content correspond to the name of the tensor, and its actual\n content.\n Returns:\n A tf.Operation that needs to be executed for the host call dependencies.\n \"\"\"\n file_suffix = _TT_EVENT_FILE_SUFFIX\n if event_file_suffix is not None:\n file_suffix = string_ops.string_join([file_suffix, event_file_suffix],\n separator='.')\n # TODO(deveci): Parametrize max_queue, so that flushing op can be called\n # less frequently.\n # Setting max_queue to 100 appears to be safe even when the number of\n # iterations are much lower, as the destructor of the writer flushes it.\n summary_write_ops = []\n summary_writer = summary.create_file_writer_v2(\n self._parameters.trace_dir,\n filename_suffix=file_suffix,\n max_queue=_TT_SUMMARY_MAX_QUEUE)\n graph.add_to_collection(\n TENSOR_TRACER_SUMMARY_COLLECTION, summary_writer)\n\n step_value = step[0]\n dt = step_value.dtype\n\n # The step parameter to a summary write call must be 64-bit.\n if dt.__ne__(dtypes.int64) and dt.__ne__(\n dtypes.uint64) and dt.__ne__(dtypes.float64):\n step_value = math_ops.cast(step_value, dtypes.int64)\n\n with summary_writer.as_default():\n summary_metadata = summary_pb2.SummaryMetadata(\n plugin_data=summary_pb2.SummaryMetadata.PluginData(\n plugin_name=_TT_TENSORBOARD_PLUGIN_NAME))\n for key, value in kwargs.items():\n # Check whether we need to compute aggregated statistics that merge\n # all cores statistics.\n if not self._parameters.collect_summary_per_core:\n # Merge only statistics tensor, if it is any other tensor we simply,\n # concatenate them.\n # Also, if there is only a single core (first dim. is 0), then skip\n # aggregation.\n if key == _TT_SUMMARY_TAG and value.shape.as_list()[0] != 1:\n value = self.aggregate_global_cache(value)\n with ops.control_dependencies([summary_writer.init()]):\n summary_write_ops.append(summary.write(\n _TT_SUMMARY_TAG + '/' + key + '.' 
+ graph_summary_tag,\n value, metadata=summary_metadata,\n step=step_value))\n return control_flow_ops.group(summary_write_ops)\n\n global_step = training_util.get_or_create_global_step()\n step = array_ops.reshape(global_step, [1])\n self._host_call_fn = {}\n\n host_call_deps = op_fetches + [tensor.op for tensor in processed_t_fetches]\n\n caches_to_write = {}\n with ops.control_dependencies(host_call_deps):\n all_caches = self._cache_variable_for_graph(graph)\n for cache_name, cache_variable in all_caches.items():\n # Increase the cache rank by 1, so that when host call concatenates\n # tensors from different replicas, we can identify them with [core_id].\n new_cache_shape = [1]\n new_cache_shape.extend(cache_variable.shape.as_list())\n cache = array_ops.reshape(cache_variable, new_cache_shape)\n caches_to_write[cache_name] = cache\n # Add step to parameter dictionary.\n caches_to_write['step'] = step\n # Other options without adding step to parameter dictionary are\n # * host_call_fn = (_write_cache(step, caches_to_write)) : fails as it\n # considers caches_to_write as a single parameter, rather than a keyword\n # parameters.\n # * host_call_fn = (_write_cache(step, **caches_to_write)) : fails with\n # a syntax error.\n self._host_call_fn[_TT_HOSTCALL_KEY] = (_write_cache, caches_to_write)", "def add(self, requester: int, track: dict):\n self.queue.append(AudioTrack().build(track, requester))", "def enqueue(self, cmd) -> None:\n self.general_queue.append(cmd)", "def _execute(self, op, time):\n raise NotImplementedError", "def _notify_listeners_start_operation(self, op):\n self.operation = op\n for lstnr in self.listeners:\n lstnr.start_operation(self, op)", "def test_collection_controller_heappush_calls(setup_controller, mock_run):\n # given\n sources = {\n 'kraken': KrakenOHLCV(Interval.MINUTE, SymbolPair(Symbol.LITECOIN, Symbol.USD), 360),\n 'crypto_compare': CryptoCompareOHLCV(Interval.MINUTE, SymbolPair(Symbol.BITCOIN, Symbol.USD), 1000),\n }\n controller = setup_controller(sources, 3, 180)\n mock_run(sources)\n # when\n controller.run()\n # then\n expected = [\n call(controller._queue, (1560144660, 0, 'kraken')),\n call(controller._queue, (1560183060, 0, 'crypto_compare')),\n call(controller._queue, (1560144660, 1, 'kraken')),\n call(controller._queue, (1560144660, 2, 'kraken')),\n call(controller._queue, (1560183060, 1, 'crypto_compare')),\n call(controller._queue, (1560183060, 2, 'crypto_compare')),\n ]\n assert heapq.heappush.call_args_list == expected", "def AddOperation(self, op):\n self._operations.append(op)", "def push(self, oplog):\n ns = oplog['ns']\n if ns not in self._map:\n self._map[ns] = []\n self._map[ns].append(oplog)\n self._count += 1\n self._last_optime = oplog['ts']", "def enqueue(self, name):\n pass", "def addOp(self, op):\n self.operations << op", "def enqueue(self, xyz):\n command = 'enqueue ' + str(xyz)\n self.run_command(command)", "async def dispatch_auto_starts(self, ctx):\n for operation in self.config.dataflow.operations.values():\n if operation.inputs or not await self.ictx.check_conditions(\n operation, self.config.dataflow, ctx\n ):\n continue\n parameter_set = MemoryParameterSet(\n MemoryParameterSetConfig(ctx=ctx, parameters=[])\n )\n task = await self.nctx.dispatch(self, operation, parameter_set)\n task.operation = operation\n task.parameter_set = parameter_set\n yield task", "def add_op(self, op):\n self._operations.append(op)", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n 
work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def record(self, batch_axis, op, *args):\n assert callable(op), 'op must be callable. Got: %s' % op\n flat_args, fmt = _flatten(args)\n op_sig = OpSig(op, fmt, batch_axis)\n arg_sig = _arg_hash(flat_args)\n arg_types = tuple(_type_code(arg) for arg in flat_args)\n if arg_sig not in self.cached_nodes[op_sig]:\n steps = [arg.step+1 for arg, arg_type in zip(flat_args, arg_types) if arg_type == 1]\n step = 0 if not steps else max(steps)\n node = Virtual(op_sig, arg_sig, step, len(self.steps[step][op_sig]))\n self.steps[step][op_sig].append((flat_args, arg_types))\n self.cached_nodes[op_sig][arg_sig] = node\n return self.cached_nodes[op_sig][arg_sig]", "def creator(self, q, data, num_sub_proc):\n for d in data:\n idx = d[0]\n q.put((idx, d[1]))\n\n for i in range(0, num_sub_proc):\n q.put('DONE')", "def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)", "def runQueueEnqueue(self):\n raise NotImplementedError" ]
[ "0.6292755", "0.5712997", "0.56591284", "0.5607669", "0.5587878", "0.5528072", "0.54914623", "0.54818785", "0.53225166", "0.5263789", "0.51789", "0.50644535", "0.50429875", "0.5037634", "0.49689317", "0.49309674", "0.49283302", "0.4920256", "0.4901082", "0.48963195", "0.48628485", "0.48536164", "0.48531994", "0.48266482", "0.481501", "0.47991067", "0.47859898", "0.47756866", "0.47744808", "0.47708347" ]
0.5739713
1
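Each completed row above bundles a natural-language query with a positive code snippet, a set of hard-negative snippets, and their retrieval scores, and the objective metadata attached to these rows marks them for (query, document, negatives) triplet training. A minimal sketch of how such a row could be unpacked into training triplets; the JSONL path and the exact field names used below are assumptions for illustration, not guaranteed by the dump itself:

import json

# Hedged sketch: turn one row of this dump into (anchor, positive, negative)
# triplets, following the {"triplet": [["query", "document", "negatives"]]}
# objective recorded in the row metadata. File name and key names are assumed.
def iter_triplets(path="retrieval_rows.jsonl"):
    with open(path) as f:
        for line in f:
            row = json.loads(line)
            anchor = row["query"]        # natural-language docstring
            positive = row["document"]   # code the docstring describes
            scores = [float(s) for s in row["negative_scores"]]  # stored as strings
            for negative, score in zip(row["negatives"], scores):
                yield anchor, positive, negative, score

The per-negative score can then be used to filter or weight hard negatives when building batches.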
Sends the tensors through outfeed and runs the host_fn on CPU. The tensors are concatenated along dimension 0 to form a global tensor across all shards. The concatenated tensors are passed to the host_fn, which is executed on the first host.
def create_tpu_hostcall(self): if not self._names: return [] ret = {} # For each i, dequeue_ops[i] is a list containing the tensors from all # shards. This list is concatenated later. dequeue_ops = [] tensor_dtypes = [] tensor_shapes = [] for name in self._names: for _ in self._tensors[name]: dequeue_ops.append([]) for dtype in self._tensor_dtypes[name]: tensor_dtypes.append(dtype) for shape in self._tensor_shapes[name]: tensor_shapes.append(shape) # Outfeed ops execute on each replica's first logical core. Note: we must # constraint it such that we have at most one outfeed dequeue and enqueue # per replica. tpu_device_placement_fn = self._ctx.tpu_device_placement_function for i in xrange(self._ctx.num_replicas): with ops.device(tpu_device_placement_fn(i)): outfeed_tensors = tpu_ops.outfeed_dequeue_tuple( dtypes=tensor_dtypes, shapes=tensor_shapes) for j, item in enumerate(outfeed_tensors): dequeue_ops[j].append(item) # Deconstruct dequeue ops. dequeue_ops_by_name = {} pos = 0 for name in self._names: dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])] pos += len(self._tensors[name]) # It is assumed evaluation always happens on single host TPU system. So, # place all ops on tpu host if possible. # # TODO(jhseu): Evaluate whether this is right for summaries. with ops.device(self._ctx.tpu_host_placement_function(core_id=0)): for name in self._names: dequeue_ops = dequeue_ops_by_name[name] for i, item in enumerate(dequeue_ops): if dequeue_ops[i][0].shape.ndims == 0: raise RuntimeError( 'All tensors outfed from TPU should preserve batch size ' 'dimension, but got scalar {}'.format(dequeue_ops[i][0])) # TODO(xiejw): Allow users to specify the axis for batch size # dimension. dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0) if self._tensor_keys[name] is not None: # The user-provided eval_metrics[1] is a dict. dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops)) try: ret[name] = self._host_fns[name](**dequeue_ops) except TypeError as e: logging.warning( 'Exception while calling %s: %s. It is likely the tensors ' '(%s[1]) do not match the ' 'function\'s arguments', name, e, name) raise e else: ret[name] = self._host_fns[name](*dequeue_ops) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enqueue_ops_fn():\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def enqueue_ops_fn():\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,\n inputs_structure_recorder):\n captured_infeed_queue = _CapturedObject()\n\n def enqueue_ops_fn():\n \"\"\"A fn returns enqueue_ops.\"\"\"\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. 
Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue", "def generate_per_host_v2_enqueue_ops_fn_for_host(\n ctx, input_fn, inputs_structure_recorder, device, host_id):\n del host_id # unused\n captured_infeed_queue = _CapturedObject()\n hooks = []\n\n with ops.device(device):\n inputs = _Inputs.from_input_fn(input_fn())\n\n is_dataset = inputs.is_dataset\n if not is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '\n 'input pipeline configuration.')\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n # TODO(b/XXX): Add predict support for PER_HOST_V2\n raise TypeError('Most PREDICT not yet supported in PER_HOST_V2 mode.')\n\n hooks.append(inputs.dataset_initializer_hook())\n\n def enqueue_ops_fn():\n \"\"\"Generates the per_host enqueue ops.\"\"\"\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset", "def generate_per_host_enqueue_ops_fn_for_host(\n ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):\n captured_infeed_queue = _CapturedObject()\n\n hooks = []\n\n with ops.device(device):\n inputs = _Inputs.from_input_fn(input_fn())\n\n is_dataset = inputs.is_dataset\n if ctx.mode == model_fn_lib.ModeKeys.PREDICT:\n if not is_dataset:\n raise TypeError(\n 'For mode PREDICT, `input_fn` must return `Dataset` instead of '\n '`features` and `labels`.')\n if batch_axis is not None:\n raise TypeError('For mode PREDICT, batch_axis is not supported yet.')\n inputs = _InputsWithStoppingSignals(\n dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,\n add_padding=True)\n\n if is_dataset:\n 
hooks.append(inputs.dataset_initializer_hook())\n\n # TODO(ylc): Refactoring the code to merge the tpu ordinal logic here and the\n # _TPUContext.tpu_ordinal_function. We should either introduce another\n # abstraction or a different helper method.\n def _tpu_ordinal_function_impl(shard_index_in_host):\n # We put both enqueue/dequeue op at tpu.core(0) in each replica.\n replica = ctx.device_assignment.lookup_replicas(\n host_id, (0, 0, 0))[shard_index_in_host]\n return ctx.device_assignment.tpu_ordinal(replica=replica)\n\n if ctx.model_parallelism_enabled:\n tpu_ordinal_function = _tpu_ordinal_function_impl\n else:\n tpu_ordinal_function = None\n\n def enqueue_ops_fn():\n with ops.device(device):\n num_of_replicas_per_host = ctx.num_of_replicas_per_host\n # Convert user input to features and labels. If the user returns a\n # dataset, it is initialized and the features and labels extracted via\n # `dataset.iterator.get_next()`\n features, labels = inputs.features_and_labels()\n signals = inputs.signals()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels, signals)\n unsharded_tensor_list = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels, signals))\n\n infeed_queue = tpu_feed.InfeedQueue(\n tuple_types=[t.dtype for t in unsharded_tensor_list],\n tuple_shapes=[t.shape for t in unsharded_tensor_list],\n shard_dimensions=batch_axis)\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_number_of_shards(num_of_replicas_per_host)\n per_host_enqueue_ops = (\n infeed_queue.split_inputs_and_generate_enqueue_ops(\n unsharded_tensor_list,\n placement_function=lambda x: device,\n tpu_ordinal_function=tpu_ordinal_function))\n if signals is None:\n return per_host_enqueue_ops\n else:\n return {\n 'ops': per_host_enqueue_ops,\n 'signals': signals,\n }\n\n return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset", "def input_fn(self, params):\n with tf.variable_scope('data_provider'):\n if self.mode == enums.ModelMode.INFERENCE:\n images = tf.placeholder(tf.float32, [\n None, self.preprocessor.preprocessing_options.image_size,\n self.preprocessor.preprocessing_options.image_size, 3\n ])\n return tf_estimator.export.TensorServingInputReceiver(\n features=images, receiver_tensors=images)\n\n # Retrieves the batch size for the current shard. The # of shards is\n # computed according to the input pipeline deployment. See\n # tf.contrib.tpu.RunConfig for details.\n batch_size = params['batch_size']\n\n if 'context' in params:\n current_host = params['context'].current_input_fn_deployment()[1]\n num_hosts = params['context'].num_hosts\n num_cores = params['context'].num_replicas\n else:\n current_host = 0\n num_hosts = 1\n num_cores = 1\n\n dataset = self.make_source_dataset(current_host, num_hosts)\n\n if (self.mode == enums.ModelMode.TRAIN and self.max_samples and\n self.max_samples > 0):\n dataset = dataset.take(self.max_samples)\n\n dataset = dataset.map(self.dataset_parser, num_parallel_calls=num_cores)\n if self.label_noise_prob > 0. and self.mode == enums.ModelMode.TRAIN:\n dataset = dataset.map(\n self._label_noise_fn, num_parallel_calls=num_cores)\n\n if self.cache:\n dataset = dataset.cache()\n if self.mode == enums.ModelMode.TRAIN:\n dataset = dataset.shuffle(self.shuffle_buffer).repeat()\n\n # Use the fused map-and-batch operation.\n #\n # For XLA, we must used fixed shapes. 
Because we repeat the source\n # training dataset indefinitely, we can use `drop_remainder=True` to get\n # fixed-size batches without dropping any training examples.\n #\n # When evaluating, `drop_remainder=True` prevents accidentally evaluating\n # the same image twice by dropping the final batch if it is less than a\n # full batch size. As long as this validation is done with consistent\n # batch size, exactly the same images will be used.\n dataset = dataset.apply(\n tf.data.experimental.map_and_batch(\n self._preprocess_image,\n batch_size=batch_size,\n num_parallel_batches=num_cores,\n drop_remainder=True))\n\n # Assign static batch size dimension\n dataset = dataset.map(\n functools.partial(self._set_static_batch_dim, batch_size))\n\n # Prefetch overlaps in-feed with training\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n return dataset", "def _prepare_host_call_fn(self, processed_t_fetches,\n op_fetches, graph, graph_summary_tag):\n if self._parameters.trace_dir is None:\n raise ValueError('Provide a trace_dir for tensor tracer in summary mode. '\n '--trace_dir=/model/dir')\n\n def _write_cache(step, event_file_suffix=None, **kwargs):\n \"\"\"Writes the given caches as tensor summary.\n\n Args:\n step: Step tensor with dimension [num_cores].\n event_file_suffix: Event filename suffix tensor.\n **kwargs: The dictionary of tensors that needs to be written as\n summaries. Key and value pairs within kwargs correspond to the tag\n name, and tensor content that will be written using summary.write.\n The trace_modes that use this function are:\n - summary: In summary mode, kwargs includes a single (tag, content)\n pair which are, _TT_SUMMARY_TAG and a tf.float32 signature_cache\n variable. The dimension of the signature_cache is:\n num_cores x num_traced_tensors x num_signatures.\n - full_tensor_summary: kwargs will include all traced tensors. Tag\n and content correspond to the name of the tensor, and its actual\n content.\n Returns:\n A tf.Operation that needs to be executed for the host call dependencies.\n \"\"\"\n file_suffix = _TT_EVENT_FILE_SUFFIX\n if event_file_suffix is not None:\n file_suffix = string_ops.string_join([file_suffix, event_file_suffix],\n separator='.')\n # TODO(deveci): Parametrize max_queue, so that flushing op can be called\n # less frequently.\n # Setting max_queue to 100 appears to be safe even when the number of\n # iterations are much lower, as the destructor of the writer flushes it.\n summary_write_ops = []\n summary_writer = summary.create_file_writer_v2(\n self._parameters.trace_dir,\n filename_suffix=file_suffix,\n max_queue=_TT_SUMMARY_MAX_QUEUE)\n graph.add_to_collection(\n TENSOR_TRACER_SUMMARY_COLLECTION, summary_writer)\n\n step_value = step[0]\n dt = step_value.dtype\n\n # The step parameter to a summary write call must be 64-bit.\n if dt.__ne__(dtypes.int64) and dt.__ne__(\n dtypes.uint64) and dt.__ne__(dtypes.float64):\n step_value = math_ops.cast(step_value, dtypes.int64)\n\n with summary_writer.as_default():\n summary_metadata = summary_pb2.SummaryMetadata(\n plugin_data=summary_pb2.SummaryMetadata.PluginData(\n plugin_name=_TT_TENSORBOARD_PLUGIN_NAME))\n for key, value in kwargs.items():\n # Check whether we need to compute aggregated statistics that merge\n # all cores statistics.\n if not self._parameters.collect_summary_per_core:\n # Merge only statistics tensor, if it is any other tensor we simply,\n # concatenate them.\n # Also, if there is only a single core (first dim. 
is 0), then skip\n # aggregation.\n if key == _TT_SUMMARY_TAG and value.shape.as_list()[0] != 1:\n value = self.aggregate_global_cache(value)\n with ops.control_dependencies([summary_writer.init()]):\n summary_write_ops.append(summary.write(\n _TT_SUMMARY_TAG + '/' + key + '.' + graph_summary_tag,\n value, metadata=summary_metadata,\n step=step_value))\n return control_flow_ops.group(summary_write_ops)\n\n global_step = training_util.get_or_create_global_step()\n step = array_ops.reshape(global_step, [1])\n self._host_call_fn = {}\n\n host_call_deps = op_fetches + [tensor.op for tensor in processed_t_fetches]\n\n caches_to_write = {}\n with ops.control_dependencies(host_call_deps):\n all_caches = self._cache_variable_for_graph(graph)\n for cache_name, cache_variable in all_caches.items():\n # Increase the cache rank by 1, so that when host call concatenates\n # tensors from different replicas, we can identify them with [core_id].\n new_cache_shape = [1]\n new_cache_shape.extend(cache_variable.shape.as_list())\n cache = array_ops.reshape(cache_variable, new_cache_shape)\n caches_to_write[cache_name] = cache\n # Add step to parameter dictionary.\n caches_to_write['step'] = step\n # Other options without adding step to parameter dictionary are\n # * host_call_fn = (_write_cache(step, caches_to_write)) : fails as it\n # considers caches_to_write as a single parameter, rather than a keyword\n # parameters.\n # * host_call_fn = (_write_cache(step, **caches_to_write)) : fails with\n # a syntax error.\n self._host_call_fn[_TT_HOSTCALL_KEY] = (_write_cache, caches_to_write)", "def __call__(self, *args, **kwargs) -> None:\n # The device portion needs to run every step to produce a stable graph\n device_outputs = self.run_on_device(*args, **kwargs)\n assert isinstance(device_outputs, DeviceOutputs), (\n f\"Expected device outputs to be of type `DeviceOutputs`, \"\n f\"but got `{type(device_outputs)}`.\"\n )\n\n # Detach and clone device outputs to ensure we use the \"current\" value\n for idx, tensor in enumerate(device_outputs.args):\n if isinstance(tensor, torch.Tensor):\n device_outputs.args[idx] = tensor.detach().clone()\n for key, tensor in device_outputs.kwargs.items():\n if isinstance(tensor, torch.Tensor):\n device_outputs.kwargs[key] = tensor.detach().clone()\n\n if cm.use_cs():\n state = cbtorch.state()\n state.track_object(\n {\n \"cb_summary\": {\n self.name: [device_outputs.args, device_outputs.kwargs]\n }\n },\n force=self._is_appliance,\n )\n\n if self._is_appliance:\n\n def _on_activations_received():\n cpu_args = [\n state.get_activation_for_output(tensor)\n if isinstance(tensor, torch.Tensor)\n else tensor\n for tensor in device_outputs.args\n ]\n cpu_kwargs = {\n key: state.get_activation_for_output(tensor)\n if isinstance(tensor, torch.Tensor)\n else tensor\n for key, tensor in device_outputs.kwargs.items()\n }\n\n self._cached_cpu_activations.append(\n self.run_on_host(*cpu_args, **cpu_kwargs)\n )\n\n state.register_activation_callback(_on_activations_received)\n else:\n\n @cm.step_closure\n def _run_on_host_closure(\n device_args: List[Any], device_kwargs: Dict[str, Any],\n ):\n device_args = cm.to_cpu(device_args)\n device_kwargs = cm.to_cpu(device_kwargs)\n self._cached_cpu_activations.append(\n self.run_on_host(*device_args, **device_kwargs)\n )\n\n _run_on_host_closure(\n device_outputs.args, device_outputs.kwargs,\n )", "def _GetHostTrainLoop(\n self, strategy: tf.distribute.TPUStrategy\n ) -> Callable[..., Any]:\n replicas_per_host = 
strategy.extended.num_replicas_per_host\n\n def Split(batch, replicas_per_host, axis=0):\n \"\"\"Splits a NestedMap into replicas_per_host pieces.\"\"\"\n def _SplitFn(t):\n return tf.sparse.split if isinstance(t, tf.SparseTensor) else tf.split\n\n split = batch.Transform(lambda t: _SplitFn(t)(t, replicas_per_host, axis))\n return [\n nest.map_structure_up_to(batch, lambda t: t[i], split) # pylint: disable=cell-var-from-loop\n for i in range(replicas_per_host)\n ]\n\n def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n \"\"\"Fetch and shard one batch per attached device.\"\"\"\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )\n\n def _Step(batch: py_utils.NestedMap):\n \"\"\"A single forward/backward step.\n\n Processes the given input batch and updates the distributed metrics\n accumulator. We use FProp (instead of FPropDefaultTheta) and\n _BPropForVariables (instead of BProp) in order to permit the tf.distribute\n library to handle threading values across devices.\n\n Args:\n batch: NestedMap of input batch data.\n \"\"\"\n with tf.name_scope('tpu_train'):\n with py_utils.GradientTape(persistent=True):\n batch.Update(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Dequeue(batch)\n )\n metrics_dict, _ = self.task.FPropDefaultTheta(batch)\n # py_utils.ComputeGradientsSimple() needs to access the tape, so BProp\n # needs to be within the GradientTape context.\n self.task.BProp()\n\n self._metrics_dict_structure = metrics_dict\n self._metrics_mgr.AccumulateStepMetrics(metrics_dict)\n\n @tf.function\n def _TpuFunction():\n \"\"\"Runs several training steps and returns a flattened metrics list.\"\"\"\n self._metrics_mgr.ResetState()\n\n for _ in tf.range(self._steps_per_loop):\n batch = _GetShardedBatch()\n # Note: running the enqueue in strategy.run() could potentially cause\n # deadlock and cause the job to hang. 
Here we run it outside.\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Enqueue(batch)\n strategy.run(_Step, args=(batch,))\n\n return self._metrics_mgr.FinalizeMetricsWithStructure(\n self._metrics_dict_structure\n )\n\n # Trace the train function so it can create the optimizer slot vars and save\n # them at step 0.\n return _TpuFunction.get_concrete_function()", "def test_local_gpu_elemwise_0():\r\n a = tensor.bmatrix()\r\n b = tensor.fmatrix()\r\n c = tensor.fmatrix()\r\n\r\n a_v = (numpy.random.rand(4, 5) * 10).astype(\"int8\")\r\n b_v = (numpy.random.rand(4, 5) * 10).astype(\"float32\")\r\n c_v = (numpy.random.rand(4, 5) * 10).astype(\"float32\")\r\n\r\n # Due to optimization order, this composite is created when all\r\n # the op are on the gpu.\r\n f = theano.function([a, b, c], [a + b + c], mode=mode_with_gpu)\r\n #theano.printing.debugprint(f)\r\n topo = f.maker.fgraph.toposort()\r\n assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1\r\n assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 1\r\n f(a_v, b_v, c_v)\r\n\r\n # Now test with the composite already on the cpu before we move it\r\n # to the gpu\r\n a_s = theano.scalar.int8()\r\n b_s = theano.scalar.float32()\r\n c_s = theano.scalar.float32()\r\n out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s + c_s])\r\n out_op = tensor.Elemwise(out_s)\r\n f = theano.function([a, b, c], [out_op(a, b, c)], mode=mode_with_gpu)\r\n #theano.printing.debugprint(f)\r\n topo = f.maker.fgraph.toposort()\r\n assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1\r\n assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 1\r\n f(a_v, b_v, c_v)", "def _invoke_input_fn_and_record_structure(self):\n enqueue_ops = []\n infeed_queues = []\n all_hooks = []\n num_hosts = self._ctx.num_hosts\n tpu_host_placement_fn = self._ctx.tpu_host_placement_function\n\n run_infeed_loop_on_coordinator = True\n\n if self._sharded_per_core:\n # Per-Core input pipeline deployment.\n # Invoke input pipeline for each core and placed on the corresponding\n # host.\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n enqueue_ops_fn, captured_infeed_queue = (\n generate_per_core_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn, self._inputs_structure_recorder))\n\n if _WRAP_INPUT_FN_INTO_WHILE_LOOP:\n run_infeed_loop_on_coordinator = False\n enqueue_ops.append(\n _wrap_computation_in_while_loop(\n device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n # Infeed_queue_getter must be called after enqueue_ops_fn is called.\n infeed_queues.append(captured_infeed_queue.get())\n\n else:\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n if self._ctx.is_input_per_host_with_iterators():\n enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (\n generate_per_host_v2_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, host_device, host_id))\n else:\n enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (\n generate_per_host_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, self._batch_axis,\n host_device, host_id))\n all_hooks.extend(hooks)\n\n # NOTE(xiejw): We dispatch here based on the return type of the\n # users 
`input_fn`.\n #\n # 1. If input_fn returns a Dataset instance, we initialize the\n # iterator outside of tf.while_loop, and call the iterator.get_next\n # inside tf.while_loop. This should be always safe.\n #\n # 2. If input_fn returns (features, labels), it is too late to wrap\n # them inside tf.while_loop, as resource initialization cannot be\n # handled in TF control flow properly. In this case, we will use\n # python loop to enqueue the data into TPU system. This may be\n # slow compared to the previous case.\n if is_dataset:\n run_infeed_loop_on_coordinator = False\n wrap_fn = (\n _wrap_computation_in_while_loop\n if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else\n _wrap_computation_in_while_loop_with_stopping_signals)\n enqueue_ops.append(\n wrap_fn(device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n infeed_queues.append(captured_infeed_queue.get())\n # infeed_queue is used to generate dequeue ops. The only thing it uses for\n # dequeue is dtypes and types. So, any one can be used. Here, grab the\n # first one.\n self._infeed_queue = infeed_queues[0]\n return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator", "def _send(x, dst=0):\n x = torch.tensor(x)\n x = to_device(x)\n dist.send(x, dst)\n del x \n torch.cuda.empty_cache()", "def run_on_host(self, tensor: torch.Tensor) -> torch.Tensor:\n return tensor", "def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n single_tpu_eval_step, host_calls, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))\n\n def multi_tpu_eval_steps_on_single_shard():\n return training_loop.repeat(\n iterations_per_loop_var,\n single_tpu_eval_step, [_ZERO_LOSS])\n\n (loss,) = tpu.shard(\n multi_tpu_eval_steps_on_single_shard,\n inputs=[],\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n scaffold = _get_scaffold(captured_scaffold_fn)\n return loss, host_calls, scaffold", "def gather(self, outputs, target_device=None, dim=0):\n if not self.configer.get('network', 'gathered'):\n if target_device is None:\n target_device = list(range(torch.cuda.device_count()))[0]\n\n return torch_gather(outputs, target_device, dim=dim)\n\n else:\n return outputs", "def InfeedTFFunc(self, inp_instance):\n inp_instance.DeviceLoopSetupEager()\n inp_instance.CreateTpuEnqueueOps()\n # `CreateTpuEnqueueOps` and `CreateCpuPassthroughEnqueueOps` must be in the\n # same place, because the former enqueues `_per_host_passthrough_batches`,\n # while the latter consumes it.\n inp_instance.CreateCpuPassthroughEnqueueOps()\n # `CreateCpuPassthroughEnqueueOps` and `DequeueCpuPassthrough` must be in\n # the same place, because the former enqueues `_host_queues`,\n # while the latter consumes it.\n cpu_pt = inp_instance.DequeueCpuPassthrough()\n return cpu_pt", "def serving_input_fn():\n feature_placeholder = tf.placeholder(tf.float32, [None, IMG_H, IMG_W,1])\n features = feature_placeholder\n return tf.estimator.export.TensorServingInputReceiver(features,\n feature_placeholder)", "def record(self, host_calls):\n\n for name, host_call in host_calls.items():\n host_fn, tensor_list_or_dict = host_call\n self._names.append(name)\n self._host_fns[name] = host_fn\n\n if isinstance(tensor_list_or_dict, dict):\n for (key, tensor) in six.iteritems(tensor_list_or_dict):\n self._tensor_keys[name].append(key)\n self._tensors[name].append(tensor)\n 
self._tensor_dtypes[name].append(tensor.dtype)\n self._tensor_shapes[name].append(tensor.shape)\n else:\n # List or tuple.\n self._tensor_keys[name] = None\n for tensor in tensor_list_or_dict:\n self._tensors[name].append(tensor)\n self._tensor_dtypes[name].append(tensor.dtype)\n self._tensor_shapes[name].append(tensor.shape)", "def forward(self, in_tensors: List[Tensor], out_tensors: List[Tensor]):\n pass", "def forward(self, in_tensors: List[Tensor], out_tensors: List[Tensor]):\n pass", "def create_cpu_hostcall(host_calls):\n\n _OutfeedHostCall.validate(host_calls)\n ret = {}\n for name, host_call in host_calls.items():\n host_fn, tensors = host_call\n if isinstance(tensors, (tuple, list)):\n ret[name] = host_fn(*tensors)\n else:\n # Must be dict.\n try:\n ret[name] = host_fn(**tensors)\n except TypeError as e:\n logging.warning(\n 'Exception while calling %s: %s. It is likely the tensors '\n '(%s[1]) do not match the '\n 'function\\'s arguments', name, e, name)\n raise e\n return ret", "def test_elemwise_fusion():\r\n shape = (3, 4)\r\n a = cuda.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'), 'a')\r\n b = tensor.fmatrix()\r\n c = tensor.fmatrix()\r\n f = pfunc([b, c], [a + b + c], mode=mode_with_gpu)\r\n topo = f.maker.fgraph.toposort()\r\n for i, node in enumerate(topo):\r\n print >> sys.stdout, i, node\r\n assert len(topo) == 4\r\n assert isinstance(topo[2].op.scalar_op, theano.scalar.basic.Composite)\r\n #let debugmode catch errors\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32'),\r\n theano._asarray(numpy.random.rand(*shape), dtype='float32'))", "def local_gpu_lazy_ifelse(node):\r\n if isinstance(node.op, theano.ifelse.IfElse) and not node.op.gpu:\r\n gpu_ifelse = theano.ifelse.IfElse(node.op.n_outs, gpu=True)\r\n outs_clients = reduce(list.__add__,\r\n [out.clients for out in node.outputs])\r\n if any([(i.owner and isinstance(i.owner.op, HostFromGpu))\r\n for i in node.inputs]) or any(\r\n [c != 'output' and c.op == gpu_from_host for c, idx\r\n in outs_clients]):\r\n\r\n c = node.inputs[0]\r\n outs = node.inputs[1:]\r\n # Should not happen, but just in case\r\n if isinstance(c.type, CudaNdarrayType):\r\n c = host_from_gpu(c)\r\n\r\n for i in range(len(outs)):\r\n if not isinstance(outs[i], CudaNdarrayType):\r\n outs[i] = gpu_from_host(outs[i])\r\n return [host_from_gpu(out) for out in\r\n gpu_ifelse.make_node(c, *outs).outputs]\r\n\r\n if isinstance(node.op, GpuFromHost):\r\n host_input = node.inputs[0]\r\n if (host_input.owner and\r\n isinstance(host_input.owner.op, theano.ifelse.IfElse) and\r\n not host_input.owner.op.gpu and\r\n # If there is more then 1 outputs, we can't replace it\r\n # here with a local optimizer as we replace the\r\n # GpuFromHost node and the other output of the if won't be\r\n # replaced.\r\n host_input.owner.op.n_outs == 1):\r\n gpu_ifelse = theano.ifelse.IfElse(host_input.owner.op.n_outs,\r\n gpu=True)\r\n\r\n c = host_input.owner.inputs[0]\r\n outs = host_input.owner.inputs[1:]\r\n # Should not happen, but just in case\r\n if isinstance(c.type, CudaNdarrayType):\r\n c = host_from_gpu(c)\r\n\r\n for i in range(len(outs)):\r\n if not isinstance(outs[i], CudaNdarrayType):\r\n outs[i] = gpu_from_host(outs[i])\r\n\r\n outs = gpu_ifelse.make_node(c, *outs).outputs\r\n return outs\r\n\r\n return False", "def run_network(inputs, viewdirs, fn, embed_fn, embeddirs_fn, netchunk=1024*32, attention_poses=None, intrinsic=None, training = False, images_features = None, world_fn = None, gather_func = None):\n 
#flattened points\n inputs_flat = tf.reshape(inputs, [-1, inputs.shape[-1]])\n embedded = embed_fn(inputs_flat)\n if viewdirs is not None:\n input_dirs = tf.broadcast_to(viewdirs[:, None], inputs.shape)\n input_dirs_flat = tf.reshape(input_dirs, [-1, input_dirs.shape[-1]])\n embedded_dirs = embeddirs_fn(input_dirs_flat)\n embedded = tf.concat([embedded,embedded_dirs],-1)\n\n outputs_flat, attention_cache = batchify(fn, netchunk,\n world_fn = world_fn, gather_func = gather_func)([embedded,\n attention_poses, intrinsic, images_features, inputs_flat], training)\n outputs = tf.reshape(outputs_flat, list(\n inputs.shape[:-1]) + [outputs_flat.shape[-1]])\n return outputs, attention_cache", "def local_gpu_elemwise_0(node):\r\n if (isinstance(node.op, tensor.Elemwise) and\r\n dtype_in_elemwise_supported(node.op)):\r\n if any([i.owner and\r\n isinstance(i.owner.op, HostFromGpu)\r\n for i in node.inputs]):\r\n if all([o.type.dtype == 'float32' for o in node.outputs]):\r\n # Don't set any inplace pattern.\r\n # gpu_inplace_elemwise_optimizer will do it later\r\n\r\n if isinstance(node.op.scalar_op, Erfinv):\r\n new_op = GpuElemwise(erfinv_gpu)\r\n else:\r\n try:\r\n new_op = GpuElemwise(node.op.scalar_op)\r\n except SupportCodeError:\r\n # This happens when scalar_op requires support code\r\n return False\r\n\r\n # first establish that float32 can store all inputs\r\n upcastable = set(['float32', 'int8', 'int16', 'uint8',\r\n 'uint16'])\r\n # case 1 - all inputs are already float32\r\n if all([i.type.dtype == 'float32' for i in node.inputs]):\r\n #TODO: change this when fusion makes Elemwise with multiple\r\n # outputs\r\n gpu_elemwise = new_op(*(gpu_from_host(i)\r\n for i in node.inputs))\r\n # case 2 - it is still ok if some inputs were upcast to float32\r\n elif all([i.type.dtype in upcastable\r\n for i in node.inputs]):\r\n # second - establish that a new node with upcasted inputs\r\n # has the same outputs types as the original node\r\n upcasted = node.op.make_node(*[tensor.cast(i, 'float32')\r\n for i in node.inputs])\r\n if [o.type for o in upcasted.outputs] ==\\\r\n [o.type for o in node.outputs]:\r\n\r\n new_inputs = [gpu_from_host(tensor.cast(i, 'float32'))\r\n for i in node.inputs]\r\n gpu_elemwise = new_op(*new_inputs)\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n gpu_elemwise = split_huge_add_or_mul(gpu_elemwise.owner)\r\n if not gpu_elemwise:\r\n return False\r\n if max_inputs_to_GpuElemwise(node) < len(gpu_elemwise.inputs):\r\n return False\r\n return [host_from_gpu(gpu_elemwise.outputs[0])]", "def make_sharded_output_tensor(\n output: DTensor, _device_mesh: Optional[DeviceMesh] = None\n) -> torch.Tensor:\n\n return output.to_local() # type: ignore[call-arg]", "def schedule_concatenate(outs):\n def vectorize(sch, tensor, vectorize_limit):\n \"\"\"Internal vectorization function for concatenate.\"\"\"\n inner_axis = s[tensor].op.axis[len(s[tensor].op.axis) - 1]\n inner_length = tensor.shape[len(tensor.shape) - 1].value\n if inner_length <= vectorize_limit:\n sch[tensor].vectorize(inner_axis)\n else:\n split_factor = 1\n for i in range(vectorize_limit, 1, -1):\n if inner_length % i == 0:\n split_factor = i\n break\n if split_factor > 1:\n _, inner_i = sch[tensor].split(inner_axis, split_factor)\n sch[tensor].vectorize(inner_i)\n\n outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs\n x = outs[0]\n s = tvm.create_schedule([x.op for x in outs])\n tvm.schedule.AutoInlineInjective(s)\n if len(s[x].op.axis) >= 5:\n fused = s[x].fuse(s[x].op.axis[0], s[x].op.axis[1], 
s[x].op.axis[2])\n vectorize(s, x, 64)\n s[x].parallel(fused)\n elif len(s[x].op.axis) >= 3:\n fused = s[x].fuse(s[x].op.axis[0], s[x].op.axis[1])\n s[x].parallel(fused)\n else:\n s[x].parallel(s[x].op.axis[0])\n return s", "def __call__(self, fn, *args, **kwargs):\n # construct lists of args and kwargs for each function\n if args:\n my_args = _transpose_list_of_lists(\n [_maybe_repeat(arg, self.n) for arg in args])\n else:\n my_args = [[] for _ in range(self.n)]\n my_kwargs = [{} for _ in range(self.n)]\n for k, v in six.iteritems(kwargs):\n vals = _maybe_repeat(v, self.n)\n for i in range(self.n):\n my_kwargs[i][k] = vals[i]\n\n # construct lists of functions\n fns = _maybe_repeat(fn, self.n)\n\n # apply fns\n outputs = []\n cache = {}\n load = dict([(d, 0) for d in self._devices])\n for device_id, device in enumerate(self._devices):\n\n def daisy_chain_getter(getter, name, *args, **kwargs):\n \"\"\"Get a variable and cache in a daisy chain.\"\"\"\n device_var_key = (device, name)\n if device_var_key in cache:\n # if we have the variable on the correct device, return it.\n return cache[device_var_key]\n if name in cache:\n # if we have it on a different device, copy it from the last device\n v = tf.identity(cache[name])\n else:\n var = getter(name, *args, **kwargs)\n v = tf.identity(var._ref()) # pylint: disable=protected-access\n # update the cache\n cache[name] = v\n cache[device_var_key] = v\n return v\n\n def balanced_device_setter(op):\n \"\"\"Balance variables to all devices.\"\"\"\n if op.type in {'Variable', 'VariableV2', 'VarHandleOp'}:\n # return self._sync_device\n min_load = min(load.values())\n min_load_devices = [d for d in load if load[d] == min_load]\n chosen_device = random.choice(min_load_devices)\n load[chosen_device] += op.outputs[0].get_shape().num_elements()\n return chosen_device\n return device\n\n def identity_device_setter(op):\n return device\n\n if self._mode == ModeKeys.TRAIN:\n custom_getter = daisy_chain_getter\n # device_setter = balanced_device_setter\n device_setter = device\n else:\n custom_getter = None\n device_setter = device\n\n # with tf.name_scope(\"parallel_{}\".format(device_id)):\n with tf.variable_scope(\n tf.get_variable_scope(),\n reuse=True if device_id > 0 or self._reuse else None,\n custom_getter=custom_getter):\n with tf.device(device_setter):\n outputs.append(fns[device_id](*my_args[device_id], **my_kwargs[device_id]))\n\n if isinstance(outputs[0], tuple):\n outputs = list(zip(*outputs))\n outputs = tuple([list(o) for o in outputs])\n return outputs", "def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n single_tpu_train_step, host_call, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))\n\n def multi_tpu_train_steps_on_single_shard():\n return training_loop.repeat(\n iterations_per_loop_var,\n single_tpu_train_step, [_INITIAL_LOSS])\n\n (loss,) = tpu.shard(\n multi_tpu_train_steps_on_single_shard,\n inputs=[],\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n scaffold = _get_scaffold(captured_scaffold_fn)\n return loss, host_call, scaffold", "def run_network(inputs, styles, viewdirs, fn, alpha, feature, embed_fn, embeddirs_fn, netchunk=1024 * 64):\n inputs_flat = torch.reshape(inputs, [-1, inputs.shape[-1]])\n embedded = embed_fn(inputs_flat)\n if viewdirs is not None:\n input_dirs = viewdirs[:, None].expand(inputs.shape)\n input_dirs_flat = 
torch.reshape(input_dirs, [-1, input_dirs.shape[-1]])\n embedded_dirs = embeddirs_fn(input_dirs_flat)\n embedded = torch.cat([embedded, embedded_dirs], -1)\n if alpha is not None:\n alpha = torch.reshape(alpha, [-1, 1])\n if feature is not None:\n feature = torch.reshape(feature, [-1, feature.shape[-1]])\n outputs_flat = batchify(fn, netchunk)(embedded, styles, alpha, feature)\n outputs = torch.reshape(outputs_flat, list(inputs.shape[:-1]) + [outputs_flat.shape[-1]])\n return outputs" ]
[ "0.63360953", "0.62956405", "0.6291536", "0.6241733", "0.58995396", "0.5864348", "0.5839464", "0.58250535", "0.56826615", "0.56309867", "0.5552339", "0.548088", "0.5467413", "0.5426072", "0.53615206", "0.53566736", "0.5268573", "0.5245989", "0.5235768", "0.5235768", "0.52312547", "0.52261984", "0.52259696", "0.52212703", "0.51924586", "0.5183451", "0.51798165", "0.5161087", "0.51564074", "0.5134606" ]
0.6630872
0
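The row above describes the outfeed host-call path: tensors emitted on each replica are dequeued, concatenated along dimension 0 across shards, and handed to a user-supplied host_fn on the first host's CPU. For orientation, a minimal sketch of how a host_call typically reaches this machinery from user code; the tf.contrib module paths, the toy model_fn, and the summary directory are assumptions of this sketch, not part of the row above:

import tensorflow as tf

def host_fn(global_step, loss):
    # Arguments arrive already concatenated across shards along dimension 0,
    # which is why they are passed below with an explicit leading dimension.
    writer = tf.contrib.summary.create_file_writer("/tmp/host_call_demo")
    with writer.as_default(), tf.contrib.summary.always_record_summaries():
        tf.contrib.summary.scalar("loss", tf.reduce_mean(loss),
                                  step=tf.cast(global_step[0], tf.int64))
        return tf.contrib.summary.all_summary_ops()

def model_fn(features, labels, mode, params):
    del labels, params  # unused in this toy example
    loss = tf.reduce_mean(tf.square(features["x"] - 1.0))
    global_step = tf.train.get_or_create_global_step()
    optimizer = tf.contrib.tpu.CrossShardOptimizer(
        tf.train.GradientDescentOptimizer(0.01))
    train_op = optimizer.minimize(loss, global_step=global_step)
    # host_call is a (fn, tensors) pair; every tensor keeps a leading
    # batch-like dimension, matching the scalar check in create_tpu_hostcall.
    host_call = (host_fn, [tf.reshape(global_step, [1]),
                           tf.reshape(loss, [1])])
    return tf.contrib.tpu.TPUEstimatorSpec(
        mode=mode, loss=loss, train_op=train_op, host_call=host_call)

Inside the estimator, the recorded (fn, tensors) pair is exactly the structure consumed by the outfeed host-call code shown in the row above, which rebuilds the arguments on the host before invoking the function.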
Returns a new model_fn, which wraps the TPU support.
def _augment_model_fn(self, model_fn, batch_axis): def _model_fn(features, labels, mode, config, params): """A Estimator `model_fn` for TPUEstimator.""" with self._ctx.with_mode(mode) as ctx: model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx) if mode != model_fn_lib.ModeKeys.PREDICT: is_export_mode = False else: # For export_savedmodel, input_fn is never passed to Estimator. So, by # checking the self._is_input_fn_invoked bit, we can know, given the # mode == PREDICT, it is the .predict API, not export_savedmodel API. if self._is_input_fn_invoked: is_export_mode = False else: is_export_mode = True # Clear the bit. self._is_input_fn_invoked = None if ctx.is_running_on_cpu(is_export_mode=is_export_mode): logging.info('Running %s on CPU', mode) return model_fn_wrapper.call_without_tpu( features, labels, is_export_mode=is_export_mode) assert labels is None, '`labels` passed to `model_fn` must be `None`.' # TPUEstimator._call_input_fn passes `input_fn` as features to here. assert callable(features), '`input_fn` is not callable.' input_fn = features input_holders = _InputPipeline(input_fn, batch_axis, ctx) enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = ( input_holders.generate_infeed_enqueue_ops_and_dequeue_fn()) graph = ops.get_default_graph() for enqueue_op in enqueue_ops: if isinstance(enqueue_op, list): graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op) else: graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op) if mode == model_fn_lib.ModeKeys.TRAIN: loss, host_call, scaffold = ( _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn)) host_ops = host_call.create_tpu_hostcall() if host_ops is None: host_ops = [] hooks = [ TPUInfeedOutfeedSessionHook( ctx, enqueue_ops, host_ops, run_infeed_loop_on_coordinator=( run_infeed_loop_on_coordinator)), ExamplesPerSecondHook(ctx.global_batch_size, output_dir=self.model_dir), InstallSignalHandlerHook(), training.LoggingTensorHook( { 'loss': array_ops.identity(loss), 'step': training.get_global_step() }, every_n_secs=30) ] + input_hooks chief_hooks = [] if (self._config.save_checkpoints_secs or self._config.save_checkpoints_steps): chief_hooks.append( training.CheckpointSaverHook( self.model_dir, save_secs=self._config.save_checkpoints_secs, save_steps=self._config.save_checkpoints_steps, steps_per_run=self._config.tpu_config.iterations_per_loop, scaffold=scaffold)) summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss) with ops.control_dependencies([loss]): update_ops = _sync_variables_ops() # Validate the TPU training graph to catch basic errors _validate_tpu_training_graph() train_op = control_flow_ops.group(*update_ops) graph.add_to_collection(_TPU_TRAIN_OP, train_op) return model_fn_lib.EstimatorSpec( mode, loss=loss, training_chief_hooks=chief_hooks, training_hooks=hooks, train_op=train_op, scaffold=scaffold) if mode == model_fn_lib.ModeKeys.EVAL: total_loss, host_calls, scaffold = _eval_on_tpu_system( ctx, model_fn_wrapper, dequeue_fn) iterations_per_loop_var = _create_or_get_iterations_per_loop() mean_loss = math_ops.div(total_loss, math_ops.cast( iterations_per_loop_var, dtype=total_loss.dtype)) # Creates a dummy metric update_op for all metrics. Estimator expects # all metrics in eval_metric_ops have update_op and calls them one by # one. The real metric update_ops are invoked in a separated thread. # So, here give Estimator the dummy op for all metrics. 
with ops.control_dependencies([mean_loss]): # After TPU evaluation computation is done (the mean_loss tensor), # reads all variables back from TPU and updates the eval step # counter properly internal_ops_to_run = _sync_variables_ops() internal_ops_to_run.append( _increase_eval_step_op(iterations_per_loop_var)) with ops.control_dependencies(internal_ops_to_run): dummy_update_op = control_flow_ops.no_op() host_call_ret = host_calls.create_tpu_hostcall() eval_metric_ops = {} eval_update_ops = [] for k, v in host_call_ret['eval_metrics'].items(): eval_metric_ops[k] = (v[0], dummy_update_op) eval_update_ops.append(v[1]) if 'host_call' not in host_call_ret: host_ops = [] else: host_ops = host_call_ret['host_call'] hooks = [ TPUInfeedOutfeedSessionHook( ctx, enqueue_ops, eval_update_ops + host_ops, run_infeed_loop_on_coordinator=( run_infeed_loop_on_coordinator)), ] + input_hooks return model_fn_lib.EstimatorSpec( mode, loss=mean_loss, evaluation_hooks=hooks, eval_metric_ops=eval_metric_ops, scaffold=scaffold) # Predict assert mode == model_fn_lib.ModeKeys.PREDICT dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system( ctx, model_fn_wrapper, dequeue_fn) with ops.control_dependencies([dummy_predict_op]): internal_ops_to_run = _sync_variables_ops() with ops.control_dependencies(internal_ops_to_run): dummy_predict_op = control_flow_ops.no_op() # In train and evaluation, the main TPU program is passed to monitored # training session to run. Infeed enqueue and outfeed dequeue are # executed in side threads. This is not the configuration for # prediction mode. # # For prediction, the Estimator executes the EstimatorSpec.predictions # directly and yield the element (via generator) to call site. So, the # outfeed based prediction must be passed to MonitoredSession directly. # Other parts of the TPU execution are organized as follows. # # 1. All outfeed based Tensors must be grouped with predictions Tensors # to form a single invocation. This avoid the issue we might trigger # multiple outfeeds incorrectly. To achieve this, `host_call` is # placed in control_dependencies of `stopping_signals`, and # `stopping_signals` is passed into _StoppingPredictHook, which sets # the `stopping_signals` as SessionRunArgs. MonitoredSession merges # all SessionRunArgs with the fetch in session.run together. # # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue) # are grouped together. They will be launched once and only once in # side threads and they quit naturally according to the SAME stopping # condition. enqueue_ops.append(dummy_predict_op) host_call_ret = host_calls.create_tpu_hostcall() if 'host_call' not in host_call_ret: host_ops = [] else: host_ops = host_call_ret['host_call'] predictions = host_call_ret['predictions'] _verify_cross_hosts_transfer_size( predictions, message=( 'The estimated size for TPUEstimatorSpec.predictions is too ' 'large.')) signals = host_call_ret['signals'] with ops.control_dependencies(host_ops): host_ops = [] # Empty, we do do not need it anymore. scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal( signals) predictions = _PaddingSignals.slice_tensor_or_dict( predictions, signals) hooks = [ _StoppingPredictHook(scalar_stopping_signal), TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops, host_ops), ] + input_hooks return model_fn_lib.EstimatorSpec( mode, prediction_hooks=hooks, predictions=predictions, scaffold=scaffold) return _model_fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model_fn(feature_columns):\n def _model_fn(features, mode, params):\n \"\"\"Model Function.\"\"\"\n logits = logits_fn(features, feature_columns, params)\n labels = tf.squeeze(features[\"label\"])\n\n if mode == tf_estimator.ModeKeys.EVAL:\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits\n ))\n\n def metric_fn(labels, logits):\n labels = tf.cast(labels, tf.int64)\n return {\n \"recall@1\": tf.metrics.recall_at_k(labels, logits, 1),\n \"recall@5\": tf.metrics.recall_at_k(labels, logits, 5)\n }\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=(metric_fn, [labels, logits]))\n\n elif mode == tf_estimator.ModeKeys.TRAIN:\n\n optimizer = tf.train.AdamOptimizer(\n learning_rate=params[\"learning_rate\"], beta1=params[\"beta1\"],\n beta2=params[\"beta2\"], epsilon=params[\"epsilon\"])\n optimizer = tf.tpu.CrossShardOptimizer(optimizer)\n\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits,\n ))\n\n train_op = optimizer.minimize(loss, tf.train.get_global_step())\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode, loss=loss, train_op=train_op)\n\n else:\n raise NotImplementedError\n return _model_fn", "def _model_fn(features, labels, mode, config, params):\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n 
save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. 
To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)", "def ufunc_model(name):\n ufunc = getattr(np, name)\n nin = ufunc.nin\n nout = ufunc.nout\n if nin == 1:\n separable = True\n\n def evaluate(self, x):\n return self.func(x)\n\n else:\n separable = False\n\n def evaluate(self, x, y):\n return self.func(x, y)\n\n klass_name = _make_class_name(name)\n\n members = {\n \"n_inputs\": nin,\n \"n_outputs\": nout,\n \"func\": ufunc,\n \"linear\": False,\n \"fittable\": False,\n \"_separable\": separable,\n \"_is_dynamic\": True,\n \"evaluate\": evaluate,\n }\n\n klass = type(str(klass_name), (_NPUfuncModel,), members)\n klass.__module__ = \"astropy.modeling.math_functions\"\n return klass", "def create_tpu_estimator(model_fn, feature_columns, params):\n\n tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n params[\"tpu\"],\n zone=params[\"tpu_zone\"],\n project=params[\"gcp_project\"],\n coordinator_name=\"coordinator\")\n\n config = tf_estimator.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n model_dir=params[\"model_dir\"],\n tpu_config=tf_estimator.tpu.TPUConfig(\n iterations_per_loop=params[\"iterations_per_loop\"],\n experimental_host_call_every_n_steps=100,\n per_host_input_for_training=tf_estimator.tpu.InputPipelineConfig\n .PER_HOST_V2))\n\n return tf_estimator.tpu.TPUEstimator(\n use_tpu=params[\"use_tpu\"],\n model_fn=model_fn,\n config=config,\n train_batch_size=params[\"global_batch_size\"],\n eval_batch_size=params[\"eval_global_batch_size\"],\n params=params,\n embedding_config_spec=tf_estimator.tpu.experimental.EmbeddingConfigSpec(\n feature_columns=feature_columns,\n pipeline_execution_with_tensor_core=params[\"pipeline_execution\"],\n optimization_parameters=tf.tpu.experimental.AdagradParameters(\n learning_rate=params[\"learning_rate\"],\n use_gradient_accumulation=params[\"use_gradient_accumulation\"])))", "def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n 
weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)", "def __init__(self,\n model_fn=None,\n model_dir=None,\n config=None,\n params=None,\n use_tpu=True,\n train_batch_size=None,\n eval_batch_size=None,\n predict_batch_size=None,\n batch_axis=None):\n if config is None or not isinstance(config, tpu_config.RunConfig):\n raise ValueError(\n '`config` must be provided with type `tpu_config.RunConfig`')\n\n if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):\n raise ValueError('{} are reserved keys but existed in params {}.'.format(\n _RESERVED_PARAMS_KEYS, params))\n\n if use_tpu:\n # Perform some very basic validations. More validations will be found in\n # _TPUContext.\n if train_batch_size is None:\n raise ValueError('`train_batch_size` cannot be `None`')\n util_lib.check_positive_integer(train_batch_size, 'train_batch_size')\n\n if (config.tpu_config.per_host_input_for_training is\n tpu_config.InputPipelineConfig.PER_SHARD_V1 and\n config.tpu_config.computation_shape):\n raise ValueError(\n 'Model parallelism only supports per host input for training. '\n 'Please adjust TPURunconfig.per_host_input_for_training.')\n\n if eval_batch_size is not None:\n util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')\n\n if predict_batch_size is not None:\n util_lib.check_positive_integer(predict_batch_size,\n 'predict_batch_size')\n\n # Verifies the model_fn signature according to Estimator framework.\n estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access\n # We cannot store config and params in this constructor as parent\n # constructor might change them, such as assigning a temp dir for\n # config.model_dir.\n model_function = self._augment_model_fn(model_fn, batch_axis)\n\n # Passing non-None params as wrapped model_fn has it.\n params = params or {}\n super(TPUEstimator, self).__init__(\n model_fn=model_function,\n model_dir=model_dir,\n config=config,\n params=params)\n self._iterations_per_training_loop = (\n self._config.tpu_config.iterations_per_loop)\n\n # All properties passed to _TPUContext are immutable.\n # pylint: disable=protected-access\n self._ctx = tpu_context._get_tpu_context(\n self._config, train_batch_size,\n eval_batch_size, predict_batch_size,\n use_tpu)\n\n self._is_input_fn_invoked = None", "def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n single_tpu_train_step, host_call, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))\n\n def multi_tpu_train_steps_on_single_shard():\n return training_loop.repeat(\n iterations_per_loop_var,\n single_tpu_train_step, [_INITIAL_LOSS])\n\n (loss,) = tpu.shard(\n multi_tpu_train_steps_on_single_shard,\n inputs=[],\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n scaffold = _get_scaffold(captured_scaffold_fn)\n return loss, host_call, scaffold", "def tff_model_fn():\n keras_model = load_model(FLAGS.batch_size)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n return 
simple_fedavg_tf.KerasModelWrapper(keras_model,\n test_data.element_spec, loss)", "def _tpu_build(self):\n def _define_model(features, labels, mode, params):\n data_source = (features, labels)\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(\n mode=mode, loss=losses, eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(\n mode=mode, predictions=outputs\n )\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error('Implementati Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(\n mode=mode, loss=losses[0], train_op=self.optimize_ops[0]\n )\n\n tpu_name = ['node-1'] # TODO Bring outside\n tpu_iterations = 500 # TODO Bring outside\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n\n run_config = tf.contrib.tpu.RunConfig(\n model_dir=self.output_path,\n cluster=tpu_cluster_resolver,\n session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations),\n )\n\n self.estimator = tpu.TPUEstimator(\n model_fn=_define_model,\n use_tpu=True,\n train_batch_size=32*4, #self.dataset['train'].batch_size,\n eval_batch_size=32*4, #self.dataset['validation'].batch_size,\n config=run_config,\n params={\"data_dir\": self.data_dir}\n )", "def model_fn_builder(config: electra_files.configure_finetuning.FinetuningConfig, tasks,\n num_train_steps, pretraining_config=None):\n\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n utils.log(\"Building model...\")\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = FinetuningModel(\n config, tasks, is_training, features, num_train_steps)\n\n if pretraining_config is not None:\n # init_checkpoint = tf.train.latest_checkpoint(pretraining_config.model_dir)\n init_checkpoint = pretraining_config['checkpoint']\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, _ = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n # Build model for training or prediction\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.loss, config.learning_rate, num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_proportion=config.warmup_proportion,\n layerwise_lr_decay_power=config.layerwise_lr_decay,\n n_transformer_layers=model.bert_config.num_hidden_layers\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.loss),\n num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])\n else:\n assert mode == tf.estimator.ModeKeys.PREDICT\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=utils.flatten_dict(model.outputs),\n scaffold_fn=scaffold_fn)\n\n utils.log(\"Building complete\")\n return output_spec\n\n return model_fn", "def model_fn_builder(model_config,\n train_params):\n def model_fn(features, labels, 
mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = getattr(models, model_config.model_name)(config=model_config,\n is_training=is_training)\n _ = model(input_ids, input_mask=input_mask, token_type_ids=segment_ids)\n\n # TODO (@zhaoshenjian.01): check conditional_jit_scope\n # split loss calculation across batch\n batch_splits = train_params.get(\"batch_splits\", 1)\n if batch_splits == 1:\n # sparse_softmax_cross_entropy_with_logits\n masked_lm_output_dict = get_masked_lm_output(model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights)\n else:\n # use for large vocab\n masked_lm_output_dict = get_masked_lm_output_split_batch(\n model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n batch_splits=batch_splits)\n\n masked_lm_loss = masked_lm_output_dict[\"loss\"]\n\n use_nsp = train_params.get(\"use_nsp\", True)\n if use_nsp:\n next_sentence_labels = features[\"next_sentence_labels\"]\n next_sentence_output_dict = get_next_sentence_output(\n model_config, model.get_pooled_output(), next_sentence_labels)\n next_sentence_loss = next_sentence_output_dict[\"loss\"]\n else:\n next_sentence_loss = 0\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.compat.v1.trainable_variables()\n # run init\n init_checkpoint = train_params.get(\"init_checkpoint\")\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map,\n initialized_variable_names) = get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint,\n assignment_map)\n return tf.train.Scaffold()\n scaffold_fn = tpu_scaffold\n logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n logging.info(\" name = {}, shape = {} {}\".format(var.name, var.shape,\n init_string))\n\n # default `bert_decay` lr_scheduler\n lr_params = train_params.get(\n 'lr_scheduler', {\n 'name': 'bert_decay',\n 'learning_rate': 1e-4,\n 'warmup_steps': 10000,\n 'num_train_steps': 1000000\n })\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op, _ = optimizers.create_optimizer(\n loss=total_loss,\n init_lr=lr_params['learning_rate'],\n num_train_steps=lr_params['num_train_steps'],\n num_warmup_steps=lr_params['warmup_steps'])\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n return output_spec\n raise NotImplementedError\n\n return model_fn", "def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n single_tpu_eval_step, host_calls, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))\n\n def 
multi_tpu_eval_steps_on_single_shard():\n return training_loop.repeat(\n iterations_per_loop_var,\n single_tpu_eval_step, [_ZERO_LOSS])\n\n (loss,) = tpu.shard(\n multi_tpu_eval_steps_on_single_shard,\n inputs=[],\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n scaffold = _get_scaffold(captured_scaffold_fn)\n return loss, host_calls, scaffold", "def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n num_cores = ctx.num_cores\n\n single_tpu_predict_step, host_calls, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))\n\n def multi_tpu_predict_steps_on_single_shard():\n\n def cond(scalar_stopping_signal):\n return math_ops.logical_not(\n _StopSignals.should_stop(scalar_stopping_signal))\n\n inputs = [_StopSignals.NON_STOPPING_SIGNAL]\n outputs = training_loop.while_loop(\n cond, single_tpu_predict_step, inputs=inputs, name=b'loop')\n return outputs\n\n (dummy_predict_op,) = tpu.shard(\n multi_tpu_predict_steps_on_single_shard,\n inputs=[],\n num_shards=num_cores,\n outputs_from_all_shards=False)\n\n scaffold = _get_scaffold(captured_scaffold_fn)\n return dummy_predict_op, host_calls, scaffold", "def with_cpu(ops, model):\n ...", "def create(model: TModel) -> ModelTransformer:\n model_backend = get_backend(model)\n if model_backend == BackendType.ONNX:\n from nncf.onnx.graph.model_transformer import ONNXModelTransformer\n\n return ONNXModelTransformer(model)\n if model_backend == BackendType.OPENVINO:\n from nncf.openvino.graph.model_transformer import OVModelTransformer\n\n return OVModelTransformer(model)\n if model_backend == BackendType.TORCH:\n from nncf.torch.model_transformer import PTModelTransformer\n\n return PTModelTransformer(model)\n raise RuntimeError(\n \"Cannot create backend-specific model transformer because {} is not supported!\".format(model_backend)\n )", "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return output_spec\n\n return model_fn", "def transform_npu_function(self, _, func: relay.Function) -> relay.Function:\n\n tir_mod, const_dict = _lower_to_tir(func, self.scheduler)\n\n for param in const_dict.keys():\n const_dict[param] = tvm.nd.array(const_dict[param])\n\n compiler_name = \"ethos-u\"\n primfunc = tir_mod[\"main\"]\n primfunc = primfunc.with_attr(\"global_symbol\", func.attrs[\"global_symbol\"])\n primfunc = primfunc.with_attr(\"ethos-u.constants\", const_dict)\n primfunc = primfunc.with_attr(\"target\", 
tvm.target.Target(compiler_name))\n return primfunc", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = model_function.create(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu,\n scope=(\"loss\" if model_function.freeze else None))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n if model_function.task_type == TaskType.CLASSIFICATION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n elif model_function.task_type == TaskType.REGRESSION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n ground_truth = tf.log1p(tf.clip_by_value(tf.cast(label_ids, tf.float32), 1e-8, 1e+30))\n predictions = tf.log1p(tf.clip_by_value(logits, 1e-8, 1e+30))\n return {\n \"eval_loss\": tf.metrics.mean(per_example_loss),\n \"another_loss\": tf.metrics.mean_squared_error(ground_truth, predictions)\n }\n else:\n raise NotImplementedError()\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n predictions = {\n \"result\": probabilities\n }\n print(probabilities.shape)\n print(type(probabilities))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec", "def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps):\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not 
is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)\n #f1_score = tf.contrib.metrics.f1_score(\n # label_ids,\n # predicted_labels)\n #auc = tf.metrics.auc(\n # label_ids,\n # predicted_labels)\n #recall = tf.metrics.recall(\n # label_ids,\n # predicted_labels)\n #precision = tf.metrics.precision(\n # label_ids,\n # predicted_labels)\n #true_pos = tf.metrics.true_positives(\n # label_ids,\n # predicted_labels)\n #true_neg = tf.metrics.true_negatives(\n # label_ids,\n # predicted_labels)\n #false_pos = tf.metrics.false_positives(\n # label_ids,\n # predicted_labels)\n #false_neg = tf.metrics.false_negatives(\n # label_ids,\n # predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n # \"f1_score\": f1_score,\n #\"auc\": auc,\n # \"precision\": precision,\n # \"recall\": recall,\n # \"true_positives\": true_pos,\n # \"true_negatives\": true_neg,\n # \"false_positives\": false_pos,\n # \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn", "def model_fn(self, features, labels, mode, params, config):\n raise NotImplementedError()", "def _TpuFunction():\n self._metrics_mgr.ResetState()\n\n for _ in tf.range(self._steps_per_loop):\n batch = _GetShardedBatch()\n # Note: running the enqueue in strategy.run() could potentially cause\n # deadlock and cause the job to hang. 
Here we run it outside.\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Enqueue(batch)\n strategy.run(_Step, args=(batch,))\n\n return self._metrics_mgr.FinalizeMetricsWithStructure(\n self._metrics_dict_structure\n )", "def _create_model(self):\n if torch.cuda.is_available():\n model = torch.jit.load(self.torch_jit).cuda()\n else:\n model = torch.jit.load(self.torch_jit)\n model.eval()\n return model", "def model_fn_builder(config):\n init_checkpoint = config.init_checkpoint\n coref_model = CorefQAModel(config)\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n predictions, total_loss = coref_model.forward(features, is_training)\n doc_idx, subtoken_map, top_span_starts, top_span_ends, antecedent_starts, antecedent_ends, antecedent_scores = predictions\n tvars = tf.trainable_variables()\n initialized_variables = {}\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, initialized_variables = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if config.use_tpu:\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \", *INIT_FROM_CKPT*\" if var.name in initialized_variables else \"\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape, init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = create_custom_optimizer(total_loss, config)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn(loss):\n return {\"eval_loss\": tf.metrics.mean(loss)}\n\n eval_metrics = (metric_fn, [total_loss])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"doc_idx\": doc_idx, \"subtoken_map\": subtoken_map,\n \"top_span_starts\": top_span_starts, \"top_span_ends\": top_span_ends,\n \"antecedent_starts\": antecedent_starts, \"antecedent_ends\": antecedent_ends,\n \"antecedent_scores\": antecedent_scores, \"loss\": total_loss},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn", "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings, model_function):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = model_function.create(\n bert_config, is_training, input_ids, input_mask, segment_ids, 
label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu,\n scope=(\"loss\" if model_function.freeze else None))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n if model_function.task_type == TaskType.CLASSIFICATION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n elif model_function.task_type == TaskType.REGRESSION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n ground_truth = tf.log1p(tf.clip_by_value(tf.cast(label_ids, tf.float32), 1e-8, 1e+30))\n predictions = tf.log1p(tf.clip_by_value(logits, 1e-8, 1e+30))\n return {\n \"eval_loss\": tf.metrics.mean(per_example_loss),\n \"another_loss\": tf.metrics.mean_squared_error(ground_truth, predictions)\n }\n else:\n raise NotImplementedError()\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n predictions = {\n \"result\": probabilities\n }\n print(probabilities.shape)\n print(type(probabilities))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return output_spec", "def model_fn_builder(self, bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, 
params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (subject_logits, property_logits, value_logits) = self.create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n params=params,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions, depth):\n one_hot_positions = tf.one_hot(\n positions, depth=depth, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n # subject, property, value로 나오도록\n subject_label = features[\"subject\"]\n property_label = features[\"property\"]\n value_label = features[\"value\"]\n res_length = params[\"res_length\"]\n ont_length = params[\"ont_length\"]\n\n subject_loss = compute_loss(subject_logits, subject_label, res_length)\n property_loss = compute_loss(property_logits, property_label, ont_length)\n value_loss = compute_loss(value_logits, value_label, res_length)\n\n total_loss = (subject_loss + property_loss + value_loss) / 3.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"subject_logits\": subject_logits,\n \"property_logits\": property_logits,\n \"value_logits\": value_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn", "def construct_model_fn(problem, optimizer_class, base_optimizer_class,\n eval_weights=None, eval_num_samples=10,\n training_params_class=None,\n training_params_conditioning_class=None,\n base_optimizer_conditioning_class=None):\n def model_fn(features, mode, params):\n \"\"\"Returns a TPU estimator spec for the task at hand.\"\"\"\n problem.initialize_model()\n optimizer = optimizer_class(problem, batch_size=params[\"batch_size\"])\n training_params = 
training_params_class()\n learning_rate_normal = get_learning_rate(training_params)\n separate_conditioning_optimizer = (\n training_params_conditioning_class and base_optimizer_conditioning_class\n and isinstance(optimizer,\n optimizers.MultiLossOptimizerWithConditioning))\n if not separate_conditioning_optimizer and (\n training_params_conditioning_class\n or base_optimizer_conditioning_class):\n raise ValueError(\"training_params_conditioning_class and \"\n \"base_optimizer_conditioning_class should be provided \"\n \"together and only when the optimizer is \"\n \"MultiLossOptimizerWithConditioning.\")\n\n tf.logging.info(\"separate_conditioning_optimizer: %s\",\n separate_conditioning_optimizer)\n\n if separate_conditioning_optimizer:\n training_params_conditioning = training_params_conditioning_class()\n learning_rate_conditioning = get_learning_rate(\n training_params_conditioning)\n\n if mode == tf_estimator.ModeKeys.TRAIN:\n\n base_optimizer = get_optimizer(base_optimizer_class, learning_rate_normal,\n params[\"use_tpu\"])\n if separate_conditioning_optimizer:\n base_optimizer_conditioning = get_optimizer(\n base_optimizer_conditioning_class, learning_rate_conditioning,\n params[\"use_tpu\"])\n loss, opt_step = optimizer.compute_train_loss_and_update_op(\n features, base_optimizer, base_optimizer_conditioning)\n all_vars_str = \"\\n\".join([str(v) for v in optimizer.all_vars])\n normal_vars_str = \"\\n\".join([str(v) for v in optimizer.normal_vars])\n conditioning_vars_str = \"\\n\".join([str(v) for\n v in optimizer.conditioning_vars])\n tf.logging.info(\"\\n\\nall_vars\\n %s\", all_vars_str)\n tf.logging.info(\"\\n\\nnormal_vars\\n %s\", normal_vars_str)\n tf.logging.info(\"\\n\\nconditioning_vars\\n %s\", conditioning_vars_str)\n else:\n loss, opt_step = optimizer.compute_train_loss_and_update_op(\n features, base_optimizer)\n\n # weight decay op\n decay_op = get_decay_op(training_params.weight_decay,\n learning_rate_normal, opt_step,\n vars_to_decay=optimizer.normal_vars)\n if separate_conditioning_optimizer:\n decay_op_conditioning = get_decay_op(\n training_params_conditioning.weight_decay,\n learning_rate_conditioning,\n opt_step, vars_to_decay=optimizer.conditioning_vars)\n decay_op = tf.group([decay_op, decay_op_conditioning])\n # batch norm update ops\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n train_op = tf.group([opt_step, decay_op] + update_ops)\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode, loss=loss, train_op=train_op)\n elif mode == tf_estimator.ModeKeys.EVAL:\n def unstack_metrics(**metrics):\n \"\"\"Unstack separate metrics from one big aggregate tensor.\n\n This is needed because otherwise evaluation on TPU with many metrics\n gets horribly slow. Concatenating all metrics into one tensor makes\n things much better.\n\n Args:\n **metrics: Dict[ Str: tf.Tensor ]. Dictionary with one element, for\n which the key the concatenation of all metric names separated by \"!\"\n and the value are all metric values stacked along axis 1.\n\n Returns:\n metrics_dict: Dict[ Str: tf.Tensor ]. 
Dictionary mapping metrics names\n to tensors with their per-sample values.\n \"\"\"\n if len(metrics) != 1:\n raise ValueError(\"Stacked metrics dict should have one element, got \"\n \"{}\".format(len(metrics)))\n names_stacked = list(metrics.keys())[0]\n values_stacked = metrics[names_stacked]\n names = names_stacked.split(\"!\")\n values = tf.unstack(values_stacked, axis=1)\n return {name: tf.metrics.mean(value) for name, value in\n zip(names, values)}\n\n loss = optimizer.compute_eval_loss(features)\n\n if isinstance(optimizer, optimizers.MultiLossOptimizerWithConditioning):\n sampled_weights = distributions.get_samples_as_dicts(\n eval_weights, num_samples=eval_num_samples,\n names=problem.losses_keys, seed=17)\n all_metrics = {}\n for idx, weights in enumerate(sampled_weights):\n with tf.variable_scope(\"\", reuse=tf.AUTO_REUSE):\n losses_id, metrics_id = \\\n optimizer.compute_eval_losses_and_metrics_for_weights(features,\n weights)\n all_metrics.update({\"{}/{}\".format(key, idx): value\n for key, value in losses_id.items()})\n all_metrics.update({\"{}/{}\".format(key, idx): value\n for key, value in metrics_id.items()})\n full_loss = 0.\n for loss_name in losses_id.keys():\n full_loss += weights[loss_name] * losses_id[loss_name]\n all_metrics.update({\"full_loss/{}\".format(idx): full_loss})\n else:\n with tf.variable_scope(\"\", reuse=tf.AUTO_REUSE):\n losses, metrics = problem.losses_and_metrics(features, training=False)\n all_metrics = losses\n all_metrics.update(metrics)\n metrics_shape_out = all_metrics[list(all_metrics.keys())[0]].get_shape()\n # Need this broadcasting because on TPU all output tensors should have\n # the same shape\n all_metrics.update(\n {\"learning_rate_normal\": tf.broadcast_to(\n learning_rate_normal, metrics_shape_out)})\n if separate_conditioning_optimizer:\n all_metrics.update(\n {\"learning_rate_conditioning\": tf.broadcast_to(\n learning_rate_conditioning, metrics_shape_out)})\n # Stacking all metrics for efficiency (otherwise eval is horribly slow)\n sorted_keys = sorted(all_metrics.keys())\n sorted_values = [all_metrics[key] for key in sorted_keys]\n metrics_stacked = {\"!\".join(sorted_keys): tf.stack(sorted_values, axis=1)}\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=(unstack_metrics, metrics_stacked))\n else:\n raise ValueError(\"Unknown mode: {}\".format(mode))\n\n return model_fn", "def model_fn_builder(num_labels, learning_rate, num_train_steps,\n num_warmup_steps):\n\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels)\n true_pos 
= tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n (total_loss, per_example_loss, log_probs) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n scaffold_fn = None\n initialized_variable_names = []\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n\n elif mode == tf.estimator.ModeKeys.PREDICT:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\n \"log_probs\": log_probs,\n \"label_ids\": label_ids,\n },\n scaffold_fn=scaffold_fn)\n\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec", "def cudify(fn):\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n result = fn(*args, **kwargs)\n return cuda_if_gpu(result)\n\n return wrapper" ]
[ "0.65870714", "0.65342593", "0.6415281", "0.640519", "0.62561655", "0.62051016", "0.6142609", "0.61087984", "0.60520357", "0.6005414", "0.60032", "0.59343463", "0.5921695", "0.591066", "0.5891964", "0.5812297", "0.58100814", "0.5809237", "0.578478", "0.5782048", "0.5770619", "0.5714441", "0.57048035", "0.56787795", "0.5643806", "0.5636934", "0.5607809", "0.5596778", "0.5595365", "0.5589659" ]
0.67717654
0
A Estimator `model_fn` for TPUEstimator.
def _model_fn(features, labels, mode, config, params):
  with self._ctx.with_mode(mode) as ctx:
    model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)

    if mode != model_fn_lib.ModeKeys.PREDICT:
      is_export_mode = False
    else:
      # For export_savedmodel, input_fn is never passed to Estimator. So, by
      # checking the self._is_input_fn_invoked bit, we can know, given the
      # mode == PREDICT, it is the .predict API, not export_savedmodel API.
      if self._is_input_fn_invoked:
        is_export_mode = False
      else:
        is_export_mode = True

    # Clear the bit.
    self._is_input_fn_invoked = None

    if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
      logging.info('Running %s on CPU', mode)
      return model_fn_wrapper.call_without_tpu(
          features, labels, is_export_mode=is_export_mode)

    assert labels is None, '`labels` passed to `model_fn` must be `None`.'
    # TPUEstimator._call_input_fn passes `input_fn` as features to here.
    assert callable(features), '`input_fn` is not callable.'
    input_fn = features

    input_holders = _InputPipeline(input_fn, batch_axis, ctx)
    enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
        input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())

    graph = ops.get_default_graph()
    for enqueue_op in enqueue_ops:
      if isinstance(enqueue_op, list):
        graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
      else:
        graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)

    if mode == model_fn_lib.ModeKeys.TRAIN:
      loss, host_call, scaffold = (
          _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
      host_ops = host_call.create_tpu_hostcall()
      if host_ops is None:
        host_ops = []
      hooks = [
          TPUInfeedOutfeedSessionHook(
              ctx,
              enqueue_ops,
              host_ops,
              run_infeed_loop_on_coordinator=(
                  run_infeed_loop_on_coordinator)),
          ExamplesPerSecondHook(ctx.global_batch_size,
                                output_dir=self.model_dir),
          InstallSignalHandlerHook(),
          training.LoggingTensorHook(
              {
                  'loss': array_ops.identity(loss),
                  'step': training.get_global_step()
              },
              every_n_secs=30)
      ] + input_hooks
      chief_hooks = []
      if (self._config.save_checkpoints_secs or
          self._config.save_checkpoints_steps):
        chief_hooks.append(
            training.CheckpointSaverHook(
                self.model_dir,
                save_secs=self._config.save_checkpoints_secs,
                save_steps=self._config.save_checkpoints_steps,
                steps_per_run=self._config.tpu_config.iterations_per_loop,
                scaffold=scaffold))
      summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
      with ops.control_dependencies([loss]):
        update_ops = _sync_variables_ops()

      # Validate the TPU training graph to catch basic errors
      _validate_tpu_training_graph()

      train_op = control_flow_ops.group(*update_ops)
      graph.add_to_collection(_TPU_TRAIN_OP, train_op)

      return model_fn_lib.EstimatorSpec(
          mode,
          loss=loss,
          training_chief_hooks=chief_hooks,
          training_hooks=hooks,
          train_op=train_op,
          scaffold=scaffold)

    if mode == model_fn_lib.ModeKeys.EVAL:
      total_loss, host_calls, scaffold = _eval_on_tpu_system(
          ctx, model_fn_wrapper, dequeue_fn)
      iterations_per_loop_var = _create_or_get_iterations_per_loop()
      mean_loss = math_ops.div(total_loss,
                               math_ops.cast(
                                   iterations_per_loop_var,
                                   dtype=total_loss.dtype))

      # Creates a dummy metric update_op for all metrics. Estimator expects
      # all metrics in eval_metric_ops have update_op and calls them one by
      # one. The real metric update_ops are invoked in a separated thread.
      # So, here give Estimator the dummy op for all metrics.
      with ops.control_dependencies([mean_loss]):
        # After TPU evaluation computation is done (the mean_loss tensor),
        # reads all variables back from TPU and updates the eval step
        # counter properly
        internal_ops_to_run = _sync_variables_ops()
        internal_ops_to_run.append(
            _increase_eval_step_op(iterations_per_loop_var))
        with ops.control_dependencies(internal_ops_to_run):
          dummy_update_op = control_flow_ops.no_op()

      host_call_ret = host_calls.create_tpu_hostcall()
      eval_metric_ops = {}
      eval_update_ops = []
      for k, v in host_call_ret['eval_metrics'].items():
        eval_metric_ops[k] = (v[0], dummy_update_op)
        eval_update_ops.append(v[1])

      if 'host_call' not in host_call_ret:
        host_ops = []
      else:
        host_ops = host_call_ret['host_call']
      hooks = [
          TPUInfeedOutfeedSessionHook(
              ctx,
              enqueue_ops,
              eval_update_ops + host_ops,
              run_infeed_loop_on_coordinator=(
                  run_infeed_loop_on_coordinator)),
      ] + input_hooks

      return model_fn_lib.EstimatorSpec(
          mode,
          loss=mean_loss,
          evaluation_hooks=hooks,
          eval_metric_ops=eval_metric_ops,
          scaffold=scaffold)

    # Predict
    assert mode == model_fn_lib.ModeKeys.PREDICT

    dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(
        ctx, model_fn_wrapper, dequeue_fn)
    with ops.control_dependencies([dummy_predict_op]):
      internal_ops_to_run = _sync_variables_ops()
      with ops.control_dependencies(internal_ops_to_run):
        dummy_predict_op = control_flow_ops.no_op()

    # In train and evaluation, the main TPU program is passed to monitored
    # training session to run. Infeed enqueue and outfeed dequeue are
    # executed in side threads. This is not the configuration for
    # prediction mode.
    #
    # For prediction, the Estimator executes the EstimatorSpec.predictions
    # directly and yield the element (via generator) to call site. So, the
    # outfeed based prediction must be passed to MonitoredSession directly.
    # Other parts of the TPU execution are organized as follows.
    #
    # 1. All outfeed based Tensors must be grouped with predictions Tensors
    #    to form a single invocation. This avoid the issue we might trigger
    #    multiple outfeeds incorrectly. To achieve this, `host_call` is
    #    placed in control_dependencies of `stopping_signals`, and
    #    `stopping_signals` is passed into _StoppingPredictHook, which sets
    #    the `stopping_signals` as SessionRunArgs. MonitoredSession merges
    #    all SessionRunArgs with the fetch in session.run together.
    #
    # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
    #    are grouped together. They will be launched once and only once in
    #    side threads and they quit naturally according to the SAME stopping
    #    condition.
    enqueue_ops.append(dummy_predict_op)

    host_call_ret = host_calls.create_tpu_hostcall()
    if 'host_call' not in host_call_ret:
      host_ops = []
    else:
      host_ops = host_call_ret['host_call']

    predictions = host_call_ret['predictions']
    _verify_cross_hosts_transfer_size(
        predictions, message=(
            'The estimated size for TPUEstimatorSpec.predictions is too '
            'large.'))
    signals = host_call_ret['signals']

    with ops.control_dependencies(host_ops):
      host_ops = []  # Empty, we do do not need it anymore.
      scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
          signals)
      predictions = _PaddingSignals.slice_tensor_or_dict(
          predictions, signals)

    hooks = [
        _StoppingPredictHook(scalar_stopping_signal),
        TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,
                                                 host_ops),
    ] + input_hooks

    return model_fn_lib.EstimatorSpec(
        mode,
        prediction_hooks=hooks,
        predictions=predictions,
        scaffold=scaffold)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _augment_model_fn(self, model_fn, batch_axis):\n\n def _model_fn(features, labels, mode, config, params):\n \"\"\"A Estimator `model_fn` for TPUEstimator.\"\"\"\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. 
The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. 
They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)\n\n return _model_fn", "def model_fn_builder(model_config,\n train_params):\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = getattr(models, model_config.model_name)(config=model_config,\n is_training=is_training)\n _ = model(input_ids, input_mask=input_mask, token_type_ids=segment_ids)\n\n # TODO (@zhaoshenjian.01): check conditional_jit_scope\n # split loss calculation across batch\n batch_splits = train_params.get(\"batch_splits\", 1)\n if batch_splits == 1:\n # sparse_softmax_cross_entropy_with_logits\n masked_lm_output_dict = get_masked_lm_output(model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights)\n else:\n # use for large vocab\n masked_lm_output_dict = get_masked_lm_output_split_batch(\n model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n batch_splits=batch_splits)\n\n masked_lm_loss = masked_lm_output_dict[\"loss\"]\n\n use_nsp = train_params.get(\"use_nsp\", True)\n if use_nsp:\n next_sentence_labels = features[\"next_sentence_labels\"]\n next_sentence_output_dict = get_next_sentence_output(\n model_config, model.get_pooled_output(), next_sentence_labels)\n next_sentence_loss = next_sentence_output_dict[\"loss\"]\n else:\n next_sentence_loss = 0\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.compat.v1.trainable_variables()\n # run init\n init_checkpoint = train_params.get(\"init_checkpoint\")\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map,\n initialized_variable_names) = get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint,\n assignment_map)\n return tf.train.Scaffold()\n scaffold_fn = 
tpu_scaffold\n logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n logging.info(\" name = {}, shape = {} {}\".format(var.name, var.shape,\n init_string))\n\n # default `bert_decay` lr_scheduler\n lr_params = train_params.get(\n 'lr_scheduler', {\n 'name': 'bert_decay',\n 'learning_rate': 1e-4,\n 'warmup_steps': 10000,\n 'num_train_steps': 1000000\n })\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op, _ = optimizers.create_optimizer(\n loss=total_loss,\n init_lr=lr_params['learning_rate'],\n num_train_steps=lr_params['num_train_steps'],\n num_warmup_steps=lr_params['warmup_steps'])\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n return output_spec\n raise NotImplementedError\n\n return model_fn", "def create_model_fn(feature_columns):\n def _model_fn(features, mode, params):\n \"\"\"Model Function.\"\"\"\n logits = logits_fn(features, feature_columns, params)\n labels = tf.squeeze(features[\"label\"])\n\n if mode == tf_estimator.ModeKeys.EVAL:\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits\n ))\n\n def metric_fn(labels, logits):\n labels = tf.cast(labels, tf.int64)\n return {\n \"recall@1\": tf.metrics.recall_at_k(labels, logits, 1),\n \"recall@5\": tf.metrics.recall_at_k(labels, logits, 5)\n }\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metrics=(metric_fn, [labels, logits]))\n\n elif mode == tf_estimator.ModeKeys.TRAIN:\n\n optimizer = tf.train.AdamOptimizer(\n learning_rate=params[\"learning_rate\"], beta1=params[\"beta1\"],\n beta2=params[\"beta2\"], epsilon=params[\"epsilon\"])\n optimizer = tf.tpu.CrossShardOptimizer(optimizer)\n\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels,\n logits=logits,\n ))\n\n train_op = optimizer.minimize(loss, tf.train.get_global_step())\n\n return tf_estimator.tpu.TPUEstimatorSpec(\n mode=mode, loss=loss, train_op=train_op)\n\n else:\n raise NotImplementedError\n return _model_fn", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return output_spec", "def _call_model_fn(self, features, labels, is_export_mode=False):\n model_fn_args = util.fn_args(self._model_fn)\n kwargs = {}\n\n # Makes deep copy with `config` and params` in case user mutates them.\n config = copy.deepcopy(self._config)\n params = copy.deepcopy(self._params)\n\n if 'labels' in model_fn_args:\n kwargs['labels'] = labels\n elif labels is not None:\n raise ValueError(\n 'model_fn does not take labels, but input_fn returns labels.')\n 
if 'mode' in model_fn_args:\n kwargs['mode'] = self._ctx.mode\n if 'config' in model_fn_args:\n kwargs['config'] = config\n if 'params' in model_fn_args:\n kwargs['params'] = params\n\n if 'params' not in model_fn_args:\n raise ValueError('model_fn ({}) does not include params argument, '\n 'required by TPUEstimator to pass batch size as '\n 'params[\\'batch_size\\']'.format(self._model_fn))\n\n if is_export_mode:\n batch_size_for_model_fn = None\n else:\n batch_size_for_model_fn = self._ctx.batch_size_for_model_fn\n\n if batch_size_for_model_fn is not None:\n if isinstance(params, hparam.HParams):\n params.add_hparam(_BATCH_SIZE_KEY, batch_size_for_model_fn)\n else:\n params[_BATCH_SIZE_KEY] = batch_size_for_model_fn\n\n estimator_spec = self._model_fn(features=features, **kwargs)\n if (self._ctx.is_running_on_cpu(is_export_mode) and\n isinstance(estimator_spec, TPUEstimatorSpec)):\n # The estimator_spec will be passed to `Estimator` directly, which expects\n # type `EstimatorSpec`.\n return estimator_spec.as_estimator_spec()\n else:\n return estimator_spec", "def model_fn(self, features, labels, mode, params, config):\n raise NotImplementedError()", "def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)", "def create_tpu_estimator(model_fn, feature_columns, params):\n\n tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(\n params[\"tpu\"],\n zone=params[\"tpu_zone\"],\n project=params[\"gcp_project\"],\n coordinator_name=\"coordinator\")\n\n config = tf_estimator.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n model_dir=params[\"model_dir\"],\n tpu_config=tf_estimator.tpu.TPUConfig(\n iterations_per_loop=params[\"iterations_per_loop\"],\n experimental_host_call_every_n_steps=100,\n per_host_input_for_training=tf_estimator.tpu.InputPipelineConfig\n .PER_HOST_V2))\n\n return tf_estimator.tpu.TPUEstimator(\n use_tpu=params[\"use_tpu\"],\n model_fn=model_fn,\n config=config,\n train_batch_size=params[\"global_batch_size\"],\n eval_batch_size=params[\"eval_global_batch_size\"],\n params=params,\n embedding_config_spec=tf_estimator.tpu.experimental.EmbeddingConfigSpec(\n feature_columns=feature_columns,\n pipeline_execution_with_tensor_core=params[\"pipeline_execution\"],\n optimization_parameters=tf.tpu.experimental.AdagradParameters(\n learning_rate=params[\"learning_rate\"],\n use_gradient_accumulation=params[\"use_gradient_accumulation\"])))", "def model_fn_builder(num_labels, learning_rate, num_train_steps,\n num_warmup_steps):\n\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, 
input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels)\n true_pos = tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn", "def model_fn_builder(num_labels, learning_rate, num_train_steps, num_warmup_steps):\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)\n #f1_score = tf.contrib.metrics.f1_score(\n # label_ids,\n # predicted_labels)\n #auc = tf.metrics.auc(\n # label_ids,\n # predicted_labels)\n #recall = tf.metrics.recall(\n # label_ids,\n # predicted_labels)\n #precision = tf.metrics.precision(\n # label_ids,\n # predicted_labels)\n #true_pos = tf.metrics.true_positives(\n # label_ids,\n # predicted_labels)\n #true_neg = tf.metrics.true_negatives(\n # label_ids,\n # predicted_labels)\n #false_pos = tf.metrics.false_positives(\n # label_ids,\n # predicted_labels)\n #false_neg = tf.metrics.false_negatives(\n # label_ids,\n # predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n # \"f1_score\": f1_score,\n #\"auc\": auc,\n # \"precision\": precision,\n # \"recall\": recall,\n # \"true_positives\": true_pos,\n # \"true_negatives\": true_neg,\n # \"false_positives\": false_pos,\n # \"false_negatives\": 
false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn", "def model_fn_builder(bert_model_hub, num_labels, learning_rate,\n num_train_steps, num_warmup_steps):\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(label_ids,\n predicted_labels)\n auc = tf.metrics.auc(label_ids, predicted_labels)\n recall = tf.metrics.recall(label_ids, predicted_labels)\n precision = tf.metrics.precision(label_ids, predicted_labels)\n true_pos = tf.metrics.true_positives(label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(label_ids,\n predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Return the actual model function in the closure\n return model_fn", "def model_fn_builder(config: electra_files.configure_finetuning.FinetuningConfig, tasks,\n num_train_steps, pretraining_config=None):\n\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n utils.log(\"Building model...\")\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = FinetuningModel(\n config, tasks, is_training, features, num_train_steps)\n\n if pretraining_config is not None:\n # init_checkpoint = 
tf.train.latest_checkpoint(pretraining_config.model_dir)\n init_checkpoint = pretraining_config['checkpoint']\n utils.log(\"Using checkpoint\", init_checkpoint)\n tvars = tf.trainable_variables()\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, _ = modeling.get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n # Build model for training or prediction\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n model.loss, config.learning_rate, num_train_steps,\n weight_decay_rate=config.weight_decay_rate,\n use_tpu=config.use_tpu,\n warmup_proportion=config.warmup_proportion,\n layerwise_lr_decay_power=config.layerwise_lr_decay,\n n_transformer_layers=model.bert_config.num_hidden_layers\n )\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=model.loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn,\n training_hooks=[training_utils.ETAHook(\n {} if config.use_tpu else dict(loss=model.loss),\n num_train_steps, config.iterations_per_loop, config.use_tpu, 10)])\n else:\n assert mode == tf.estimator.ModeKeys.PREDICT\n output_spec = tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions=utils.flatten_dict(model.outputs),\n scaffold_fn=scaffold_fn)\n\n utils.log(\"Building complete\")\n return output_spec\n\n return model_fn", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = model_function.create(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu,\n scope=(\"loss\" if model_function.freeze else None))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n if model_function.task_type == TaskType.CLASSIFICATION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n elif model_function.task_type == 
TaskType.REGRESSION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n ground_truth = tf.log1p(tf.clip_by_value(tf.cast(label_ids, tf.float32), 1e-8, 1e+30))\n predictions = tf.log1p(tf.clip_by_value(logits, 1e-8, 1e+30))\n return {\n \"eval_loss\": tf.metrics.mean(per_example_loss),\n \"another_loss\": tf.metrics.mean_squared_error(ground_truth, predictions)\n }\n else:\n raise NotImplementedError()\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n predictions = {\n \"result\": probabilities\n }\n print(probabilities.shape)\n print(type(probabilities))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec", "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return output_spec\n\n return model_fn", "def tff_model_fn():\n keras_model = load_model(FLAGS.batch_size)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n return simple_fedavg_tf.KerasModelWrapper(keras_model,\n test_data.element_spec, loss)", "def model_fn_builder(self, bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (subject_logits, property_logits, value_logits) = self.create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n params=params,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n 
else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions, depth):\n one_hot_positions = tf.one_hot(\n positions, depth=depth, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n # subject, property, value로 나오도록\n subject_label = features[\"subject\"]\n property_label = features[\"property\"]\n value_label = features[\"value\"]\n res_length = params[\"res_length\"]\n ont_length = params[\"ont_length\"]\n\n subject_loss = compute_loss(subject_logits, subject_label, res_length)\n property_loss = compute_loss(property_logits, property_label, ont_length)\n value_loss = compute_loss(value_logits, value_label, res_length)\n\n total_loss = (subject_loss + property_loss + value_loss) / 3.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"subject_logits\": subject_logits,\n \"property_logits\": property_logits,\n \"value_logits\": value_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn", "def model_fn_builder(config):\n def model_fn(features,labels,mode,params):\n \"\"\"The model_fn for Estimator\"\"\"\n input_q = features[\"input_q\"] # query feature vector\n input_K = features[\"input_K\"] # Key set Matrix\n input_v = features[\"input_v\"] # image visual feature vector\n input_labels = features[\"input_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.AMT(\n config = config,\n is_trainging = is_training, \n scope = \"AMT\",\n input_q = input_q,\n input_K = input_K,\n input_v = input_v\n )\n loss = model.loss\n q_doc_rank = model.get_predict()\n output_spec = None\n scaffold_fn = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer()\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n loss = loss,\n train_op = train_op,\n scaffold_fn = scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn():\n return 0\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n predictions = q_doc_rank,\n scaffold_fn = scaffold_fn)\n return output_spec\n return model_fn", "def model_fn_builder(config):\n init_checkpoint = config.init_checkpoint\n coref_model = CorefQAModel(config)\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n predictions, total_loss = 
coref_model.forward(features, is_training)\n doc_idx, subtoken_map, top_span_starts, top_span_ends, antecedent_starts, antecedent_ends, antecedent_scores = predictions\n tvars = tf.trainable_variables()\n initialized_variables = {}\n scaffold_fn = None\n if init_checkpoint:\n assignment_map, initialized_variables = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if config.use_tpu:\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \", *INIT_FROM_CKPT*\" if var.name in initialized_variables else \"\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape, init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = create_custom_optimizer(total_loss, config)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn(loss):\n return {\"eval_loss\": tf.metrics.mean(loss)}\n\n eval_metrics = (metric_fn, [total_loss])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"doc_idx\": doc_idx, \"subtoken_map\": subtoken_map,\n \"top_span_starts\": top_span_starts, \"top_span_ends\": top_span_ends,\n \"antecedent_starts\": antecedent_starts, \"antecedent_ends\": antecedent_ends,\n \"antecedent_scores\": antecedent_scores, \"loss\": total_loss},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn", "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings, model_function):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = model_function.create(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, 
num_train_steps, num_warmup_steps, use_tpu,\n scope=(\"loss\" if model_function.freeze else None))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n if model_function.task_type == TaskType.CLASSIFICATION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(label_ids, predictions)\n loss = tf.metrics.mean(per_example_loss)\n return {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n elif model_function.task_type == TaskType.REGRESSION:\n\n def metric_fn(per_example_loss, label_ids, logits):\n ground_truth = tf.log1p(tf.clip_by_value(tf.cast(label_ids, tf.float32), 1e-8, 1e+30))\n predictions = tf.log1p(tf.clip_by_value(logits, 1e-8, 1e+30))\n return {\n \"eval_loss\": tf.metrics.mean(per_example_loss),\n \"another_loss\": tf.metrics.mean_squared_error(ground_truth, predictions)\n }\n else:\n raise NotImplementedError()\n\n eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n predictions = {\n \"result\": probabilities\n }\n print(probabilities.shape)\n print(type(probabilities))\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn", "def _model_fn(features, labels, mode):\n weights = None\n if weights_name and weights_name in features:\n weights = features.pop(weights_name)\n\n keys = None\n if keys_name and keys_name in features:\n keys = features.pop(keys_name)\n\n # If we're doing eval, optionally ignore device_assigner.\n # Also ignore device assigner if we're exporting (mode == INFER)\n dev_assn = device_assigner\n if (mode == model_fn_lib.ModeKeys.INFER or\n (local_eval and mode == model_fn_lib.ModeKeys.EVAL)):\n dev_assn = None\n\n graph_builder = graph_builder_class(params,\n device_assigner=dev_assn)\n inference = {}\n output_alternatives = None\n if (mode == model_fn_lib.ModeKeys.EVAL or\n mode == model_fn_lib.ModeKeys.INFER):\n inference[eval_metrics.INFERENCE_PROB_NAME] = (\n graph_builder.inference_graph(features))\n\n if params.regression:\n predictions = {\n None: inference[eval_metrics.INFERENCE_PROB_NAME]}\n output_alternatives = {\n None: (constants.ProblemType.LINEAR_REGRESSION, predictions)}\n else:\n inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(\n inference[eval_metrics.INFERENCE_PROB_NAME], 1)\n\n predictions = {\n prediction_key.PredictionKey.PROBABILITIES:\n inference[eval_metrics.INFERENCE_PROB_NAME],\n prediction_key.PredictionKey.CLASSES:\n inference[eval_metrics.INFERENCE_PRED_NAME]}\n output_alternatives = {\n None: (constants.ProblemType.CLASSIFICATION, predictions)}\n\n if report_feature_importances:\n inference[eval_metrics.FEATURE_IMPORTANCE_NAME] = (\n graph_builder.feature_importances())\n\n if keys is not None:\n inference[keys_name] = keys\n\n # labels might be None if we're doing prediction (which brings up the\n # question of why we force everything to adhere to a single model_fn).\n loss_deps = []\n training_graph = None\n training_hooks = []\n scaffold = None\n if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:\n training_graph = control_flow_ops.group(\n graph_builder.training_graph(\n features, labels, input_weights=weights,\n 
num_trainers=num_trainers,\n trainer_id=trainer_id),\n state_ops.assign_add(contrib_framework.get_global_step(), 1))\n loss_deps.append(training_graph)\n if hasattr(graph_builder, 'finalize_training'):\n finalize_listener = EveryCheckpointPreSaveListener(\n graph_builder.finalize_training())\n scaffold = monitored_session.Scaffold()\n training_hooks.append(\n basic_session_run_hooks.CheckpointSaverHook(\n model_dir, save_secs=600, save_steps=None,\n scaffold=scaffold,\n listeners=[finalize_listener]))\n\n training_loss = None\n if (mode == model_fn_lib.ModeKeys.EVAL or\n mode == model_fn_lib.ModeKeys.TRAIN):\n with ops.control_dependencies(loss_deps):\n training_loss = graph_builder.training_loss(\n features, labels, name=LOSS_NAME)\n\n # Put weights back in\n if weights is not None:\n features[weights_name] = weights\n\n if early_stopping_rounds:\n training_hooks.append(TensorForestLossHook(early_stopping_rounds))\n\n return model_fn_lib.ModelFnOps(\n mode=mode,\n predictions=inference,\n loss=training_loss,\n train_op=training_graph,\n training_hooks=training_hooks,\n scaffold=scaffold,\n output_alternatives=output_alternatives)", "def model_fn_builder():\n \n def model_fn(features, labels, mode, params):\n # features name and shape\n _info('*** Features ****')\n for name in sorted(features.keys()):\n tf.logging.info(' name = {}, shape = {}'.format(name, features[name].shape))\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n # get data\n input_x = features['input_x']\n input_mask = features['input_mask']\n if is_training:\n input_y = features['input_y']\n seq_length = features['seq_length']\n else:\n input_y = None\n seq_length = None\n\n # build encoder\n model = BertEncoder(\n config=cg.BertEncoderConfig,\n is_training=is_training,\n input_ids=input_x,\n input_mask=input_mask)\n embedding_table = model.get_embedding_table()\n encoder_output = tf.reduce_sum(model.get_sequence_output(), axis=1)\n\n # build decoder\n decoder_model = Decoder(\n config=cg.DecoderConfig,\n is_training=is_training,\n encoder_state=encoder_output,\n embedding_table=embedding_table,\n decoder_intput_data=input_y,\n seq_length_decoder_input_data=seq_length)\n logits, sample_id, ppl_seq, ppl = decoder_model.get_decoder_output()\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {'sample_id': sample_id, 'ppls': ppl_seq}\n output_spec = tf.estimator.EstimatorSpec(mode, predictions=predictions)\n else:\n if mode == tf.estimator.ModeKeys.TRAIN:\n max_time = ft.get_shape_list(labels, expected_rank=2)[1]\n target_weights = tf.sequence_mask(seq_length, max_time, dtype=logits.dtype)\n batch_size = tf.cast(ft.get_shape_list(labels, expected_rank=2)[0], tf.float32)\n\n loss = tf.reduce_sum(\n tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits) * target_weights) / batch_size\n\n learning_rate = tf.train.polynomial_decay(cg.learning_rate,\n tf.train.get_or_create_global_step(),\n cg.train_steps / 100,\n end_learning_rate=1e-4,\n power=1.0,\n cycle=False)\n\n lr = tf.maximum(tf.constant(cg.lr_limit), learning_rate)\n optimizer = tf.train.AdamOptimizer(lr, name='optimizer')\n tvars = tf.trainable_variables()\n gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=cg.colocate_gradients_with_ops)\n clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)\n train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())\n\n\n # this is excellent, because it could display the result each step, i.e., each step equals to 
batch_size.\n # the output_spec, display the result every save checkpoints step.\n logging_hook = tf.train.LoggingTensorHook({'loss' : loss, 'ppl': ppl, 'lr': lr}, every_n_iter=cg.print_info_interval)\n\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op, training_hooks=[logging_hook])\n elif mode == tf.estimator.ModeKeys.EVAL:\n # TODO\n raise NotImplementedError\n \n return output_spec\n \n return model_fn", "def _model_fn(features, labels, mode):\n model.training = False\n logits = model(features)\n logits = tf.cast(logits, tf.float32)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=logits)\n else:\n eval_metric_ops = RMSE()(logits, labels)\n return tf.estimator.EstimatorSpec(mode=mode, loss=tf.log(1.0), train_op=None,\n eval_metric_ops=eval_metric_ops)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n\n model = modeling.BertModel(config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, \n init_checkpoint)\n if use_tpu:\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold() \n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape, init_string)\n\n all_layers = model.get_all_encoder_layers()\n\n predictions = {\"unique_id\": unique_ids}\n for (i, layer_index) in enumerate(layer_indexes):\n predictions[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, \n predictions=predictions, \n scaffold_fn=scaffold_fn)\n return output_spec", "def model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n logits = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\n 
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"prediction\": tf.argmax(logits, axis=-1),\n }\n output_spec = tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n else:\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=3, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n label = features[\"label\"]\n\n loss = compute_loss(logits, label)\n predicted_classes = tf.argmax(logits, axis=-1)\n accuracy = tf.metrics.accuracy(labels=label, predictions=predicted_classes, name='acc_op')\n\n # global global_acc_list\n # global_acc_list.append(accuracy)\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n metrics = {'accuracy': accuracy}\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops=metrics,\n train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n metrics = {'accuracy': accuracy}\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n (total_loss, per_example_loss, log_probs) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n scaffold_fn = None\n initialized_variable_names = []\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n\n elif mode == tf.estimator.ModeKeys.PREDICT:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\n \"log_probs\": 
log_probs,\n \"label_ids\": label_ids,\n },\n scaffold_fn=scaffold_fn)\n\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec", "def build_model_fn_optimizer():\n # TODO(anjalisridhar): Move this inside the model_fn once OptimizerV2 is\n # done?\n optimizer = tf.train.GradientDescentOptimizer(0.2)\n\n def model_fn(features, labels, mode): # pylint: disable=unused-argument\n \"\"\"model_fn which uses a single unit Dense layer.\"\"\"\n # You can also use the Flatten layer if you want to test a model without any\n # weights.\n layer = tf.layers.Dense(1, use_bias=True)\n logits = tf.reduce_mean(layer(tf.cast(features[\"input_ids\"], tf.float32)))/1000\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\"logits\": logits}\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n def loss_fn():\n y = tf.reshape(logits, []) - tf.constant(1.)\n return y * y\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(mode, loss=loss_fn())\n\n assert mode == tf.estimator.ModeKeys.TRAIN\n\n global_step = tf.train.get_global_step()\n train_op = optimizer.minimize(loss_fn(), global_step=global_step)\n return tf.estimator.EstimatorSpec(mode, loss=loss_fn(), train_op=train_op)\n\n return model_fn", "def model_fn(features,labels,mode,params):\n input_q = features[\"input_q\"] # query feature vector\n input_K = features[\"input_K\"] # Key set Matrix\n input_v = features[\"input_v\"] # image visual feature vector\n input_labels = features[\"input_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.AMT(\n config = config,\n is_trainging = is_training, \n scope = \"AMT\",\n input_q = input_q,\n input_K = input_K,\n input_v = input_v\n )\n loss = model.loss\n q_doc_rank = model.get_predict()\n output_spec = None\n scaffold_fn = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer()\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n loss = loss,\n train_op = train_op,\n scaffold_fn = scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn():\n return 0\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n predictions = q_doc_rank,\n scaffold_fn = scaffold_fn)\n return output_spec", "def build_model_fn(self):", "def model_fn(features, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"input_type_ids\"]\n # label_ids = features[\"label_ids\"]\n vocab = vocab_list\n vocab_size = len(vocab_list)\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n\n\n \n # TRAIN\n if not is_predicting:\n\n (loss, predictions, log_probs) = create_model(\n 
is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=FLAGS.use_tpu)\n\n # if mode == tf.estimator.ModeKeys.TRAIN:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n ## else:\n # return tf.estimator.EstimatorSpec(mode=mode,\n # loss=loss,\n # eval_metric_ops=eval_metrics)\n else:\n (predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n predictions = {\n 'probabilities': log_probs,\n 'predictions': predictions\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions, scaffold_fn=scaffold_fn)\n\n return output_spec if use_tpu else output_spec.as_estimator_spec()", "def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n (total_loss, per_example_loss, log_probs) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n scaffold_fn = None\n initialized_variable_names = []\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n\n elif mode == tf.estimator.ModeKeys.PREDICT:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\n \"log_probs\": log_probs,\n \"label_ids\": label_ids,\n },\n scaffold_fn=scaffold_fn)\n\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn" ]
[ "0.7530073", "0.71472275", "0.71315056", "0.7064388", "0.6985274", "0.69217014", "0.69103295", "0.6909028", "0.68506837", "0.67912245", "0.67624724", "0.67304444", "0.6727382", "0.6604433", "0.6592421", "0.65798515", "0.6450492", "0.6436315", "0.64316684", "0.63801944", "0.63746226", "0.6352861", "0.6319128", "0.6305287", "0.6278894", "0.62695444", "0.6265536", "0.6249122", "0.62282425", "0.61984205" ]
0.73766655
1
Executes `model_fn_wrapper` multiple times on all TPU shards.
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  iterations_per_loop_var = _create_or_get_iterations_per_loop()

  single_tpu_eval_step, host_calls, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))

  def multi_tpu_eval_steps_on_single_shard():
    return training_loop.repeat(
        iterations_per_loop_var,
        single_tpu_eval_step, [_ZERO_LOSS])

  (loss,) = tpu.shard(
      multi_tpu_eval_steps_on_single_shard,
      inputs=[],
      num_shards=ctx.num_replicas,
      outputs_from_all_shards=False,
      device_assignment=ctx.device_assignment)

  scaffold = _get_scaffold(captured_scaffold_fn)
  return loss, host_calls, scaffold
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n single_tpu_train_step, host_call, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))\n\n def multi_tpu_train_steps_on_single_shard():\n return training_loop.repeat(\n iterations_per_loop_var,\n single_tpu_train_step, [_INITIAL_LOSS])\n\n (loss,) = tpu.shard(\n multi_tpu_train_steps_on_single_shard,\n inputs=[],\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n scaffold = _get_scaffold(captured_scaffold_fn)\n return loss, host_call, scaffold", "def _TpuFunction():\n self._metrics_mgr.ResetState()\n\n for _ in tf.range(self._steps_per_loop):\n batch = _GetShardedBatch()\n # Note: running the enqueue in strategy.run() could potentially cause\n # deadlock and cause the job to hang. Here we run it outside.\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Enqueue(batch)\n strategy.run(_Step, args=(batch,))\n\n return self._metrics_mgr.FinalizeMetricsWithStructure(\n self._metrics_dict_structure\n )", "def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n num_cores = ctx.num_cores\n\n single_tpu_predict_step, host_calls, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))\n\n def multi_tpu_predict_steps_on_single_shard():\n\n def cond(scalar_stopping_signal):\n return math_ops.logical_not(\n _StopSignals.should_stop(scalar_stopping_signal))\n\n inputs = [_StopSignals.NON_STOPPING_SIGNAL]\n outputs = training_loop.while_loop(\n cond, single_tpu_predict_step, inputs=inputs, name=b'loop')\n return outputs\n\n (dummy_predict_op,) = tpu.shard(\n multi_tpu_predict_steps_on_single_shard,\n inputs=[],\n num_shards=num_cores,\n outputs_from_all_shards=False)\n\n scaffold = _get_scaffold(captured_scaffold_fn)\n return dummy_predict_op, host_calls, scaffold", "def run_all_tests():\n model_configs = (model_handler.ModelConfig(\n saved_model_dir=platform_test.test_src_dir_path(\n \"python/compiler/tensorrt/model_tests/sample_model\"),\n default_batch_size=128),)\n if FLAGS.use_tf2:\n model_handler_cls = model_handler.ModelHandlerV2\n trt_model_handeler_cls = model_handler.TrtModelHandlerV2\n default_trt_convert_params = DEFAUL_TRT_CONVERT_PARAMS._replace(\n is_dynamic_op=True)\n else:\n model_handler_cls = model_handler.ModelHandlerV1\n trt_model_handeler_cls = model_handler.TrtModelHandlerV1\n default_trt_convert_params = DEFAUL_TRT_CONVERT_PARAMS._replace(\n is_dynamic_op=False)\n for model_config in model_configs:\n trt_convert_params = default_trt_convert_params._replace(\n max_batch_size=model_config.default_batch_size)\n base_model = model_handler_cls(model_config)\n random_inputs = base_model.generate_random_inputs()\n base_model_result = base_model.run(random_inputs)\n trt_fp32_model_result = trt_model_handeler_cls(\n model_config=model_config,\n trt_convert_params=trt_convert_params._replace(\n precision_mode=trt.TrtPrecisionMode.FP32)).run(random_inputs)\n trt_fp16_model_result = trt_model_handeler_cls(\n model_config=model_config,\n trt_convert_params=trt_convert_params._replace(\n precision_mode=trt.TrtPrecisionMode.FP16)).run(random_inputs)\n\n logging.info(\"Base model latency: %f ms\",\n _get_mean_latency(base_model_result))\n logging.info(\"TensorRT FP32 model latency: %f ms\",\n _get_mean_latency(trt_fp32_model_result))\n logging.info(\"TensorRT FP16 model latency: %f ms\",\n 
_get_mean_latency(trt_fp16_model_result))", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n constants.TRAIN_BATCH_SIZE,\n is_train=True\n )\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n constants.EVAL_BATCH_SIZE,\n is_train=False\n )\n\n # # check for availabe tpu and gpu units\n # try:\n # tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n # tf.config.experimental_connect_to_cluster(tpu)\n # tf.tpu.experimental.initialize_tpu_system(tpu)\n # strategy = tf.distribute.experimental.TPUStrategy(tpu)\n # except ValueError:\n # strategy = tf.distribute.MirroredStrategy()\n\n # with strategy.scope():\n model = get_model(fn_args)\n\n try:\n log_dir = fn_args.model_run_dir\n except KeyError:\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), \"logs\")\n\n absl.logging.info('Tensorboard logging to {}'.format(log_dir))\n\n callbacks = [\n # tf.keras.callbacks.ModelCheckpoint(\"DeepLabV3plus.ckpt\", verbose=1, save_weights_only=True, save_best_only=True),\n tf.keras.callbacks.ReduceLROnPlateau(monitor=\"iou_score\", factor=0.2, patience=6, verbose=1, mode=\"max\"),\n tf.keras.callbacks.EarlyStopping(monitor=\"iou_score\", patience=16, mode=\"max\", verbose=1, restore_best_weights=True),\n tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq=\"batch\")\n ]\n\n absl.logging.info('Start training the top classifier')\n \n model.fit(\n train_dataset,\n epochs=constants.EPOCHS,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=callbacks\n )\n\n signatures = {\n 'serving_default':\n _get_serve_image_fn(model).get_concrete_function(\n tf.TensorSpec(\n shape=[None, constants.HEIGHT, constants.WIDTH, 3],\n dtype=tf.float32,\n name=_transformed_name(constants.IMAGE_KEY)\n )\n )\n }\n\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def __call__(self, fn, *args, **kwargs):\n # construct lists of args and kwargs for each function\n if args:\n my_args = _transpose_list_of_lists(\n [_maybe_repeat(arg, self.n) for arg in args])\n else:\n my_args = [[] for _ in range(self.n)]\n my_kwargs = [{} for _ in range(self.n)]\n for k, v in six.iteritems(kwargs):\n vals = _maybe_repeat(v, self.n)\n for i in range(self.n):\n my_kwargs[i][k] = vals[i]\n\n # construct lists of functions\n fns = _maybe_repeat(fn, self.n)\n\n # apply fns\n outputs = []\n cache = {}\n load = dict([(d, 0) for d in self._devices])\n for device_id, device in enumerate(self._devices):\n\n def daisy_chain_getter(getter, name, *args, **kwargs):\n \"\"\"Get a variable and cache in a daisy chain.\"\"\"\n device_var_key = (device, name)\n if device_var_key in cache:\n # if we have the variable on the correct device, return it.\n return cache[device_var_key]\n if name in cache:\n # if we have it on a different device, copy it from the last device\n v = tf.identity(cache[name])\n else:\n var = getter(name, *args, **kwargs)\n v = tf.identity(var._ref()) # pylint: disable=protected-access\n # update the cache\n cache[name] = v\n cache[device_var_key] = v\n return v\n\n def balanced_device_setter(op):\n \"\"\"Balance variables to all devices.\"\"\"\n if op.type in {'Variable', 'VariableV2', 'VarHandleOp'}:\n # return self._sync_device\n min_load = min(load.values())\n min_load_devices = [d for d in load if load[d] == min_load]\n chosen_device = 
random.choice(min_load_devices)\n load[chosen_device] += op.outputs[0].get_shape().num_elements()\n return chosen_device\n return device\n\n def identity_device_setter(op):\n return device\n\n if self._mode == ModeKeys.TRAIN:\n custom_getter = daisy_chain_getter\n # device_setter = balanced_device_setter\n device_setter = device\n else:\n custom_getter = None\n device_setter = device\n\n # with tf.name_scope(\"parallel_{}\".format(device_id)):\n with tf.variable_scope(\n tf.get_variable_scope(),\n reuse=True if device_id > 0 or self._reuse else None,\n custom_getter=custom_getter):\n with tf.device(device_setter):\n outputs.append(fns[device_id](*my_args[device_id], **my_kwargs[device_id]))\n\n if isinstance(outputs[0], tuple):\n outputs = list(zip(*outputs))\n outputs = tuple([list(o) for o in outputs])\n return outputs", "def train_wrapper(model):\n if FLAGS.pretrained_model:\n model.load(FLAGS.pretrained_model)\n # load data\n train_input_handle, test_input_handle = datasets_factory.data_provider(\n FLAGS.dataset_name,\n FLAGS.train_data_paths,\n FLAGS.valid_data_paths,\n FLAGS.batch_size * FLAGS.n_gpu,\n FLAGS.img_width,\n seq_length=FLAGS.total_length,\n is_training=True)\n\n eta = FLAGS.sampling_start_value\n\n for itr in range(1, FLAGS.max_iterations + 1):\n if train_input_handle.no_batch_left():\n train_input_handle.begin(do_shuffle=True)\n ims = train_input_handle.get_batch()\n if FLAGS.dataset_name == 'penn':\n ims = ims['frame']\n ims = preprocess.reshape_patch(ims, FLAGS.patch_size)\n\n eta, real_input_flag = schedule_sampling(eta, itr)\n\n trainer.train(model, ims, real_input_flag, FLAGS, itr)\n\n if itr % FLAGS.snapshot_interval == 0:\n model.save(itr)\n\n if itr % FLAGS.test_interval == 0:\n trainer.test(model, test_input_handle, FLAGS, itr)\n\n train_input_handle.next()", "def _GetHostTrainLoop(\n self, strategy: tf.distribute.TPUStrategy\n ) -> Callable[..., Any]:\n replicas_per_host = strategy.extended.num_replicas_per_host\n\n def Split(batch, replicas_per_host, axis=0):\n \"\"\"Splits a NestedMap into replicas_per_host pieces.\"\"\"\n def _SplitFn(t):\n return tf.sparse.split if isinstance(t, tf.SparseTensor) else tf.split\n\n split = batch.Transform(lambda t: _SplitFn(t)(t, replicas_per_host, axis))\n return [\n nest.map_structure_up_to(batch, lambda t: t[i], split) # pylint: disable=cell-var-from-loop\n for i in range(replicas_per_host)\n ]\n\n def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n \"\"\"Fetch and shard one batch per attached device.\"\"\"\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )\n\n def _Step(batch: py_utils.NestedMap):\n \"\"\"A single forward/backward step.\n\n Processes the given input batch and updates the distributed metrics\n accumulator. 
We use FProp (instead of FPropDefaultTheta) and\n _BPropForVariables (instead of BProp) in order to permit the tf.distribute\n library to handle threading values across devices.\n\n Args:\n batch: NestedMap of input batch data.\n \"\"\"\n with tf.name_scope('tpu_train'):\n with py_utils.GradientTape(persistent=True):\n batch.Update(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Dequeue(batch)\n )\n metrics_dict, _ = self.task.FPropDefaultTheta(batch)\n # py_utils.ComputeGradientsSimple() needs to access the tape, so BProp\n # needs to be within the GradientTape context.\n self.task.BProp()\n\n self._metrics_dict_structure = metrics_dict\n self._metrics_mgr.AccumulateStepMetrics(metrics_dict)\n\n @tf.function\n def _TpuFunction():\n \"\"\"Runs several training steps and returns a flattened metrics list.\"\"\"\n self._metrics_mgr.ResetState()\n\n for _ in tf.range(self._steps_per_loop):\n batch = _GetShardedBatch()\n # Note: running the enqueue in strategy.run() could potentially cause\n # deadlock and cause the job to hang. Here we run it outside.\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Enqueue(batch)\n strategy.run(_Step, args=(batch,))\n\n return self._metrics_mgr.FinalizeMetricsWithStructure(\n self._metrics_dict_structure\n )\n\n # Trace the train function so it can create the optimizer slot vars and save\n # them at step 0.\n return _TpuFunction.get_concrete_function()", "def run_fn(fn_args: TrainerFnArgs):\n\n # Training set size\n TRAIN_SIZE = get_dataset_size(fn_args.train_files)\n NUM_STEPS = TRAIN_SIZE / BATCH_SIZE # number of steps per epoch for which to train model\n \n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n \n train_dataset = _input_fn(fn_args.train_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n eval_dataset = _input_fn(fn_args.eval_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n\n model = _build_keras_model(\n tf_transform_output,\n hidden_units=[HIDDEN_UNITS_1, HIDDEN_UNITS_2, HIDDEN_UNITS_3],\n learning_rate=LEARNING_RATE)\n\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=log_dir, update_freq='batch')\n \n model.fit(\n train_dataset,\n epochs=NUM_EPOCHS, \n steps_per_epoch=NUM_STEPS,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default':\n _get_serve_tf_examples_fn(model,\n LABEL_COLUMN,\n tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n print(f\"Parameters {fn_args}\")\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n batch_size=fn_args.train_batches)\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n batch_size=fn_args.eval_batches)\n\n # mirrored_strategy = tf.distribute.MirroredStrategy()\n # with mirrored_strategy.scope():\n model = encoder_decoder_model.build_keras_model(\n timesteps=fn_args.timesteps,\n number_features=fn_args.number_features,\n outer_units=fn_args.outer_units,\n inner_units=fn_args.inner_units)\n\n steps_per_epoch = fn_args.training_example_count / fn_args.train_batches\n\n tensorboard_callback = tf.keras.callbacks.TensorBoard()\n\n model.fit(\n train_dataset,\n 
epochs=int(fn_args.train_steps / steps_per_epoch),\n steps_per_epoch=steps_per_epoch,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default': _get_serve_tf_examples_fn(\n model, tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n\n model.save(\n fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def _model_compute_all(self, inputs):\n\n return self.model.compute_all(inputs)", "def _augment_model_fn(self, model_fn, batch_axis):\n\n def _model_fn(features, labels, mode, config, params):\n \"\"\"A Estimator `model_fn` for TPUEstimator.\"\"\"\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n 
training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. 
They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)\n\n return _model_fn", "def train_models(self):\n\n #keep track on the number of iterations (needed to scale lambda)\n nr_iteration = 0\n \n for epoch in range(self.epochs):\n start = time.time()\n print()\n print(epoch + 1)\n print()\n for step, batch in enumerate(self.training_data):\n X_batch = normalize_images(tf.cast(batch[0], 'float32'))\n Y_batch = batch[1]\n Z_batch = self.ae_model.encode(X_batch)\n \n self.train_step_disc(Z_batch, Y_batch)\n # Call only one tf.function when tracing.\n #ADD LAMBDA SCHEDULE ACCORDING TO OUR EXPERIMENTS AND EPOCH LENGTH\n self.scale_lambda(self.lambda_e, nr_iteration)\n self.train_step_ae(X_batch, Y_batch, Z_batch)\n\n nr_iteration += 1\n end = time.time()\n print(\"Epoch \" + str(epoch + 1) + \" takes \" + str(end - start))", "def __call__(self, *args, **kwargs):\n\n def replica_local_fn(*args, **kwargs):\n \"\"\"Updates the state of the metric in a replica-local context.\"\"\"\n if any(\n isinstance(arg, keras_tensor.KerasTensor)\n for arg in nest.flatten((args, kwargs))):\n update_op = None\n else:\n update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable\n update_ops = []\n if update_op is not None:\n update_ops.append(update_op)\n with ops.control_dependencies(update_ops):\n result_t = self.result() # pylint: disable=not-callable\n\n # We are adding the metric object as metadata on the result tensor.\n # This is required when we want to use a metric with `add_metric` API on\n # a Model/Layer in graph mode. 
This metric instance will later be used\n # to reset variable state after each epoch of training.\n # Example:\n # model = Model()\n # mean = Mean()\n # model.add_metric(mean(values), name='mean')\n result_t._metric_obj = self # pylint: disable=protected-access\n return result_t\n\n from tensorflow.python.keras.distribute import distributed_training_utils # pylint:disable=g-import-not-at-top\n return distributed_training_utils.call_replica_local_fn(\n replica_local_fn, *args, **kwargs)", "def __call__(self, fn, *args, **kwargs):\n # Construct lists or args and kwargs for each function.\n if args:\n my_args = transpose_list_of_lists(\n [self._maybe_repeat(arg) for arg in args])\n else:\n my_args = [[] for _ in range(self.n)]\n my_kwargs = [{} for _ in range(self.n)]\n for k, v in six.iteritems(kwargs):\n vals = self._maybe_repeat(v)\n for i in range(self.n):\n my_kwargs[i][k] = vals[i]\n\n # Construct lists of functions.\n fns = self._maybe_repeat(fn)\n\n # Now make the parallel call.\n outputs = []\n cache = {}\n tensor_to_var = {}\n for i in range(self.n):\n\n def daisy_chain_getter(getter, name, *args, **kwargs):\n \"\"\"Get a variable and cache in a daisy chain.\"\"\"\n device_var_key = (self._devices[i], name)\n if device_var_key in cache:\n # if we have the variable on the correct device, return it.\n return cache[device_var_key]\n if name in cache:\n # if we have it on a different device, copy it from the last device\n last_device_v = cache[name]\n var = tensor_to_var[last_device_v]\n v = tf.identity(last_device_v)\n else:\n var = getter(name, *args, **kwargs)\n # v = tf.identity(var._ref()) # pylint: disable=protected-access\n v = var.read_value()\n\n # keep track of the original variable\n tensor_to_var[v] = var\n _add_variable_proxy_methods(tensor_to_var[v], v)\n # update the cache\n cache[name] = v\n cache[device_var_key] = v\n return v\n\n # Variable scope will not reset caching_device on reused variables,\n # so we make a custom getter that uses identity to cache the variable.\n # pylint: disable=cell-var-from-loop\n def caching_getter(getter, name, *args, **kwargs):\n \"\"\"Cache variables on device.\"\"\"\n key = (self._caching_devices[i], name)\n if key in cache:\n return cache[key]\n\n v = getter(name, *args, **kwargs)\n with tf.device(self._caching_devices[i]):\n # ret = tf.identity(v._ref()) # pylint: disable=protected-access\n ret = v.read_value()\n _add_variable_proxy_methods(v, ret)\n cache[key] = ret\n return ret\n\n if self._daisy_chain_variables:\n custom_getter = daisy_chain_getter\n elif self._caching_devices[i]:\n custom_getter = caching_getter\n else:\n custom_getter = None\n # pylint: enable=cell-var-from-loop\n with tf.name_scope(\"parallel_%d\" % i):\n with tf.variable_scope(\n tf.get_variable_scope() if self._reuse else \"parallel_%d\" % i,\n reuse=True if i > 0 and self._reuse else None,\n caching_device=self._caching_devices[i],\n custom_getter=custom_getter):\n # TODO(noam, epot, avaswani)\n # Allows for passing no device in case you want to default to the\n # existing device. 
This is needed when we put all experts on a single\n # device, for example in local_moe.\n if self._devices[i] != DEFAULT_DEV_STRING:\n with tf.device(self._devices[i]):\n outputs.append(fns[i](*my_args[i], **my_kwargs[i]))\n else:\n outputs.append(fns[i](*my_args[i], **my_kwargs[i]))\n if isinstance(outputs[0], tuple):\n outputs = list(zip(*outputs))\n outputs = tuple([list(o) for o in outputs])\n return outputs", "def test_custom_distributed_multimodel_training():\n testcol = testcol_cust_dist_multi\n conn = pm.MongoClient(host=testhost,\n port=testport)\n\n # set up the parameters\n params = {}\n\n model1_params = {'func': model.mnist_tfutils,\n 'devices': [0, 1]}\n model2_params = {'func': model.mnist_tfutils,\n 'devices': [2, 3]}\n\n save_params = {'host': testhost,\n 'port': testport,\n 'dbname': testdbname,\n 'collname': testcol,\n 'exp_id': 'training0',\n 'save_valid_freq': 20,\n 'save_filters_freq': 200,\n 'cache_filters_freq': 100}\n\n train1_params = {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'train',\n 'n_threads': 4},\n 'train_loop': {'func': custom_train_loop},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'num_steps': 500}\n\n train2_params = {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'train',\n 'n_threads': 4},\n 'train_loop': {'func': custom_train_loop},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'num_steps': 500}\n\n loss_params = {'targets': ['labels'],\n 'agg_func': tf.reduce_mean,\n 'loss_per_case_func': tf.nn.sparse_softmax_cross_entropy_with_logits}\n\n learning_rate_params = {'learning_rate': 0.05,\n 'decay_steps': num_batches_per_epoch,\n 'decay_rate': 0.95,\n 'staircase': True}\n\n validation_params = {'valid0': {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'test',\n 'n_threads': 4},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'num_steps': 10,\n 'agg_func': utils.mean_dict}}\n optimizer_params = {'func': optimizer.ClipOptimizer,\n 'optimizer_class': tf.train.MomentumOptimizer,\n 'clip': True,\n 'optimizer_kwargs':{'momentum': 0.9}}\n\n load_params = {'do_restore': True}\n\n model_params = [model1_params, model2_params]\n train_params = [train1_params, train2_params]\n num_models = len(model_params)\n\n params['skip_check'] = True\n params['load_params'] = load_params\n params['loss_params'] = loss_params\n params['save_params'] = save_params\n params['model_params'] = model_params\n params['train_params'] = train_params\n params['optimizer_params'] = optimizer_params\n params['validation_params'] = validation_params\n params['learning_rate_params'] = learning_rate_params\n\n # actually run the training\n base.train_from_params(**params)", "def with_cpu(ops, model):\n ...", "def input_fn(self, params):\n with tf.variable_scope('data_provider'):\n if self.mode == enums.ModelMode.INFERENCE:\n images = tf.placeholder(tf.float32, [\n None, self.preprocessor.preprocessing_options.image_size,\n self.preprocessor.preprocessing_options.image_size, 3\n ])\n return tf_estimator.export.TensorServingInputReceiver(\n features=images, receiver_tensors=images)\n\n # Retrieves the batch size for the current shard. The # of shards is\n # computed according to the input pipeline deployment. 
See\n # tf.contrib.tpu.RunConfig for details.\n batch_size = params['batch_size']\n\n if 'context' in params:\n current_host = params['context'].current_input_fn_deployment()[1]\n num_hosts = params['context'].num_hosts\n num_cores = params['context'].num_replicas\n else:\n current_host = 0\n num_hosts = 1\n num_cores = 1\n\n dataset = self.make_source_dataset(current_host, num_hosts)\n\n if (self.mode == enums.ModelMode.TRAIN and self.max_samples and\n self.max_samples > 0):\n dataset = dataset.take(self.max_samples)\n\n dataset = dataset.map(self.dataset_parser, num_parallel_calls=num_cores)\n if self.label_noise_prob > 0. and self.mode == enums.ModelMode.TRAIN:\n dataset = dataset.map(\n self._label_noise_fn, num_parallel_calls=num_cores)\n\n if self.cache:\n dataset = dataset.cache()\n if self.mode == enums.ModelMode.TRAIN:\n dataset = dataset.shuffle(self.shuffle_buffer).repeat()\n\n # Use the fused map-and-batch operation.\n #\n # For XLA, we must used fixed shapes. Because we repeat the source\n # training dataset indefinitely, we can use `drop_remainder=True` to get\n # fixed-size batches without dropping any training examples.\n #\n # When evaluating, `drop_remainder=True` prevents accidentally evaluating\n # the same image twice by dropping the final batch if it is less than a\n # full batch size. As long as this validation is done with consistent\n # batch size, exactly the same images will be used.\n dataset = dataset.apply(\n tf.data.experimental.map_and_batch(\n self._preprocess_image,\n batch_size=batch_size,\n num_parallel_batches=num_cores,\n drop_remainder=True))\n\n # Assign static batch size dimension\n dataset = dataset.map(\n functools.partial(self._set_static_batch_dim, batch_size))\n\n # Prefetch overlaps in-feed with training\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n return dataset", "def _invoke_input_fn_and_record_structure(self):\n enqueue_ops = []\n infeed_queues = []\n all_hooks = []\n num_hosts = self._ctx.num_hosts\n tpu_host_placement_fn = self._ctx.tpu_host_placement_function\n\n run_infeed_loop_on_coordinator = True\n\n if self._sharded_per_core:\n # Per-Core input pipeline deployment.\n # Invoke input pipeline for each core and placed on the corresponding\n # host.\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n enqueue_ops_fn, captured_infeed_queue = (\n generate_per_core_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn, self._inputs_structure_recorder))\n\n if _WRAP_INPUT_FN_INTO_WHILE_LOOP:\n run_infeed_loop_on_coordinator = False\n enqueue_ops.append(\n _wrap_computation_in_while_loop(\n device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n # Infeed_queue_getter must be called after enqueue_ops_fn is called.\n infeed_queues.append(captured_infeed_queue.get())\n\n else:\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n if self._ctx.is_input_per_host_with_iterators():\n enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (\n generate_per_host_v2_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, host_device, host_id))\n else:\n enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (\n generate_per_host_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n 
self._inputs_structure_recorder, self._batch_axis,\n host_device, host_id))\n all_hooks.extend(hooks)\n\n # NOTE(xiejw): We dispatch here based on the return type of the\n # users `input_fn`.\n #\n # 1. If input_fn returns a Dataset instance, we initialize the\n # iterator outside of tf.while_loop, and call the iterator.get_next\n # inside tf.while_loop. This should be always safe.\n #\n # 2. If input_fn returns (features, labels), it is too late to wrap\n # them inside tf.while_loop, as resource initialization cannot be\n # handled in TF control flow properly. In this case, we will use\n # python loop to enqueue the data into TPU system. This may be\n # slow compared to the previous case.\n if is_dataset:\n run_infeed_loop_on_coordinator = False\n wrap_fn = (\n _wrap_computation_in_while_loop\n if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else\n _wrap_computation_in_while_loop_with_stopping_signals)\n enqueue_ops.append(\n wrap_fn(device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n infeed_queues.append(captured_infeed_queue.get())\n # infeed_queue is used to generate dequeue ops. The only thing it uses for\n # dequeue is dtypes and types. So, any one can be used. Here, grab the\n # first one.\n self._infeed_queue = infeed_queues[0]\n return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator", "def enqueue_ops_fn():\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def __call__(self, params):\n logging.info('Running __call__ function...')\n batch_size = self._train_batch_size\n # For MCTS, the number of features for each trajecotry is unknown beforehand\n num_features = None\n\n if self._global_step_value % self._iterations_per_loop == 0:\n logging.info('Update iterator (gs=%d)...', self._global_step_value)\n # Feature/Labels Placeholders\n self.features_ph = {\n 'mcts_features':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_state_space],\n name='mcts_state_ph'),\n 'policy_features':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_state_space],\n name='policy_state_ph'),\n }\n self.labels_ph = {\n 'action_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='action_ph'),\n 'value_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='value_ph'),\n 'return_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='return_ph'),\n 
'old_neg_logprob_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='old_neg'),\n 'mean_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='mean_ph'),\n 'logstd_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='logstd_ph'),\n 'mcts_enable_tensor':\n tf.placeholder(\n tf.bool, shape=[num_features], name='mcts_enable_ph'),\n 'policy_action_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='policy_action_ph'),\n 'policy_value_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_value_ph'),\n 'policy_return_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_return_ph'),\n 'policy_old_neg_logprob_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_old_neg'),\n }\n # Create the dataset\n dataset = tf.data.Dataset.from_tensor_slices(\n (self.features_ph, self.labels_ph))\n dataset = dataset.shuffle(buffer_size=self._max_horizon)\n dataset = dataset.batch(batch_size, drop_remainder=True)\n\n # repeat until the loop is done\n dataset = dataset.repeat()\n if self._use_tpu:\n dataset = dataset.map(functools.partial(self._set_shapes, batch_size))\n dataset = dataset.prefetch(2)\n self._iterator = dataset.make_initializable_iterator()\n return self._iterator.get_next()\n else:\n return self._iterator.get_next()", "def _model_fn(features, labels, mode, config, params):\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if 
(self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. 
This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)", "def run_inference(dataset, model, executor_):\n for batch in dataset:\n results = model.inference(batch)\n for stats in model.worker_pool.imap(get_stats_from_code, zip(results, batch, [executor_]*len(batch))):\n if stats is not None:\n yield stats\n return", "def init_components(\n self, model_fn=None, criterion_fn=None, optimizer_fn=None, scheduler_fn=None\n ):\n model = model_fn()\n model = self.sync_device(model)\n\n # criterion\n criterion = criterion_fn()\n criterion = self.sync_device(criterion)\n\n # optimizer\n optimizer = optimizer_fn()\n optimizer = self.sync_device(optimizer)\n\n model, optimizer = _wrap_into_data_parallel_with_apex(\n model, optimizer, distributed_params=self.apex_kwargs\n )\n\n # scheduler\n scheduler = scheduler_fn()\n scheduler = self.sync_device(scheduler)\n return model, criterion, optimizer, scheduler", "def enqueue_ops_fn():\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return 
per_host_enqueue_ops", "def _build_model(self):\n tf.set_random_seed(self.params.tf_random_seed)\n np.random.seed(4321)\n phase_train = not (self.params.eval or self.params.forward_only)\n\n log_fn('Generating model')\n losses = []\n device_grads = []\n all_logits = []\n all_top_1_ops = []\n all_top_5_ops = []\n enqueue_ops = []\n gpu_compute_stage_ops = []\n gpu_grad_stage_ops = []\n\n with tf.device(self.global_step_device):\n global_step = tf.train.get_or_create_global_step()\n \n # Build the processing and model for the worker.\n (image_producer_ops,\n image_producer_stages) = self._build_image_processing(shift_ratio=0)\n image_producer_ops = tf.group(*image_producer_ops)\n update_ops = None\n staging_delta_ops = []\n\n for device_num in range(len(self.devices)):\n with self.variable_mgr.create_outer_variable_scope(\n device_num), tf.name_scope('tower_%i' % device_num) as name_scope:\n results = self.add_forward_pass_and_gradients(\n phase_train, device_num, device_num,\n image_producer_stages[device_num], gpu_compute_stage_ops,\n gpu_grad_stage_ops)\n if phase_train:\n losses.append(results['loss'])\n device_grads.append(results['gradvars'])\n \n\n if device_num == 0:\n # Retain the Batch Normalization updates operations only from the\n # first tower. These operations update the moving mean and moving\n # variance variables, which are updated (but not used) during\n # training, and used during evaluation. The moving mean and variance\n # approximate the true mean and variance across all images in the\n # dataset. Therefore, in replicated mode, these moving averages would\n # be almost identical for each tower, and so we only update and save\n # the moving averages for one tower. In parameter server mode, all\n # towers share a copy of the variables so we also only need to update\n # and save the moving averages once.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)\n staging_delta_ops = list(self.variable_mgr.staging_delta_ops)\n \n enqueue_ops.append(tf.group(*gpu_compute_stage_ops))\n\n fetches = self._build_fetches(global_step, all_logits, losses, device_grads,\n enqueue_ops, update_ops, all_top_1_ops,\n all_top_5_ops, phase_train)\n return (image_producer_ops, enqueue_ops, fetches)", "def sampling(X_train, y_train, X_test, y_test, sampling_instances, model_instances, func):\n\n metrics = []\n # go through all sampling methods\n for sampling_instance in sampling_instances:\n if sampling_instance is not None:\n print('fitting sampling '+ str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = sampling_instance.fit_resample(X=X_train, y=y_train)\n else:\n print('fitting sampling '+ str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = X_train, y_train\n\n # Go through all models\n for model_instance in model_instances:\n print('fitting model ' + str(model_instances.index(model_instance) + 1) + ' on ' +\n str(len(model_instances)), \" : \", type(model_instance).__name__)\n model_instance.fit(X_train1, y_train1)\n metrics.append(func(y_test, model_instance.predict(X_test)))\n\n models = [type(model).__name__ for model in model_instances]\n methods = [type(sampling).__name__ for sampling in sampling_instances]\n index = [model + '_' + method for model in models for method in methods]\n\n #Dry run of compute metrics with return_index=True to get indexes\n 
columns = func(y_test, y_test, average='weighted', return_index=True)\n metrics = pd.DataFrame(metrics, columns=columns, index=index)\n\n return metrics", "def translation_rule_cpu(func):\n # functions to call before running the translation rule\n setup_funcs = (\n functools.partial(ensure_platform_flush, \"cpu\"),\n ensure_omnistaging,\n )\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n for f in setup_funcs:\n f()\n return func(*args, **kwargs)\n\n return wrapped", "def apply(self, fn: Callable[[nn.Module], None]) -> \"FullyShardedDataParallel\":\n uninitialized = self._is_root is None\n self._assert_state(TrainingState_.IDLE)\n with self._summon_full_params(recurse=False, writeback=True):\n ret = super().apply(fn)\n\n # Reset lazy init that might be called by _summon_full_params, since\n # it could have set is_root incorrectly for non-root FSDP instances.\n if uninitialized and self._is_root:\n for module in self.fsdp_modules(self):\n module._reset_lazy_init()\n\n return ret", "def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,\n inputs_structure_recorder):\n captured_infeed_queue = _CapturedObject()\n\n def enqueue_ops_fn():\n \"\"\"A fn returns enqueue_ops.\"\"\"\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue" ]
[ "0.66594964", "0.61444473", "0.5915304", "0.5768129", "0.57181996", "0.56924707", "0.55896854", "0.55495274", "0.5511398", "0.5488238", "0.5486051", "0.5469015", "0.544404", "0.54410255", "0.54396427", "0.5391762", "0.53675765", "0.53656566", "0.5349069", "0.53173107", "0.5289184", "0.5286326", "0.52813745", "0.5257258", "0.5210004", "0.51570165", "0.51488954", "0.51194197", "0.5106247", "0.5105777" ]
0.63976693
1
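The record above pairs a docstring-style query with one positive document and a list of scored negatives, and its metadata marks the (query, document, negatives) fields as a triplet objective. The following minimal Python sketch shows one way such a row could be unpacked into anchor/positive/negative triplets for contrastive training; the dict layout, the helper name row_to_triplets, and the max_negatives cutoff are assumptions made for illustration and are not part of the dataset itself.

from typing import Dict, List, Tuple

def row_to_triplets(row: Dict, max_negatives: int = 5) -> List[Tuple[str, str, str]]:
    """Expand one (query, document, negatives) row into (anchor, positive, negative) triplets."""
    anchor = row["query"]
    positive = row["document"]
    # Sort negatives hardest-first, assuming a higher negative_score means the
    # negative looks more similar to the query (an assumption, not stated in the dump).
    scored = sorted(
        zip(row["negatives"], (float(s) for s in row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [(anchor, positive, neg) for neg, _ in scored[:max_negatives]]

# Toy row shaped like the records in this dump (contents abbreviated).
toy_row = {
    "query": "Executes `model_fn_wrapper` multiple times on all TPU shards.",
    "document": "def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): ...",
    "negatives": ["def _eval_on_tpu_system(ctx, ...): ...", "def run_fn(fn_args): ..."],
    "negative_scores": ["0.66594964", "0.61444473"],
}
for anchor, positive, negative in row_to_triplets(toy_row, max_negatives=2):
    print(len(anchor), len(positive), len(negative))

The dump does not say how the negative_scores were produced (for example by a cross-encoder or a lexical ranker), so the sketch treats them only as a relative hardness ranking rather than calibrated probabilities.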
Executes `model_fn_wrapper` multiple times on all TPU shards.
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  iterations_per_loop_var = _create_or_get_iterations_per_loop()

  single_tpu_train_step, host_call, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))

  def multi_tpu_train_steps_on_single_shard():
    return training_loop.repeat(
        iterations_per_loop_var,
        single_tpu_train_step, [_INITIAL_LOSS])

  (loss,) = tpu.shard(
      multi_tpu_train_steps_on_single_shard,
      inputs=[],
      num_shards=ctx.num_replicas,
      outputs_from_all_shards=False,
      device_assignment=ctx.device_assignment)

  scaffold = _get_scaffold(captured_scaffold_fn)
  return loss, host_call, scaffold
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n single_tpu_eval_step, host_calls, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))\n\n def multi_tpu_eval_steps_on_single_shard():\n return training_loop.repeat(\n iterations_per_loop_var,\n single_tpu_eval_step, [_ZERO_LOSS])\n\n (loss,) = tpu.shard(\n multi_tpu_eval_steps_on_single_shard,\n inputs=[],\n num_shards=ctx.num_replicas,\n outputs_from_all_shards=False,\n device_assignment=ctx.device_assignment)\n\n scaffold = _get_scaffold(captured_scaffold_fn)\n return loss, host_calls, scaffold", "def _TpuFunction():\n self._metrics_mgr.ResetState()\n\n for _ in tf.range(self._steps_per_loop):\n batch = _GetShardedBatch()\n # Note: running the enqueue in strategy.run() could potentially cause\n # deadlock and cause the job to hang. Here we run it outside.\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Enqueue(batch)\n strategy.run(_Step, args=(batch,))\n\n return self._metrics_mgr.FinalizeMetricsWithStructure(\n self._metrics_dict_structure\n )", "def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n num_cores = ctx.num_cores\n\n single_tpu_predict_step, host_calls, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))\n\n def multi_tpu_predict_steps_on_single_shard():\n\n def cond(scalar_stopping_signal):\n return math_ops.logical_not(\n _StopSignals.should_stop(scalar_stopping_signal))\n\n inputs = [_StopSignals.NON_STOPPING_SIGNAL]\n outputs = training_loop.while_loop(\n cond, single_tpu_predict_step, inputs=inputs, name=b'loop')\n return outputs\n\n (dummy_predict_op,) = tpu.shard(\n multi_tpu_predict_steps_on_single_shard,\n inputs=[],\n num_shards=num_cores,\n outputs_from_all_shards=False)\n\n scaffold = _get_scaffold(captured_scaffold_fn)\n return dummy_predict_op, host_calls, scaffold", "def run_all_tests():\n model_configs = (model_handler.ModelConfig(\n saved_model_dir=platform_test.test_src_dir_path(\n \"python/compiler/tensorrt/model_tests/sample_model\"),\n default_batch_size=128),)\n if FLAGS.use_tf2:\n model_handler_cls = model_handler.ModelHandlerV2\n trt_model_handeler_cls = model_handler.TrtModelHandlerV2\n default_trt_convert_params = DEFAUL_TRT_CONVERT_PARAMS._replace(\n is_dynamic_op=True)\n else:\n model_handler_cls = model_handler.ModelHandlerV1\n trt_model_handeler_cls = model_handler.TrtModelHandlerV1\n default_trt_convert_params = DEFAUL_TRT_CONVERT_PARAMS._replace(\n is_dynamic_op=False)\n for model_config in model_configs:\n trt_convert_params = default_trt_convert_params._replace(\n max_batch_size=model_config.default_batch_size)\n base_model = model_handler_cls(model_config)\n random_inputs = base_model.generate_random_inputs()\n base_model_result = base_model.run(random_inputs)\n trt_fp32_model_result = trt_model_handeler_cls(\n model_config=model_config,\n trt_convert_params=trt_convert_params._replace(\n precision_mode=trt.TrtPrecisionMode.FP32)).run(random_inputs)\n trt_fp16_model_result = trt_model_handeler_cls(\n model_config=model_config,\n trt_convert_params=trt_convert_params._replace(\n precision_mode=trt.TrtPrecisionMode.FP16)).run(random_inputs)\n\n logging.info(\"Base model latency: %f ms\",\n _get_mean_latency(base_model_result))\n logging.info(\"TensorRT FP32 model latency: %f ms\",\n _get_mean_latency(trt_fp32_model_result))\n logging.info(\"TensorRT FP16 model latency: %f ms\",\n _get_mean_latency(trt_fp16_model_result))", 
"def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n constants.TRAIN_BATCH_SIZE,\n is_train=True\n )\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n constants.EVAL_BATCH_SIZE,\n is_train=False\n )\n\n # # check for availabe tpu and gpu units\n # try:\n # tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n # tf.config.experimental_connect_to_cluster(tpu)\n # tf.tpu.experimental.initialize_tpu_system(tpu)\n # strategy = tf.distribute.experimental.TPUStrategy(tpu)\n # except ValueError:\n # strategy = tf.distribute.MirroredStrategy()\n\n # with strategy.scope():\n model = get_model(fn_args)\n\n try:\n log_dir = fn_args.model_run_dir\n except KeyError:\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), \"logs\")\n\n absl.logging.info('Tensorboard logging to {}'.format(log_dir))\n\n callbacks = [\n # tf.keras.callbacks.ModelCheckpoint(\"DeepLabV3plus.ckpt\", verbose=1, save_weights_only=True, save_best_only=True),\n tf.keras.callbacks.ReduceLROnPlateau(monitor=\"iou_score\", factor=0.2, patience=6, verbose=1, mode=\"max\"),\n tf.keras.callbacks.EarlyStopping(monitor=\"iou_score\", patience=16, mode=\"max\", verbose=1, restore_best_weights=True),\n tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq=\"batch\")\n ]\n\n absl.logging.info('Start training the top classifier')\n \n model.fit(\n train_dataset,\n epochs=constants.EPOCHS,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=callbacks\n )\n\n signatures = {\n 'serving_default':\n _get_serve_image_fn(model).get_concrete_function(\n tf.TensorSpec(\n shape=[None, constants.HEIGHT, constants.WIDTH, 3],\n dtype=tf.float32,\n name=_transformed_name(constants.IMAGE_KEY)\n )\n )\n }\n\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def __call__(self, fn, *args, **kwargs):\n # construct lists of args and kwargs for each function\n if args:\n my_args = _transpose_list_of_lists(\n [_maybe_repeat(arg, self.n) for arg in args])\n else:\n my_args = [[] for _ in range(self.n)]\n my_kwargs = [{} for _ in range(self.n)]\n for k, v in six.iteritems(kwargs):\n vals = _maybe_repeat(v, self.n)\n for i in range(self.n):\n my_kwargs[i][k] = vals[i]\n\n # construct lists of functions\n fns = _maybe_repeat(fn, self.n)\n\n # apply fns\n outputs = []\n cache = {}\n load = dict([(d, 0) for d in self._devices])\n for device_id, device in enumerate(self._devices):\n\n def daisy_chain_getter(getter, name, *args, **kwargs):\n \"\"\"Get a variable and cache in a daisy chain.\"\"\"\n device_var_key = (device, name)\n if device_var_key in cache:\n # if we have the variable on the correct device, return it.\n return cache[device_var_key]\n if name in cache:\n # if we have it on a different device, copy it from the last device\n v = tf.identity(cache[name])\n else:\n var = getter(name, *args, **kwargs)\n v = tf.identity(var._ref()) # pylint: disable=protected-access\n # update the cache\n cache[name] = v\n cache[device_var_key] = v\n return v\n\n def balanced_device_setter(op):\n \"\"\"Balance variables to all devices.\"\"\"\n if op.type in {'Variable', 'VariableV2', 'VarHandleOp'}:\n # return self._sync_device\n min_load = min(load.values())\n min_load_devices = [d for d in load if load[d] == min_load]\n chosen_device = random.choice(min_load_devices)\n load[chosen_device] += 
op.outputs[0].get_shape().num_elements()\n return chosen_device\n return device\n\n def identity_device_setter(op):\n return device\n\n if self._mode == ModeKeys.TRAIN:\n custom_getter = daisy_chain_getter\n # device_setter = balanced_device_setter\n device_setter = device\n else:\n custom_getter = None\n device_setter = device\n\n # with tf.name_scope(\"parallel_{}\".format(device_id)):\n with tf.variable_scope(\n tf.get_variable_scope(),\n reuse=True if device_id > 0 or self._reuse else None,\n custom_getter=custom_getter):\n with tf.device(device_setter):\n outputs.append(fns[device_id](*my_args[device_id], **my_kwargs[device_id]))\n\n if isinstance(outputs[0], tuple):\n outputs = list(zip(*outputs))\n outputs = tuple([list(o) for o in outputs])\n return outputs", "def train_wrapper(model):\n if FLAGS.pretrained_model:\n model.load(FLAGS.pretrained_model)\n # load data\n train_input_handle, test_input_handle = datasets_factory.data_provider(\n FLAGS.dataset_name,\n FLAGS.train_data_paths,\n FLAGS.valid_data_paths,\n FLAGS.batch_size * FLAGS.n_gpu,\n FLAGS.img_width,\n seq_length=FLAGS.total_length,\n is_training=True)\n\n eta = FLAGS.sampling_start_value\n\n for itr in range(1, FLAGS.max_iterations + 1):\n if train_input_handle.no_batch_left():\n train_input_handle.begin(do_shuffle=True)\n ims = train_input_handle.get_batch()\n if FLAGS.dataset_name == 'penn':\n ims = ims['frame']\n ims = preprocess.reshape_patch(ims, FLAGS.patch_size)\n\n eta, real_input_flag = schedule_sampling(eta, itr)\n\n trainer.train(model, ims, real_input_flag, FLAGS, itr)\n\n if itr % FLAGS.snapshot_interval == 0:\n model.save(itr)\n\n if itr % FLAGS.test_interval == 0:\n trainer.test(model, test_input_handle, FLAGS, itr)\n\n train_input_handle.next()", "def _GetHostTrainLoop(\n self, strategy: tf.distribute.TPUStrategy\n ) -> Callable[..., Any]:\n replicas_per_host = strategy.extended.num_replicas_per_host\n\n def Split(batch, replicas_per_host, axis=0):\n \"\"\"Splits a NestedMap into replicas_per_host pieces.\"\"\"\n def _SplitFn(t):\n return tf.sparse.split if isinstance(t, tf.SparseTensor) else tf.split\n\n split = batch.Transform(lambda t: _SplitFn(t)(t, replicas_per_host, axis))\n return [\n nest.map_structure_up_to(batch, lambda t: t[i], split) # pylint: disable=cell-var-from-loop\n for i in range(replicas_per_host)\n ]\n\n def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n \"\"\"Fetch and shard one batch per attached device.\"\"\"\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )\n\n def _Step(batch: py_utils.NestedMap):\n \"\"\"A single forward/backward step.\n\n Processes the given input batch and updates the distributed metrics\n accumulator. 
We use FProp (instead of FPropDefaultTheta) and\n _BPropForVariables (instead of BProp) in order to permit the tf.distribute\n library to handle threading values across devices.\n\n Args:\n batch: NestedMap of input batch data.\n \"\"\"\n with tf.name_scope('tpu_train'):\n with py_utils.GradientTape(persistent=True):\n batch.Update(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Dequeue(batch)\n )\n metrics_dict, _ = self.task.FPropDefaultTheta(batch)\n # py_utils.ComputeGradientsSimple() needs to access the tape, so BProp\n # needs to be within the GradientTape context.\n self.task.BProp()\n\n self._metrics_dict_structure = metrics_dict\n self._metrics_mgr.AccumulateStepMetrics(metrics_dict)\n\n @tf.function\n def _TpuFunction():\n \"\"\"Runs several training steps and returns a flattened metrics list.\"\"\"\n self._metrics_mgr.ResetState()\n\n for _ in tf.range(self._steps_per_loop):\n batch = _GetShardedBatch()\n # Note: running the enqueue in strategy.run() could potentially cause\n # deadlock and cause the job to hang. Here we run it outside.\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Enqueue(batch)\n strategy.run(_Step, args=(batch,))\n\n return self._metrics_mgr.FinalizeMetricsWithStructure(\n self._metrics_dict_structure\n )\n\n # Trace the train function so it can create the optimizer slot vars and save\n # them at step 0.\n return _TpuFunction.get_concrete_function()", "def run_fn(fn_args: TrainerFnArgs):\n\n # Training set size\n TRAIN_SIZE = get_dataset_size(fn_args.train_files)\n NUM_STEPS = TRAIN_SIZE / BATCH_SIZE # number of steps per epoch for which to train model\n \n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n \n train_dataset = _input_fn(fn_args.train_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n eval_dataset = _input_fn(fn_args.eval_files, LABEL_COLUMN, tf_transform_output, BATCH_SIZE)\n\n model = _build_keras_model(\n tf_transform_output,\n hidden_units=[HIDDEN_UNITS_1, HIDDEN_UNITS_2, HIDDEN_UNITS_3],\n learning_rate=LEARNING_RATE)\n\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), 'logs')\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=log_dir, update_freq='batch')\n \n model.fit(\n train_dataset,\n epochs=NUM_EPOCHS, \n steps_per_epoch=NUM_STEPS,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default':\n _get_serve_tf_examples_fn(model,\n LABEL_COLUMN,\n tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n print(f\"Parameters {fn_args}\")\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n batch_size=fn_args.train_batches)\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n batch_size=fn_args.eval_batches)\n\n # mirrored_strategy = tf.distribute.MirroredStrategy()\n # with mirrored_strategy.scope():\n model = encoder_decoder_model.build_keras_model(\n timesteps=fn_args.timesteps,\n number_features=fn_args.number_features,\n outer_units=fn_args.outer_units,\n inner_units=fn_args.inner_units)\n\n steps_per_epoch = fn_args.training_example_count / fn_args.train_batches\n\n tensorboard_callback = tf.keras.callbacks.TensorBoard()\n\n model.fit(\n train_dataset,\n 
epochs=int(fn_args.train_steps / steps_per_epoch),\n steps_per_epoch=steps_per_epoch,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=[tensorboard_callback])\n\n signatures = {\n 'serving_default': _get_serve_tf_examples_fn(\n model, tf_transform_output).get_concrete_function(\n tf.TensorSpec(\n shape=[None],\n dtype=tf.string,\n name='examples')),\n }\n\n model.save(\n fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def _model_compute_all(self, inputs):\n\n return self.model.compute_all(inputs)", "def _augment_model_fn(self, model_fn, batch_axis):\n\n def _model_fn(features, labels, mode, config, params):\n \"\"\"A Estimator `model_fn` for TPUEstimator.\"\"\"\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if (self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n 
training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. 
They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)\n\n return _model_fn", "def train_models(self):\n\n #keep track on the number of iterations (needed to scale lambda)\n nr_iteration = 0\n \n for epoch in range(self.epochs):\n start = time.time()\n print()\n print(epoch + 1)\n print()\n for step, batch in enumerate(self.training_data):\n X_batch = normalize_images(tf.cast(batch[0], 'float32'))\n Y_batch = batch[1]\n Z_batch = self.ae_model.encode(X_batch)\n \n self.train_step_disc(Z_batch, Y_batch)\n # Call only one tf.function when tracing.\n #ADD LAMBDA SCHEDULE ACCORDING TO OUR EXPERIMENTS AND EPOCH LENGTH\n self.scale_lambda(self.lambda_e, nr_iteration)\n self.train_step_ae(X_batch, Y_batch, Z_batch)\n\n nr_iteration += 1\n end = time.time()\n print(\"Epoch \" + str(epoch + 1) + \" takes \" + str(end - start))", "def __call__(self, *args, **kwargs):\n\n def replica_local_fn(*args, **kwargs):\n \"\"\"Updates the state of the metric in a replica-local context.\"\"\"\n if any(\n isinstance(arg, keras_tensor.KerasTensor)\n for arg in nest.flatten((args, kwargs))):\n update_op = None\n else:\n update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable\n update_ops = []\n if update_op is not None:\n update_ops.append(update_op)\n with ops.control_dependencies(update_ops):\n result_t = self.result() # pylint: disable=not-callable\n\n # We are adding the metric object as metadata on the result tensor.\n # This is required when we want to use a metric with `add_metric` API on\n # a Model/Layer in graph mode. 
This metric instance will later be used\n # to reset variable state after each epoch of training.\n # Example:\n # model = Model()\n # mean = Mean()\n # model.add_metric(mean(values), name='mean')\n result_t._metric_obj = self # pylint: disable=protected-access\n return result_t\n\n from tensorflow.python.keras.distribute import distributed_training_utils # pylint:disable=g-import-not-at-top\n return distributed_training_utils.call_replica_local_fn(\n replica_local_fn, *args, **kwargs)", "def __call__(self, fn, *args, **kwargs):\n # Construct lists or args and kwargs for each function.\n if args:\n my_args = transpose_list_of_lists(\n [self._maybe_repeat(arg) for arg in args])\n else:\n my_args = [[] for _ in range(self.n)]\n my_kwargs = [{} for _ in range(self.n)]\n for k, v in six.iteritems(kwargs):\n vals = self._maybe_repeat(v)\n for i in range(self.n):\n my_kwargs[i][k] = vals[i]\n\n # Construct lists of functions.\n fns = self._maybe_repeat(fn)\n\n # Now make the parallel call.\n outputs = []\n cache = {}\n tensor_to_var = {}\n for i in range(self.n):\n\n def daisy_chain_getter(getter, name, *args, **kwargs):\n \"\"\"Get a variable and cache in a daisy chain.\"\"\"\n device_var_key = (self._devices[i], name)\n if device_var_key in cache:\n # if we have the variable on the correct device, return it.\n return cache[device_var_key]\n if name in cache:\n # if we have it on a different device, copy it from the last device\n last_device_v = cache[name]\n var = tensor_to_var[last_device_v]\n v = tf.identity(last_device_v)\n else:\n var = getter(name, *args, **kwargs)\n # v = tf.identity(var._ref()) # pylint: disable=protected-access\n v = var.read_value()\n\n # keep track of the original variable\n tensor_to_var[v] = var\n _add_variable_proxy_methods(tensor_to_var[v], v)\n # update the cache\n cache[name] = v\n cache[device_var_key] = v\n return v\n\n # Variable scope will not reset caching_device on reused variables,\n # so we make a custom getter that uses identity to cache the variable.\n # pylint: disable=cell-var-from-loop\n def caching_getter(getter, name, *args, **kwargs):\n \"\"\"Cache variables on device.\"\"\"\n key = (self._caching_devices[i], name)\n if key in cache:\n return cache[key]\n\n v = getter(name, *args, **kwargs)\n with tf.device(self._caching_devices[i]):\n # ret = tf.identity(v._ref()) # pylint: disable=protected-access\n ret = v.read_value()\n _add_variable_proxy_methods(v, ret)\n cache[key] = ret\n return ret\n\n if self._daisy_chain_variables:\n custom_getter = daisy_chain_getter\n elif self._caching_devices[i]:\n custom_getter = caching_getter\n else:\n custom_getter = None\n # pylint: enable=cell-var-from-loop\n with tf.name_scope(\"parallel_%d\" % i):\n with tf.variable_scope(\n tf.get_variable_scope() if self._reuse else \"parallel_%d\" % i,\n reuse=True if i > 0 and self._reuse else None,\n caching_device=self._caching_devices[i],\n custom_getter=custom_getter):\n # TODO(noam, epot, avaswani)\n # Allows for passing no device in case you want to default to the\n # existing device. 
This is needed when we put all experts on a single\n # device, for example in local_moe.\n if self._devices[i] != DEFAULT_DEV_STRING:\n with tf.device(self._devices[i]):\n outputs.append(fns[i](*my_args[i], **my_kwargs[i]))\n else:\n outputs.append(fns[i](*my_args[i], **my_kwargs[i]))\n if isinstance(outputs[0], tuple):\n outputs = list(zip(*outputs))\n outputs = tuple([list(o) for o in outputs])\n return outputs", "def test_custom_distributed_multimodel_training():\n testcol = testcol_cust_dist_multi\n conn = pm.MongoClient(host=testhost,\n port=testport)\n\n # set up the parameters\n params = {}\n\n model1_params = {'func': model.mnist_tfutils,\n 'devices': [0, 1]}\n model2_params = {'func': model.mnist_tfutils,\n 'devices': [2, 3]}\n\n save_params = {'host': testhost,\n 'port': testport,\n 'dbname': testdbname,\n 'collname': testcol,\n 'exp_id': 'training0',\n 'save_valid_freq': 20,\n 'save_filters_freq': 200,\n 'cache_filters_freq': 100}\n\n train1_params = {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'train',\n 'n_threads': 4},\n 'train_loop': {'func': custom_train_loop},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'num_steps': 500}\n\n train2_params = {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'train',\n 'n_threads': 4},\n 'train_loop': {'func': custom_train_loop},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'num_steps': 500}\n\n loss_params = {'targets': ['labels'],\n 'agg_func': tf.reduce_mean,\n 'loss_per_case_func': tf.nn.sparse_softmax_cross_entropy_with_logits}\n\n learning_rate_params = {'learning_rate': 0.05,\n 'decay_steps': num_batches_per_epoch,\n 'decay_rate': 0.95,\n 'staircase': True}\n\n validation_params = {'valid0': {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'test',\n 'n_threads': 4},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'num_steps': 10,\n 'agg_func': utils.mean_dict}}\n optimizer_params = {'func': optimizer.ClipOptimizer,\n 'optimizer_class': tf.train.MomentumOptimizer,\n 'clip': True,\n 'optimizer_kwargs':{'momentum': 0.9}}\n\n load_params = {'do_restore': True}\n\n model_params = [model1_params, model2_params]\n train_params = [train1_params, train2_params]\n num_models = len(model_params)\n\n params['skip_check'] = True\n params['load_params'] = load_params\n params['loss_params'] = loss_params\n params['save_params'] = save_params\n params['model_params'] = model_params\n params['train_params'] = train_params\n params['optimizer_params'] = optimizer_params\n params['validation_params'] = validation_params\n params['learning_rate_params'] = learning_rate_params\n\n # actually run the training\n base.train_from_params(**params)", "def with_cpu(ops, model):\n ...", "def input_fn(self, params):\n with tf.variable_scope('data_provider'):\n if self.mode == enums.ModelMode.INFERENCE:\n images = tf.placeholder(tf.float32, [\n None, self.preprocessor.preprocessing_options.image_size,\n self.preprocessor.preprocessing_options.image_size, 3\n ])\n return tf_estimator.export.TensorServingInputReceiver(\n features=images, receiver_tensors=images)\n\n # Retrieves the batch size for the current shard. The # of shards is\n # computed according to the input pipeline deployment. 
See\n # tf.contrib.tpu.RunConfig for details.\n batch_size = params['batch_size']\n\n if 'context' in params:\n current_host = params['context'].current_input_fn_deployment()[1]\n num_hosts = params['context'].num_hosts\n num_cores = params['context'].num_replicas\n else:\n current_host = 0\n num_hosts = 1\n num_cores = 1\n\n dataset = self.make_source_dataset(current_host, num_hosts)\n\n if (self.mode == enums.ModelMode.TRAIN and self.max_samples and\n self.max_samples > 0):\n dataset = dataset.take(self.max_samples)\n\n dataset = dataset.map(self.dataset_parser, num_parallel_calls=num_cores)\n if self.label_noise_prob > 0. and self.mode == enums.ModelMode.TRAIN:\n dataset = dataset.map(\n self._label_noise_fn, num_parallel_calls=num_cores)\n\n if self.cache:\n dataset = dataset.cache()\n if self.mode == enums.ModelMode.TRAIN:\n dataset = dataset.shuffle(self.shuffle_buffer).repeat()\n\n # Use the fused map-and-batch operation.\n #\n # For XLA, we must used fixed shapes. Because we repeat the source\n # training dataset indefinitely, we can use `drop_remainder=True` to get\n # fixed-size batches without dropping any training examples.\n #\n # When evaluating, `drop_remainder=True` prevents accidentally evaluating\n # the same image twice by dropping the final batch if it is less than a\n # full batch size. As long as this validation is done with consistent\n # batch size, exactly the same images will be used.\n dataset = dataset.apply(\n tf.data.experimental.map_and_batch(\n self._preprocess_image,\n batch_size=batch_size,\n num_parallel_batches=num_cores,\n drop_remainder=True))\n\n # Assign static batch size dimension\n dataset = dataset.map(\n functools.partial(self._set_static_batch_dim, batch_size))\n\n # Prefetch overlaps in-feed with training\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n return dataset", "def _invoke_input_fn_and_record_structure(self):\n enqueue_ops = []\n infeed_queues = []\n all_hooks = []\n num_hosts = self._ctx.num_hosts\n tpu_host_placement_fn = self._ctx.tpu_host_placement_function\n\n run_infeed_loop_on_coordinator = True\n\n if self._sharded_per_core:\n # Per-Core input pipeline deployment.\n # Invoke input pipeline for each core and placed on the corresponding\n # host.\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n enqueue_ops_fn, captured_infeed_queue = (\n generate_per_core_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn, self._inputs_structure_recorder))\n\n if _WRAP_INPUT_FN_INTO_WHILE_LOOP:\n run_infeed_loop_on_coordinator = False\n enqueue_ops.append(\n _wrap_computation_in_while_loop(\n device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n # Infeed_queue_getter must be called after enqueue_ops_fn is called.\n infeed_queues.append(captured_infeed_queue.get())\n\n else:\n for host_id in range(num_hosts):\n host_device = tpu_host_placement_fn(host_id=host_id)\n with ops.device(host_device):\n with ops.name_scope('input_pipeline_task%d' % (host_id)):\n if self._ctx.is_input_per_host_with_iterators():\n enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (\n generate_per_host_v2_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n self._inputs_structure_recorder, host_device, host_id))\n else:\n enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (\n generate_per_host_enqueue_ops_fn_for_host(\n self._ctx, self._input_fn,\n 
self._inputs_structure_recorder, self._batch_axis,\n host_device, host_id))\n all_hooks.extend(hooks)\n\n # NOTE(xiejw): We dispatch here based on the return type of the\n # users `input_fn`.\n #\n # 1. If input_fn returns a Dataset instance, we initialize the\n # iterator outside of tf.while_loop, and call the iterator.get_next\n # inside tf.while_loop. This should be always safe.\n #\n # 2. If input_fn returns (features, labels), it is too late to wrap\n # them inside tf.while_loop, as resource initialization cannot be\n # handled in TF control flow properly. In this case, we will use\n # python loop to enqueue the data into TPU system. This may be\n # slow compared to the previous case.\n if is_dataset:\n run_infeed_loop_on_coordinator = False\n wrap_fn = (\n _wrap_computation_in_while_loop\n if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else\n _wrap_computation_in_while_loop_with_stopping_signals)\n enqueue_ops.append(\n wrap_fn(device=host_device, op_fn=enqueue_ops_fn))\n else:\n enqueue_ops.append(enqueue_ops_fn())\n infeed_queues.append(captured_infeed_queue.get())\n # infeed_queue is used to generate dequeue ops. The only thing it uses for\n # dequeue is dtypes and types. So, any one can be used. Here, grab the\n # first one.\n self._infeed_queue = infeed_queues[0]\n return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator", "def enqueue_ops_fn():\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def __call__(self, params):\n logging.info('Running __call__ function...')\n batch_size = self._train_batch_size\n # For MCTS, the number of features for each trajecotry is unknown beforehand\n num_features = None\n\n if self._global_step_value % self._iterations_per_loop == 0:\n logging.info('Update iterator (gs=%d)...', self._global_step_value)\n # Feature/Labels Placeholders\n self.features_ph = {\n 'mcts_features':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_state_space],\n name='mcts_state_ph'),\n 'policy_features':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_state_space],\n name='policy_state_ph'),\n }\n self.labels_ph = {\n 'action_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='action_ph'),\n 'value_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='value_ph'),\n 'return_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='return_ph'),\n 
'old_neg_logprob_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='old_neg'),\n 'mean_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='mean_ph'),\n 'logstd_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='logstd_ph'),\n 'mcts_enable_tensor':\n tf.placeholder(\n tf.bool, shape=[num_features], name='mcts_enable_ph'),\n 'policy_action_tensor':\n tf.placeholder(\n tf.float32,\n shape=[num_features, self._env_action_space],\n name='policy_action_ph'),\n 'policy_value_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_value_ph'),\n 'policy_return_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_return_ph'),\n 'policy_old_neg_logprob_tensor':\n tf.placeholder(\n tf.float32, shape=[num_features], name='policy_old_neg'),\n }\n # Create the dataset\n dataset = tf.data.Dataset.from_tensor_slices(\n (self.features_ph, self.labels_ph))\n dataset = dataset.shuffle(buffer_size=self._max_horizon)\n dataset = dataset.batch(batch_size, drop_remainder=True)\n\n # repeat until the loop is done\n dataset = dataset.repeat()\n if self._use_tpu:\n dataset = dataset.map(functools.partial(self._set_shapes, batch_size))\n dataset = dataset.prefetch(2)\n self._iterator = dataset.make_initializable_iterator()\n return self._iterator.get_next()\n else:\n return self._iterator.get_next()", "def _model_fn(features, labels, mode, config, params):\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, input_fn is never passed to Estimator. So, by\n # checking the self._is_input_fn_invoked bit, we can know, given the\n # mode == PREDICT, it is the .predict API, not export_savedmodel API.\n if self._is_input_fn_invoked:\n is_export_mode = False\n else:\n is_export_mode = True\n\n # Clear the bit.\n self._is_input_fn_invoked = None\n\n if ctx.is_running_on_cpu(is_export_mode=is_export_mode):\n logging.info('Running %s on CPU', mode)\n return model_fn_wrapper.call_without_tpu(\n features, labels, is_export_mode=is_export_mode)\n\n assert labels is None, '`labels` passed to `model_fn` must be `None`.'\n # TPUEstimator._call_input_fn passes `input_fn` as features to here.\n assert callable(features), '`input_fn` is not callable.'\n input_fn = features\n\n input_holders = _InputPipeline(input_fn, batch_axis, ctx)\n enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (\n input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())\n\n graph = ops.get_default_graph()\n for enqueue_op in enqueue_ops:\n if isinstance(enqueue_op, list):\n graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)\n else:\n graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)\n\n if mode == model_fn_lib.ModeKeys.TRAIN:\n loss, host_call, scaffold = (\n _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))\n host_ops = host_call.create_tpu_hostcall()\n if host_ops is None:\n host_ops = []\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ExamplesPerSecondHook(ctx.global_batch_size,\n output_dir=self.model_dir),\n InstallSignalHandlerHook(),\n training.LoggingTensorHook(\n {\n 'loss': array_ops.identity(loss),\n 'step': training.get_global_step()\n },\n every_n_secs=30)\n ] + input_hooks\n chief_hooks = []\n if 
(self._config.save_checkpoints_secs or\n self._config.save_checkpoints_steps):\n chief_hooks.append(\n training.CheckpointSaverHook(\n self.model_dir,\n save_secs=self._config.save_checkpoints_secs,\n save_steps=self._config.save_checkpoints_steps,\n steps_per_run=self._config.tpu_config.iterations_per_loop,\n scaffold=scaffold))\n summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)\n with ops.control_dependencies([loss]):\n update_ops = _sync_variables_ops()\n\n # Validate the TPU training graph to catch basic errors\n _validate_tpu_training_graph()\n\n train_op = control_flow_ops.group(*update_ops)\n graph.add_to_collection(_TPU_TRAIN_OP, train_op)\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=loss,\n training_chief_hooks=chief_hooks,\n training_hooks=hooks,\n train_op=train_op,\n scaffold=scaffold)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n total_loss, host_calls, scaffold = _eval_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n mean_loss = math_ops.div(total_loss,\n math_ops.cast(\n iterations_per_loop_var,\n dtype=total_loss.dtype))\n\n # Creates a dummy metric update_op for all metrics. Estimator expects\n # all metrics in eval_metric_ops have update_op and calls them one by\n # one. The real metric update_ops are invoked in a separated thread.\n # So, here give Estimator the dummy op for all metrics.\n with ops.control_dependencies([mean_loss]):\n # After TPU evaluation computation is done (the mean_loss tensor),\n # reads all variables back from TPU and updates the eval step\n # counter properly\n internal_ops_to_run = _sync_variables_ops()\n internal_ops_to_run.append(\n _increase_eval_step_op(iterations_per_loop_var))\n with ops.control_dependencies(internal_ops_to_run):\n dummy_update_op = control_flow_ops.no_op()\n\n host_call_ret = host_calls.create_tpu_hostcall()\n eval_metric_ops = {}\n eval_update_ops = []\n for k, v in host_call_ret['eval_metrics'].items():\n eval_metric_ops[k] = (v[0], dummy_update_op)\n eval_update_ops.append(v[1])\n\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n hooks = [\n TPUInfeedOutfeedSessionHook(\n ctx,\n enqueue_ops,\n eval_update_ops + host_ops,\n run_infeed_loop_on_coordinator=(\n run_infeed_loop_on_coordinator)),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n loss=mean_loss,\n evaluation_hooks=hooks,\n eval_metric_ops=eval_metric_ops,\n scaffold=scaffold)\n\n # Predict\n assert mode == model_fn_lib.ModeKeys.PREDICT\n\n dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(\n ctx, model_fn_wrapper, dequeue_fn)\n with ops.control_dependencies([dummy_predict_op]):\n internal_ops_to_run = _sync_variables_ops()\n with ops.control_dependencies(internal_ops_to_run):\n dummy_predict_op = control_flow_ops.no_op()\n\n # In train and evaluation, the main TPU program is passed to monitored\n # training session to run. Infeed enqueue and outfeed dequeue are\n # executed in side threads. This is not the configuration for\n # prediction mode.\n #\n # For prediction, the Estimator executes the EstimatorSpec.predictions\n # directly and yield the element (via generator) to call site. So, the\n # outfeed based prediction must be passed to MonitoredSession directly.\n # Other parts of the TPU execution are organized as follows.\n #\n # 1. All outfeed based Tensors must be grouped with predictions Tensors\n # to form a single invocation. 
This avoid the issue we might trigger\n # multiple outfeeds incorrectly. To achieve this, `host_call` is\n # placed in control_dependencies of `stopping_signals`, and\n # `stopping_signals` is passed into _StoppingPredictHook, which sets\n # the `stopping_signals` as SessionRunArgs. MonitoredSession merges\n # all SessionRunArgs with the fetch in session.run together.\n #\n # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)\n # are grouped together. They will be launched once and only once in\n # side threads and they quit naturally according to the SAME stopping\n # condition.\n enqueue_ops.append(dummy_predict_op)\n\n host_call_ret = host_calls.create_tpu_hostcall()\n if 'host_call' not in host_call_ret:\n host_ops = []\n else:\n host_ops = host_call_ret['host_call']\n\n predictions = host_call_ret['predictions']\n _verify_cross_hosts_transfer_size(\n predictions, message=(\n 'The estimated size for TPUEstimatorSpec.predictions is too '\n 'large.'))\n signals = host_call_ret['signals']\n\n with ops.control_dependencies(host_ops):\n host_ops = [] # Empty, we do do not need it anymore.\n scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(\n signals)\n predictions = _PaddingSignals.slice_tensor_or_dict(\n predictions, signals)\n\n hooks = [\n _StoppingPredictHook(scalar_stopping_signal),\n TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,\n host_ops),\n ] + input_hooks\n\n return model_fn_lib.EstimatorSpec(\n mode,\n prediction_hooks=hooks,\n predictions=predictions,\n scaffold=scaffold)", "def run_inference(dataset, model, executor_):\n for batch in dataset:\n results = model.inference(batch)\n for stats in model.worker_pool.imap(get_stats_from_code, zip(results, batch, [executor_]*len(batch))):\n if stats is not None:\n yield stats\n return", "def init_components(\n self, model_fn=None, criterion_fn=None, optimizer_fn=None, scheduler_fn=None\n ):\n model = model_fn()\n model = self.sync_device(model)\n\n # criterion\n criterion = criterion_fn()\n criterion = self.sync_device(criterion)\n\n # optimizer\n optimizer = optimizer_fn()\n optimizer = self.sync_device(optimizer)\n\n model, optimizer = _wrap_into_data_parallel_with_apex(\n model, optimizer, distributed_params=self.apex_kwargs\n )\n\n # scheduler\n scheduler = scheduler_fn()\n scheduler = self.sync_device(scheduler)\n return model, criterion, optimizer, scheduler", "def enqueue_ops_fn():\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return 
per_host_enqueue_ops", "def _build_model(self):\n tf.set_random_seed(self.params.tf_random_seed)\n np.random.seed(4321)\n phase_train = not (self.params.eval or self.params.forward_only)\n\n log_fn('Generating model')\n losses = []\n device_grads = []\n all_logits = []\n all_top_1_ops = []\n all_top_5_ops = []\n enqueue_ops = []\n gpu_compute_stage_ops = []\n gpu_grad_stage_ops = []\n\n with tf.device(self.global_step_device):\n global_step = tf.train.get_or_create_global_step()\n \n # Build the processing and model for the worker.\n (image_producer_ops,\n image_producer_stages) = self._build_image_processing(shift_ratio=0)\n image_producer_ops = tf.group(*image_producer_ops)\n update_ops = None\n staging_delta_ops = []\n\n for device_num in range(len(self.devices)):\n with self.variable_mgr.create_outer_variable_scope(\n device_num), tf.name_scope('tower_%i' % device_num) as name_scope:\n results = self.add_forward_pass_and_gradients(\n phase_train, device_num, device_num,\n image_producer_stages[device_num], gpu_compute_stage_ops,\n gpu_grad_stage_ops)\n if phase_train:\n losses.append(results['loss'])\n device_grads.append(results['gradvars'])\n \n\n if device_num == 0:\n # Retain the Batch Normalization updates operations only from the\n # first tower. These operations update the moving mean and moving\n # variance variables, which are updated (but not used) during\n # training, and used during evaluation. The moving mean and variance\n # approximate the true mean and variance across all images in the\n # dataset. Therefore, in replicated mode, these moving averages would\n # be almost identical for each tower, and so we only update and save\n # the moving averages for one tower. In parameter server mode, all\n # towers share a copy of the variables so we also only need to update\n # and save the moving averages once.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)\n staging_delta_ops = list(self.variable_mgr.staging_delta_ops)\n \n enqueue_ops.append(tf.group(*gpu_compute_stage_ops))\n\n fetches = self._build_fetches(global_step, all_logits, losses, device_grads,\n enqueue_ops, update_ops, all_top_1_ops,\n all_top_5_ops, phase_train)\n return (image_producer_ops, enqueue_ops, fetches)", "def sampling(X_train, y_train, X_test, y_test, sampling_instances, model_instances, func):\n\n metrics = []\n # go through all sampling methods\n for sampling_instance in sampling_instances:\n if sampling_instance is not None:\n print('fitting sampling '+ str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = sampling_instance.fit_resample(X=X_train, y=y_train)\n else:\n print('fitting sampling '+ str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = X_train, y_train\n\n # Go through all models\n for model_instance in model_instances:\n print('fitting model ' + str(model_instances.index(model_instance) + 1) + ' on ' +\n str(len(model_instances)), \" : \", type(model_instance).__name__)\n model_instance.fit(X_train1, y_train1)\n metrics.append(func(y_test, model_instance.predict(X_test)))\n\n models = [type(model).__name__ for model in model_instances]\n methods = [type(sampling).__name__ for sampling in sampling_instances]\n index = [model + '_' + method for model in models for method in methods]\n\n #Dry run of compute metrics with return_index=True to get indexes\n 
columns = func(y_test, y_test, average='weighted', return_index=True)\n metrics = pd.DataFrame(metrics, columns=columns, index=index)\n\n return metrics", "def translation_rule_cpu(func):\n # functions to call before running the translation rule\n setup_funcs = (\n functools.partial(ensure_platform_flush, \"cpu\"),\n ensure_omnistaging,\n )\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n for f in setup_funcs:\n f()\n return func(*args, **kwargs)\n\n return wrapped", "def apply(self, fn: Callable[[nn.Module], None]) -> \"FullyShardedDataParallel\":\n uninitialized = self._is_root is None\n self._assert_state(TrainingState_.IDLE)\n with self._summon_full_params(recurse=False, writeback=True):\n ret = super().apply(fn)\n\n # Reset lazy init that might be called by _summon_full_params, since\n # it could have set is_root incorrectly for non-root FSDP instances.\n if uninitialized and self._is_root:\n for module in self.fsdp_modules(self):\n module._reset_lazy_init()\n\n return ret", "def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,\n inputs_structure_recorder):\n captured_infeed_queue = _CapturedObject()\n\n def enqueue_ops_fn():\n \"\"\"A fn returns enqueue_ops.\"\"\"\n num_cores_per_host = ctx.num_of_cores_per_host\n per_host_sharded_inputs = []\n for core_ordinal in range(num_cores_per_host):\n with ops.name_scope('ordinal_%d' % (core_ordinal)):\n inputs = _Inputs.from_input_fn(input_fn())\n if inputs.is_dataset:\n raise TypeError(\n '`input_fn` returning `Dataset` is not yet supported in '\n 'per-Core input pipeline deployment yet. Please set '\n 'TPUConfig.per_host_input_for_training to True or return '\n '`features` and `labels` from `input_fn`')\n features, labels = inputs.features_and_labels()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops\n\n return enqueue_ops_fn, captured_infeed_queue" ]
[ "0.6398013", "0.61430126", "0.59155613", "0.5768424", "0.57171917", "0.56894976", "0.5591647", "0.55525845", "0.55103254", "0.54879135", "0.5486981", "0.5467224", "0.5443748", "0.5441698", "0.5436969", "0.5393069", "0.5367924", "0.53674203", "0.53497", "0.531816", "0.52897155", "0.528445", "0.5280554", "0.5256689", "0.52108693", "0.5159293", "0.51478505", "0.5118373", "0.51061493", "0.51059365" ]
0.6660681
0
Wraps the ops generated by `op_fn` in tf.while_loop.
def _wrap_computation_in_while_loop(device, op_fn):

  def computation(i):
    with ops.control_dependencies(op_fn()):
      return i + 1

  iterations_per_loop_var = _create_or_get_iterations_per_loop()
  # By setting parallel_iterations=1, the parallel execution in while_loop is
  # basically turned off.
  with ops.device(device):
    iterations = array_ops.identity(iterations_per_loop_var)
    return control_flow_ops.while_loop(
        lambda i: i < iterations,
        computation, [constant_op.constant(0)],
        parallel_iterations=1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def while_loop_op(op):\n return (control_flow_util.IsLoopSwitch(op) or\n control_flow_util.IsLoopMerge(op) or\n control_flow_util.IsLoopEnter(op) or\n control_flow_util.IsLoopExit(op) or\n TensorTracer.loop_cond_op(op) or\n op.type in ('RefNextIteration', 'NextIteration'))", "def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):\n\n def cond(scalar_stopping_signal):\n return math_ops.logical_not(\n _StopSignals.should_stop(scalar_stopping_signal))\n\n def computation(unused_scalar_stopping_signal):\n return_value = op_fn()\n execute_ops = return_value['ops']\n signals = return_value['signals']\n with ops.control_dependencies(execute_ops):\n return _StopSignals.as_scalar_stopping_signal(signals)\n\n # By setting parallel_iterations=1, the parallel execution in while_loop is\n # basically turned off.\n with ops.device(device):\n return control_flow_ops.while_loop(\n cond,\n computation, [_StopSignals.NON_STOPPING_SIGNAL],\n parallel_iterations=1)", "def _in_while_loop(control_flow_node_map, op_name):\n return op_name in control_flow_node_map and \"LoopCond\" in control_flow_node_map[op_name]", "def loop(func):\n def wrapper(*a, **b):\n while True:\n func(*a, **b)\n return wrapper", "def InfeedTFFunc(self):\n self.task.input.DeviceLoopSetupEager()\n\n def InfeedBody(i):\n self.task.input.CreateTpuEnqueueOps()\n return i + 1\n\n tf.while_loop(\n cond=lambda i: i < self._steps_per_loop,\n body=InfeedBody,\n loop_vars=[tf.constant(0)])", "def InfeedTFFunc(self):\n self._task.input.DeviceLoopSetupEager()\n\n def InfeedBody(i):\n self._task.input.CreateTpuEnqueueOps()\n return i + 1\n\n tf.while_loop(\n cond=lambda i: i < self._steps_per_loop,\n body=InfeedBody,\n loop_vars=[tf.constant(0)])", "def receive_fn(fn: Callable):\n\n global __enveloop_number_of_loops__\n\n __enveloop_number_of_loops__[fn.__name__] = number_of_loops\n\n @functools.wraps(fn)\n def wrapper_fn(*args, **kwargs):\n \"\"\"Function that does the actual wrapping.\n :param args:\n :param kwargs:\n :return: function response\n \"\"\"\n\n if __enveloop_number_of_loops__[fn.__name__] > 0:\n __enveloop_number_of_loops__[fn.__name__] -= 1\n return fn(*args, **kwargs)\n else:\n del __enveloop_number_of_loops__[fn.__name__]\n if callback:\n return callback(*args, **kwargs)\n\n return wrapper_fn", "def infinite_loop(func):\n @wraps(func) # Preserve target's metadata\n def wrapper(*args, **kwargs):\n while True:\n try:\n func(*args, **kwargs)\n except KeyboardInterrupt:\n break\n return wrapper", "def testWhileLoopProblem(self):\n def while_loop_problem():\n x = tf.get_variable(\"x\", shape=[], initializer=tf.ones_initializer())\n\n # Strange way of squaring the variable.\n _, x_squared = tf.while_loop(\n cond=lambda t, _: t < 1,\n body=lambda t, x: (t + 1, x * x),\n loop_vars=(0, x),\n name=\"loop\")\n return x_squared\n\n optimizer = meta.MetaOptimizer(net=dict(\n net=\"CoordinateWiseDeepLSTM\",\n net_options={\"layers\": ()}))\n minimize_ops = optimizer.meta_minimize(while_loop_problem, 3)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n train(sess, minimize_ops, 1, 2)", "def true_fn(thunk):\n\n def result():\n with tf.control_dependencies([thunk()]):\n return tf.no_op()\n\n return result", "def add_while(self, input_name, body_function, cond_function, name=None):\n return self._build_op(\n 'while', [input_name],\n name=name,\n attr={\n 'body_function': body_function,\n 'cond_function': cond_function\n })", "def _convert_control_flow_operator(self, node, inputs, attrs, 
control_flow_node_map):\n node_name_prefix = node.name.rsplit(\"/\", 1)[0]\n plname = find_parent_loop_name(node.name, self._while_loop_name_set)\n if node.op == \"Merge\":\n if _in_while_loop(self._control_flow_node_map, node_name_prefix):\n op = self._licm_construct(plname, node.input[0])\n if node_name_prefix not in self._loops:\n self._loops[node_name_prefix] = Loop(self._mod, plname, self._lvar2expr)\n else:\n if node_name_prefix not in self._branches:\n switch_prefix = node_name_prefix + \"/Switch\"\n merge_idx = self._sorted_cf_node_names.index(node.name)\n for i in range(merge_idx - 1, -1, -1):\n cf_name = self._sorted_cf_node_names[i]\n if cf_name.startswith(switch_prefix):\n self._backtrack_construct(cf_name)\n break\n\n branch = self._branches[node_name_prefix]\n false_br = self._licm_construct(plname, node.input[0])\n true_br = self._licm_construct(plname, node.input[1])\n branch.true_branch = true_br\n branch.false_branch = false_br\n op = branch.if_node()\n if node_name_prefix not in self._while_loop_name_set:\n try:\n cond_val = np.all(\n _infer_value(branch.cond, self._params, self._mod).numpy()\n )\n if cond_val:\n op = branch.true_branch\n else:\n op = branch.false_branch\n except Exception:\n op = branch.if_node()\n elif node.op == \"Exit\":\n loop = self._loops[node_name_prefix]\n\n # Check whether the order of loop variables aligns\n # with loop body. If not, create new loop variable list\n # with correct order.\n if not loop.aligned:\n loop_vars = []\n for i in self._loop_body_order[node_name_prefix]:\n for j, k in enumerate(self._loop_var_order[node_name_prefix]):\n if k == i:\n loop_vars.append(loop.loop_vars[j])\n loop.loop_vars = loop_vars\n loop.aligned = True\n exit_name = node.name.split(\"/\")[-1]\n if \"_\" in exit_name:\n exit_number = int(exit_name[5:])\n else:\n exit_number = 0\n expr = loop.while_loop()\n body_pos = exit_number\n for i, j in enumerate(self._loop_body_order[node_name_prefix]):\n if exit_number == j:\n body_pos = i\n break\n op = _expr.TupleGetItem(expr, body_pos)\n elif node.op == \"Enter\":\n op = self._licm_construct(plname, node.input[0])\n elif node.op == \"LoopCond\":\n op = self._licm_construct(plname, node.input[0])\n self._loops[node_name_prefix].cond = op\n elif node.op == \"Switch\":\n op = self._licm_construct(plname, node.input[0])\n cond = self._licm_construct(plname, node.input[1])\n if _in_while_loop(self._control_flow_node_map, node_name_prefix):\n if node_name_prefix not in self._loop_var_order:\n self._loop_var_order[node_name_prefix] = []\n if node.name.endswith(\"Switch\"):\n self._loop_var_order[node_name_prefix].append(0)\n else:\n self._loop_var_order[node_name_prefix].append(\n int(node.name.split(\"Switch_\")[-1])\n )\n self._loops[node_name_prefix].loop_vars.append(op)\n else:\n if node_name_prefix not in self._branches:\n self._branches[node_name_prefix] = Branch()\n self._branches[node_name_prefix].cond = cond\n elif node.op == \"NextIteration\":\n if node_name_prefix not in self._loop_body_order:\n self._loop_body_order[node_name_prefix] = []\n if node.name.endswith(\"NextIteration\"):\n self._loop_body_order[node_name_prefix].append(0)\n else:\n self._loop_body_order[node_name_prefix].append(\n int(node.name.split(\"NextIteration_\")[-1])\n )\n op = self._licm_construct(plname, node.input[0])\n self._loops[node_name_prefix].body.append(op)\n else:\n raise Exception(f\"Cannot identify control flow operator: {node.op}\")\n\n return op", "def while_do(condition: Callable[[Any], bool], source: ObservableBase) -> 
ObservableBase:\n from ..operators.observable.whiledo import while_do\n return while_do(condition, source)", "def get_gen_loop_fun(param_dict, fun_dict):\n def f(x_pl, hid_pl, count, f_state, eps_z, eps_x):\n return gen_loop(x_pl, hid_pl, count, f_state, eps_z, eps_x, param_dict, fun_dict)\n return f", "def _while_loop(self):\n bind_map = {}\n wl = set_span(tvm.relay.var(\"while_loop\"), self._loop_name)\n sb = tvm.relay.scope_builder.ScopeBuilder()\n\n lv_list = []\n expr_list = []\n extra_vars = []\n\n for i, lv in enumerate(self.loop_vars):\n if self._loop_name not in self._lvar2expr:\n self._lvar2expr[self._loop_name] = {}\n\n # Handle the case when loop var is not properly lifted.\n # This can happen when loop var node name is set accidentally\n # beginning with loop name.\n if lv not in self._lvar2expr[self._loop_name]:\n var_name = f\"{self._loop_name}_loop_var_{i}\"\n var_type = _infer_type(lv, self._mod).checked_type\n loop_var = set_span(tvm.relay.var(var_name, type_annotation=var_type), var_name)\n self._lvar2expr[self._loop_name][loop_var] = lv\n bind_map[lv] = loop_var\n self.loop_vars[i] = loop_var\n lv = loop_var\n\n lv_list.append(lv)\n expr_list.append(self._lvar2expr[self._loop_name][lv])\n\n if bind_map:\n self.cond = rewrite_subgraph(self.cond, bind_map)\n self.body = [rewrite_subgraph(b, bind_map) for b in self.body]\n\n cond = set_span(tvm.relay.op.min(self.cond), self.cond.span)\n\n for lv, exp in self._lvar2expr[self._loop_name].items():\n if lv not in self.loop_vars:\n var_checker = VarChecker(lv)\n for bd in self.body + [cond]:\n var_checker.visit(bd)\n if var_checker.used:\n lv_list.append(lv)\n expr_list.append(exp)\n extra_vars.append(lv)\n break\n\n with sb.if_scope(cond):\n sb.ret(wl(*list(self.body + extra_vars)))\n with sb.else_scope():\n sb.ret(tvm.relay.Tuple(lv_list))\n\n loop_fn = tvm.relay.Function(lv_list, sb.get())\n sb = tvm.relay.scope_builder.ScopeBuilder()\n sb.let(wl, loop_fn)\n loop_ret = wl(*expr_list)\n\n sb.ret(loop_ret)\n ret = sb.get()\n return ret", "def loop_fn(time, cell_output, cell_state, loop_state):\n if cell_state is None: # time == 0\n return loop_fn_initial(\n time, cell_output, cell_state, loop_state)\n\n emit_output = cell_output # == None for time == 0\n\n # couple external context with cell states (c, h)\n next_cell_state = []\n # condition only at the first temporal step\n context = context_proj if cell_output is None else 0.0\n for layer in range(num_layers):\n next_cell_state.append(tf.contrib.rnn.LSTMStateTuple(\n c=cell_state[layer].c + context,\n h=cell_state[layer].h + context))\n\n next_cell_state = tuple(next_cell_state)\n\n elements_finished = (time >= dec_inputs_length_)\n finished = tf.reduce_all(elements_finished)\n\n # TODO: the current code computes an intermediate generated\n # sequence for conditioning purposes: is it what we want?\n out = decode_state(cell_output, V, b_o)\n out.set_shape([None, vocab_size])\n next_input = tf.cond(finished, lambda: pad, lambda: out)\n\n next_loop_state = None\n\n return (elements_finished,\n next_input,\n next_cell_state,\n emit_output,\n next_loop_state)", "def smart_cond(predicate, true_fn, false_fn, name=None):\n if (tf.distribute.has_strategy() and tf.distribute.get_replica_context()):\n strategy = tf.distribute.get_strategy()\n else:\n strategy = None\n if not isinstance(strategy, tf.distribute.MirroredStrategy):\n return tf.cond(predicate, true_fn, false_fn, name)\n else:\n # Conditionals with functions which execute synchronization calls are not\n # well supported with 
Distribution Strategy. Instead follow the scheme\n # suggested in https://github.com/tensorflow/tensorflow/issues/27716:\n # 1. Execute the conditional in a cross-replica context.\n # 2. The conditional functions then return to a replica-context before\n # executing the original conditional functions.\n def true_fn_per_replica():\n # call_for_each_replica requires a tensor to be returned. This is not true\n # for all functions (which, e.g., might return an op or tf.group) so\n # instead execute the ops as control dependency and return a constant\n # tensor.\n with tf.control_dependencies([true_fn()]):\n return tf.constant(0.0)\n def true_fn_cross_replica():\n strategy = tf.distribute.get_strategy()\n return strategy.extended.call_for_each_replica(true_fn_per_replica)\n def false_fn_per_replica():\n with tf.control_dependencies([false_fn()]):\n return tf.constant(0.0)\n def false_fn_cross_replica():\n strategy = tf.distribute.get_strategy()\n return strategy.extended.call_for_each_replica(false_fn_per_replica)\n def cond(distribution):\n del distribution\n return tf.cond(predicate, true_fn_cross_replica, false_fn_cross_replica, name)\n return tf.distribute.get_replica_context().merge_call(cond)", "def get_train_loop_fun(param_dict, fun_dict):\n def train_loop_fun(x_pl, hid_pl, err_acc, count, f_state, eps_z, debug_tensors):\n return train_loop(x_pl, hid_pl, err_acc, count, f_state, eps_z, param_dict, fun_dict, debug_tensors)\n return train_loop_fun", "def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)", "def randomly_apply_operation(operation, img, gt_boxes):\n return tf.cond(\n get_random_bool(),\n lambda: operation(img, gt_boxes),\n lambda: (img, gt_boxes)\n )", "def _skip_op(self, op_id, op, ops_in_exec_path, report_handler):\n if TensorTracer.while_loop_op(op):\n report_handler.instrument_op(\n op, TensorTracer.reason(op_id, _REASON_WHILELOOP_OP))\n return True\n if TensorTracer.control_flow_op(op):\n report_handler.instrument_op(\n op, TensorTracer.reason(op_id, _REASON_CONTROLFLOW_OP))\n return True\n if TensorTracer.unsafe_op(op):\n report_handler.instrument_op(\n op, TensorTracer.reason(op_id, _REASON_UNSAFE_OP))\n return True\n if TensorTracer.device_mismatch(self._tt_config.device_type, op):\n report_handler.instrument_op(\n op, TensorTracer.reason(op_id, _REASON_DEVICE_MISMATCH))\n return True\n if op not in ops_in_exec_path:\n report_handler.instrument_op(\n op, TensorTracer.reason(op_id, _REASON_NOT_EXECUTED))\n return True\n # TensorTracer will not trace the operations that are in an inner while loop\n # or tf.cond when a temporary cache is used. Temporary cache adds direct\n # data dependencies to traced operations, and needs a static number of\n # traced operations. For these cases,\n # - We do not know the number of slots required when there are inner while\n # loops. 
TensorTracer can only trace the result of a while loop.\n # - We do not know ahead of time which branch of the tf.cond\n # will be taken, so we avoid introducing data dependencies for the\n # operations inside a tf.cond.\n # - We also cannot have a data dependency to an operation in a different\n # while context.\n if self._is_in_control_flow(op) or not self._is_in_outmost_while_loop(op):\n if not self._should_trace_in_control_flow():\n report_handler.instrument_op(\n op, TensorTracer.reason(op_id, _REASON_IN_CONTROL_FLOW))\n return True\n if self._is_user_included_op(op):\n report_handler.instrument_op(\n op, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))\n if tensor_tracer_flags.TT_CHECK_FILTER.value:\n logging.info('USER_INCLUDED op %s', op.name)\n return False\n\n if not self._inside_op_range(op_id):\n report_handler.instrument_op(\n op, TensorTracer.reason(op_id, _REASON_OUTSIDE_OP_RANGE))\n return True\n if not self._is_interesting_op(op):\n report_handler.instrument_op(\n op, TensorTracer.reason(op_id, _REASON_LESS_INTERESTING_OP))\n return True\n if self._is_user_excluded_op(op):\n report_handler.instrument_op(\n op, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))\n if tensor_tracer_flags.TT_CHECK_FILTER.value:\n logging.info('USER_EXCLUDED op %s', op.name)\n return True\n return False", "def group(*ops):\n with tf.control_dependencies(ops):\n return tf.constant(0)", "def wrapper_fn(*args, **kwargs):\n\n if __enveloop_number_of_loops__[fn.__name__] > 0:\n __enveloop_number_of_loops__[fn.__name__] -= 1\n return fn(*args, **kwargs)\n else:\n del __enveloop_number_of_loops__[fn.__name__]\n if callback:\n return callback(*args, **kwargs)", "def _get_op_control_flow_context(self, op):\n # pylint: disable=protected-access\n op_control_flow_context = op._control_flow_context\n # pylint: enable=protected-access\n if control_flow_util.IsLoopExit(op):\n op_control_flow_context = op_control_flow_context.outer_context\n return op_control_flow_context", "def time_conv_reshape(arr,window,stride):\n \n bat, steps, feat = arr.get_shape().as_list()\n r = tf.floormod((steps - window), stride)\n n = math.ceil((steps - window)/stride)\n \n def padder(n=n,r=r,feat=feat,steps=steps,bat=bat,arr=arr):\n \"\"\"Pad function.\"\"\"\n pad = tf.zeros([bat, stride - r, feat],tf.float32)\n return tf.concat([arr, pad], 1) \n \n arr = tf.cond(tf.equal(r,0), lambda: arr, padder)\n steps = tf.cond(tf.equal(r,0), lambda: steps, lambda: steps + stride -r)\n last_step = steps - window + 1 \n \n def c(i,a,b):\n \"\"\"Condition tf.while_loop\"\"\"\n return tf.less(i,window)\n \n def b(i,new_arr,arr):\n \"\"\"Body tf.while_loop. 
Appends ith value of windows to new_arr.\"\"\"\n new_arr = tf.concat([new_arr,arr[:, i:last_step + i:stride, :]], axis=2)\n return i+1,new_arr,arr\n \n i = tf.constant(1)\n new_arr = arr[:, 0: last_step: stride, :]\n new_arr.set_shape([bat,n+1,None])\n _,new_arr,_=tf.while_loop(c,\n b,\n loop_vars=[i,new_arr,arr],\n shape_invariants=[i.get_shape(),\n tf.TensorShape([bat,n+1,None]),\n arr.get_shape(),\n ],\n )\n new_arr.set_shape([bat,n+1,feat*window])\n return new_arr", "def power_iteration(A, max_steps=10):\n\n def cond(r, i):\n return i < max_steps\n\n def body(r, i):\n i += 1\n r = tf.nn.l2_normalize(tf.reshape(tf.matmul(A, tf.expand_dims(r, -1)), [-1, N]), axis=-1)\n return [r, i]\n\n batch_size = tf.shape(A)[0]\n N = tf.shape(A)[1]\n\n # r0 = tf.random.uniform(shape=[batch_size, N], dtype=tf.float32)\n # TODO: allow negative values here?\n r0 = tf.random.uniform(shape=[batch_size, N], minval=-1, maxval=1, dtype=tf.float32)\n r0 = tf.nn.l2_normalize(r0, axis=-1)\n i0 = tf.constant(0, dtype=tf.int32)\n\n r_final, a = tf.while_loop(cond, body, loop_vars=[r0, i0], back_prop=True, parallel_iterations=10)\n\n return r_final", "def wait_for(predicate_func, **kwargs):\n if len(kwargs) == 0:\n while not predicate_func():\n pass\n else:\n while not predicate_func(**kwargs):\n pass", "def loop(cls, f, **kwargs):\n def _loop(*args, **kwargs):\n while True:\n try:\n f(*args, **kwargs)\n except TaskletExit:\n break\n except:\n logging.exception(\"unhandled exception in Tasklet.loop\")\n cls.sleep(1.0) #prevent hogging the cpu if exception repeats\n\n return cls.new(_loop, **kwargs)", "def gen_loop(x_pl, hid_pl, count, f_state, eps_z, eps_x, pd, fun_dict):\n eps_z_t = tf.squeeze(tf.slice(eps_z, [tf.to_int32(count), 0, 0], [1, -1, -1]), axis=[0])\n eps_x_t = tf.squeeze(tf.slice(eps_x, [tf.to_int32(count), 0, 0], [1, -1, -1]), axis=[0])\n\n x_t, f_out, f_state = generation(hid_pl, f_state, eps_z_t, eps_x_t, pd, fun_dict)\n\n x_old = tf.slice(x_pl, [0, 0, 0], [tf.to_int32(count), -1, -1])\n x_empty = tf.slice(x_pl, [tf.to_int32(count) + 1, 0, 0], [-1, -1, -1])\n x_t = tf.reshape(x_t, [1, pd['batch_size'], pd['in_dim']])\n x_pl = tf.concat([x_old, x_t, x_empty], axis=0)\n x_pl.set_shape([pd['seq_length'], pd['batch_size'], pd['in_dim']])\n\n count += 1\n return x_pl, f_out, count, f_state, eps_z, eps_x", "def _define_loop(graph, logdir, train_steps, eval_steps, batch_env):\n\n default_external_action = np.zeros(batch_env.action_info[1])\n loop = tools.Loop(\n logdir, graph.step, graph.should_log, graph.do_report,\n graph.force_reset)\n loop.add_phase(\n 'train', graph.done, graph.score, graph.summary, train_steps,\n report_every=None,\n log_every=train_steps // 2,\n checkpoint_every=None,\n feed={\n graph.is_training: True, graph.should_step: True, graph.use_external_action: False,\n graph.external_action: default_external_action})\n loop.add_phase(\n 'eval', graph.done, graph.score, graph.summary, eval_steps,\n report_every=eval_steps,\n log_every=eval_steps // 2,\n checkpoint_every=10 * eval_steps,\n feed={graph.is_training: False, graph.should_step: True, graph.use_external_action: False,\n graph.external_action: default_external_action})\n return loop" ]
[ "0.69818914", "0.68200386", "0.6126418", "0.6078447", "0.57636625", "0.5754965", "0.57449204", "0.57445085", "0.57208353", "0.57029027", "0.56025404", "0.54687613", "0.54140025", "0.53947186", "0.52648926", "0.5232417", "0.52252024", "0.520506", "0.5199421", "0.5190266", "0.5179423", "0.51715124", "0.5146725", "0.5144517", "0.5111674", "0.50638556", "0.5041175", "0.50376606", "0.50271463", "0.5021398" ]
0.7957619
0
Retrieves the Scaffold from `captured_scaffold_fn`.
def _get_scaffold(captured_scaffold_fn): with _CapturingContext(message='Inside scaffold_fn'): scaffold_fn = captured_scaffold_fn.get() if scaffold_fn: scaffold = scaffold_fn() if scaffold is None: raise ValueError( 'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed') else: scaffold = None if scaffold: wrapped_finalize = scaffold.finalize def _finalize(): with _CapturingContext('Inside Scaffold.finalize'): wrapped_finalize() scaffold.finalize = _finalize return scaffold
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_scaffold(self, mol: Chem.rdchem.Mol) -> str:\n return MurckoScaffold.MurckoScaffoldSmiles(mol=mol, includeChirality=self.include_chirality)", "def generate_scaffold(smiles, include_chirality=False):\n mol = Chem.MolFromSmiles(smiles)\n engine = ScaffoldGenerator(include_chirality=include_chirality)\n scaffold = engine.get_scaffold(mol)\n return scaffold", "def get_scaffold_from_match(match):\n match_split = match.split('_')\n right_slice_index = match_split.index(ligand_resname)\n return '_'.join(match_split[4:right_slice_index])", "def generate_scaffold(smiles: str, include_chirality: bool = False) -> str:\n if type(smiles) == str:\n mol = Chem.MolFromSmiles(smiles)\n else:\n mol = smiles\n engine = ScaffoldGenerator(include_chirality=include_chirality)\n scaffold = engine.get_scaffold(mol)\n\n return scaffold", "def fill_scaffolds_table(self,fn_scaffolds, overwrite=True):\n scaffold_record_pattern = re.compile(\"(.+?)\\s+(.+?)\\s+(.+)\")\n tables_names = self.get_tables_names()\n log.info(\"Creating and filling table of scaffolds ...\")\n if overwrite and self.ScaffoldsTable in tables_names:\n self.drop_table(self.ScaffoldsTable)\n if not self.ScaffoldsTable in tables_names:\n self.create_table(self.ScaffoldsTable ,\n self.ScaffoldsFields, self.ScaffoldsTypes)\n\n parser = SeqIO.parse(fn_scaffolds, \"fasta\")\n data = []\n n_stored = 0\n batch_size = 1000\n for seq_record in parser:\n description = seq_record.description\n m = re.match(scaffold_record_pattern,description)\n if not m:\n raise ValueError(\"Problem reading description %s\", description)\n scaffold_id = m.group(1)\n scaffold= m.group(2)\n\n s = seq_record.seq\n length = len(s)\n GC = 1.* (s.count(\"G\") + s.count(\"C\")) / length\n table_record = [scaffold_id,scaffold, str(seq_record.seq), length, GC]\n data.append(table_record)\n # store batch of data\n if len(data) > batch_size:\n self.store_data(self.ScaffoldsTable, data)\n n_stored += batch_size\n log.info(\"Stored %20d sequences\\r\", n_stored)\n data = [] # empty data to avoid using a lot of memory\n # store last batch\n if len(data) > 0:\n n_stored += len(data)\n self.store_data(self.ScaffoldsTable, data)\n log.info(\"Stored %20d sequences\\r\", n_stored)", "def _define_scaffold(mode, config, params, summaries_data=None):\r\n # Comment: init_op with init_feed_dict, and init_fn are executed from SessionManager\r\n # only if model is not loaded successfully from checkpoint using the saver.\r\n # if no saver is provided then the default saver is constructed to load all\r\n # variables (from collections GLOBAL_VARIABLES and SAVEABLE_OBJECTS) and init_op won't\r\n # be executed.\r\n # For that reason, during training using init_checkpoint we provide a custom saver only\r\n # for model variables and an init_op to initialize all variables not in init_checkpoint.\r\n\r\n # create scopes outside of scaffold namescope\r\n # with tf.name_scope('init') as init_scope:\r\n # pass\r\n with tf.name_scope('saver') as saver_scope:\r\n pass\r\n\r\n with tf.name_scope('scaffold'):\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n _define_summaries(mode, config, params, summaries_data)\r\n saver = train_saver(config, params, scope=saver_scope)\r\n # Initialization is handled by replace_initializers\r\n # init_op, init_feed_dict = train_init(config, params, scope=init_scope)\r\n # init_op, init_feed_dict = [None]*2\r\n elif mode == tf.estimator.ModeKeys.EVAL:\r\n saver = evaluate_saver(config, params, scope=saver_scope)\r\n # init_op, init_feed_dict = [None]*2\r\n elif mode == 
tf.estimator.ModeKeys.PREDICT:\r\n saver = predict_saver(config, params, scope=saver_scope)\r\n # init_op, init_feed_dict = [None]*2\r\n\r\n # WARNING: default ready_op and ready_for_local_init_op install operations\r\n # in the graph to report_uninitialized_variables, resulting in too many ops,\r\n # so make ready_for_local_init_op a no_op to reduce them.\r\n scaffold = tf.train.Scaffold(\r\n # init_op=init_op,\r\n # init_feed_dict=init_feed_dict,\r\n # ready op only for distributed debugging\r\n # ready_op=tf.no_op(),\r\n saver=saver)\r\n\r\n return scaffold", "def create_scaffold(name, fields=''):\n create_model(name, fields)\n create_view(name, fields)\n create_routes(name)", "def getScreen(self):\n \n return self.screen", "def master_screen(self):\n return self.screen_manager.master_screen", "def generate_scaffolds(self, dataset, log_every_n=1000):\n scaffolds = {}\n data_len = len(dataset)\n\n log(\"About to generate scaffolds\", self.verbose)\n for ind, smiles in enumerate(dataset.ids):\n if ind % log_every_n == 0:\n log(f\"Generating scaffold {ind} {data_len}\", self.verbose)\n scaffold = generate_scaffold(smiles)\n if scaffold not in scaffolds:\n scaffolds[scaffold] = [ind]\n else:\n scaffolds[scaffold].append(ind)\n\n # Sort from largest to smallest scaffold sets\n scaffolds = {key: sorted(value) for key, value in scaffolds.items()}\n scaffold_sets = [\n scaffold_set\n for (scaffold, scaffold_set) in sorted(scaffolds.items(),\n key=lambda x: (len(x[1]), x[1][0]),\n reverse=True)\n ]\n return scaffold_sets", "def make_seq(scaffold, o_dict):\n scaff_name = scaffold[0]\n sequence = []\n \n nice_scaff = \"contigs__\"\n \n scaff_string = str(scaffold)\n while scaffold:\n \n if len(scaffold) == 1:\n #This should never happen!\n paf(\"\\nWARNING: odd number of elements in scaffold!\")\n paf(\"scaffold is: \" + scaff_string)\n nice_scaff += \"WARNING:_odd_number_of_elements_in_scaffold!\"\n sequence.description = scaff_name\n return sequence, nice_scaff\n\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n \n if end1[0:4] != \"five\" and end1[0:5] != \"three\":\n if end2 in repeat_contigs and end2[0:10] == \"threeprime\":\n #Only attach a repeat if connected by fiveprime end,\n # to avoid creating duplicate copies\n ''' this condition has been removed!\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n #threeprime ends of repeats are not attached\n if end2[0:4] != \"five\" and end2[0:5] != \"three\": end2 = other_end(end1)\n '''\n \n if \"dummy\" in end2:\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n\n if end2[0:4] != \"five\" and end2[0:5] != \"three\":\n #This should never happen! 
\n paf(\"\\nWARNING: scaffold not included in assembly!\")\n paf(\"scaffold is: \" + scaff_string)\n paf(\"end1 is: \" + str(end1))\n paf(\"end2 is: \" + str(end2)+ \"\\n\")\n nice_scaff += \"scaffold.not.included.in.assembly!\" + str(end1) + \".\" + str(end2)\n sequence.description = scaff_name\n return sequence, nice_scaff\n else:\n sequence, nice_scaff = initiate_seq(end2, nice_scaff)\n elif (end2 != \"link_circular\") and (\"dummy\" not in end1):\n sequence, nice_scaff = extend_seq(sequence, end0, end1, o_dict, nice_scaff)\n end0 = end2\n \n sequence.description = scaff_name\n \n return sequence, nice_scaff", "def getscreen(self):\n return self.screen", "def getscreen(self):\n return self.screen", "def examine_document(self, action):\n doc = action[1] # this should have a document ID so we can pull out the correct document text\n screen = DocScreen('doc_title', 'doc_content goes here')\n\n return screen", "def snapshot_controller(self) -> Optional[pulumi.Input['ManagedClusterStorageProfileSnapshotControllerArgs']]:\n return pulumi.get(self, \"snapshot_controller\")", "def test_scaffold_formation(self):\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n func = self._get_simple_implicit_function().to(device)\n func.scaffold_max_pool_kernel_size = 1\n\n def new_density(points):\n \"\"\"\n Density function which returns 1 if p>(0.5, 0.5, 0.5) or\n p < (-0.5, -0.5, -0.5) else 0\n \"\"\"\n inshape = points.shape\n points = points.view(-1, 3)\n out = []\n for p in points:\n if torch.all(p > 0.5) or torch.all(p < -0.5):\n out.append(torch.tensor([[1.0]]))\n else:\n out.append(torch.tensor([[0.0]]))\n return torch.cat(out).view(*inshape[:-1], 1).to(device)\n\n func._get_density = new_density\n func._get_scaffold(0)\n\n points = torch.tensor(\n [\n [0, 0, 0],\n [1, 1, 1],\n [1, 0, 0],\n [0.1, 0, 0],\n [10, 1, -1],\n [-0.8, -0.7, -0.9],\n ]\n ).to(device)\n expected = new_density(points).float().to(device)\n assert torch.allclose(func.voxel_grid_scaffold(points), expected), (\n func.voxel_grid_scaffold(points),\n expected,\n )", "def generate_scaffolds(self, dataset, cutoff=0.18):\n mols = []\n for ind, smiles in enumerate(dataset.ids):\n mols.append(Chem.MolFromSmiles(smiles))\n n_mols = len(mols)\n fps = [AllChem.GetMorganFingerprintAsBitVect(x, 2, 1024) for x in mols]\n\n scaffold_sets = ClusterFps(fps, cutoff=cutoff)\n scaffold_sets = sorted(scaffold_sets, key=lambda x: -len(x))\n scaffold_sets = [list(sfd) for sfd in scaffold_sets]\n return scaffold_sets", "def get_scene(self):\n return self.scenes[self.current_scene]", "def get_stage():\n try:\n filename = os.path.join(get_var('SITE'), \".stage\")\n f = open(filename, \"r\")\n stage = f.readline().strip()\n f.close()\n logger.debug(\"get stage: %s\" % (stage))\n return stage\n except:\n return reset_stage()", "def _get_controller(self):\n return self.__controller", "def get_test_frame(self):\n\n # get function from end of unittest id()\n target = self.id().split('.')[-1]\n\n # traverse frames until function name is found\n for frame in inspect.stack():\n if frame[3] == target:\n return frame\n return None", "def get_frame(stream, is_pre_captured):\n frame = stream.read()\n if is_pre_captured:\n return frame[1]\n else:\n return frame", "def get_screen(self):\n return self._cached('raw', self.ale.getScreen)", "def firecracker(self):\n return self._context.get(\"firecracker\", None)", "def preview_trigger():\n global _PREVIEW_TRIGGER # pylint:disable=global-statement\n if _PREVIEW_TRIGGER is None:\n _PREVIEW_TRIGGER = 
PreviewTrigger()\n return _PREVIEW_TRIGGER", "def get_scene():\n try:\n return _stack[-1]\n except IndexError:\n return None", "def get_snapshot_setup_script(self):\n return self.shard_setup_script", "def get_toplevel(self):\n raise NotImplementedError", "def GetZScrInfoForIndividualScaffolds(PosScfBC_d):\n Scf_Pos_ZScr_vals = {\"scaffolds\": {}}\n\n for scf_name in PosScfBC_d[\"scaffolds\"].keys():\n scf_info = PosScfBC_d[\"scaffolds\"][scf_name]\n\n pos_and_nIns_l = [[int(x), scf_info[\"positions\"][x][\"nIns\"]] for x in \\\n scf_info[\"positions\"].keys()]\n\n\n mean, SD, max_z, pos_to_Zscr_l = GetSdPointsForValues(pos_and_nIns_l)\n\n Scf_Pos_ZScr_vals[\"scaffolds\"][scf_name] = {\n \"scaffold_length\": scf_info[\"scaffold_length\"],\n \"mean\": mean,\n \"SD\": SD,\n \"max_z\": max_z,\n \"pos_to_Zscr_l\": pos_to_Zscr_l\n }\n Scf_Pos_ZScr_vals[\"analysis_type\"] = \"IndividualScaffoldStats\"\n\n return Scf_Pos_ZScr_vals", "def _get_dashboard_object(self):\n pass" ]
[ "0.5962946", "0.54265267", "0.5422105", "0.5317502", "0.49453947", "0.49192426", "0.47882766", "0.47736388", "0.46875826", "0.45953506", "0.4508714", "0.4507913", "0.4507913", "0.4490961", "0.44098067", "0.4358617", "0.42400393", "0.41495925", "0.41405055", "0.41361576", "0.40939546", "0.40794885", "0.40110835", "0.40092093", "0.39913958", "0.39774626", "0.39623973", "0.3959937", "0.39598063", "0.3956747" ]
0.8463581
0
Returns an `_Inputs` instance according to `input_fn` return value.
def from_input_fn(return_values): if isinstance(return_values, dataset_ops.Dataset): dataset = return_values return _Inputs(dataset=dataset) features, labels = _Inputs._parse_inputs(return_values) return _Inputs(features, labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_input(inputs):\n return input(inputs)", "def _input_fn(input_pipeline_context=None):\n return _create_dataset(options, is_training, input_pipeline_context)", "def get_function_input(inputs, input_name, optional=False):\n this_input = inputs.get(input_name)\n\n if this_input is None and optional is False:\n err = \"'{0}' is a mandatory function input\".format(input_name)\n raise ValueError(err)\n else:\n return this_input", "def get_function_input(inputs, input_name, optional=False):\n input = inputs.get(input_name)\n\n if input is None and optional is False:\n err = \"'{0}' is a mandatory function input\".format(input_name)\n raise ValueError(err)\n else:\n return input", "def _get_input_iterator(input_fn, strategy):\n\n # When training with TPU pods, datasets needs to be cloned across\n # workers. Since Dataset instance cannot be cloned in eager mode, we instead\n # pass callable that returns a dataset.\n input_data = input_fn()\n if callable(input_data):\n iterator = iter(\n strategy.experimental_distribute_datasets_from_function(input_data))\n else:\n iterator = iter(strategy.experimental_distribute_dataset(input_data))\n return iterator", "def train_input_fn():\n # Initialize `iterator` with training data.\n train_filenames = [os.path.join(FLAGS.out_dir, FLAGS.train_records)]\n return _input_fn(train_filenames, FLAGS.epochs, FLAGS.batch_size, shuffle=True)", "def _call_input_fn(self, input_fn, mode):\n input_fn_args = util.fn_args(input_fn)\n config = self.config # a deep copy.\n kwargs = {}\n if 'params' in input_fn_args:\n kwargs['params'] = self.params # a deep copy.\n else:\n raise ValueError('input_fn ({}) does not include params argument, '\n 'required by TPUEstimator to pass batch size as '\n 'params[\"batch_size\"]'.format(input_fn))\n if 'config' in input_fn_args:\n kwargs['config'] = config\n\n if 'mode' in input_fn_args:\n kwargs['mode'] = mode\n\n # Records the fact input_fn has been invoked.\n self._is_input_fn_invoked = True\n\n with self._ctx.with_mode(mode) as ctx:\n # Setting the batch size in params first. This helps user to have same\n # input_fn for use_tpu=True/False.\n batch_size_for_input_fn = ctx.batch_size_for_input_fn\n if batch_size_for_input_fn is not None:\n if isinstance(kwargs['params'], hparam.HParams):\n kwargs['params'].add_hparam(_BATCH_SIZE_KEY, batch_size_for_input_fn)\n else:\n kwargs['params'][_BATCH_SIZE_KEY] = batch_size_for_input_fn\n\n # For export_savedmodel, input_fn is never passed to Estimator. So,\n # `is_export_mode` must be False.\n if ctx.is_running_on_cpu(is_export_mode=False):\n with ops.device('/device:CPU:0'):\n return input_fn(**kwargs)\n\n # For TPU computation, input_fn should be invoked in a tf.while_loop for\n # performance. While constructing the tf.while_loop, the structure of\n # inputs returned by the `input_fn` needs to be recorded. The structure\n # includes whether features or labels is dict or single Tensor, dict keys,\n # tensor shapes, and dtypes. The recorded structure is used to create the\n # infeed dequeue ops, which must be wrapped and passed as a Fn, called\n # inside the TPU computation, as the TPU computation is wrapped inside a\n # tf.while_loop also. So, we either pass input_fn to model_fn or pass\n # dequeue_fn to model_fn. 
Here, `input_fn` is passed directly as\n # `features` in `model_fn` signature.\n def _input_fn():\n return input_fn(**kwargs)\n\n return _input_fn", "def get_input_fn(is_train):\n d = DataInfo(ddir,evalddir)\n hparams = d.generate()\n params = utils.Params(**hparams)\n\n if is_train:\n input_fn = data.get_input_fn(dataset_fn=data.get_train_dataset, mode=TRAIN, params=params, shuffle_queue=10000, repeat=False)\n \n else:\n input_fn = data.get_input_fn(dataset_fn=data.get_eval_dataset, mode=EVAL, params=params, shuffle_queue=10000, repeat=False)\n \n return input_fn, params", "def serving_input_fn(self):\n label_ids = tf.placeholder(tf.int32, [None], name='label_ids')\n input_ids = tf.placeholder(\n tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='input_ids')\n input_mask = tf.placeholder(\n tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='input_mask')\n segment_ids = tf.placeholder(\n tf.int32, [None, self.params[\"MAX_SEQ_LENGTH\"]], name='segment_ids')\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'label_ids': label_ids,\n 'input_ids': input_ids,\n 'input_mask': input_mask,\n 'segment_ids': segment_ids})()\n return input_fn", "def train_input_fn(self) -> types.FeatureAndLabelTensors:\n return self._input_fn_from_file(self._train_path)", "def input_fn(params=None):\n del params\n if is_tpu:\n features = get_input_fn_dataset(pattern, flags, batch_size)()[0]\n else:\n features = get_input_fn_queue(pattern, flags, batch_size)()[0]\n\n if flags.color_data_augment:\n\n def augment_img(image):\n image = tf.image.random_hue(image, .5)\n return image\n\n features[IMAGE_FEATURE_NAME] = tf.map_fn(\n augment_img, features[IMAGE_FEATURE_NAME], parallel_iterations=32)\n\n return features, None", "def build_training_input_fn():\n transformed_metadata = metadata_io.read_metadata(\n os.path.join(\n MODEL_DIR, transform_fn_io.TRANSFORMED_METADATA_DIR))\n transformed_feature_spec = transformed_metadata.schema.as_feature_spec()\n\n def input_fn():\n \"\"\"Input function for training and eval.\"\"\"\n dataset = tf.contrib.data.make_batched_features_dataset(\n file_pattern=os.path.join(TFRECORD_DIR, '*'),\n batch_size=BATCH_SIZE,\n features=transformed_feature_spec,\n reader=tf.data.TFRecordDataset,\n shuffle=True)\n transformed_features = dataset.make_one_shot_iterator().get_next()\n # Extract features and labels from the transformed tensors.\n label_cols = set(['TotalVolume', 'Density', 'Temperature', 'Humidity', 'Energy', 'Problems'])\n transformed_labels = {key: value for (key, value) in transformed_features.items() if key in label_cols}\n transformed_features = {key: value for (key, value) in transformed_features.items() if key not in label_cols}\n return transformed_features, transformed_labels\n\n return input_fn", "def validate_input_fn(self) -> types.FeatureAndLabelTensors:\n return self._input_fn_from_file(self._validate_path)", "def generate_input_fn(mode='TRAIN'):\n mode = mode.upper()\n if mode == 'TRAIN' or mode == 'EVAL':\n return input_fn\n elif mode == 'PREDICT' or mode == 'NOISE':\n return noise_input_fn\n else:\n raise ValueError('Incorrect mode provided')", "def make_input_fn(step_output):\n return tf.nn.embedding_lookup(embeddings, step_output.predictions)", "def input_fn(sources, train, params):\n \n raise NotImplementedError", "def _input_fn(params):\n batch_size = params['batch_size']\n inputs, labels, lengths = sequence_example_lib.get_fake_data_batch(\n batch_size, input_size, padding_length)\n\n features = {\n 'inputs': inputs,\n 'lengths': 
lengths\n }\n return features, labels", "def input_processing(func, config, input_ids, **kwargs):\n signature = dict(inspect.signature(func).parameters)\n signature.pop(\"kwargs\", None)\n parameter_names = list(signature.keys())\n output = {}\n allowed_types = (tf.Tensor, bool, int, tuple, list, dict)\n\n if \"inputs\" in kwargs[\"kwargs_call\"]:\n logger.warning(\n \"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.\",\n FutureWarning)\n\n output[\"input_ids\"] = kwargs[\"kwargs_call\"].pop(\"inputs\")\n\n if \"decoder_cached_states\" in kwargs[\"kwargs_call\"]:\n logger.warning(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n output[\"past_key_values\"] = kwargs[\"kwargs_call\"].pop(\"decoder_cached_states\")\n\n if len(kwargs[\"kwargs_call\"]) > 0:\n raise ValueError(\n f\"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}.\"\n )\n\n for k, v in kwargs.items():\n if isinstance(v, allowed_types) or v is None:\n output[k] = v\n else:\n raise ValueError(f\"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.\")\n\n if isinstance(input_ids, (tuple, list)):\n for i, input in enumerate(input_ids):\n # EagerTensors don't allow to use the .name property so we check for a real Tensor\n if type(input) == tf.Tensor:\n # Tensor names have always the pattern name:device_id then we check only the\n # name and not the device id\n tensor_name = input.name.split(\":\")[0]\n\n if tensor_name in parameter_names:\n output[tensor_name] = input\n else:\n output[parameter_names[i]] = input\n elif isinstance(input, allowed_types) or input is None:\n output[parameter_names[i]] = input\n else:\n raise ValueError(\n f\"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}.\"\n )\n elif isinstance(input_ids, dict):\n if \"inputs\" in input_ids:\n logger.warning(\n \"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.\",\n FutureWarning,\n )\n\n output[\"input_ids\"] = input_ids.pop(\"inputs\")\n\n if \"decoder_cached_states\" in input_ids:\n logger.warning(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.\",\n FutureWarning,\n )\n output[\"past_key_values\"] = input_ids.pop(\"decoder_cached_states\")\n\n for k, v in dict(input_ids).items():\n if isinstance(v, allowed_types) or v is None:\n output[k] = v\n elif k not in parameter_names and \"args\" not in parameter_names:\n logger.warning(\n f\"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored.\"\n )\n continue\n else:\n raise ValueError(f\"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.\")\n else:\n if isinstance(input_ids, tf.Tensor) or input_ids is None:\n output[parameter_names[0]] = input_ids\n else:\n raise ValueError(\n f\"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}.\"\n )\n\n for name in parameter_names:\n if name not in list(output.keys()) and name != \"args\":\n output[name] = kwargs.pop(name, signature[name].default)\n\n # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)\n # So to respect the proper output we have to add this exception\n if \"args\" in output:\n if 
output[\"args\"] is not None and type(output[\"args\"]) == tf.Tensor:\n tensor_name = output[\"args\"].name.split(\":\")[0]\n output[tensor_name] = output[\"args\"]\n else:\n # `args` in this case is always the first parameter, then `input_ids`\n output[\"input_ids\"] = output[\"args\"]\n\n del output[\"args\"]\n\n if \"kwargs\" in output:\n del output[\"kwargs\"]\n\n boolean_dict = {\n k: v\n for k, v in output.items()\n if k in [\"return_dict\", \"output_attentions\", \"output_hidden_states\", \"use_cache\"]\n }\n\n output.update(\n booleans_processing(\n config=config,\n **boolean_dict,\n )\n )\n\n return output", "def input_fn_builder(features, seq_length):\n\n all_unique_ids = []\n all_input_ids = []\n all_input_mask = []\n all_input_type_ids = []\n\n for feature in features:\n all_unique_ids.append(feature.unique_id)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_input_type_ids.append(feature.input_type_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(all_input_ids, \n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn", "def get_input_fn(pattern, flags, batch_size, is_tpu):\n\n def input_fn(params=None):\n \"\"\"Calls the appropriate input_fn and augments the data.\"\"\"\n del params\n if is_tpu:\n features = get_input_fn_dataset(pattern, flags, batch_size)()[0]\n else:\n features = get_input_fn_queue(pattern, flags, batch_size)()[0]\n\n if flags.color_data_augment:\n\n def augment_img(image):\n image = tf.image.random_hue(image, .5)\n return image\n\n features[IMAGE_FEATURE_NAME] = tf.map_fn(\n augment_img, features[IMAGE_FEATURE_NAME], parallel_iterations=32)\n\n return features, None\n\n return input_fn", "def get_input(self, input_number: int) -> Input:\n return Input(self.api, input_number)", "def _input_fn(params):\n # Retrieves the batch size for the current shard. The # of shards is\n # computed according to the input pipeline deployment. 
See\n # `tf.contrib.tpu.RunConfig` for details.\n batch_size = params['batch_size']\n inputs, labels, lengths = sequence_example_lib.get_padded_batch(\n file_paths, batch_size, input_size, padding_length)\n features = {\n 'inputs': inputs,\n 'lengths': lengths,\n }\n return features, labels", "def get_input_fn(options, is_training):\n if not isinstance(options, reader_pb2.VCRReader):\n raise ValueError('options has to be an instance of Reader.')\n\n def _input_fn(input_pipeline_context=None):\n \"\"\"Returns a python dictionary.\n\n Returns:\n A dataset that can be fed to estimator.\n \"\"\"\n return _create_dataset(options, is_training, input_pipeline_context)\n\n return _input_fn", "def input_fn_builder(input_file, seq_length, is_test, is_training, drop_remainder):\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if not is_test:\n name_to_features[\"label\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n batch_size = params[\"train_batch_size\"]\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n else:\n batch_size = params[\"predict_batch_size\"]\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn", "def input_fn_builder(self, features, max_seq_len, batch_size, is_training):\n\n all_input_ids = []\n all_input_mask = []\n all_label_ids = []\n all_label_mask = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_label_ids.append(feature.label_ids)\n all_label_mask.append(feature.label_mask)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = self.batch_size\n\n num_examples = len(features)\n\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"label_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size)\n return d\n\n return input_fn", "def input_fn(params):\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"unique_ids\":\n tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),\n \"input_ids\":\n tf.constant(all_input_ids, \n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_type_ids\":\n tf.constant(all_input_type_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n })\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d", "def input_fn_builder(features, seq_length):\n\n all_label_ids = []\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n\n for feature in features:\n all_label_ids.append(feature.label_ids)\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices(\n {\n \"label_ids\": tf.constant(\n all_label_ids, shape=[num_examples], dtype=tf.int32\n ),\n \"input_ids\": tf.constant(\n all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n \"input_mask\": tf.constant(\n all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n \"segment_ids\": tf.constant(\n all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32\n ),\n }\n )\n\n d = d.batch(batch_size=batch_size, drop_remainder=False)\n return d\n\n return input_fn", "def input_fn_builder(features, seq_length, is_training, drop_remainder):\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n num_examples = len(features)\n\n # This is for demo purposes and does NOT scale to large data sets. We do\n # not use Dataset.from_generator() because that uses tf.py_func which is\n # not TPU compatible. 
The right way to load data is with TFRecordReader.\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn", "def input_fn(params):\n batch_size = self.batch_size\n\n num_examples = len(features)\n\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n \"label_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, max_seq_len],\n dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size)\n return d", "def input_fn():\n raw_placeholder_spec = RAW_DATA_METADATA.schema.as_batched_placeholders()\n # remove label key that is not going to be available at seving\n raw_placeholder_spec.pop(LABEL_KEY)\n\n # we are defining the feature_column (raw_featutes) and the tensor\n # (receiver_tensors) for the raw data\n raw_input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(\n raw_placeholder_spec)\n raw_features, receiver_tensors , _ = raw_input_fn()\n\n # we are tranforming the raw_features with the graph written by\n # preprocess.py to transform_fn_io.TRANSFORM_FN_DIR and that was used to\n # write the tf records. This helps avoiding training/serving skew\n\n _, transformed_features = (\n saved_transform_io.partially_apply_saved_transform(\n os.path.join(tft_working_dir, transform_fn_io.TRANSFORM_FN_DIR),\n raw_features))\n\n return tf.estimator.export.ServingInputReceiver(\n transformed_features, receiver_tensors)" ]
[ "0.6401963", "0.63848746", "0.6295835", "0.62745816", "0.62434405", "0.6147081", "0.61179113", "0.61011976", "0.6071407", "0.6055143", "0.5980461", "0.5961409", "0.59082115", "0.5904447", "0.5903279", "0.5902866", "0.5896739", "0.587621", "0.5863113", "0.58621866", "0.5805875", "0.5794452", "0.5765817", "0.5761434", "0.57535225", "0.5729602", "0.57290554", "0.565888", "0.5618884", "0.56140465" ]
0.74578035
0
Returns True if the return value from input_fn is Dataset.
def is_dataset(self): return self._dataset is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_dataset(obj):\n return isinstance(obj, (DictDataset, ImageDataset, LabeledImageDataset,\n TupleDataset, DatasetMixin))", "def is_pyvista_dataset(obj):\n return isinstance(obj, (pyvista.DataSet, pyvista.MultiBlock))", "def is_dataset(X, require_attrs=None):\n\n if require_attrs is None:\n require_attrs = [\"data_vars\", \"coords\", \"dims\", \"to_array\"]\n\n return all([hasattr(X, name) for name in require_attrs])", "def check_dataset_type(val, name='The hdf5 dataset', allow_none=False, print_value=True, location=''):\n none_msg = name + ' was not found in the hdf5 file at its location ' + location\n return check_type_value(val, name, h5py._hl.dataset.Dataset,\n allow_none=allow_none, print_value=print_value, none_msg=none_msg)", "def is_valid(self, dataset):\n pass", "def check_type(df: pd.DataFrame, input_output=\"\") -> Tuple[bool, str]:\n\n error_string = (\n \"should be DataFrame: The input should be a Pandas DataFrame\"\n \" representing a matrix, where every cell is one entry of the matrix.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n if not isinstance(df, pd.DataFrame):\n return False, error_string\n else:\n return True, \"\"", "def test_record_is_dataset_file(self) -> None:\n path = \"/home/user/dataset.record\"\n result = is_dataset_file(path)\n self.assertTrue(result)", "def test_pbtxt_is_dataset_file(self) -> None:\n path = \"/home/user/dataset.pbtxt\"\n result = is_dataset_file(path)\n self.assertFalse(result)", "def load_dataset(self) -> bool:\n\n if self.write_path is None:\n raise Exception(\"Error: Attempted to load results dataset without ever specifiying a path to write it to\")\n\n try:\n if self.format == \"arrow\":\n self.res_dataset = Dataset.load_from_disk(self.write_path)\n elif self.format == \"csv\":\n self.res_dataset = pd.read_csv(self.write_path)\n return True\n except:\n return False", "def _input_fn(input_pipeline_context=None):\n return _create_dataset(options, is_training, input_pipeline_context)", "def is_fitted(self):\n return self.__fdata is not None", "def validate_dataset(self):\n pass", "def isDataset(inputData):\n if '#' in inputData.split('/')[-1]:\n return False\n return True", "def isDataset(inputData):\n if '#' in inputData.split('/')[-1]:\n return False\n return True", "def exists_dataset(self, dataset):\n assert dataset, \"Must input a valid dataset name.\"\n return any(self.get_by_dataset(dataset))", "def is_splitable_var(var: Any) -> bool:\n if isinstance(var, DataSample):\n return True\n if isinstance(var, torch.Tensor):\n return True\n if isinstance(var, np.ndarray):\n return True\n if isinstance(var, abc.Sequence) and not isinstance(var, str):\n return True\n return False", "def canStandardize(self, datasetType):\n\n return hasattr(self, 'std_' + datasetType)", "def _is_DataArrays(data):\n if isinstance(data, (Dataset, DataArray)):\n return True\n if isinstance(data, Mapping):\n for da in data.values():\n if not isinstance(da, DataArray):\n raise TypeError(\"Please provide List/Mapping of DataArrays\")\n return True\n if isinstance(data, Iterable):\n for da in data:\n if not isinstance(da, DataArray):\n return False\n # raise TypeError(\"Please provide List/Mapping of DataArrays\")\n return True\n return False", "def validate_dataset(self, path: str = None) -> bool:\n checked = {} # type: Dict[str, Any]\n # Check path exists\n path = path if path is not None else self.path\n if not os.path.exists(path):\n raise ValueError('The path to the dataset does not exists! 
({:s})'.format(path))\n # Check readme\n checked['readme'] = os.path.exists(os.path.join(path, 'Readme.md'))\n # Check the different file formats\n file_formats = {\n 'simple-zip': 'single_oscillation_{:04d}.zip',\n 'advanced-gz': 'single_oscillation_0-2_{:04d}.gz',\n 'big2-gz': 'single_oscillation_01-2_{:04d}.gz',\n }\n for file_key, file_format in file_formats.items():\n file_count = 0\n while os.path.exists(os.path.join(path, file_format.format(file_count))):\n file_count += 1\n checked[file_key] = file_count\n # Test for valid dataset\n if checked['readme'] and checked['simple-zip'] == 64:\n self.dataset = 'simple'\n self._file_format = 'single_oscillation_{:04d}.zip'\n self._max_file_count = 64\n return True\n elif checked['advanced-gz'] == 1024:\n self.dataset = 'big'\n self._file_format = 'single_oscillation_0-2_{:04d}.gz'\n self._max_file_count = 1024\n return True\n elif checked['big2-gz'] == 1024:\n self.dataset = 'big2'\n self._file_format = 'single_oscillation_01-2_{:04d}.gz'\n self._max_file_count = 1024\n return True\n return False", "def test_with_valid_input(self):\n for dataset_type in ['regular', 'raw', 'REGULAR', 'RAW']:\n try:\n check_dataset_type(dataset_type)\n except ValueError:\n self.fail(\"Dataset {0} should be valid\".format(dataset_type))", "def test_create_dataset(self):\n self.assertIsInstance(self._dataset, cifar10.CIFAR10Dataset)", "def _is_dataset_path(ds_path: github_api.GithubPath) -> bool:\n return ds_path.is_dir() and (ds_path / f'{ds_path.name}.py').exists()", "def is_dataset_exported(filename):\n try:\n with open(filename):\n return True\n except IOError:\n return False", "def is_dataset_created(path, suffix=\"\"):\n dataset_id = None\n try:\n with open(\"%s%sdataset%s\" % (path, os.sep, suffix)) as dataset_file:\n dataset_id = dataset_file.readline().strip()\n try:\n dataset_id = bigml.api.get_dataset_id(dataset_id)\n return True, dataset_id\n except ValueError:\n return False, None\n except IOError:\n return False, None", "def exists(dtype, name, rootdir=None):\n return FreezableAPI.to_slug(dtype,name) in FreezableAPI.datasets(rootdir=rootdir)", "def _dataset_fn(ctx=None):\n batch_size = ctx.get_per_replica_batch_size(\n global_batch_size) if ctx else global_batch_size\n dataset = input_pipeline.create_classifier_dataset(\n input_file_pattern,\n max_seq_length,\n batch_size,\n is_training=is_training,\n input_pipeline_context=ctx)\n return dataset", "def _is_dataset_metric(self, metric: mlflow.entities.Metric) -> bool:\n return self._prefix is None or (\n self._prefix and metric.key.startswith(self._prefix)\n )", "def eval_input_fn(features, labels, batch_size):\n #features=dict(features)\n features = dataframetodict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset", "def exists(self, name):\n assert name, \"Must input a valid dataset name.\"\n return name in self.manager.data[\"dataset\"]", "def input_fn(evaluate=False) -> tf.data.Dataset:\n\n # The dataset\n ds = tf.data.Dataset.from_generator( generator=train_generator, \n output_types=( { 'character' : tf.string } , tf.string ),\n output_shapes=( { 'character' : (Model.SEQUENCE_LENGHT,) } , () )\n )\n\n ds = ds.batch(64)\n ds = ds.prefetch(1)\n\n return ds" ]
[ "0.7126082", "0.65838486", "0.63373053", "0.63276345", "0.60850066", "0.6026266", "0.5969108", "0.5859921", "0.5840824", "0.5836024", "0.583491", "0.5834641", "0.579408", "0.579408", "0.57419455", "0.57343686", "0.5691356", "0.5672013", "0.56190723", "0.55721736", "0.5570917", "0.5565193", "0.55619067", "0.55222934", "0.5509561", "0.5479185", "0.54700255", "0.5460652", "0.5433021", "0.5393772" ]
0.72974783
0
Returns a `SessionRunHook` to initialize this dataset. This must be called before `features_and_labels`.
def dataset_initializer_hook(self): iterator = self._dataset.make_initializable_iterator() # pylint: disable=protected-access hook = estimator_lib._DatasetInitializerHook(iterator) self._iterator = iterator return hook
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self):\n \n #initialize the variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n \n #initialize the data iterators\n self.session.run(self.data_iterator.initializer)", "def _initialize(self) -> None:\n p = self.params\n # We make self.input public so that users can access its methods like\n # IdsToStrings if needed.\n with py_utils.infeed_context_scope(\n infeed_host_index=p.infeed_host_index,\n num_infeed_hosts=p.num_infeed_hosts):\n self.input = p.input.Instantiate()\n\n if hasattr(self.input, 'datasource') and isinstance(\n self.input.datasource, datasource.TFDatasetSource):\n # For the special case when the input is implemented by a tf.data.Dataset,\n # use it directly. Otherwise roundtrip adaptions may result in returning\n # duplciate batches.\n self._get_next_fn = self.input.datasource.GetNext\n else:\n self._get_next_fn = tf.function(self._get_batch)\n self._num_batches_produced = 0", "def __post_init__(self):\n self._session = Session()\n self._post_hooks()", "def pre_train(self, dataset, **kwargs):\n\n pass", "def init_run(self):\n raise NotImplementedError", "def _training_before_hook(self):\n pass", "def init(*args):\n global dataset\n dataset = args[0]", "def run_step(self):\n self.hooked_sess.run(self.train_op)", "def init(self,sess):\n if not os.path.isfile(\\\n \"./Models/\" + self.mod_name + \".ckpt.meta\"):\n sess.run(tf.global_variables_initializer())\n return 0\n else:\n if self.gen_only:\n sess.run(tf.global_variables_initializer())\n self.load(sess)\n return 1", "def _init_session(self):\n self.sess = tf.Session(config=self.config, graph=self.g)\n self.sess.run(self.init)", "def _init_session(self):\n self.sess = tf.Session(graph=self.g)\n self.sess.run(self.init)", "def initialize(self, *args, **kwargs):\n self.initialized = True", "def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()", "def __init__(self, config):\n logger.info(f\"{self.__class__.__name__}: Dataset initializing ...\")\n super().__init__(config)", "def initialize_session(self):\n self.logger.info(\"Initializing tf session\")\n self.sess = tf.compat.v1.Session()\n self.sess.run(tf.compat.v1.global_variables_initializer())\n self.saver = tf.compat.v1.train.Saver()", "def initialize_session(self):\n self.logger.info(\"Initializing tf session\")\n session_conf = tf.ConfigProto(\n allow_soft_placement=self.FLAGS.allow_soft_placement,\n log_device_placement=self.FLAGS.log_device_placement)\n self.session = tf.Session(config=session_conf)\n self.session.run(tf.global_variables_initializer())\n try: \n self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.FLAGS.num_checkpoints)\n except:\n pass", "def __init__(self, sourcedata=None, metadata=None):\n SourceHook.__init__(self, sourcedata=sourcedata, metadata=metadata)", "def initialize_session(self):\r\n self.logger.info(\"Initializing tf session\")\r\n self.sess = tf.Session()\r\n self.sess.run(tf.global_variables_initializer())\r\n self.saver = tf.train.Saver()", "def setup_global_step(self):\n self.global_step = tf.train.get_or_create_global_step()", "def runner(self):\n\n print('[ INFO ]: Initializing the forest fires program runner...')\n\n df, features, predictor = self.preprocess()", "def _initialise_run(self) -> None:", "def init_batch(self):\n pass", "def initialize_variables(self):\n self.sess.run(self.init)", "def _initialize_session(self):\n 
config = tf.ConfigProto()\n # restrict model GPU memory utilization to min required\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(config=config)\n tf_ver = int(tf.__version__.split('.')[1])\n if TF_VERSION <= 0.10:\n self.sess.run(tf.initialize_all_variables())\n logswriter = tf.train.SummaryWriter\n else:\n self.sess.run(tf.global_variables_initializer())\n logswriter = tf.summary.FileWriter\n self.saver = tf.train.Saver()\n self.summary_writer = logswriter(self.logs_path, graph=self.sess.graph) # change by ccx, add the graph_def", "def setup_hooks(self):\n pass", "def initialize(self):\n return self._wrapper(self._initialize_fun)", "def init(self, **kwargs):\n self._d = {}\n self._th = None\n self._run = True\n self.load()", "def on_train_begin(self, logs=None):", "def on_train_begin(self, logs=None):", "def _initial_setup(self, **train_kwargs):\n self._update(time_step=0., **train_kwargs)" ]
[ "0.5872473", "0.5574521", "0.551776", "0.5452945", "0.539178", "0.5315563", "0.5254858", "0.5231456", "0.5206683", "0.520266", "0.51905555", "0.51767313", "0.51684254", "0.51541096", "0.51093185", "0.5108235", "0.5104647", "0.5104162", "0.5078713", "0.5040331", "0.5021158", "0.501524", "0.5010536", "0.5010131", "0.49809623", "0.49745715", "0.49519357", "0.4951365", "0.4951365", "0.4930973" ]
0.67949516
0