query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4 to 10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Modify a context to allow startcase boolean representations. | def startcase_booleans(context_class):
context_class._convert_bool_to_str = _convert_to_str
return context_class | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lowercase_booleans(context_class):\n context_class._convert_bool_to_str = _convert_to_lowercase_str\n return context_class",
"def _set_bool(name, value, context):\n if name in os.environ:\n envval = os.environ.get(name).lower()\n if envval in [\"1\", \"true\", \"y\", \"yes\"]:\n context[name] = True\n elif envval in [\"0\", \"false\", \"n\", \"no\"]:\n context[name] = False\n else:\n raise ValueError(f\"{name} is a boolean, cannot match '{os.environ[name]}'\")\n\n _set_default(name, value, context)",
"def Bool(arg):\n return arg.lower() in ('y', 'true', 't', '1')",
"def setTrue(self):\n self.cond = CT.TRUE\n self.left = self.right = None\n self.z3 = BoolSort().cast(True)\n self.cleaned = self.Z3Simplified = self.customSimplified = self.checked = True\n self.customSimplifiedValue = CE.TRUE",
"def _evalContext(self):\n def xor(*args):\n return sum(args) == 1\n def neg(result):\n return not result\n context = {\n 'xor': xor,\n 'neg': neg\n }\n return context",
"def set_case_sensitive(self, v):\n self._case_sensitive = bool(v)",
"def __init__(self, cased: bool = True):\n super().__init__()\n self.and_token = \"And\" if cased else \"and\"",
"def set_boolean(x):\n\n if x:\n return \"True\"\n else:\n return \"False\"",
"def booleanize(text):\n ltext = text.lower()\n if ltext == 'true':\n booleanized = True\n elif ltext == 'false':\n booleanized = False\n else:\n raise ValueError('A monk asked: Is \"{}\" true or false.'.format(text))\n return booleanized",
"def CONST_BOOL(self, t):\n t.value = False if t.value == '#false' else True\n return t",
"def str2bool(self, v):\n \tprint('Entering conversion function')\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")",
"def context(key, value = Void, type = Void):\r\n def is_in_context(client, response, testcase):\r\n # If multiple templates are called, context\r\n # is actually a list of contexts, so we check\r\n # the value in all of them.\r\n if isinstance(response.context, list):\r\n contexts = response.context\r\n else:\r\n contexts = [response.context]\r\n \r\n for context in contexts:\r\n assert key in context\r\n if value is not Void:\r\n testcase.assertEqual(\r\n value, \r\n context[key]\r\n )\r\n if type is not Void:\r\n testcase.assertTrue(\r\n isinstance(\r\n context[key], \r\n type\r\n )\r\n )\r\n return is_in_context",
"def convert_boolean(cls, param, value):\r\n return True",
"def preprocess_bools(args):\n for arg in args:\n if type(args[arg]) == bool:\n args[arg] = int(args[arg])\n return args",
"def str2bool(self, val):\n return val.lower() in ('true','yes','t',1)",
"def make_bool(value):\n def make_value():\n return verify.Term(verify.BOOLEAN, value)\n return make_value",
"def boolean(val):\n\tif val == \"True\" or val == \"1\":\n\t\treturn True\n\telse:\n\t\treturn False",
"def is_applicable(self, context: Any) -> bool:\n pass",
"def on_true(self) -> global___Expression:",
"def take_action_on_flags(self, *args, **kwargs):\r\n pass",
"def TransformFlags(self) -> _n_2_t_0[bool]:",
"def variable_boolean(self, value):\n\n text_value = to_text(value)\n text_value = text_value.lower()\n\n if text_value == 'true' or text_value == 'false':\n return True\n\n return False",
"def before_activity_control(context: Activity, target_type: str = None,\n target_names: List[str] = None):\n if target_type and context[\"type\"] == target_type:\n context[\"dry\"] = True\n if target_names and context[\"name\"] in target_names:\n context[\"dry\"] = True",
"def _bool_encode(self, d):\n for k, v in d.items():\n if isinstance(v, bool):\n d[k] = str(v).lower()\n \n return d",
"def process_bool_arg(arg):\n if isinstance(arg, bool):\n return arg\n elif isinstance(arg, basestring):\n if arg.lower() in [\"true\", \"1\"]:\n return True\n elif arg.lower() in [\"false\", \"0\"]:\n return False",
"def _context_allow_change(self):\n try:\n self.__dict__['ALLOW_CHANGE'] = True\n yield\n finally:\n del self.__dict__['ALLOW_CHANGE']",
"def switch(self, context):\n return",
"def test_true(self):\n result = self.flag.parseString('Y')\n self.assertEqual('Y', result[0])",
"def __init__(self, boolean_name, boolean_value):\n self._boolean_name = process_for_latex(boolean_name)\n self._boolean_value = boolean_value",
"def _str2bool(self, v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")"
]
| [
"0.708648",
"0.60394466",
"0.5391676",
"0.5367538",
"0.53071034",
"0.5305611",
"0.5247369",
"0.5187181",
"0.514308",
"0.5142465",
"0.5080693",
"0.50760305",
"0.50713897",
"0.5069089",
"0.50475407",
"0.5033341",
"0.50312597",
"0.5011229",
"0.5008267",
"0.49925774",
"0.49824417",
"0.49811795",
"0.49755344",
"0.49485692",
"0.494086",
"0.49354148",
"0.49335593",
"0.4915708",
"0.49114123",
"0.49108735"
]
| 0.78109324 | 0 |
gets kid value to be used in token header for this handler. it must be unique for each handler. | def get_kid(self):
return 'f825ccd5-9b4a-476f-ae12-c1c1ea99e6b2' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_kid_from_jwe_header(token: str) -> Optional[str]:\n import base64\n import json\n\n header = token.split(\".\")[0]\n deserialized_header = base64.urlsafe_b64decode(header + \"===\")\n jose_header = json.loads(deserialized_header)\n\n return jose_header.get(\"kid\")",
"def _get_kid(message) -> str:\n if KID in message.phdr.keys():\n return base64.b64encode(message.phdr[KID]).decode(\"UTF-8\")\n return base64.b64encode(message.uhdr[KID]).decode(\"UTF-8\")",
"def get_key_id(self):",
"def _get_key(self):\n if not self.session:\n key = self.key\n else:\n key = self.session.get(\"_signature_key\")\n if key is None:\n key = str(uuid.uuid1())\n self.session[\"_signature_key\"] = key\n return key",
"def key_id(self):\n return self._key_id",
"def key_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"key_id\")",
"def jwt_get_user_id_from_payload_handler(payload):\n user_id = payload.get('user_id')\n return user_id",
"def get_key(self) -> int:\n return self.__key",
"def get_key(self) -> int:\n return self.key",
"def kms_key_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"kms_key_id\")",
"def token_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"token_id\")",
"def unauthenticated_userid(self, request):\n authorization = request.headers.get('Authorization', '')\n try:\n authmeth, token = authorization.split(' ', 1)\n except ValueError:\n return None\n if authmeth.lower() != 'bearer':\n return None\n\n user_id, client_name = self._verify_token(token, request)\n\n # Don't add suffix if authentication failed, or no specific client name is configured\n if client_name is None or client_name == 'default':\n return user_id\n\n return '{}-{}'.format(user_id, client_name)",
"def key(self):\n return str(self._id)",
"def _get_jwe_token(self, request_id_obj):\n # Inspired by https://jwcrypto.readthedocs.io/en/latest/jwe.html#asymmetric-keys\n payload = json.dumps(request_id_obj)\n\n public_key = jwk.JWK.from_pem(self.cleopatra_pub)\n protected_header = {\n \"alg\": \"RSA-OAEP-256\",\n \"enc\": \"A256CBC-HS512\",\n \"typ\": \"JWE\",\n \"kid\": public_key.thumbprint(),\n }\n jwetoken = jwe.JWE(payload.encode('utf-8'),\n recipient=public_key,\n protected=protected_header)\n\n enc = jwetoken.serialize()\n return enc",
"def get_current_uid():\n # TODO: Find a better way to access the token\n return request.token['id']",
"def identifier(self) -> str:\n return self.current_token",
"def key(self):\n return self._key.decode('utf-8')",
"def get_client_token(**_):\n return str(uuid.uuid4())",
"def get_key(self):\n return self.key",
"def get_key(self):\n return self.key",
"def key(self) -> str:\n return self.__key",
"def _get_tenant_ocid(self):\n if isinstance(self._provider, oci.signer.Signer):\n return self._provider.api_key.split('/')[0]",
"def get_public_key(self, kid):\n resp = self.request(self.jwks_url(), method=\"GET\")\n resp.raise_for_status()\n\n # find the proper key for the kid\n for key in resp.json()[\"keys\"]:\n if key[\"kid\"] == kid:\n return self.jwt_key_to_pem(key)\n raise DecodeError(f\"Cannot find kid={kid}\")",
"def key(self) -> str:\n return self._key",
"def generate_key():\n return get_token_generator().generate_token()",
"def kms_key_id(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_id\")",
"def kms_key_id(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_id\")",
"def kms_key_id(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_id\")",
"def kms_key_id(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_id\")",
"def client_key(self) -> str:\n return pulumi.get(self, \"client_key\")"
]
| [
"0.70360565",
"0.66597205",
"0.57830864",
"0.5602095",
"0.5457416",
"0.5443089",
"0.53873295",
"0.5355741",
"0.53556186",
"0.5337027",
"0.532609",
"0.5317285",
"0.5314371",
"0.5287662",
"0.5271824",
"0.5269629",
"0.52666974",
"0.5262292",
"0.5250077",
"0.5250077",
"0.52417606",
"0.5232066",
"0.52317595",
"0.52216244",
"0.52130306",
"0.52028865",
"0.52028865",
"0.52028865",
"0.52028865",
"0.5202823"
]
| 0.6909571 | 1 |
Analyze simulated random walks on BarabasiAlbert graphs. Modify input and output as needed. | def rwgraph_analyze1(input=(None)):
#generates graph
n=2000
m=4
G=nx.barabasi_albert_graph(n, m, seed=5)
Nt=100
M=20000
#finds max degree of graph and stores list of degrees of nodes
maxdeg=0
degree_dist=[]
for i in range(0,n):
degree_dist.append(G.degree[i])
if G.degree[i]>maxdeg:
maxdeg=G.degree[i]
j=i
#generates data and stores them in lists for varying M and Nt
X=rwgraph(G,j,M,Nt)
Listnodes=[]
for i in range(M):
Listnodes.append(G.degree(X[i,Nt]))
Nt=10000
M=20000
X=rwgraph(G,j,M,Nt)
Listnodes2=[]
for i in range(M):
Listnodes2.append(G.degree(X[i,Nt]))
Nt=10
M=20000
X=rwgraph(G,j,M,Nt)
Listnodes3=[]
for i in range(M):
Listnodes3.append(G.degree(X[i,Nt]))
Nt=10000
M=200
X=rwgraph(G,j,M,Nt)
Listnodes4=[]
for i in range(M):
Listnodes4.append(G.degree(X[i,Nt]))
fig, ax1 = plt.subplots(figsize =(14,7))
##### creates histogram figure with 2 axes ####
ax1.hist([Listnodes,Listnodes2], bins=maxdeg, label=['Nt=100', 'Nt=10000'],color=['g','r'],alpha=0.6)
ax1.set_xlabel('degree of node')
ax1.set_ylabel('frequency of final position of random walks')
ax1.tick_params(axis='y')
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax2.hist([degree_dist], bins=maxdeg, label=['graph node frequency'],color=['b'],alpha=0.6)
ax2.set_ylabel('frequency of node degrees for graph')
ax2.tick_params(axis='y')
ax1.legend(loc="center right", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)
ax2.legend(loc="upper right", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)
plt.title('M=20000, node degree of final position of random walk, for varying amounts of time', y=1.10, fontsize=20)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.grid(b=None)
plt.show()
#function to generate dictionary of frequencies
def CountFrequency(my_list):
# Creating an empty dictionary
freq = {}
for item in my_list:
if (item in freq):
freq[item] += 1
else:
freq[item] = 1
return freq
#converts data to appropriate form so it can be plotted on scatter plot
#frequency
listfreq1=CountFrequency(Listnodes2)
listfreq2=CountFrequency(Listnodes3)
listfreq3=CountFrequency(Listnodes4)
listfreq_deg=CountFrequency(degree_dist)
#set up lists
z=[]
z2=[]
z3=[]
z_deg=[]
z_deg2=[]
z_deg3=[]
#code to create list of only degrees used in simulations
for i in listfreq1:
z.append(listfreq1[i]/(listfreq_deg[i]*20000))
z_deg.append(i)
for i in listfreq2:
z2.append(listfreq2[i]/(listfreq_deg[i]*20000))
z_deg2.append(i)
for i in listfreq3:
z3.append(listfreq3[i]/(listfreq_deg[i]*200))
z_deg3.append(i)
#expected prob distribution
E=G.number_of_edges()
z0=[]
z_deg0=[]
for i in listfreq_deg:
z0.append(i/(2*E))
z_deg0.append(i)
#generates scatter plot figure
plt.figure(figsize=(12, 6))
plt.scatter(z_deg, z, label='Nt=10000, M=20000')
plt.scatter(z_deg2, z2,label='Nt=10, M=20000')
plt.scatter(z_deg3, z3,label='Nt=10, M=200')
plt.plot(z_deg0,z0,label="expected prob dist",alpha=0.5)
plt.xlabel('degree of node')
plt.ylabel('frequency of final position / M*frequency of degree')
plt.legend(loc="upper left", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)
plt.title("Frequency of final positions relative to number of nodes of that degree, for changing times Nt and M.")
plt.show()
return None #modify as needed
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create learning agent\n # a = e.create_agent(RandomAgent) # create random agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01)\n # reduce update_delay to speed up simulation\n sys.stdout = open(\"./output.txt\", \"w\")\n tic = time()\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n toc = time()\n sys.stdout = sys.__stdout__\n\n print \"Totoal time used: {}.\".format(toc - tic)\n parse(\"./output.txt\")",
"def test_simulation(walk_length_array, number_of_simulations, walker_class_type):\n for walk_length in walk_length_array:\n _distances_ = simulate_walks(walk_length, number_of_simulations, walker_class_type)\n print(walker_class_type.__name__, \" random walk of {} steps\".format(walk_length), \" After {} simulations\".format(number_of_simulations))\n print(\" Mean= {}\".format(round(sum(_distances_)/len(_distances_),4)))\n print(\" Max= {}\".format(round(max(_distances_), 4)))\n print(\" Min= {}\".format(round(min(_distances_),4)))",
"def breath_analyze(self, offset=0, th=10):\n # breath part\n breath_gd = np.gradient(gf(self.breath_list, 10))\n breath_gd[breath_gd > 0] = 1\n breath_gd[breath_gd < 0] = 0\n breath_pulse = breath_gd[:-1]-np.roll(breath_gd, -1)[:-1]\n breath_in = argrelextrema(breath_pulse, np.less, order=10)[0]#+offset\n breath_out = argrelextrema(breath_pulse, np.greater, order=10)[0]#+offset\n self.breath = np.sort(np.hstack([breath_in, breath_out, len(self.breath_list)-1]))\n \n if self.breath[0] == breath_in[0]:\n self.btype = 'in'\n else:\n self.btype = 'out' \n\n b_in = []\n b_out = []\n delidx = []\n\n if len(self.breath) != 0: \n for i, j in zip(self.breath[:-1], self.breath[1:]):\n breath_diff = abs(self.breath_list[j]-self.breath_list[i])\n if abs(breath_diff) > 3000: # really breath in/out\n if abs(breath_diff) < 30000: # not deep breath\n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_out.append(j-i)\n self.ngframe.append(i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_in.append(j-i)\n else: \n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j))\n b_out.append(j-i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j))\n b_in.append(j-i)\n else:\n delidx.append(np.argwhere(self.breath==j)[0][0])\n self.breath = np.delete(self.breath, np.array(delidx))\n\n print('\\naverage breath out freq is: '+str(np.round(30./np.mean(b_out), 2))+' Hz')\n print('\\naverage breath in freq is: '+str(np.round(30./np.mean(b_in), 2))+' Hz')\n else:\n raise ImportError('Doing too fast !! please redo again !!')",
"def test_extended_barabasi_albert(self, m=2):\n seed = 42\n repeats = 2\n BA_model = barabasi_albert_graph(100, m, seed)\n BA_model_edges = BA_model.number_of_edges()\n\n while repeats:\n repeats -= 1\n\n # This behaves just like BA, the number of edges must be the same\n G1 = extended_barabasi_albert_graph(100, m, 0, 0, seed)\n assert_equal(G1.size(), BA_model_edges)\n\n # More than twice more edges should have been added\n G1 = extended_barabasi_albert_graph(100, m, 0.8, 0, seed)\n assert_greater(G1.size(), BA_model_edges * 2)\n\n # Only edge rewiring, so the number of edges less than original\n G2 = extended_barabasi_albert_graph(100, m, 0, 0.8, seed)\n assert_equal(G2.size(), BA_model_edges)\n\n # Mixed scenario: less edges than G1 and more edges than G2\n G3 = extended_barabasi_albert_graph(100, m, 0.3, 0.3, seed)\n assert_greater(G3.size(), G2.size())\n assert_less(G3.size(), G1.size())\n\n # Testing exceptions\n ebag = extended_barabasi_albert_graph\n assert_raises(NetworkXError, ebag, m, m, 0, 0)\n assert_raises(NetworkXError, ebag, 1, 0.5, 0, 0)\n assert_raises(NetworkXError, ebag, 100, 2, 0.5, 0.5)",
"def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)",
"def barabasi_albert_graph(T, m, seed=None, is_directed=False):\n n = m + T - 1\n if m < 1 or m >= n:\n raise nx.NetworkXError(\"Barabási–Albert network must have m >= 1\"\n \" and m < n, m = %d, n = %d\" % (m, n))\n\n # Add m initial nodes (m0 in barabasi-speak)\n time = 0\n starting_graph = nx.path_graph(m) # start with a tree\n G = nx.DiGraph() if is_directed else nx.Graph()\n G.add_edges_from(starting_graph.edges(data=True), t=time, w=1)\n\n # Target nodes for new edges\n targets = list(range(m))\n # List of existing nodes, with nodes repeated once for each adjacent edge\n repeated_nodes = []\n # Start adding the other n-m nodes. The first node is m.\n source = m\n while source < n:\n time += 1 # increase time\n # Add edges to m nodes from the source.\n G.add_edges_from(zip([source] * m, targets), t=time, w=1)\n # Add one node to the list for each new edge just created.\n repeated_nodes.extend(targets)\n # And the new node \"source\" has m edges to add to the list.\n repeated_nodes.extend([source] * m)\n # Now choose m unique nodes from the existing nodes\n # Pick uniformly from repeated_nodes (preferential attachment)\n targets = _random_subset(repeated_nodes, m, seed)\n source += 1\n\n edgelist_path = f'datasets/synthetic/BA_{m}_{T}_raw.g'\n print(f'Weighted edgelist written at {edgelist_path!r}')\n nx.write_edgelist(G, path=edgelist_path, data='t')\n return G",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0000001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n print 'alpha, gamma:', a.alpha, a.gamma\n print 'penalties:', a.total_penalties\n print 'total rewards:', a.total_rewards",
"def test_GA():\n\tgenerationSize = 150\n\tmutationProb = 0.01\n\tgenerations = 500\n\tX = []\n\tT = []\n\tY = [] \n\tfitnesses = [0]*generationSize\n\tfor i in range(DATA_POINTS_NUM):\n\t\tX.append((i - DATA_POINTS_NUM/2)*0.1)\n\t\tT.append(polynomi_3N(REFERENCE, X[-1]))\n\t\tY.append(0)\n\t\n\tga = GA.GA(generationSize, 4, mutationProb)\n\tgenomes = ga.seedGenomes()\n\t#plot initial genomes\n\tplt.figure(1)\n\tplt.title('Initial genomes')\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tprint Genome\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome, X[j]))\n\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t\n\t#live and learn\n\tfor k in range(generations):\n\t\tprint \".\",\n\t\tfor i in range(len(genomes)):\n\t\t\tGenome = prescale(genomes[i])\n\t\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tga.fitnessUpdate(fitnesses)\n\t\tgenomes = ga.createNextGeneration()\n\t\t\n\t#plot final genomes\n\tplt.figure(2)\n\tplt.title('Final genomes')\n\tprint \"\\nfinal Genomes\"\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\tprint \"fit:%5.1f [%7.4f, %7.4f, %7.4f, %7.4f]\"%\\\n\t\t (calculate_fitness(T, Y), Genome[0],\n\t\t Genome[1], Genome[2], Genome[3])\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t#plot progress\n\tP = []\n\thistory = ga.generations[:]\n\tfor f in history:\n\t\t#f[1].sort()\n\t\tP.append(max(f[1]))\n\tplt.figure(3)\n\tplt.title('progress')\n\tplt.plot(P)\n\tplt.show()\n\t\n\t#print the result:\t\n\tbestGene = fitnesses.index(max(fitnesses))\n\tG = prescale(genomes[bestGene])\n print \"\"\n\tprint \"And the result is:\"\n\tprint \"%.4f => %.4f (%.4f)\"%(A, G[0], abs(A - G[0]))\n\tprint \"%.4f => %.4f (%.4f)\"%(B, G[1], abs(B - G[1]))\n\tprint \"%.4f => %.4f (%.4f)\"%(C, G[2], abs(C - G[2]))\n\tprint \"%.4f => %.4f (%.4f)\"%(D, G[3], abs(D - G[3]))",
"def test_am_basic(Simulator, plt, seed, rng):\n\n D = 64\n vocab = Vocabulary(D, rng=rng)\n vocab.parse('A+B+C+D')\n\n with nengo.Network('model', seed=seed) as m:\n am = AssociativeMemory(vocab)\n in_node = nengo.Node(output=vocab.parse(\"A\").v, label='input')\n nengo.Connection(in_node, am.input)\n\n in_p = nengo.Probe(in_node)\n out_p = nengo.Probe(am.output, synapse=0.03)\n\n sim = Simulator(m)\n sim.run(0.2)\n t = sim.trange()\n\n plt.subplot(2, 1, 1)\n plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.ylim(top=1.1)\n plt.legend(vocab.keys, loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))\n plt.plot(t[t > 0.15], np.ones(t.shape)[t > 0.15] * 0.8, c='g', lw=2)\n plt.ylabel(\"Output\")\n plt.legend(vocab.keys, loc='best')\n\n assert similarity(sim.data[in_p][t > 0.15], vocab.parse(\"A\").v) > 0.99\n assert similarity(sim.data[out_p][t > 0.15], vocab.parse(\"A\").v) > 0.8",
"def test_am_basic(Simulator, plt, seed, rng):\n\n d = 64\n vocab = Vocabulary(d, pointer_gen=rng)\n vocab.populate('A; B; C; D')\n\n with spa.Network('model', seed=seed) as m:\n m.am = ThresholdingAssocMem(threshold=0.3, input_vocab=vocab,\n mapping=vocab.keys(),\n function=filtered_step_fn)\n spa.sym.A >> m.am\n\n in_p = nengo.Probe(m.am.input)\n out_p = nengo.Probe(m.am.output, synapse=0.03)\n\n with Simulator(m) as sim:\n sim.run(0.2)\n t = sim.trange()\n\n plt.subplot(3, 1, 1)\n plt.plot(t, similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.ylim(top=1.1)\n plt.subplot(3, 1, 2)\n plt.plot(t, similarity(sim.data[out_p], vocab))\n plt.plot(t[t > 0.15], np.ones(t.shape)[t > 0.15] * 0.95, c='g', lw=2)\n plt.ylabel(\"Output\")\n\n assert_sp_close(t, sim.data[in_p], vocab['A'], skip=0.15, atol=0.05)\n assert_sp_close(t, sim.data[out_p], vocab['A'], skip=0.15)",
"def test_gan():\n nbr_qubits = 5\n\n # Normal law\n # N = 5*10 ** 3\n #\n # Database = np.random.normal(0, 1, N)\n # test_gan_qiskit(nbr_qubits, Database)\n\n # beta\n arr_beta = beta_proba(nbr_qubits, 2, 5)\n\n general_gantest(arr_beta, nbr_qubits)\n\n # uniform not on [0, 32]\n if nbr_qubits == 5:\n arr_unif = [1 / 24] * 24 + 8 * [0]\n general_gantest(arr_unif, nbr_qubits)",
"def run_genetic_algorithm(bayes_params):\n\n print('Running genetic algorithm')\n\n # Unpacks parameters (unfortunately can't feed dataframe (or series or\n # array) data into a function with hyperopt, so am having to pickle the\n # parameters not being optimised with hyperopt\n params_file = '{}/Program_input/Input_params.pkl'.format(\n bayes_params['workingdirectory']\n )\n with open(params_file, 'rb') as f:\n fixed_params = pickle.load(f)\n if not type(fixed_params) in [dict, OrderedDict]:\n raise TypeError('Data in {} is not a pickled dictionary'.format(params_file))\n params = {**bayes_params, **fixed_params}\n\n # Records sequences and their fitnesses after each generation\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'w') as f:\n f.write('Tracking GA optimisation progress\\n')\n\n ga_calcs = run_ga_calcs(params)\n\n # Defines whether sequences are compared by their raw or rank propensities.\n # Since BUDE scores and frequency values have to be compared by their rank\n # values, have made the decision to also compare propensity values by their\n # rankings.\n \"\"\"\n if params['matingpopmethod'] in ['fittest', 'roulettewheel']:\n raw_or_rank = 'raw'\n elif params['matingpopmethod'] in ['rankroulettewheel']:\n raw_or_rank = 'rank'\n \"\"\"\n raw_or_rank = 'rank'\n\n # Calculates propensity and/or BUDE energy of input structure\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('Input structure\\n')\n\n if params['fitnessscoremethod'] == 'alternate':\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, propensity_score, frequency_score,'\n ' BUDE energy, clashscore\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency,\n params['inputpdbenergy'], params['inputpdbclash']\n ))\n f.write('\\n')\n\n if params['fitnessscoremethod'] == 'propensity':\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, propensity_score, frequency_score\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency\n ))\n f.write('\\n')\n\n elif params['fitnessscoremethod'] == 'allatom':\n network_energies = ga_calcs.measure_fitness_allatom(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, BUDE energy\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n energy = network_energies[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, energy))\n f.write('\\n')\n\n elif 
params['fitnessscoremethod'] == 'molprobity':\n network_clashes = ga_calcs.measure_fitness_clashscore(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, clashscore\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n clashscore = network_clashes[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, clashscore))\n f.write('\\n')\n\n # Runs GA cycles\n gen = params['startgen']\n while gen < params['stopgen']:\n gen += 1\n print('Generation {}'.format(gen))\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('\\n\\n\\n\\n\\nGeneration {}\\n'.format(gen))\n\n\n all_networks_list = [params['sequencesdict']]\n pop_sizes = [params['populationsize']]\n\n for index, networks_dict in enumerate(all_networks_list):\n # Measures fitness of sequences in starting population.\n if (\n (params['fitnessscoremethod'] == 'propensity')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 2 == 1)\n ):\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(networks_dict)\n network_fitness_scores = ga_calcs.combine_prop_and_freq_scores(\n network_propensity_scores, network_frequency_scores, raw_or_rank\n )\n\n # Records sequences output from this generation and their\n # associated fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, propensity, frequency, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency, probability\n ))\n f.write('Total: {}, {}, {}'.format(\n sum(network_propensity_scores.values()),\n sum(network_frequency_scores.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n elif (\n (params['fitnessscoremethod'] == 'allatom')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 4 == 2)\n ):\n # Runs BUDE energy scoring on parallel processors\n network_energies = ga_calcs.measure_fitness_allatom(networks_dict)\n (network_fitness_scores\n ) = ga_calcs.convert_energies_to_probabilities(network_energies)\n\n # Records sequences output from this generation and their\n # associated fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, BUDE score, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n energy = network_energies[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, energy, probability\n ))\n f.write('Total: {}, {}'.format(\n sum(network_energies.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n\n elif (\n (params['fitnessscoremethod'] == 'molprobity')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 4 == 0)\n ):\n # Runs MolProbity scoring on parallel processors\n network_clashes = ga_calcs.measure_fitness_clashscore(networks_dict)\n (network_fitness_scores\n ) = 
ga_calcs.convert_clashscores_to_probabilities(network_clashes)\n\n # Records sequences output from this generation and their\n # associated fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, clashscore, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n clash = network_clashes[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, clash, probability\n ))\n f.write('Total: {}, {}'.format(\n sum(network_clashes.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n\n # Selects subpopulation for mating\n if params['matingpopmethod'] == 'fittest':\n mating_pop_dict = ga_calcs.create_mat_pop_fittest(\n networks_dict, network_fitness_scores, pop_sizes[index],\n params['unfitfraction']\n )\n elif params['matingpopmethod'] in ['roulettewheel', 'rankroulettewheel']:\n mating_pop_dict = ga_calcs.create_mat_pop_roulette_wheel(\n networks_dict, network_fitness_scores, pop_sizes[index], params['']\n )\n\n # Performs crossover of parent sequences to generate child sequences\n if params['crossovermethod'] == 'uniform':\n crossover_pop_dict = ga_calcs.uniform_crossover(mating_pop_dict)\n elif params['crossovermethod'] == 'segmented':\n crossover_pop_dict = ga_calcs.segmented_crossover(mating_pop_dict)\n\n # Mutates child sequences\n if params['mutationmethod'] == 'swap':\n mutated_pop_dict = ga_calcs.swap_mutate(crossover_pop_dict)\n elif params['mutationmethod'] == 'scramble':\n mutated_pop_dict = ga_calcs.scramble_mutate(crossover_pop_dict)\n\n # Combines parent and child sequences into single generation\n merged_networks_dict = ga_calcs.add_children_to_parents(\n mutated_pop_dict, mating_pop_dict\n )\n\n random_order = [n for n in range(len(merged_networks_dict))]\n random.shuffle(random_order)\n shuffled_merged_networks_dict = OrderedDict(\n {list(merged_networks_dict.keys())[n]:\n list(merged_networks_dict.values())[n] for n in random_order}\n )\n params['sequencesdict'] = shuffled_merged_networks_dict\n\n # Calculates fitness of output sequences and filters population to maintain\n # the fittest 50%, plus sums the probabilities of the retained sequences and\n # returns this value (to be minimised with hyperopt)\n summed_fitness = 0\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('\\n\\n\\n\\n\\nOutput generation\\n')\n\n if params['fitnessscoremethod'] != 'allatom':\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(params['sequencesdict'])\n network_fitness_scores = ga_calcs.combine_prop_and_freq_scores(\n network_propensity_scores, network_frequency_scores, raw_or_rank\n )\n elif params['fitnessscoremethod'] == 'allatom':\n network_energies = ga_calcs.measure_fitness_allatom(params['sequencesdict'])\n (network_fitness_scores\n ) = ga_calcs.convert_energies_to_probabilities(network_energies)\n\n # Records sequences output from this generation and their associated\n # fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n if params['fitnessscoremethod'] != 'allatom':\n f.write('network, sequence, propensity, frequency\\n')\n elif params['fitnessscoremethod'] == 'allatom':\n f.write('network, sequence, BUDE score\\n')\n for network, G in 
params['sequencesdict'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n if params['fitnessscoremethod'] != 'allatom':\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency\n ))\n elif params['fitnessscoremethod'] == 'allatom':\n energy = network_energies[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, energy))\n if params['fitnessscoremethod'] != 'allatom':\n f.write('Total: {}, {}'.format(\n sum(network_propensity_scores.values()),\n sum(network_frequency_scores.values())\n ))\n elif params['fitnessscoremethod'] == 'allatom':\n f.write('Total: {}'.format(sum(network_energies.values())))\n f.write('\\n')\n\n params['sequencesdict'] = ga_calcs.create_mat_pop_fittest(\n params['sequencesdict'], network_fitness_scores,\n params['populationsize'], unfit_fraction=0\n )\n\n for network in params['sequencesdict'].keys():\n # Higher propensity is more likely, so add because output from\n # measure_fitness_propensity is sum of -log(propensity) values, and\n # hyperopt minimises output score\n # Can't combine propensity and frequency scores without first converting\n # to a probability, so for calculating output combined fitness can only\n # use combined propensity scores to rank the structures\n if params['fitnessscoremethod'] != 'allatom':\n summed_fitness += network_propensity_scores[network]\n # Lower score is more likely, so add because hyperopt minimises output\n # score\n elif params['fitnessscoremethod'] == 'allatom':\n summed_fitness += network_energies[network]\n\n with open('{}/Program_output/GA_output_sequences_dict.pkl'.format(\n bayes_params['workingdirectory']), 'wb') as f:\n pickle.dump(params['sequencesdict'], f)\n\n print(summed_fitness)\n\n return summed_fitness",
"def main(genomes, config, break_score=True):\n\tglobal GEN\n\tGEN += 1\n\n\t# Create lists holiding the genome itself,\n\t# the neural network associated with the genome\n\t# and the bird object that uses the network to play\n\tnets = []\n\tge = []\n\tbirds = []\n\n\tfor g_id, g in genomes:\n\t\tnet = neat.nn.FeedForwardNetwork.create(g, config)\n\t\tnets.append(net)\n\t\tbirds.append(Bird(230, 350))\n\t\tg.fitness = 0\n\t\tge.append(g)\n\n\tbase = Base(FLOOR)\n\tpipes = [Pipe(random.randrange(450, 600))]\n\twin = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))\n\tclock = pygame.time.Clock()\n\n\tscore = 0\n\n\trun = True\n\twhile run and len(birds) > 0:\n\t\tclock.tick(FPS)\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trun = False\n\t\t\t\tpygame.quit()\n\t\t\t\tquit()\n\n\t\tpipe_ind = 0\n\t\tif len(birds) > 0:\n\t\t\t# Determine whether to use the first or second pipe on the screen\n\t\t\t# for neural network input\n\t\t\tif len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width():\n\t\t\t\tpipe_ind = 1\n\n\t\tfor x, bird in enumerate(birds):\n\t\t\t# Give each bird a fitness of 0.1 for each frame it stays alive\n\t\t\tge[x].fitness += 0.1\n\t\t\tbird.move()\n\n\t\t\t# Send bird location, top pipe location and bottom pipe location\n\t\t\t# and determine from network whether to jump or not\n\t\t\toutput = nets[x].activate((bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom)))\n\n\t\t\t# Use tanh as activation function\n\t\t\t# Result will in the range [-1, 1]\n\t\t\t# Jump if result > 0.5\n\t\t\tif output[0] > 0.5:\n\t\t\t\tbird.jump()\n\n\t\tadd_pipe = False\n\t\trem = []\n\t\tfor pipe in pipes:\n\t\t\tfor x, bird in enumerate(birds):\n\t\t\t\t# Check for collision\n\t\t\t\tif pipe.collide(bird):\n\t\t\t\t\t# Reduce fitness for bird who collides\n\t\t\t\t\t# such that bird that flew far but collides always\n\t\t\t\t\t# aren't favored\n\t\t\t\t\tge[x].fitness -= 1\n\t\t\t\t\tbirds.pop(x)\n\t\t\t\t\tnets.pop(x)\n\t\t\t\t\tge.pop(x)\n\n\t\t\tif not pipe.passed and pipe.x < bird.x:\n\t\t\t\tpipe.passed = True\n\t\t\t\tadd_pipe = True\n\n\t\t\tif pipe.x + pipe.PIPE_TOP.get_width() < 0:\n\t\t\t\trem.append(pipe)\n\n\t\t\tpipe.move()\n\n\t\tif add_pipe:\n\t\t\tscore += 1\n\t\t\tfor g in ge:\n\t\t\t\tg.fitness += 5\n\t\t\tpipes.append(Pipe(random.randrange(450, 600)))\n\n\t\tfor r in rem:\n\t\t\tpipes.remove(r)\n\n\t\tfor x, bird in enumerate(birds):\n\t\t\tif bird.y + bird.img.get_height() >= FLOOR or bird.y < 0:\n\t\t\t\tbirds.pop(x)\n\t\t\t\tnets.pop(x)\n\t\t\t\tge.pop(x)\n\n\t\tbase.move()\n\t\tdraw_window(win, birds, pipes, base, score, GEN, pipe_ind)\n\n\t\t# Break game if score gets large enough\n\t\tif break_score:\n\t\t\tif score > BREAK_SCORE:\n\t\t\t\tbreak",
"def run_algorithm(self):\n population_size = self.population_size\n simulator = self.simulator\n num_generations = self.num_generations\n current_dir = os.getcwd()\n urdf = current_dir + os.sep + os.path.join(\"URDF\", \"Ghost\", \"urdf\", \"Ghost.urdf\")\n simulated_robot = Robot(urdf, (0, 0, 0.4))\n simulated_robot.set_id(simulator.load_new_robot_urdf(simulated_robot))\n # make placeholders\n counter = 0\n best_genome = None\n best_fit = 0\n evals = population_size * (num_generations + 1)\n beam_fit = np.zeros(evals)\n current_population = self.make_population()\n current_population_fitness = [0] * self.population_size\n # print(\"build robots\")\n for k in range(self.population_size):\n #\tprint(\"initial robot \" , k)\n robot = current_population[k]\n simulator.load_robot_parameters(robot.parameters, 0)\n robot.set_fitness(simulator.compute_walk_fitness(1000)[0]) # evaluate the robot's fitness\n fitness = robot.get_fitness()\n current_population_fitness[k] = fitness\n \n if counter == 0:\n beam_fit[counter] = current_population_fitness[k] \n else:\n \n if beam_fit[counter - 1] < current_population_fitness[k]: # if the best overall robot thus far\n best_genome = robot.genome.copy() # update the best robot's genome\n beam_fit[counter] = current_population_fitness[k] \n else:\n beam_fit[counter] = beam_fit[counter - 1]\n best_fit = beam_fit[counter]\n\n counter +=1 \n\n\n #\tprint(\"origional robots evaluated, their fitness is \" , )\n for i in range(num_generations): # perform mutations equal to num_Climb\n #\t\t\tprint(\"start of gen , current population_fitness\" , current_population_fitness)\n population = current_population.copy()\n population_fitness = current_population_fitness.copy()\n print('gen' , i)\n for j in range(self.population_size):\n robot = population[j]\n mut_loc, old_val = robot.mutate_genome() # Mutation: Keep track of mut location and previous vals\n simulator.load_robot_parameters(robot.parameters, 0)\n robot.set_fitness(simulator.compute_walk_fitness(1000)[0]) # evaluate the robot's fitness\n fit_new = robot.get_fitness()\n population_fitness[j] = fit_new\n # BIG POINT - here we keep regardless if the change is better or not\n if fit_new > best_fit: # update learning curve\n best_fit = fit_new\n best_genome = robot.genome.copy()\n beam_fit[counter] = best_fit\n counter += 1\n #\t\t\tprint(\" ... \")\n #\t\t\tprint(\"end of gen , current population_fitness\" , current_population_fitness)\n # concat the populations and population fitnesses\n total_population = current_population + population\n total_population_fitness = current_population_fitness + population_fitness\n # print(\"before quick sort \" , total_population_fitness)\n # print(\" ... \")\n # sort the lists\n self.quick_sort(total_population_fitness, total_population, 0, len(total_population) - 1)\n # print(\" after quick sort \" , total_population_fitness)\n # print(\" ... \")\n # keep the top half\n current_population = total_population[:self.population_size]\n current_population_fitness = total_population_fitness[:self.population_size]\n # print(\"keep \", current_population_fitness)\n #\t\tprint(counter)\n\n if not os.path.exists('./data'):\n os.mkdir('./data')\n\n np.savetxt(\"beam_genome_gen_999_pop_100.csv\", best_genome, delimiter=\",\")\n np.savetxt(\"beam_learning_gen_999_pop_100.csv\", beam_fit, delimiter=\",\")",
"def test_am_wta(Simulator, plt, seed, rng):\n\n D = 64\n vocab = Vocabulary(D, rng=rng)\n vocab.parse('A+B+C+D')\n\n def input_func(t):\n if t < 0.2:\n return vocab.parse('A+0.8*B').v\n elif t < 0.3:\n return np.zeros(D)\n else:\n return vocab.parse('0.8*A+B').v\n\n with nengo.Network('model', seed=seed) as m:\n am = AssociativeMemory(vocab, wta_output=True)\n in_node = nengo.Node(output=input_func, label='input')\n nengo.Connection(in_node, am.input)\n\n in_p = nengo.Probe(in_node)\n out_p = nengo.Probe(am.output, synapse=0.03)\n\n sim = Simulator(m)\n sim.run(0.5)\n t = sim.trange()\n more_a = (t > 0.15) & (t < 0.2)\n more_b = t > 0.45\n\n plt.subplot(2, 1, 1)\n plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.ylim(top=1.1)\n plt.legend(vocab.keys, loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))\n plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.8, c='g', lw=2)\n plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.8, c='g', lw=2)\n plt.ylabel(\"Output\")\n plt.legend(vocab.keys, loc='best')\n\n assert similarity(sim.data[out_p][more_a], vocab.parse(\"A\").v) > 0.79\n assert similarity(sim.data[out_p][more_a], vocab.parse(\"B\").v) < 0.19\n assert similarity(sim.data[out_p][more_b], vocab.parse(\"B\").v) > 0.79\n assert similarity(sim.data[out_p][more_b], vocab.parse(\"A\").v) < 0.19",
"def run(self,step=2,\n sizePop=100,\n infoFields=['migrate_to','fitness'],\n recombination_rate = 0.00375,\n migration_rate = 0.01,\n mutation_rate = [0.00000001],\n subPopNames = ['x','y','z','w'],\n alleleNames = ['A','B'],\n s1 = 0.1,\n burnin=50,\n **kargs):\n\n self.reset()\n pop=sim.Population(size=[sizePop]*self.numPop, loci=self.loci, lociPos=list(range(self.dist, (self.dist*self.loci)+1,self.dist)), subPopNames=subPopNames, infoFields=infoFields)\n \n simu = sim.Simulator(pop)\n print(\"The simulation has started\")\n t1 = time.time()\n\n\n mutate_snps=range(0,50)+range(51,101)\n\n # define the initialization of each loci based the beta distribution where a and b parameters are allele frequencies from noncoding human regions\n snps=[0.14, 0.11, 0.17, 0.11, 0.32, 0.33, 0.21, 0.11, 0.11, 0.28, 0.11, 0.12, 0.8, 0.66, 0.74, 0.68, 0.66, 0.77, 0.77, 0.76, 0.77, 0.74, 0.72, 0.11, 0.73, 0.72, 0.72, 0.72, 0.54, 0.17, 0.78, 0.64, 0.78, 0.2, 0.24, 0.25, 0.78, 0.66, 0.2, 0.14, 0.75, 0.16, 0.72, 0.18, 0.77, 0.42, 0.34, 0.7, 0.17, 0.14, 0.2, 0.46, 0.13, 0.26, 0.16, 0.13, 0.14, 0.24, 0.18, 0.36, 0.71, 0.27, 0.28, 0.25, 0.25, 0.3, 0.19, 0.14, 0.16, 0.3, 0.39, 0.16, 0.24, 0.32, 0.11, 0.18, 0.48, 0.31, 0.21, 0.15, 0.34, 0.71, 0.33, 0.18, 0.71, 0.13, 0.23, 0.2, 0.22, 0.23, 0.16, 0.23, 0.23, 0.22, 0.24, 0.82, 0.36, 0.37, 0.72, 0.16, 0.14]\n self.initFreq=[]\n\n \n for i in range(len(snps)):\n alpha=float(4*sizePop*migration_rate*snps[i])\n bhta=float(4*sizePop*migration_rate*(1-snps[i])) \n p=numpy.random.beta(alpha,bhta)\n while (p>=0.9 or p<=0.1):\n p=numpy.random.beta(alpha,bhta)\n \n print \" SNP {snp} with alpha {alpha}, bhta {bhta} and frequency {p}\".format(snp=i, alpha=alpha, bhta=bhta, p=p)\n self.initFreq.append(p)\n\n simu.evolve(\n \n initOps=[sim.InitGenotype(freq=[self.initFreq[i], 1-self.initFreq[i]], loci=i) for i in range(len(snps))],\n \n\n # initialize the sex and select the 50 loci (parents)\n preOps = [sim.InitSex(maleProp=0.5,at=[0]),\n\n # initialize the genotype of locus 50 at generation 0 (in the beginning of the simulation)\n sim.PyOperator(self.genotypeBegin,at=[0]),\n \n # Wait 50 generations for the system to reach equilibrium\n # Then, change the the genotype of locus 50 at generation 50 by inserting a single copy of allele 0 in one individual \n sim.PyOperator(self.genotypeAfter,at=[50]),\n\n # function that carries out the selection proccess\n sim.MaSelector(loci=50,wildtype=0,fitness=[1+s1, 1+s1/2, 1],begin=50, end=-1,subPops=1)],\n\n # recombination\n matingScheme=sim.RandomMating(ops=[\n sim.Recombinator(rates=recombination_rate)]),\n \n # mutation and migration of offsprings\n postOps = [\n\n \n sim.SNPMutator(u=mutation_rate,loci=mutate_snps),\n \n # call function to calculate Fst and check for equilibrium state\n sim.PyOperator(self.calcFst,step=step),\n\n #migration\n # Here we define an island model, but this can easily be changed.\n # For more information about the migration models, please look in the documentation of SimuPOP here http://simupop.sourceforge.net/manual_svn/build/userGuide_ch7_sec3.html\n sim.Migrator(sim.utils.migrIslandRates(migration_rate,self.numPop)),\n \n # call function to save the allele frequencies\n sim.PyOperator(self.checkAlleles, step=step, param = subPopNames),\n \n \n # check if locus 50 is lost due to genetic drift. 
If yes, we terminate the simulation\n sim.Stat(alleleFreq=50,step=step,subPops=1,begin=50,end=-1),\n sim.TerminateIf('alleleFreq[50][0] == 0',step=step,begin=50,end=-1),\n \n # check the progress of the simulation\n sim.PyEval('\"Gen: %d\" % gen',step=step),\n sim.PyOutput('\\n',step=step),\n \n ],\n gen=self.Gen\n \n )\n \n \n t2 = time.time()\n print \"simulation took\", t2-t1, \"seconds.\"",
"def sample_from_bm(self,\n num_chains, \n num_samples,\n num_steps,\n save_to_path,\n num_burn_in,\n test_inputs = None,\n print_p_tilda = False,\n print_gibbs = False):\n \n if type(test_inputs) is np.ndarray:\n \n print(\"Will initialize gibbs chains with dataset images\\n\")\n \n num_test_examples = test_inputs.shape[0]\n \n self.test_inputs = theano.shared(np.asarray(test_inputs,\n dtype=theano.config.floatX),\n borrow= True) \n \n select_examples = np.random.choice(num_test_examples, \n num_chains, \n replace=False)\n \n init_chains = np.asarray(\n self.test_inputs.get_value(borrow=True)[select_examples,:],\n dtype=theano.config.floatX)\n \n else:\n \n print(\"Will initialize gibbs chains with random images\\n\")\n init_chains = self.np_rand_gen.binomial(n=1,p=0.5, \n size = (num_chains, self.num_vars))\n \n images = np.zeros([num_chains*num_samples+num_chains, self.num_vars])\n \n images[0:num_chains,:] = init_chains\n \n theano.config.exception_verbosity = 'high'\n \n self.x_gibbs = theano.shared(init_chains, name= \"x_gibbs\")\n \n if self.num_hidden > 0:\n print(\"Running gibbs chains for RBM ...\\n\")\n \n (\n [ _,\n _,\n _,\n x_inputs,\n p_xi_given_x_,\n x_samples\n ],\n updates\n ) = theano.scan(\n self.gibbs_step_rbm_vis,\n outputs_info=[None, None, None, None, None, self.x_gibbs],\n n_steps= num_steps)\n \n output_vars = [p_xi_given_x_[-1], x_samples[-1]]\n \n updates.update({self.x_gibbs: x_samples[-1]})\n \n else:\n \n print(\"Running gibbs chains for BM ...\\n\")\n \n (p_xi_given_x_, x_samples), updates =\\\n theano.scan(self.gibbs_step_fully_visible, n_steps = num_steps)\n \n output_vars = [p_xi_given_x_[num_burn_in:],\n x_samples[num_burn_in:]]\n \n take_step = (num_steps - num_burn_in) // self.num_vars \n \n if take_step == 0:\n \n take_step = 1\n \n get_samples = theano.function(inputs = [],\n outputs = output_vars, \n updates = updates)\n \n for ind in range(num_samples):\n \n p_all, samples_all = get_samples()\n \n if num_steps != 1 and self.num_hidden == 0:\n \n p_out, samples_out = self.assemble_image(p_all, \n samples_all,\n num_chains,\n step = take_step)\n \n elif num_steps ==1 and self.num_hidden == 0:\n \n p_out = p_all[-1]\n \n samples_out = samples_all[-1]\n \n elif self.num_hidden > 0:\n \n p_out = p_all\n \n samples_out = samples_all\n \n if self.num_hidden == 0:\n \n p_out = np.transpose(p_out) \n \n # without resetting the chains are persistent for\n # fully visible Boltzmann Machines\n # (self.x_gibbs are modified continuously)\n # self.x_gibbs.set_value(init_chains)\n \n print(\"Sample %d -- max pixel activations for %d gibbs chains:\"%\n (ind, num_chains))\n print(np.max(p_out, axis= 1))\n print(\"\")\n \n if print_gibbs:\n self.print_gibbs_conditionals(p_vals = p_all)\n \n if print_p_tilda: \n is_samples = self.np_rand_gen.binomial(n=1, \n p=0.5, \n size =(10000, self.num_vars))\n \n gibbs_p_tilda, rand_p_tilda = \\\n self.test_p_tilda(np.transpose(samples_out), \n is_samples,\n training = False)\n \n print(\"p_tilda values for gibbs samples:\")\n print(gibbs_p_tilda)\n print(\"\")\n print(\"p_tilda values for randomly chosen importance samples:\")\n print(rand_p_tilda)\n print(\"\")\n \n images[num_chains*(ind+1):num_chains*(ind+2),:] = np.round(p_out)\n \n make_raster_plots(images, \n num_samples, \n num_chains, \n reshape_to = [self.side, self.side], \n save_to_path = save_to_path)",
"def run_sparring_algorithm(means, horizon):\n\n # The number of arms\n n_arms = len(means)\n\n # Shuffling the means vector.\n random.shuffle(means)\n\n # Assigning Bernoulli arms\n arms = map(lambda (mu): BernoulliArm(mu), means)\n\n # Assigning the black-boxes with the UCB 1 algorithm\n left_black_box = UCB1([], [])\n right_black_box = UCB1([], [])\n\n # Initializing the black-boxes.\n left_black_box.initialize(n_arms)\n right_black_box.initialize(n_arms)\n\n # Initializing rewards and regrets\n average_reward = [0]*horizon\n\n regret = [0]*horizon\n\n cumulative_average_reward = [0]*horizon\n\n cumulative_regret = [0]*horizon\n\n for t in range(horizon):\n\n # Using the black-boxes to select the arms\n left_arm = left_black_box.select_arm()\n right_arm = right_black_box.select_arm()\n\n # Acquiring the rewards\n left_reward = arms[left_arm].draw()\n\n right_reward = arms[right_arm].draw()\n\n b = observe_b_t(left_reward, right_reward)\n b_not = 1 - b\n\n # Updating the black-boxes\n left_black_box.update(left_arm, b_not)\n right_black_box.update(right_arm, b)\n\n # Assigning the average reward.\n average_reward[t] = float(right_reward + left_reward) / 2\n\n # Assigning the regret\n regret[t] = max(means) - average_reward[t]\n\n # Assigning the cumulative regret and rewards\n if t == 1:\n cumulative_average_reward[t] = average_reward[t]\n\n cumulative_regret[t] = regret[t]\n else:\n cumulative_average_reward[t] = average_reward[t] + cumulative_average_reward[t-1]\n\n cumulative_regret[t] = regret[t] + cumulative_regret[t-1]\n\n # Returning the average regret.\n return cumulative_regret",
"def ais_latent_network_given_A(x0, graph_model, graph_sampler, N_samples=1000, B=100,\n steps_per_B=11):\n import pdb; pdb.set_trace()\n betas = np.linspace(0,1,B)\n\n # Sample m points\n log_weights = np.zeros(N_samples)\n for m in range(N_samples):\n # Sample a new set of graph parameters from the prior\n x = copy.deepcopy(x0)\n\n # print \"M: %d\" % m\n # Sample mus from each of the intermediate distributions,\n # starting with a draw from the prior.\n samples = []\n\n # Ratios correspond to the 'f_{n-1}(x_{n-1})/f_{n}(x_{n-1})' values in Neal's paper\n ratios = np.zeros(B-1)\n\n # Sample the intermediate distributions\n for (n,beta) in zip(range(1,B), betas[1:]):\n # print \"M: %d\\tBeta: %.3f\" % (m,beta)\n sys.stdout.write(\"M: %d\\tBeta: %.3f \\r\" % (m,beta))\n sys.stdout.flush()\n # Set the likelihood scale (beta) in the graph model\n graph_model.lkhd_scale.set_value(beta)\n\n # Take 100 steps per beta\n for s in range(steps_per_B):\n x = graph_sampler.update(x)\n\n # Compute the ratio of this sample under this distribution and the previous distribution\n curr_lkhd = seval(graph_model.log_p,\n graph_model.get_variables(),\n x['net']['graph'])\n\n graph_model.lkhd_scale.set_value(betas[n-1])\n prev_lkhd = seval(graph_model.log_p,\n graph_model.get_variables(),\n x['net']['graph'])\n\n ratios[n-1] = curr_lkhd - prev_lkhd\n\n # Compute the log weight of this sample\n log_weights[m] = np.sum(ratios)\n\n print \"\"\n print \"W: %f\" % log_weights[m]\n\n # Compute the mean of the weights to get an estimate of the normalization constant\n log_Z = -np.log(N_samples) + logsumexp(log_weights)\n return log_Z",
"def main():\n np.random.seed(219)\n rospy.init_node(\"sawyer_dagger_teacher\")\n pub_start = rospy.Publisher('/teacher/start', JointCommand, queue_size=1)\n pub_epi_fin = rospy.Publisher('/teacher/fin', JointCommand, queue_size=1)\n vel_ik_pos_pub = rospy.Publisher('/teacher/ik_vel/', Pose, queue_size = 3)\n pub3 = rospy.Publisher('/ddpg/vel_start/', Float64, queue_size=1)\n pub4 = rospy.Publisher('/ddpg/vel_end/', Float64, queue_size=1)\n goal_obs_pub = rospy.Publisher('/teacher/goal_obs/', Pose, queue_size=1)\n pos_cmd_pub = rospy.Publisher('/teacher/pos_cmd_pub/', PosCmd, queue_size=1)\n\n\n\n rospy.set_param('dagger_reset',\"false\") # param_name, param_value\n\n\n # Load Gazebo Models via Spawning Services\n # Note that the models reference is the /world frame\n # and the IK operates with respect to the /base frame\n # load_gazebo_models()\n # Remove models from the scene on shutdown\n rospy.on_shutdown(delete_gazebo_models)\n\n limb = 'right'\n hover_distance = 0.15 # meters\n # Starting Joint angles for right arm\n starting_joint_angles = {'right_j0': -0.041662954890248294,\n 'right_j1': -1.0258291091425074,\n 'right_j2': 0.0293680414401436,\n 'right_j3': 1.37518162913313,\n 'right_j4': -0.06703022873354225,\n 'right_j5': 0.7968371433926965,\n 'right_j6': 1.7659649178699421}\n\n pnp = PickAndPlace(limb, hover_distance)\n\n pnp.move_to_start(starting_joint_angles)\n\n \n # m_planner = trajectorySender()\n # An orientation for gripper fingers to be overhead and parallel to the obj\n overhead_orientation = Quaternion(\n x=-0.00142460053167,\n y=0.999994209902,\n z=-0.00177030764765,\n w=0.00253311793936)\n block_poses = list()\n # The Pose of the block in its initial location.\n # You may wish to replace these poses with estimates\n # from a perception node.\n block_poses.append(Pose(\n position=Point(x=0.45, y=0.155, z=-0.129),\n orientation=overhead_orientation))\n # Feel free to add additional desired poses for the object.\n # Each additional pose will get its own pick and place.\n block_poses.append(Pose(\n position=Point(x=0.6, y=-0.1, z=-0.129),\n orientation=overhead_orientation))\n # Move to the desired starting angles\n print(\"Running. 
Ctrl-c to quit\")\n # pnp.move_to_start(starting_joint_angles)\n idx = 0\n rate = rospy.Rate(1)\n block_quat_pose = [0.00142460053167,\n 0.999994209902,\n 0.00177030764765,\n 0.00253311793936]\n if rospy.has_param('vel_calc'):\n rospy.delete_param('vel_calc')\n load_gazebo_models()\n\n while not rospy.is_shutdown():\n\n\n starting_joint_angles['right_j0'] = np.random.uniform(-0.05, 0.05)\n starting_joint_angles['right_j1'] = np.random.uniform(-0.95, -0.85)\n starting_joint_angles['right_j2'] = np.random.uniform(-0.1, 0.1)\n starting_joint_angles['right_j3'] = np.random.uniform(1.6, 1.7)\n\n # starting_joint_angles['right_j0'] = np.random.uniform(-0.75, 0.75)\n # starting_joint_angles['right_j1'] = np.random.uniform(-0.97, -0.80)\n # starting_joint_angles['right_j2'] = np.random.uniform(-0.15, 0.15)\n # starting_joint_angles['right_j3'] = np.random.uniform(1.55, 1.75)\n\n start_pose = [starting_joint_angles['right_j0'], starting_joint_angles['right_j1'],\n starting_joint_angles['right_j2'], starting_joint_angles['right_j3'],\n starting_joint_angles['right_j4'], starting_joint_angles['right_j5'],\n starting_joint_angles['right_j6']]\n \n while not rospy.is_shutdown(): # wait until trajectory is collected for each episode\n if rospy.has_param('dagger_reset'):\n rospy.delete_param('dagger_reset')\n break\n pnp.move_to_start(starting_joint_angles)\n\n\n delete_kinect_camera()\n # delete_gazebo_models()\n delete_gazebo_block()\n rand_x = np.random.uniform(0.45, .75)\n rand_y = np.random.uniform(-0.2, 0.33)\n # rand_x = np.random.uniform(0.44,0.68)\n\n # rand_y = np.random.uniform(-0.20, 0.35)\n pose_block = Pose(position=Point(x=rand_x, y=rand_y, z=1.00)\n , orientation=overhead_orientation)\n pose_rob = Pose(position=Point(x=rand_x-0.015, y=rand_y+0.03, z=0.03), orientation=overhead_orientation) \n\n # rospy.set_param('vel_calc', 'true')\n # pnp.move_to_start(starting_joint_angles)\n # oktogo = pnp.move_to_start_vel_command(start_pose)\n # if rospy.has_param('vel_calc'):\n # rospy.delete_param('vel_calc')\n # loads env\n load_gazebo_block(block_pose=pose_block)\n # load_kinect_camera()\n\n \n\n # rospy.set_param('vel_calc', 'true')\n print 'Reaching target object... Learning...'\n rospy.set_param('epi_start', 'true')\n pnp.reach(pose_rob, pos_cmd_pub)\n # reached = pnp.reach_vel_ctrl(pose_rob)\n rospy.sleep(0.5)\n # if rospy.has_param('vel_calc'):\n # rospy.delete_param('vel_calc')\n # if reached:\n # rospy.set_param('reached', 'true')\n # goal_obs_pub.publish(pose_rob)\n\n\n print 'Reached target object! and Goal obs acquired Resetting...'\n # while not rospy.is_shutdown(): # wait until trajectory is collected for each episode\n # if rospy.has_param('demo_success'):\n # break\n while not rospy.is_shutdown(): # wait until trajectory is collected for each episode\n if rospy.has_param('demo_success'):\n rospy.delete_param('demo_success')\n break\n \n # rospy.delete_param('demo_success')\n \n\n return 0",
"def main():\n\n config = read_json_file(CONFIG_FILE)\n posititve_path = (\n config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"POSITIVE_FILENAME\"]\n )\n negative_path = (\n config[\"main\"][\"DATASET_BASE_PATH_DIR\"]\n + config[\"main\"][\"NEGATIVE_FILENAME\"]\n )\n complexity_factor = config[\"main\"][\"COMPLEXITY_FACTOR\"]\n max_sequences_to_fit_pos = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_POS\"]\n max_sequences_to_fit_neg = config[\"main\"][\"MAX_SEQUENCES_TO_FIT_NEG\"]\n\n input_organisms_path = config[\"main\"][\"INPUT_FILENAME\"]\n mean_nodes = 3.0\n mean_fitness = 150\n positive_dataset = read_fasta_file(posititve_path)\n positive_dataset.sort()\n negative_dataset = read_fasta_file(negative_path)\n print(\"{} {}\".format(len(positive_dataset), len(negative_dataset)))\n\n organism_factory = OrganismFactory(\n config[\"organism\"],\n config[\"organismFactory\"],\n config[\"connector\"],\n config[\"pssm\"],\n )\n\n a_organisms = organism_factory.import_organisms(input_organisms_path)\n # random.shuffle(negativeDataset)\n\n for org in a_organisms:\n\n # org.print()\n nodes = org.count_nodes()\n\n p_1 = org.get_seq_set_fitness(\n positive_dataset[:max_sequences_to_fit_pos]\n )\n n_1 = org.get_seq_set_fitness(\n negative_dataset[:max_sequences_to_fit_neg]\n )\n # p1 = 20\n # n1 = org.getSeqSetFitness(negativeDataset[31:32])\n c_1 = org.get_complexity(mean_nodes, mean_fitness)\n\n # Score\n fitness = p_1 - n_1\n effective_fitness = fitness - complexity_factor * c_1\n print(\n (\n \"ORG {} N: {:.2f} P: {:.2f} N: {:.2f} C: {:.2f} F: {:.2f}\"\n + \" EF: {:.2f}\\n\"\n ).format(org._id, nodes, p_1, n_1, c_1, fitness, effective_fitness)\n )\n\n export_organism(\n org,\n positive_dataset,\n \"{}positive_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )\n # exportOrganism(\n # org,\n # negativeDataset[31:32],\n # \"{}negative_{}\".format(config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org.ID),\n # organismFactory,\n # )\n\n export_organism(\n org,\n negative_dataset[:50],\n \"{}negative_{}\".format(\n config[\"main\"][\"RESULT_TEST_BASE_PATH_DIR\"], org._id\n ),\n organism_factory,\n )",
"def main(rand,mu,lamb,cxpb,mutpb,ngen,param):\n \n random.seed(rand)\n NGEN = ngen\n MU = mu\n LAMBDA = lamb\n CXPB = cxpb\n MUTPB = mutpb\n \n # Used for printing the results. It is the parameter that is changed one run from another\n if param==\"rand\" or param==\"optimal\":\n list_results=[rand]\n elif param==\"mu\":\n list_results=[mu]\n elif param==\"lamb\":\n list_results=[lamb]\n elif param==\"cross\":\n list_results=[cxpb]\n elif param==\"mutate\":\n list_results=[mutpb]\n elif param==\"ngen\":\n list_results=[ngen]\n elif param==\"original\":\n list_results=[0]\n \n # Initialization of the objects for the GA\n pop = toolbox.population(n=MU)\n hof = tools.ParetoFront()\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean, axis=0)\n stats.register(\"std\", np.std, axis=0)\n stats.register(\"min\", np.min, axis=0)\n stats.register(\"max\", np.max, axis=0)\n\n # Run of the GA\n p,logbook=algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,\n halloffame=hof,verbose=0)\n with open(results_path+param+'_logbook.csv', 'a',newline='') as f:\n w = csv.DictWriter(f, logbook[0].keys())\n w.writeheader()\n for el in logbook:\n w.writerow(el)\n w.writerow({})\n \n # Takes the max fitness of the population from all of the runs\n max_fit=0\n max_gen=0\n for elt in logbook:\n if elt['max'][0]>max_fit:\n max_fit=elt['max'][0]\n max_gen=elt['gen']\n list_results.append(max_fit)\n list_results.append(max_gen)\n \n #TODO\n# for ind in hof:\n# dist = numpy.linalg.norm(a-b)\n\n print (\"{0} {1} {2} {3}\".format(round(list_results[1],3),round(list_results[2],3),round(list_results[0],3),hof[0]))\n current_out_writer.writerow([list_results[0],list_results[1],list_results[2],hof[0]])\n \n return pop, stats, hof",
"def main():\n \n # Load the model\n model = EpamModel()\n model.load(\"bayes_1.zip\")\n \n # Load and clean/prepare test data \n x_test = pd.read_csv('BAZA_VALID_INPUT.csv')\n x_test_clean = cleanup_df(x_test)\n \n # Predict\n # FIXME: This currently does probabilistic prediction only!\n y_pred = model.predict(x_test_clean)\n \n with open('output.txt', 'w+') as f:\n for label in y_pred:\n f.write(f'{label}\\n')",
"def transport(input=(None)):\r\n n=100\r\n m=5\r\n G=nx.barabasi_albert_graph(n, m, seed=5)\r\n maxdeg=0\r\n degree_dist=[]\r\n for i in range(0,n):\r\n degree_dist.append(G.degree[i])\r\n if G.degree[i]>maxdeg:\r\n maxdeg=G.degree[i]\r\n j=i\r\n tf,tfa,tfb=10,20,1000\r\n Nt=10000\r\n iarray=LinearModel(G,x=j,i0=1,L1='L',D=0.1,tf=tf,Nt=1000)\r\n iarrayA=np.transpose(modelA(G,x=j,i0=1,beta=0.5,gamma=0.1,tf=tfa,Nt=Nt))\r\n iarrayB,s=modelB(G,x=j,i0=1,alpha=-0.01,tf=tfb,Nt=Nt)\r\n tarray = np.linspace(0,tf,1000+1)\r\n tarraya = np.linspace(0,tfa,Nt+1)\r\n tarrayb = np.linspace(0,tfb,Nt+1)\r\n\r\n plt.figure(figsize=(12, 6))\r\n plt.plot(tarray,iarray[:,j+1:])\r\n plt.xlabel('time')\r\n plt.ylabel('Intensity')\r\n plt.title(\"Linear model for BA graph(n=100,m=5), D=0.1, with highest node omitted, time step=10\")\r\n plt.show()\r\n\r\n plt.figure(figsize=(12, 6))\r\n plt.plot(tarraya,iarrayA)\r\n plt.xlabel('Time')\r\n plt.ylabel('Intensity')\r\n plt.title(\"Model A for BA graph(n=100,m=5), with beta=0.5,gamma=0.1,time step=20\")\r\n plt.show()\r\n\r\n plt.figure(figsize=(12, 6))\r\n plt.plot(tarrayb,iarrayB)\r\n plt.xlabel('Time')\r\n plt.ylabel('Intensity')\r\n plt.title(\"Model B for BA graph(n=100,m=5), with alpha=-0.01, timestep=500\")\r\n plt.show()\r\n\r\n #genrate data for tf=20 for all models\r\n tf=20\r\n iarray=LinearModel(G,x=j,i0=1,L1='L',D=0.1,tf=tf,Nt=Nt)\r\n iarrayA=np.transpose(modelA(G,x=j,i0=1,beta=0.5,gamma=0.1,tf=tf,Nt=Nt))\r\n iarrayB,s=modelB(G,x=j,i0=1,alpha=-0.01,tf=tf,Nt=Nt)\r\n tarray = np.linspace(0,tf,Nt+1)\r\n #generate the means\r\n mean=np.mean(iarray,axis=1)\r\n meanA=np.mean(iarrayA,axis=1)\r\n meanB=np.mean(iarrayB,axis=1)\r\n #generate thevar info\r\n var=np.var(iarray,axis=1)\r\n varA=np.var(iarrayA,axis=1)\r\n varB=np.var(iarrayB,axis=1)\r\n\r\n\r\n\r\n plt.figure(figsize=(12, 6))\r\n plt.plot(tarray, meanA ,label='Model A',color='r')\r\n plt.scatter(tarray, meanB ,label='Model B',marker=\"|\" ,alpha=0.5)\r\n plt.scatter(tarray, mean ,label='Linear L ',marker=\"_\")\r\n plt.xlabel('time')\r\n plt.ylabel('Mean Intensity for different models for BA graph(n=100,m=5)')\r\n plt.legend()\r\n plt.title(\"How Mean changes for different models for BA graph(n=100,m=5)\")\r\n plt.show()\r\n\r\n plt.figure(figsize=(12, 6))\r\n plt.plot(tarray, var ,label='Linear L')\r\n plt.plot(tarray, varA ,label='Model A')\r\n plt.plot(tarray, varB ,label='Model B')\r\n plt.xlabel('time')\r\n plt.ylabel('Var Intensity ')\r\n plt.legend()\r\n plt.title(\"How variance changes for different models of BA graphs (n=100,m=5)\")\r\n plt.show()\r\n\r\n\r\n\r\n return None #modify as needed\r",
"def simulation(G, # graph object\n pos = None, # positions of nodes\n n = 5, # number of simulation steps\n \n # wrapped args for simulation_step function\n kernel = 'weights', # simulation kernel\n custom_kernel = None, # custom simulation kernel\n WERE_multiplier = 10, # multiplier for WERE kernel\n oblivion = False, # enable information oblivion\n engagement_enforcement = 1.01,\n draw = False, # draw graph\n show_attr = False): # show attributes \n \n #=======================================#\n # append nodes data from 0 step to list #\n #=======================================#\n \n graph_list = []\n graph_list.append(copy.deepcopy(list(G.nodes.data() ) ) )\n \n\n #===================#\n # Run n simulations #\n #===================#\n \n for i in range(n):\n dp.simulation_step(G = G, \n pos = pos, \n \n kernel = kernel,\n custom_kernel = custom_kernel,\n WERE_multiplier = WERE_multiplier, \n oblivion = oblivion, \n engagement_enforcement = engagement_enforcement,\n draw = draw, \n show_attr = show_attr)\n\n # save nodes data to to list\n graph_list.append(copy.deepcopy(list(G.nodes.data() ) ) )\n \n \n #======================================================#\n # Count aware agents before and after simulation steps #\n #======================================================#\n \n # Check number of aware agents in 0 step\n #global aware_first\n aware_first = []\n for i in range(len(graph_list[0])):\n aware_first.append(graph_list[0][i][1]['state'])\n aware_first_c = aware_first.count('aware')\n \n # graph_list[0][1][1]['state']\n \n # Check number of aware agents in the last step\n #global aware_last\n aware_last = []\n graph_list_len = len(graph_list) - 1\n for i in range(len(graph_list[0])):\n aware_last.append(graph_list[graph_list_len][i][1]['state']) # n is the last sim\n aware_last_c = aware_last.count('aware')\n \n #graph_list[5][0][1]['state']\n \n #=================================#\n # diffusion performance measuring #\n #=================================#\n \n # equation for diffusion performance measuring\n avg_aware_inc_per_step = (aware_last_c - aware_first_c) / n\n \n # show graph statistics\n return graph_list, avg_aware_inc_per_step",
"def demo_grading_graph(hunter_bot, target_bot, next_move_fcn, OTHER = None):\n max_distance = 0.98 * target_bot.distance # 0.98 is an example. It will change.\n separation_tolerance = 0.02 * target_bot.distance # hunter must be within 0.02 step size to catch target\n caught = False\n ctr = 0\n #For Visualization\n import turtle\n window = turtle.Screen()\n window.bgcolor('white')\n chaser_robot = turtle.Turtle()\n chaser_robot.shape('arrow')\n chaser_robot.color('blue')\n chaser_robot.resizemode('user')\n chaser_robot.shapesize(0.3, 0.3, 0.3)\n broken_robot = turtle.Turtle()\n broken_robot.shape('turtle')\n broken_robot.color('green')\n broken_robot.resizemode('user')\n broken_robot.shapesize(0.3, 0.3, 0.3)\n size_multiplier = 15.0 #change size of animation\n chaser_robot.hideturtle()\n chaser_robot.penup()\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n chaser_robot.showturtle()\n broken_robot.hideturtle()\n broken_robot.penup()\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n broken_robot.showturtle()\n measuredbroken_robot = turtle.Turtle()\n measuredbroken_robot.shape('circle')\n measuredbroken_robot.color('red')\n measuredbroken_robot.penup()\n measuredbroken_robot.resizemode('user')\n measuredbroken_robot.shapesize(0.1, 0.1, 0.1)\n broken_robot.pendown()\n chaser_robot.pendown()\n\n prediction = turtle.Turtle()\n prediction.shape('arrow')\n prediction.color('pink')\n prediction.resizemode('user')\n prediction.shapesize(0.2, 0.2, 0.2)\n prediction.penup()\n\n meeting = turtle.Turtle()\n meeting.shape('circle')\n meeting.color('red')\n meeting.resizemode('user')\n meeting.shapesize(0.3, 0.3, 0.3)\n meeting.penup()\n #End of Visualization\n # We will use your next_move_fcn until we catch the target or time expires.\n while not caught and ctr < 1000:\n # Check to see if the hunter has caught the target.\n hunter_position = (hunter_bot.x, hunter_bot.y)\n target_position = (target_bot.x, target_bot.y)\n separation = distance_between(hunter_position, target_position)\n if separation < separation_tolerance:\n print(\"You got it right! 
It took you \", ctr, \" steps to catch the target.\")\n caught = True\n\n # The target broadcasts its noisy measurement\n target_measurement = target_bot.sense()\n\n # This is where YOUR function will be called.\n turning, distance, OTHER = next_move_fcn(hunter_position, hunter_bot.heading, target_measurement, max_distance, OTHER)\n position_guess = OTHER['meeting_position']\n next_target_guess = OTHER['target_position']\n\n # Don't try to move faster than allowed!\n if distance > max_distance:\n distance = max_distance\n\n # We move the hunter according to your instructions\n hunter_bot.move(turning, distance)\n\n # The target continues its (nearly) circular motion.\n target_bot.move_in_circle()\n #Visualize it\n measuredbroken_robot.setheading(target_bot.heading*180/pi)\n measuredbroken_robot.goto(target_measurement[0]*size_multiplier, target_measurement[1]*size_multiplier-100)\n measuredbroken_robot.stamp()\n broken_robot.setheading(target_bot.heading*180/pi)\n broken_robot.goto(target_bot.x*size_multiplier, target_bot.y*size_multiplier-100)\n chaser_robot.setheading(hunter_bot.heading*180/pi)\n chaser_robot.goto(hunter_bot.x*size_multiplier, hunter_bot.y*size_multiplier-100)\n\n prediction.setheading(target_bot.heading*180/pi)\n prediction.goto(next_target_guess[0]*size_multiplier, next_target_guess[1]*size_multiplier-100)\n prediction.stamp()\n\n meeting.clear()\n meeting.setheading(target_bot.heading*180/pi)\n meeting.goto(position_guess[0]*size_multiplier, position_guess[1]*size_multiplier-100)\n meeting.stamp()\n #End of visualization\n\n ctr += 1\n if ctr >= 1000:\n print(\"It took too many steps to catch the target.\")\n return caught",
"def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.01) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit\n return [a.state_action_table, a.reward_hist]",
"def test_generate_barabasi(self):\n config = {\n 'network_params': {\n 'generator': 'barabasi_albert_graph'\n }\n }\n with self.assertRaises(TypeError):\n G = serialization.load_network(config['network_params'])\n config['network_params']['n'] = 100\n config['network_params']['m'] = 10\n G = serialization.load_network(config['network_params'])\n assert len(G) == 100",
"def run(self) -> None:\n barcoded = BarcodedFilename.from_sample(self.analysis.sample)\n\n if barcoded.analyte == Analyte.RNASEQ:\n if self.analysis.parameters[\"rnaseq_aligner\"] == RnaSeqAligner.STAR:\n self.star()\n else:\n raise Exception(\"unexpected aligner for this type of sample\")\n else:\n if self.analysis.parameters[\"aligner\"] == GenericAligner.NOVOALIGN:\n self.novoalign()\n elif self.analysis.parameters[\"aligner\"] == GenericAligner.BWA:\n self.bwa()\n else:\n raise Exception(\"unexpected aligner for this type of sample\")",
"def run(): \n learning_rate = 0.42\n discount_rate = 0.15\n initial_q_hat = 4\n \n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent, learning_rate, discount_rate, initial_q_hat) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n print \"Failed trials: \"\n print a.get_failed_trials()\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line"
]
| [
"0.56036776",
"0.5418923",
"0.54050314",
"0.53924143",
"0.5367906",
"0.5306896",
"0.52951884",
"0.52781194",
"0.5248882",
"0.52286494",
"0.52176565",
"0.5212118",
"0.5211001",
"0.52091",
"0.5207191",
"0.51686174",
"0.51607203",
"0.51577896",
"0.5156206",
"0.5151868",
"0.51253563",
"0.5118526",
"0.51104367",
"0.5109103",
"0.5104256",
"0.50933886",
"0.50911146",
"0.5075411",
"0.5073622",
"0.5060731"
]
| 0.54888535 | 1 |
Compute RHS of modelA at time t | def RHS(y,t):
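# dy/dt = (A @ y) * (1 - y) - beta * y, with A, ones and beta drawn from the enclosing modelA scope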
return np.multiply(A.dot(y),ones-y)-beta*y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def model(self, t):\n return self.traits['A'] * np.sin(2 * np.pi * (t - self.traits['E']) / self.traits['P'])",
"def model(a, t, D, m):\n xprime = a[1] # x' = v\n vprime = -D*a[0]/m # v' = -D*x/m\n return [xprime, vprime]",
"def a(self, x, t0):\n dummy = 0.0 * t0\n x_1 = x[0]\n x_2 = x[1]\n x_3 = x[2]\n x_4 = x[3]\n x_5 = x[4]\n x_6 = x[5]\n x_7 = x[6]\n # Load parameters.\n e_1_tilde = self.e_1_tilde\n e_2_tilde = self.e_2_tilde\n r = self.rr\n z_1 = self.z_1\n z_1_tilde = self.z_1_tilde\n z_2_tilde = self.z_2_tilde\n alpha_1 = self.alpha_1\n alpha_2 = self.alpha_2\n alpha_2_tilde = self.alpha_2_tilde\n alpha_11_tilde = self.alpha_11_tilde\n alpha_12_tilde = self.alpha_12_tilde\n mu_H_tilde = self.mu_H_tilde\n mu_1_tilde = self.mu_1_tilde\n mu_2_tilde = self.mu_2_tilde\n sigma_H = self.sigma_H\n sigma_1_tilde = self.sigma_1_tilde\n sigma_2_tilde = self.sigma_2_tilde\n sigma_11 = self.sigma_11\n sigma_12 = self.sigma_12\n #\n # Right hand side of model.\n #\n f_1 = z_1 * alpha_1 * x_3 * (1.0 - x_1) - mu_H_tilde * x_1\n f_2 = (1.0 - x_2) * (\n x_3 * (z_1_tilde * alpha_11_tilde + z_2_tilde * alpha_12_tilde)\n + z_1_tilde * alpha_2_tilde * x_6) - mu_1_tilde * x_2\n f_3 = (z_1 * sigma_H * x_1 +\n (z_1_tilde * sigma_11 + z_2_tilde * sigma_12) * x_2) \\\n * (x_4 - x_3) - (e_1_tilde + x_4) * x_3\n f_4 = x_4 * (1 - x_4)\n f_5 = z_2_tilde * alpha_2 * x_6 * (1.0 - x_5) - mu_2_tilde * x_5\n f_6 = (z_1_tilde * sigma_1_tilde * x_2\n + z_2_tilde * sigma_2_tilde * x_5) * (x_7 - x_6) \\\n - (e_2_tilde + r * x_7) * x_6\n f_7 = r * x_7 * (1.0 - x_7)\n r = np.array([f_1, f_2, f_3, f_4, f_5, f_6, f_7])\n return r",
"def rhs(states, t, parameters, values=None):\n # Imports\n import numpy as np\n import math\n\n # Assign states\n assert(len(states) == 6)\n vlv, vrv, v1s, v2s, v1p, v2p = states\n\n # Assign parameters\n assert(len(parameters) == 38)\n restVlvd=parameters[0]; restVlvs=parameters[1]; restVrvd=parameters[2];\\\n restVrvs=parameters[3]; Emaxlv=parameters[4]; Emaxrv=parameters[5];\\\n Eminlv=parameters[6]; Eminrv=parameters[7]; R1_s=parameters[16];\\\n Rao_s=parameters[18]; Rmit=parameters[19]; c1s=parameters[20];\\\n c2s=parameters[21]; pext_s=parameters[22]; rest_v1s=parameters[23];\\\n rest_v2s=parameters[24]; R1_p=parameters[25]; Rao_p=parameters[27];\\\n Rtric=parameters[28]; c1p=parameters[29]; c2p=parameters[30];\\\n p_epi=parameters[31]; pext_p=parameters[32]; rest_v1p=parameters[33];\\\n rest_v2p=parameters[34]; bcl=parameters[35];\\\n twitchperiod=parameters[37]\n\n # Init return args\n if values is None:\n values = np.zeros((6,), dtype=np.float_)\n else:\n assert isinstance(values, np.ndarray) and values.shape == (6,)\n\n # Expressions for the Time varying elastance component\n t_lv = math.fmod(t, bcl)\n yv = (0.5 - 0.5*math.cos(2.0*math.pi*t_lv/twitchperiod) if t_lv <\\\n twitchperiod else 0)\n Elv = Eminlv + (Emaxlv - Eminlv)*yv\n restVlv = restVlvs + (1 - yv)*(restVlvd - restVlvs)\n plv = p_epi + (-restVlv + vlv)*Elv\n Erv = Eminrv + (Emaxrv - Eminrv)*yv\n restVrv = restVrvs + (1 - yv)*(restVrvd - restVrvs)\n prv = p_epi + (-restVrv + vrv)*Erv\n\n # Expressions for the Pressures and flows component\n p1s = (-pext_s + (-rest_v1s + v1s)/c1s if v1s > 0 else -pext_s -\\\n rest_v1s/c1s)\n p2s = (-pext_s + (-rest_v2s + v2s)/c2s if v2s > 0 else -pext_s -\\\n rest_v2s/c2s)\n p1p = (-pext_p + (-rest_v1p + v1p)/c1p if v1p > 0 else -pext_p -\\\n rest_v1p/c1p)\n p2p = (-pext_p + (-rest_v2p + v2p)/c2p if v2p > 0 else -pext_p -\\\n rest_v2p/c2p)\n qart_s = ((-p1s + plv)/Rao_s if plv > p1s else 0)\n q_mit = ((-plv + p2p)/Rmit if p2p > plv else 0)\n q1_s = (-p2s + p1s)/R1_s\n qart_p = ((-p1p + prv)/Rao_p if prv > p1p else 0)\n q_tric = ((-prv + p2s)/Rtric if p2s > prv else 0)\n q1_p = (-p2p + p1p)/R1_p\n\n # Expressions for the Ventricular volumes component\n values[0] = -qart_s + q_mit\n values[1] = -qart_p + q_tric\n\n # Expressions for the Systemic volumes component\n values[2] = -q1_s + qart_s\n values[3] = -q_tric + q1_s\n\n # Expressions for the Pulmonary volumes component\n values[4] = -q1_p + qart_p\n values[5] = -q_mit + q1_p\n\n # Return results\n return values",
"def RHSnet(y,t,a,b0,b1,g,k,w):\n S = y[:N]\n E = y[N:2*N]\n C = y[2*N:3*N]\n b = b0 + b1*(1+np.cos(2*np.pi*t))\n dy = np.zeros(3*N)\n dy[:N]= k*(1-S)-b*C*S+w*np.dot(P,S)-w*S\n dy[N:2*N]= b*C*S-(k+a)*E+w*np.dot(P,E)-w*E\n dy[2*N:3*N]= a*E-(g+k)*C+w*np.dot(P,C)-w*C\n return dy",
"def modelA(G,x=0,i0=0.1,beta=1.0,gamma=1.0,tf=5,Nt=1000):\r\n\r\n N = G.number_of_nodes()\r\n iarray = np.zeros((N,Nt+1))\r\n tarray = np.linspace(0,tf,Nt+1)\r\n A=(nx.adjacency_matrix(G))*gamma\r\n ones=np.ones(N)\r\n y0=np.zeros(N)\r\n y0[x]=i0\r\n\r\n\r\n def RHS(y,t):\r\n \"\"\"Compute RHS of modelA at time t\r\n input: y should be a size N array\r\n output: dy, also a size N array corresponding to dy/dt\r\n\r\n Discussion: add discussion here\r\n \"\"\"\r\n\r\n return np.multiply(A.dot(y),ones-y)-beta*y\r\n\r\n iarray[:,:]=np.transpose(scipy.integrate.odeint(RHS,y0,tarray))\r\n\r\n\r\n return iarray",
"def rule(model):\n ind_i = model.timeslots2\n ind_j = model.tasks\n inv = 1-self.valid\n total = sum(\n model.A2[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A3[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A4[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n\n ind_i = model.timeslots3\n ind_j = model.tasks\n total += sum(\n model.A3[i, j] * inv[i + 2, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A4[i, j] * inv[i + 2, j] for i in ind_i for j in ind_j)\n\n ind_i = model.timeslots4\n ind_j = model.tasks\n total += sum(\n model.A4[i, j] * inv[i + 3, j] for i in ind_i for j in ind_j)\n\n return None, total, 0",
"def RHSnetFomp(y,t,a,b0,b1,g,k,w):\n dy = fn.rhs_omp(P,y,t,a,b0,b1,g,k,w,2)\n return dy",
"def solve(self, model, t_eval):\n\n def residuals(t, y, ydot):\n rhs_eval = model.concatenated_rhs.evaluate(t, y)\n return np.concatenate(\n (\n rhs_eval - ydot[: rhs_eval.shape[0]],\n model.concatenated_algebraic.evaluate(t, y),\n )\n )\n\n y0 = model.concatenated_initial_conditions\n ydot0 = model.concatenated_initial_conditions_ydot\n\n assert y0.shape == ydot0.shape, pybamm.ModelError(\n \"Shape of initial condition y0 {} is different from the shape of initial \"\n \"condition ydot0 {}\".format(y0.shape, ydot0.shape)\n )\n assert y0.shape == residuals(0, y0, ydot0).shape, pybamm.ModelError(\n \"Shape of initial condition y0 {} is different from the shape of residual \"\n \"function {}\".format(y0.shape, residuals(0, y0, ydot0).shape)\n )\n\n self.t, self.y = self.integrate(residuals, y0, ydot0, t_eval)",
"def rule(model, i, j):\n return 1, model.T0_end[i, j] + (1-model.A[i, j]), None",
"def _model(self, t, theta, period, tmpid):\n template = self.templates[tmpid]\n phase = (t / period - theta[2]) % 1\n return theta[0] + theta[1] * template(phase)",
"def rhs_vaccination(t, y, beta_s, beta_a, epsilon,\n delta_e, delta_v, p, q,\n alpha_a, alpha_t, alpha_s,\n mu, mu_s, mu_a,\n lambda_v, lambda_t):\n s, e, i_s, i_a, r, d, v, treat = y\n #\n n_bar = s + e + i_s + i_a + r + v + treat\n force_infection = (beta_s * i_s + beta_a * i_a) / n_bar\n rhs_s = mu * n_bar - force_infection * s - (mu + lambda_v) * s + delta_v * v\n rhs_e = force_infection * (epsilon * v + s) - (mu + delta_e) * e\n rhs_i_s = p * delta_e * e - (mu + mu_s + alpha_s + lambda_t) * i_s - (1.0 - q) * alpha_t * treat\n rhs_i_a = (1 - p) * delta_e * e - (mu + mu_a + alpha_a) * i_a\n rhs_r = alpha_s * i_s + alpha_a * i_a + q * alpha_t * treat - mu * r\n rhs_d = mu_s * i_s + mu_a * i_a\n rhs_v = lambda_v * s - epsilon * force_infection * v - (mu + delta_v) * v\n rhs_treat = lambda_t * i_s - (mu + alpha_t) * treat\n rhs = np.array([rhs_s, rhs_e, rhs_i_s, rhs_i_a, rhs_r, rhs_d, rhs_v, rhs_treat])\n return rhs",
"def RHSnetF(y,t,a,b0,b1,g,k,w):\n dy = fn.rhs(P,y,t,a,b0,b1,g,k,w)\n return dy",
"def evaluate_rhs(self, t, y, ydot=np.empty(0)):\n raise NotImplementedError",
"def equation(self):\n mat = np.zeros((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs[0:self.nlayers - 1] = 0.0\n rhs[self.nlayers - 1] = self.Qc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n head = e.potinflayers(self.xc, self.yc, self.layers) / self.aq.Tcol[self.layers, :]\n mat[0:self.nlayers - 1, ieq:ieq + e.nunknowns] = head[:-1] - head[1:]\n if e == self:\n for i in range(self.nlayers - 1):\n mat[i, ieq + i] -= self.resfac[i]\n mat[i, ieq + i + 1] += self.resfac[i + 1]\n mat[self.nlayers - 1, ieq:ieq + self.nlayers] = 1.0\n ieq += e.nunknowns\n else:\n head = e.potentiallayers(self.xc, self.yc, self.layers) / self.aq.T[self.layers]\n rhs[0:self.nlayers - 1] -= head[:-1] - head[1:]\n return mat, rhs",
"def eval_model(t,lat,lon,head,pitch,tide=0,temp=None,press=None):\n #get the sun positions for each timestamp, at our known lat,lon\n #sun_head, sun_zen = sunpos_mag(t,lat,lon,tide,temp,press,radians=True)\n sun_head = sunpos_mag(t, lat, lon, tide, temp, press, radians=True)\n sun_zen = sun_head[...,1]\n sun_head = sun_head[...,0]\n\n #TODO: input and output argument mismatch\n #get the ocean model aop values for each camera position\n aop = oceanaop(sun_head,sun_zen,head,pitch,1.33)\n return sun_zen,sun_head,aop",
"def evaluate_model(self, t, scaling_parameters, system_parameters):\n raise NotImplementedError",
"def RHS2(y,t):\r\n dy[:N] =y[N:2*N]\r\n dy[N:2*N]=scipy.sparse.csr_matrix.__mul__(L_alpha,y[0:N])\r\n return dy",
"def modelB(G,x=0,i0=0.1,alpha=-0.01,tf=5,Nt=1000):\r\n #set up graph atteributes\r\n N = G.number_of_nodes()\r\n degree_arr=np.asarray(G.degree(),dtype=int)[:,1]\r\n iarray = np.zeros((Nt+1,2*N))\r\n tarray = np.linspace(0,tf,Nt+1)\r\n #calucalte operaters and set intial conditions\r\n A=nx.adjacency_matrix(G)\r\n L=scipy.sparse.diags(degree_arr)-A\r\n L_alpha=L*alpha\r\n ones=np.ones(2*N)\r\n\r\n y0=np.zeros(2*N)\r\n y0[N+x]=i0\r\n #Add code here\r\n dy=np.zeros(N*2)\r\n def RHS2(y,t):\r\n \"\"\"Compute RHS of modelB at time t\r\n input: y should be a size N array\r\n output: dy, also a size N array corresponding to dy/dt\r\n\r\n Discussion: add discussion here\r\n \"\"\"\r\n dy[:N] =y[N:2*N]\r\n dy[N:2*N]=scipy.sparse.csr_matrix.__mul__(L_alpha,y[0:N])\r\n return dy\r\n\r\n iarray[:,:]=scipy.integrate.odeint(RHS2,y0,tarray)\r\n\r\n return iarray[:,N:],iarray[:,:N]",
"def model(t, y, mu0, mu1, beta, A, delta, nu, b):\n S,I,R = y[:]\n m = mu(b, I, mu0, mu1)\n \n dSdt = A - delta*S - ((beta*S*I) / (S + I + R))\n dIdt = -(delta + nu)*I - m*I + ((beta * S * I) / (S + I + R))\n dRdt = m * I - delta * R\n \n return [dSdt, dIdt, dRdt]",
"def rule(model, i, j):\n active = 1-self.task_spread[j]\n den = sum(tril[i, :])\n ind = model.timeslots\n total = sum(tril[i, k] * (\n 1 - model.A[k, j] - model.A2[k, j] - model.A3[k, j] - model.A4[\n k, j]) for k in ind)\n total /= den\n total *= active\n return -1 + EPS, model.CTl[i, j] - total, EPS + self.slack_cont",
"def compute_solution(self, model, t_eval):\n timer = pybamm.Timer()\n\n solve_start_time = timer.time()\n pybamm.logger.info(\"Calling DAE solver\")\n solution = self.integrate_casadi(\n self.casadi_problem, self.y0, t_eval, mass_matrix=model.mass_matrix.entries\n )\n solve_time = timer.time() - solve_start_time\n\n # Events not implemented, termination is always 'final time'\n termination = \"final time\"\n\n return solution, solve_time, termination",
"def update_rhs(self, h, a, r):\n return Tensors(\n t1=r.t1 - a.t1 / cc_denom(h.f, 2, 'dir', 'full'),\n t2=r.t2 - a.t2 / cc_denom(h.f, 4, 'dir', 'full'),\n t3=r.t3 - (a.t3 - a.t3.transpose([0, 1, 2, 4, 3, 5])) /\n cc_denom(h.f, 6, 'dir', 'full')\n )",
"def update_rhs(self, h, a, r):\n return Tensors(\n t1=r.t1 - a.t1 / cc_denom(h.f, 2, 'dir', 'full'),\n t2=r.t2 - a.t2 / cc_denom(h.f, 4, 'dir', 'full'),\n t3=r.t3 - (a.t3 - a.t3.transpose([0, 1, 2, 4, 3, 5])) /\n cc_denom(h.f, 6, 'dir', 'full')\n )",
"def rule(model):\n ind_i = model.timeslots\n ind_j = model.tasks\n total = sum(model.A[i, j] * (1-self.valid[i, j]) for i in ind_i\n for j in ind_j)\n total += sum(model.A2[i, j] * (1 - self.valid[i, j]) for i in\n ind_i for j in ind_j)\n total += sum(model.A3[i, j] * (1 - self.valid[i, j]) for i in\n ind_i for j in ind_j)\n\n return None, total, 0",
"def update_params(self, x_a, r_t, a_t):\n self.A_a[a_t] = self.A_a[a_t] + x_a[:, a_t].reshape(-1, 1).dot(x_a[:, a_t].reshape(-1, 1).T)\n self.A_a_inv[a_t] = inv(self.A_a[a_t])\n self.b_a[a_t] = self.b_a[a_t] + x_a[:, a_t].reshape(-1, 1) * r_t",
"def rhs(model, prognostic, forcing,\n batch_dims=('time', 'x', 'y')):\n\n batch_dims = [dim for dim in batch_dims if dim in prognostic.dims]\n prognostic = prognostic.stack(batch=batch_dims)\n forcing = forcing.stack(batch=batch_dims)\n\n prog = _dataset_to_dict(prognostic)\n forcing = _dataset_to_dict(forcing)\n\n prog.pop('p')\n w = prog.pop('w')\n\n model.eval()\n y, prec = model.rhs(prog, forcing, w)\n\n coords = {'z': prognostic['z'], 'batch': prognostic['batch']}\n dims = ['batch', 'z']\n y = {\n key: xr.DataArray(\n val.data.numpy(), coords=coords, dims=dims)\n for key, val in y.items()\n }\n\n return xr.Dataset(y).unstack('batch')",
"def compute(self, F, variables):\n s_0 = self.initial_state_generator(self.num_samples)\n a_0 = self.policy(s_0)\n a_t_plus_1 = a_0\n x_t = F.expand_dims(F.concat(s_0, a_0, dim=1), axis=1)\n cost = 0\n for t in range(self.n_time_steps):\n variables[self.model.X] = x_t\n res = self.model.Y.factor.predict(F, variables, targets=[self.model.Y], num_samples=self.num_samples)[0]\n s_t_plus_1 = res[0]\n\n cost = cost + self.cost_function(s_t_plus_1, a_t_plus_1)\n\n a_t_plus_1 = mx.nd.expand_dims(self.policy(s_t_plus_1), axis=2)\n x_t = mx.nd.concat(s_t_plus_1, a_t_plus_1, dim=2)\n total_cost = F.sum(cost)\n return total_cost, total_cost",
"def f_model(self, x_a, e_id):\n\n # ======================================================== #\n # initialize x_f with same dimension as x_a\n x_f = np.matrix(np.zeros(x_a.shape))\n\n # ======================================================== #\n # First compute the flow and velocity between cells using estimates x_a, given the previous state estimate\n # dim: 1 x self.num_cells+1, flow[0] is the qin, and flow[self.num_cells] is qout\n vm = self.vm_cells\n beta = self.beta_cells\n rhoc = self.rhoc_cells\n wc = self.wc_cells\n qmax = np.multiply(vm, rhoc) - np.multiply(vm, np.multiply(rhoc, rhoc)) / beta\n rhom = rhoc - qmax / wc\n\n flow = []\n speed = []\n # ---------------------------------\n # the flow in the upstream boundary\n # first_flow = self.__receiving_flow(wc[0,0], rhoc[0,0], rhom[0,0], qmax[0,0],\n # x_a[ self.x_index['density'][0],0])\n # flow.append( np.min( [ x_a[ self.x_index['qin'], 0], first_flow ] ))\n # # print('inflow:{0}, {1}'.format( x_a[ self.x_index['qin'], 0], first_flow ))\n # if x_a[ self.x_index['qin'], 0] <= first_flow:\n # # In freeflow condition, then determine speed by the inflow\n # speed.append( self.__q2v_ff(vm[0,0],beta[0,0], x_a[ self.x_index['qin'], 0]) )\n # else:\n # # congested flow, then determine by the density in the first cell\n # speed.append( self.__rho2v(vm[0,0], beta[0,0], rhoc[0,0], wc[0,0], x_a[ self.x_index['density'][0], 0]) )\n\n # Updated qin to be the actual inflow to the first cell\n flow.append(x_a[self.x_index['qin'], 0])\n speed.append(self.__rho2v(vm[0, 0], beta[0, 0], rhoc[0, 0], wc[0, 0], x_a[self.x_index['density'][0], 0]))\n\n for i in range(1, self.num_cells):\n f_sending = self.__sending_flow(vm[i - 1, 0], beta[i - 1, 0], rhoc[i - 1, 0], qmax[i - 1, 0],\n x_a[self.x_index['density'][i - 1], 0])\n f_receiving = self.__receiving_flow(wc[i, 0], rhoc[i, 0], rhom[i, 0], qmax[i, 0],\n x_a[self.x_index['density'][i], 0])\n if f_sending <= f_receiving:\n flow.append(f_sending)\n else:\n flow.append(f_receiving)\n\n # flow.append( np.min( [f_sending, f_receiving] ) )\n\n # get the velocity between cells\n if f_sending < f_receiving:\n # freeflow; if S < R, use v_upstream\n select_i = i - 1\n speed.append(self.__rho2v(vm[select_i, 0], beta[select_i, 0], rhoc[select_i, 0], wc[select_i, 0],\n x_a[self.x_index['density'][select_i], 0]))\n elif f_sending > f_receiving:\n # congested; # if S < R, use v_downstream\n select_i = i\n speed.append(self.__rho2v(vm[select_i, 0], beta[select_i, 0], rhoc[select_i, 0], wc[select_i, 0],\n x_a[self.x_index['density'][select_i], 0]))\n else:\n # S== R, then use v(rho_c)\n select_i = i - 1\n speed.append(self.__rho2v(vm[select_i, 0], beta[select_i, 0], rhoc[select_i, 0], wc[select_i, 0],\n rhoc[select_i, 0]))\n\n # ---------------------------------\n # the flow in the downstream boundary\n last_i = self.num_cells - 1\n # last_sending = self.__sending_flow(vm[last_i,0],beta[last_i,0],rhoc[last_i,0],qmax[last_i,0],\n # x_a[ self.x_index['density'][last_i], 0])\n # flow.append( np.min([ x_a[self.x_index['qout'], 0], last_sending]) )\n # # the velocity in the downstream boundary\n # if last_sending <= x_a[self.x_index['qout'], 0]:\n # # freeflow, determine by the density in the last cell\n # speed.append( self.__rho2v(vm[last_i,0], beta[last_i,0], rhoc[last_i,0], wc[last_i,0],\n # x_a[ self.x_index['density'][last_i], 0]) )\n # else:\n # # congested, determine by the outflow\n # speed.append( self.__q2v_cf(wc[last_i,0],rhom[last_i,0], x_a[self.x_index['qout'], 0] ) )\n\n # updated qout to be the actual 
outflow\n flow.append(x_a[self.x_index['qout'], 0])\n speed.append(self.__rho2v(vm[last_i, 0], beta[last_i, 0], rhoc[last_i, 0], wc[last_i, 0],\n x_a[self.x_index['density'][last_i], 0]))\n\n # append the flow and vel in this step to all forecast_flow and forecast_speed\n self.__f_flow['data'][e_id].append(flow)\n self.__f_speed['data'][e_id].append(speed)\n\n # ======================================================== #\n # State Propagation\n # Propagate density for step k, considering the on/off ramp\n for i in range(0, self.num_cells):\n x_f[self.x_index['density'][i], 0] = \\\n x_a[self.x_index['density'][i], 0] + \\\n (flow[i] - flow[i + 1]) * self.dur_steps / self.len_cells\n\n # # check the on and offramps\n # if i not in self.x_index['onramp'].keys() and i not in self.x_index['offramp'].keys():\n #\n # elif i in self.x_index['onramp'].keys() and i not in self.x_index['offramp'].keys():\n # # The update here is a bit complicated. The onramp flow here specifies the demand.\n # # The actual onramp inflow depends on the supply in this cell.\n # # q_on = min( onramp_demand, Receiving(i)-flow[i] )\n # q_on = np.min([ x_a[ self.x_index['onramp'][i], 0],\n # self.__receiving_flow(wc[i,0],rhoc[i,0],rhom[i,0],qmax[i,0],\n # x_a[ self.x_index['density'][i], 0]) - flow[i] ])\n # x_f[ self.x_index['density'][i], 0] = \\\n # x_a[ self.x_index['density'][i], 0 ] +\\\n # ( flow[i] - flow[i+1] + q_on)*self.dur_steps/self.len_cells\n # elif i not in self.x_index['onramp'].keys() and i in self.x_index['offramp'].keys():\n # # The offramp state specifies the supply.\n # # Hence the actual offramp flow depends on the demand in the cell\n # # q_off = min( offramp_supply, Sending(i) - flow[i+1] )\n # q_off = np.min([ x_a[ self.x_index['offramp'][i], 0],\n # self.__sending_flow(vm[i,0],beta[i,0],rhoc[i,0],qmax[i,0],\n # x_a[ self.x_index['density'][i], 0]) - flow[i+1] ])\n # x_f[ self.x_index['density'][i], 0] = \\\n # x_a[ self.x_index['density'][i], 0 ]+\\\n # ( flow[i] - flow[i+1] - q_off)*self.dur_steps/self.len_cells\n # elif i in self.x_index['onramp'].keys() and i in self.x_index['offramp'].keys():\n # q_on = np.min([ x_a[ self.x_index['onramp'][i], 0],\n # self.__receiving_flow(wc[i,0],rhoc[i,0],rhom[i,0],qmax[i,0],\n # x_a[ self.x_index['density'][i], 0]) - flow[i] ])\n # q_off = np.min([ x_a[ self.x_index['offramp'][i], 0],\n # self.__sending_flow(vm[i,0],beta[i,0],rhoc[i,0],qmax[i,0],\n # x_a[ self.x_index['density'][i], 0]) - flow[i+1] ])\n # x_f[ self.x_index['density'][i], 0] = \\\n # x_a[ self.x_index['density'][i], 0 ]+\\\n # ( flow[i] - flow[i+1] + q_on - q_off)*self.dur_steps/self.len_cells\n\n # propagate the boundary flow using random walk.\n # add saturation\n q_in = x_a[self.x_index['qin'], 0]\n x_f[self.x_index['qin'], 0] = float(np.min([np.max([q_in, 0.0]),\n self.qmax_cells[0, 0]]))\n q_out = x_a[self.x_index['qout'], 0]\n x_f[self.x_index['qout'], 0] = float(np.min([np.max([q_out, 0.0]),\n self.qmax_cells[self.num_cells - 1, 0]]))\n\n # ======================================================== #\n # add noise to forecaste state\n x_f += np.matrix(np.random.multivariate_normal(\n np.zeros(self.dim_state), self.Q)).reshape((self.dim_state, 1))\n\n # # update the onramp flows by a random walk\n # if self.loc_onramp is not None:\n # num_onramps = len(self.loc_onramp)\n # e_onramp = np.matrix(np.random.multivariate_normal(np.zeros(num_onramps),\n # self.Q['onramp'])).reshape((num_onramps,1))\n #\n # for i in range(0,len(self.cell_onramp)):\n # cell_id = self.cell_onramp[i]\n # x_f[ 
self.x_index['onramp'][cell_id], 0] = \\\n # np.min( [ np.max( [ x_a[ self.x_index['onramp'][cell_id], 0 ] + e_onramp[i] , 0.0]),\n # self.qmax_cells[cell_id]])\n #\n #\n #\n # # update the offramp flow by a random walk\n # if self.loc_offramp is not None:\n # num_offramps = len(self.loc_offramp)\n # e_offramp = np.matrix(np.random.multivariate_normal(np.zeros(num_offramps),\n # self.Q['offramp'])).reshape((num_offramps,1))\n #\n # for i in range(0, len(self.cell_offramp)):\n # cell_id = self.cell_offramp[i]\n # x_f[ self.x_index['offramp'][cell_id], 0] = \\\n # np.min( [ np.max( [ x_a[ self.x_index['offramp'][cell_id], 0 ] + e_offramp[i], 0.0 ]),\n # self.qmax_cells[cell_id]])\n\n return x_f",
"def rhs(x, t):\n\n return - np.sin(np.pi * x) * (np.sin(t) - 1 * np.pi ** 2 * np.cos(t))"
]
| [
"0.6416926",
"0.6258128",
"0.6244217",
"0.6219615",
"0.61625826",
"0.6149268",
"0.6111405",
"0.6107605",
"0.609902",
"0.60725325",
"0.6055627",
"0.6053448",
"0.6015801",
"0.6008567",
"0.6007099",
"0.59701043",
"0.5962164",
"0.593483",
"0.5931654",
"0.5921712",
"0.5919895",
"0.58821625",
"0.5869291",
"0.5869291",
"0.58444524",
"0.58298635",
"0.58262384",
"0.5804261",
"0.57943064",
"0.5790949"
]
| 0.71157587 | 0 |
Analyze transport processes (model A, model B, linear diffusion) on Barabasi-Albert graphs. Modify input and output as needed. | def transport(input=None):
n=100
m=5
G=nx.barabasi_albert_graph(n, m, seed=5)
maxdeg=0
degree_dist=[]
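# record every node's degree and remember j, the index of the highest-degree node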
for i in range(0,n):
degree_dist.append(G.degree[i])
if G.degree[i]>maxdeg:
maxdeg=G.degree[i]
j=i
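# end times for the linear model (tf), model A (tfa) and model B (tfb), plus the number of time steps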
tf,tfa,tfb=10,20,1000
Nt=10000
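# integrate the linear model, model A and model B, each seeded with intensity i0=1 at the highest-degree node j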
iarray=LinearModel(G,x=j,i0=1,L1='L',D=0.1,tf=tf,Nt=1000)
iarrayA=np.transpose(modelA(G,x=j,i0=1,beta=0.5,gamma=0.1,tf=tfa,Nt=Nt))
iarrayB,s=modelB(G,x=j,i0=1,alpha=-0.01,tf=tfb,Nt=Nt)
tarray = np.linspace(0,tf,1000+1)
tarraya = np.linspace(0,tfa,Nt+1)
tarrayb = np.linspace(0,tfb,Nt+1)
plt.figure(figsize=(12, 6))
plt.plot(tarray,iarray[:,j+1:])
plt.xlabel('time')
plt.ylabel('Intensity')
plt.title("Linear model for BA graph(n=100,m=5), D=0.1, with highest node omitted, time step=10")
plt.show()
plt.figure(figsize=(12, 6))
plt.plot(tarraya,iarrayA)
plt.xlabel('Time')
plt.ylabel('Intensity')
plt.title("Model A for BA graph(n=100,m=5), with beta=0.5,gamma=0.1,time step=20")
plt.show()
plt.figure(figsize=(12, 6))
plt.plot(tarrayb,iarrayB)
plt.xlabel('Time')
plt.ylabel('Intensity')
plt.title("Model B for BA graph(n=100,m=5), with alpha=-0.01, timestep=500")
plt.show()
#generate data for tf=20 for all models
tf=20
iarray=LinearModel(G,x=j,i0=1,L1='L',D=0.1,tf=tf,Nt=Nt)
iarrayA=np.transpose(modelA(G,x=j,i0=1,beta=0.5,gamma=0.1,tf=tf,Nt=Nt))
iarrayB,s=modelB(G,x=j,i0=1,alpha=-0.01,tf=tf,Nt=Nt)
tarray = np.linspace(0,tf,Nt+1)
#generate the means
mean=np.mean(iarray,axis=1)
meanA=np.mean(iarrayA,axis=1)
meanB=np.mean(iarrayB,axis=1)
#generate the variance info
var=np.var(iarray,axis=1)
varA=np.var(iarrayA,axis=1)
varB=np.var(iarrayB,axis=1)
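# compare the node-averaged intensity of the three models on one figure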
plt.figure(figsize=(12, 6))
plt.plot(tarray, meanA ,label='Model A',color='r')
plt.scatter(tarray, meanB ,label='Model B',marker="|" ,alpha=0.5)
plt.scatter(tarray, mean ,label='Linear L ',marker="_")
plt.xlabel('time')
plt.ylabel('Mean Intensity')
plt.legend()
plt.title("How Mean changes for different models for BA graph(n=100,m=5)")
plt.show()
plt.figure(figsize=(12, 6))
plt.plot(tarray, var ,label='Linear L')
plt.plot(tarray, varA ,label='Model A')
plt.plot(tarray, varB ,label='Model B')
plt.xlabel('time')
plt.ylabel('Variance of Intensity')
plt.legend()
plt.title("How variance changes for different models of BA graphs (n=100,m=5)")
plt.show()
return None #modify as needed
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bert_module_fn(is_training):\n\n input_ids = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_ids\")\n input_mask = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"input_mask\")\n token_type = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.int32, name=\"segment_ids\")\n\n config = modeling.BertConfig.from_json_file(config_path)\n model = modeling.BertModel(config=config, is_training=is_training,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type)\n \n seq_output = model.all_encoder_layers[seq_layer]\n tok_output = model.all_encoder_layers[tok_layer]\n pool_output = model.get_pooled_output()\n\n config_file = tf.constant(value=config_path, dtype=tf.string, name=\"config_file\")\n vocab_file = tf.constant(value=vocab_path, dtype=tf.string, name=\"vocab_file\")\n lower_case = tf.constant(do_lower_case)\n\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, config_file)\n tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.ASSET_FILEPATHS, vocab_file)\n \n input_map = {\"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": token_type}\n \n output_map = {\"pooled_output\": pool_output,\n \"sequence_output\": seq_output,\n \"token_output\": tok_output}\n\n output_info_map = {\"vocab_file\": vocab_file,\n \"do_lower_case\": lower_case}\n \n hub.add_signature(name=\"tokens\", inputs=input_map, outputs=output_map)\n hub.add_signature(name=\"tokenization_info\", inputs={}, outputs=output_info_map)",
"def forward(self, inputs, token_types, valid_length=None): # pylint: disable=arguments-differ\n bert_output = self.bert(inputs, token_types, valid_length)\n if self.add_query:\n o = mx.ndarray.transpose(bert_output, axes=(2,0,1))\n mask = 1 - token_types\n avg_q = mx.nd.sum(mx.nd.multiply(mask, o), axis=2) / mx.nd.sum(mask, axis=1)\n o = mx.nd.add(o, mx.nd.multiply(avg_q.expand_dims(axis=2), token_types))\n attended_output = mx.ndarray.transpose(o, axes=(1,2,0))\n if self.apply_coattention:\n #############################\n '''\n # option 3: use exactly the QANet way\n # not a good idea, this will cause index shift and thus cause bug\n # but if we are going to use QANet's method for modeling and extracting the output, it'll be fine to do so\n # so I just kept these lines here\n o = mx.ndarray.transpose(bert_output, axes=(2,0,1))\n context_mask = token_types\n query_mask = 1 - context_mask\n context_max_len = bert_output.shape[1] # int(context_mask.sum(axis=1).max().asscalar())\n query_max_len = bert_output.shape[1] # int(query_mask.sum(axis=1).max().asscalar())\n context_raw = mx.nd.multiply(context_mask, o)\n context_raw = mx.ndarray.expand_dims(context_raw, 0)\n # print(context_raw[0,0,0,:])\n # to get the offset to shift using gridgenerator and bilinear-sampler\n raw_offset = query_mask.sum(axis=1).reshape(len(query_mask),1).tile(bert_output.shape[1])\n warp_matrix = mx.ndarray.expand_dims(mx.ndarray.stack(raw_offset, \n mx.nd.zeros(raw_offset.shape).as_in_context(raw_offset.context)), 0)\n grid = GridGenerator(data=warp_matrix, transform_type='warp')\n warpped_out = BilinearSampler(context_raw, grid)\n # the context mask also needs to be shifted\n context_mask = mx.ndarray.expand_dims(context_mask, 0)\n context_mask = mx.ndarray.expand_dims(context_mask, 0)\n context_mask = BilinearSampler(context_mask, grid)\n context_mask = mx.ndarray.squeeze(context_mask, axis=(0, 1))\n # get the two encodings separated\n context_emb_encoded = mx.ndarray.transpose(mx.ndarray.squeeze(warpped_out, axis=0), axes=(1,2,0))\n '''\n #################################\n # option 2: get the two encodings separated\n o = mx.ndarray.transpose(bert_output, axes=(2,0,1))\n context_mask = token_types\n query_mask = 1 - context_mask\n context_max_len = bert_output.shape[1] # int(context_mask.sum(axis=1).max().asscalar())\n query_max_len = bert_output.shape[1] # int(query_mask.sum(axis=1).max().asscalar())\n context_emb_encoded = mx.ndarray.transpose(mx.nd.multiply(context_mask, o), axes=(1,2,0))\n query_emb_encoded = mx.ndarray.transpose(mx.nd.multiply(query_mask, o), axes=(1,2,0))\n '''\n # option 1: context and query differently masked but use the same values\n # problem: almost the same with simply self-attention\n context_mask = token_types\n query_mask = 1 - context_mask\n context_max_len = bert_output.shape[1]\n query_max_len = bert_output.shape[1]\n context_emb_encoded = bert_output\n query_emb_encoded = bert_output\n '''\n # context_mask = context_mask[:,:context_max_len]\n # query_mask = query_mask[:,:query_max_len]\n attended_output = self.co_attention(context_emb_encoded, query_emb_encoded, \n context_mask, query_mask, \n context_max_len, query_max_len)\n if self.apply_self_attention:\n attended_output, att_weights = self.multi_head_attention(bert_output, bert_output) \n if self.add_query or self.apply_self_attention or self.apply_coattention:\n output = self.span_classifier(attended_output)\n else:\n output = self.span_classifier(bert_output)\n return output",
"def predict_taskAB(model, samples: List[Dict], tokenizer=None, step_size: int=32, label_tags: Dict=POLARITY_INV, verbose=False):\n print(\"[preds]: predicting on task A+B ...\")\n #model.freeze()\n predicted = [] # List[Dict] for output\n if verbose: \n print(\"sample_size:\", len(samples))\n print(samples[0])\n\n # pre-process data\n dataA_elems = _read_data_taskA(tokenizer=tokenizer, test=True, test_samples=samples, bert=True)\n #print(\"read_data_size:\", len(dataA_elems))\n\n for step in range(0,len(samples), step_size):\n # test step_size samples at a time\n if step+step_size <= len(samples):\n step_batch_A = dataA_elems[step:step+step_size]\n else:\n step_batch_A = dataA_elems[step:]\n\n if verbose: \n #print(\"step-A:\", step_batch_A)\n print(\"batch_size:\", len(step_batch_A))\n\n # use collate_fn to input step_size samples into the model\n x_A, _, _, tokens = raw2_collate_fn(step_batch_A)\n if verbose:\n print(\"sample_size:\", len(samples))\n #print(\"X-A:\", x_A)\n with torch.no_grad():\n # predict with modelAB\n for i in range(len(x_A)):\n out_A = model.A_model(x_A[i])\n\n logits_A = out_A.logits \n pred_tokens = torch.argmax(logits_A, -1)\n #print(pred_tokens)\n pred_terms, _ = get_preds_terms(pred_tokens, tokens[i], roberta=True)\n\n\n # build (term,aspect) couples to produce correct output for the metrics\n preds = []\n if verbose:\n print(\"\\npred terms:\", pred_terms)\n\n for j in pred_terms:\n # for each predicted term build a couple\n out_B = model.B_model([[x_A[i],j]])\n logits_B = out_B.logits \n pred_sents = torch.argmax(logits_B, -1)\n \n preds.append((j,label_tags[int(pred_sents)]))\n if verbose: print(\"[LOFFA]:\", preds)\n\n if verbose: print(\"[CACCA]:\", preds)\n predicted.append({\"targets\":preds})\n preds = []\n\n print(\"Num predictions:\", len(predicted))\n return predicted",
"def create_model(albert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n model = modeling.AlbertModel(\n config=albert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n #if FLAGS.use_pooled_output:\n tf.logging.info(\"using pooled output\")\n output_layer = model.get_pooled_output()\n #else:\n # tf.logging.info(\"using meaned output\")\n # output_layer = tf.reduce_mean(model.get_sequence_output(), axis=1)\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, probabilities, predictions)",
"def main():\n \n # Load the model\n model = EpamModel()\n model.load(\"bayes_1.zip\")\n \n # Load and clean/prepare test data \n x_test = pd.read_csv('BAZA_VALID_INPUT.csv')\n x_test_clean = cleanup_df(x_test)\n \n # Predict\n # FIXME: This currently does probabilistic prediction only!\n y_pred = model.predict(x_test_clean)\n \n with open('output.txt', 'w+') as f:\n for label in y_pred:\n f.write(f'{label}\\n')",
"def main():\n # Data processing and parsing\n mp_df = pd.read_csv(\"melting_points.csv\")\n bp_df = pd.read_csv(\"boiling_points.csv\")\n data_processing(mp_df)\n data_processing(bp_df)\n # Plots scatterplots of our variables with the boiling and melting point\n plot_scatter('Mass', 'T_exp', mp_df, 'Molecular Weight', 'Melting Point')\n plot_scatter('Mass', 'T_exp', bp_df, 'Molecular Weight', 'Boiling Point')\n plot_scatter('Atom_counts', 'T_exp', mp_df, 'Number of Atoms',\n 'Melting Point')\n \n # Creates models and prints statistics using simply mass and atom counts\n simple_parameters = ['Mass', 'Atom_counts', 'T_exp']\n model = ml_df(mp_df, simple_parameters, 0.2)\n plot_T(model, simple_parameters, \"simple_melting.png\", 'Melting Point')\n slope_average(mp_df, simple_parameters, 0.2, 100)\n model2 = ml_df(bp_df, simple_parameters, 0.2)\n plot_T(model2, simple_parameters, 'simple_boiling.png', 'Boiling Point')\n slope_average(bp_df, simple_parameters, 0.2, 100)\n # Creates models and prints statistics using more variables\n complex_parameters = [\"Mass\", \"Atom_counts\", \"C\", \"H\", \"Acid\", \"Alcohol\",\n \"Unsaturation\", \"T_exp\"]\n model3 = ml_df(mp_df, complex_parameters, 0.2)\n plot_T(model3, complex_parameters, \"complex_melting.png\", 'Melting Point')\n slope_average(mp_df, complex_parameters, 0.2, 100)\n model4 = ml_df(bp_df, complex_parameters, 0.2)\n plot_T(model4, complex_parameters, \"complex_boiling.png\", 'Boiling Point')\n slope_average(bp_df, complex_parameters, 0.2, 100)\n # Creates models and prints statistics for our model using all available variables\n complex_parameters2 = [\"Mass\", \"Atom_counts\", \"C\", \"H\", 'O', 'N', 'F',\n 'Cl', 'Br', 'I', 'S', 'Si', 'Halide', \"Acid\", \"Alcohol\",\n \"Unsaturation\", \"T_exp\"]\n model = ml_df(mp_df, complex_parameters2, 0.2)\n plot_T(model, complex_parameters2, \"more_complex_melting.png\",\n 'Melting Point')\n slope_average(mp_df, complex_parameters2, 0.2, 100)\n # Tries a few different regressor models\n slope_average(mp_df, complex_parameters2, 0.2, 100, LinearRegression())\n slope_average(bp_df, complex_parameters2, 0.2, 100, RandomForestRegressor())",
"def create_model(is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings):\n\n # bert_module = hub.Module(\n # BERT_MODEL_HUB,\n # trainable=True)\n \n # bert_inputs = dict(\n # input_ids=input_ids,\n # input_mask=input_mask,\n # segment_ids=segment_ids)\n\n # bert_outputs = bert_module(\n # inputs=bert_inputs,\n # signature=\"tokens\",\n # as_dict=True)\n\n # Use \"pooled_output\" for classification tasks on an entire sentence.\n # Use \"sequence_output\" for token-level output.\n # output_layer = bert_outputs[\"sequence_output\"]\n \n\n model = modeling.BertModel(\n config=bert_config,\n is_training=not is_predicting,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings\n )\n\n output_layer = model.get_sequence_output()\n\n\n\n batch_size = output_layer.shape[0]\n max_seq_length = output_layer.shape[1]\n hidden_size = output_layer.shape[2]\n \n\n # Create our own layer to tune for politeness data.\n output_weights = tf.get_variable(\n \"output_weights\", [vocab_size, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [vocab_size], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n\n # Dropout helps prevent overfitting\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n # add a max_seq length stack of bias so that we add the bias to each word distributoin\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n # Convert labels into one-hot encoding\n one_hot_answer = tf.one_hot(input_ids, depth=vocab_size)\n\n\n predictions = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n # If we're predicting, we want predicted labels and the probabiltiies.\n if is_predicting:\n return (predictions, log_probs)\n\n # If we're train/eval, compute loss between predicted and actual label\n per_example_loss = -tf.reduce_sum(one_hot_answer * log_probs, axis=-1)\n per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=input_ids, logits=logits)\n \n loss = tf.reduce_mean(per_example_loss)\n return (loss, predictions, log_probs)",
"def forward(self, hg, samp_bias1=None, samp_bias2=None):\n h_1_all = [];h_2_all = [];c_all = [];logits = []\n result = {}\n # process features\n features = hg.srcdata['h']\n feats = self.normal_feat(features, self.meta_paths)\n # shuffled features\n shuf_feats = self.shuf_feats(feats)\n\n for idx, meta_path in enumerate(self.meta_paths):\n new_g = dgl.metapath_reachable_graph(hg, meta_path)\n for i in range(self.sc):\n new_g = dgl.add_self_loop(new_g)\n\n feats[idx] = F.dropout(feats[idx], self.dropout, training=self.training)\n shuf_feats[idx] = F.dropout(shuf_feats[idx], self.dropout, training=self.training)\n\n h_1 = self.gcn[idx](new_g, feats[idx])\n c = self.readout(h_1)\n c = self.readout_act_func(c)\n h_2 = self.gcn[idx](new_g, shuf_feats[idx])\n\n\n logit = self.disc(c, h_1, h_2, samp_bias1, samp_bias2)\n h_1_all.append(h_1.unsqueeze(0))\n h_2_all.append(h_2.unsqueeze(0))\n c_all.append(c)\n logits.append(logit)\n result['logits'] = logits\n\n # Attention or not\n if self.isAttn:\n r\"\"\"\n .. math::\n \\begin{equation}\n \\mathbf{h}_{i}=\\mathcal{Q}\\left(\\left\\{\\mathbf{h}^{(r)} \\mid r \\in \\mathcal{R}\\right\\}\\right)=\\sum_{r \\in \\mathcal{R}} a_{i}^{(r)} \\mathbf{h}^{(r)}\n \\end{equation}\n\n where :math:`a_{i}^{(r)}` denotes the importance of relationr in generating the final embedding of node videfined as:\n\n .. math::\n \\begin{equation}\n a_{i}^{(r)}=\\frac{\\exp \\left(\\mathbf{q}^{(r)} \\cdot \\mathbf{h}_{i}^{(r)}\\right)}{\\sum_{r^{\\prime} \\in \\mathcal{R}} \\exp \\left(\\mathbf{q}^{\\left(r^{\\prime}\\right)} \\cdot \\mathbf{h}_{i}^{r^{\\prime}}\\right)}\n \\end{equation}\n \"\"\"\n\n h_1_all_lst = [];h_2_all_lst = [];c_all_lst = []\n for h_idx in range(self.nheads):\n h_1_all_, h_2_all_, c_all_ = self.attn[h_idx](h_1_all, h_2_all, c_all)\n h_1_all_lst.append(h_1_all_);h_2_all_lst.append(h_2_all_); c_all_lst.append(c_all_)\n\n h_1_all = torch.mean(torch.cat(h_1_all_lst, 0), 0).unsqueeze(0)\n h_2_all = torch.mean(torch.cat(h_2_all_lst, 0), 0).unsqueeze(0)\n\n else:\n h_1_all = torch.mean(torch.cat(h_1_all, 0), 0).unsqueeze(0)\n h_2_all = torch.mean(torch.cat(h_2_all, 0), 0).unsqueeze(0)\n\n # Lcs = [Z − AVG { H(r)|r∈ R }]^2 - [Z − AVG { ~H(r)|r∈ R }]^2\n pos_reg_loss = ((self.H - h_1_all) ** 2).sum()\n neg_reg_loss = ((self.H - h_2_all) ** 2).sum()\n reg_loss = pos_reg_loss - neg_reg_loss\n result['reg_loss'] = reg_loss\n\n # semi-supervised module\n if self.isSemi:\n r\"\"\"\n Extension to Semi-Supervised Learning\n\n .. math::\n \\begin{equation}\n \\ell_{\\text {sup }}=-\\frac{1}{\\left|\\mathcal{Y}_{L}\\right|} \\sum_{l \\in \\mathcal{Y}_{L}} \\sum_{i=1}^{c} Y_{l i} \\ln \\hat{Y}_{l i}\n \\end{equation}\n\n Where :math:`mathcal{Y}_{L}` is the set of node indices with labels\n \"\"\"\n semi = self.logistic(self.H).squeeze(0)\n result['semi'] = semi\n\n # result: ['logits','reg_loss','semi']\n return result",
"def forward(\r\n self,\r\n input_ids,\r\n attention_mask: torch.Tensor,\r\n token_type_ids: torch.Tensor\r\n ):\r\n ### YOUR CODE HERE\r\n output = self.bert(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n token_type_ids=token_type_ids,\r\n )\r\n\r\n sequence_output = output[0] # the last hidden state (batch, sequence_length, hidden_size)\r\n logits = self.qa_outputs(sequence_output)\r\n start_logits, end_logits = logits.split(1, dim=-1)\r\n start_logits = start_logits.squeeze(-1)\r\n end_logits = end_logits.squeeze(-1)\r\n\r\n outputs = (start_logits, end_logits) # + output[2:]\r\n\r\n return outputs\r\n ### END YOUR CODE",
"def build_bert(self, verbose=True):\r\n # bert inputs\r\n bert_word_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype=\"int32\", name=\"bert_word_input\")\r\n bert_mask_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype=\"int32\", name='bert_mask_input')\r\n bert_segment_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype=\"int32\", name=\"bert_segment_input\")\r\n \r\n inputs = [bert_word_ids, bert_mask_ids, bert_segment_ids]\r\n\r\n bert_out = BertLayer(n_fine_tune_layers=self._params.n_fine_tune_layers, bert_path=self._params.bert_path, name=\"bert_layer\")([bert_word_ids, bert_mask_ids, bert_segment_ids])\r\n\r\n features = bert_out\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, self._params.max_sent_len), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n\r\n features = Concatenate(name=\"bert_and_dict_features\")([features, dict_embeddings])\r\n\r\n z = Dense(self._params.fc_dim, activation='relu', name=\"fc_dense\")(features)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n\r\n # It is recommended that you use this optimizer for fine tuning, since this\r\n # is how the model was trained (note that the Adam m/v variables are NOT\r\n # loaded from init_checkpoint.)\r\n optimizer = AdamWeightDecayOptimizer(\r\n learning_rate=1e-5,\r\n weight_decay_rate=0.01,\r\n beta_1=0.9,\r\n beta_2=0.999,\r\n epsilon=1e-6,\r\n exclude_from_weight_decay=[\"LayerNorm\", \"layer_norm\", \"bias\"])\r\n \r\n model.compile(loss=loss, optimizer=optimizer)\r\n\r\n self.model = model",
"def create_model(bert_model_hub, is_predicting, input_ids, input_mask,\n segment_ids, labels, num_labels):\n\n bert_module = hub.Module(bert_model_hub, trainable=True)\n bert_inputs = dict(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # Use \"pooled_output\" for classification tasks on an entire sentence.\n # Use \"sequence_outputs\" for token-level output.\n output_layer = bert_outputs[\"pooled_output\"]\n\n hidden_size = output_layer.shape[-1].value\n\n # Create our own layer to tune for politeness data.\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\"output_bias\", [num_labels],\n initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n\n # Dropout helps prevent overfitting\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n # Convert labels into one-hot encoding\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n predicted_labels = tf.squeeze(\n tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n # If we're predicting, we want predicted labels and the probabiltiies.\n if is_predicting:\n return (predicted_labels, log_probs)\n\n # If we're train/eval, compute loss between predicted and actual label\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, predicted_labels, log_probs)",
"def test_extended_barabasi_albert(self, m=2):\n seed = 42\n repeats = 2\n BA_model = barabasi_albert_graph(100, m, seed)\n BA_model_edges = BA_model.number_of_edges()\n\n while repeats:\n repeats -= 1\n\n # This behaves just like BA, the number of edges must be the same\n G1 = extended_barabasi_albert_graph(100, m, 0, 0, seed)\n assert_equal(G1.size(), BA_model_edges)\n\n # More than twice more edges should have been added\n G1 = extended_barabasi_albert_graph(100, m, 0.8, 0, seed)\n assert_greater(G1.size(), BA_model_edges * 2)\n\n # Only edge rewiring, so the number of edges less than original\n G2 = extended_barabasi_albert_graph(100, m, 0, 0.8, seed)\n assert_equal(G2.size(), BA_model_edges)\n\n # Mixed scenario: less edges than G1 and more edges than G2\n G3 = extended_barabasi_albert_graph(100, m, 0.3, 0.3, seed)\n assert_greater(G3.size(), G2.size())\n assert_less(G3.size(), G1.size())\n\n # Testing exceptions\n ebag = extended_barabasi_albert_graph\n assert_raises(NetworkXError, ebag, m, m, 0, 0)\n assert_raises(NetworkXError, ebag, 1, 0.5, 0, 0)\n assert_raises(NetworkXError, ebag, 100, 2, 0.5, 0.5)",
"def analyze(self):\n self.grayscale = (input(\"[G]rayscale or [C]olor? \").lower()[0] == \"g\")\n for i in range(1, 6):\n for j in range(1, 10):\n network_name = \"acas_%d_%d\" % (i, j)\n try:\n distance_classified = self.read_artifact(\n \"%s/distance\" % network_name)\n theta_classified = self.read_artifact(\n \"%s/theta\" % network_name)\n sample_pre, sample_post = self.read_artifact(\n \"%s/sample\" % network_name)\n single_line_data = self.read_artifact(\n \"%s/single_lines\" % network_name)\n except KeyError:\n # Skip due to missing data.\n continue\n print(\"Analyzing network:\", network_name)\n self.distance_plot(distance_classified)\n self.finalize_plot(\"%s/distance\" % network_name)\n self.theta_plot(theta_classified)\n self.finalize_plot(\"%s/theta\" % network_name)\n self.overlapping_plot(distance_classified, theta_classified)\n self.finalize_plot(\"%s/overlapping\" % network_name)\n self.sample_plot(sample_pre, sample_post)\n self.finalize_plot(\"%s/sample\" % network_name)\n\n self.single_line_plots(network_name, single_line_data)\n return True",
"def __init__(self, config: BertConfig):\r\n super().__init__(config)\r\n ### YOUR CODE HERE\r\n self.num_labels = config.num_labels # [0, 1] (start or end)\r\n self.bert = BertModel(config)\r\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # TODO: Not a separate FFN ? (For Start_FFN and End_FFN)\r\n\r\n ### END YOUR CODE\r\n\r\n # Don't forget initializing the weights\r\n self.init_weights()",
"def compareAB(model1_name, model2_name, X_test_B, X_test_S, analysis_dir=\"Analysis/\"):\n #Load best weights\n model = tf.keras.models.load_model(\"Models/\"+model1_name)\n bkg_preds1 = model.predict(X_test_B).flatten()\n sig_preds1 = model.predict(X_test_S).flatten()\n\n model = tf.keras.models.load_model(\"Models/\"+model2_name)\n bkg_preds2 = model.predict(X_test_B).flatten()\n sig_preds2 = model.predict(X_test_S).flatten()\n\n sig_eff = []\n bkg_eff = []\n sig_eff_50 = 1.0\n bkg_eff_50 = 1.0\n for thresh in (1-np.arange(0.00005, 0.8, 0.01)):\n bkg_eff_temp = np.sum(bkg_preds1 > thresh)/len(bkg_preds1)\n sig_eff_temp = np.sum(sig_preds1 > thresh)/len(sig_preds1)\n sig_eff.append(sig_eff_temp)\n bkg_eff.append(1/bkg_eff_temp)\n if abs(sig_eff_temp-0.5) < abs(sig_eff_50-0.5):\n sig_eff_50 = sig_eff_temp\n bkg_eff_50 = 1/bkg_eff_temp\n plt.semilogy(sig_eff, bkg_eff)\n plt.annotate(model1_name + ' Background rejection @0.5 Signal efficiency = {:.2e}'.format(bkg_eff_50), xy=(0.05, 0.95), xycoords='axes fraction')\n print(sig_eff_50)\n\n sig_eff = []\n bkg_eff = []\n sig_eff_50 = 1.0\n bkg_eff_50 = 1.0\n for thresh in (1-np.arange(0.00005, 0.8, 0.01)):\n bkg_eff_temp = np.sum(bkg_preds2 > thresh)/len(bkg_preds2)\n sig_eff_temp = np.sum(sig_preds2 > thresh)/len(sig_preds2)\n sig_eff.append(sig_eff_temp)\n bkg_eff.append(1/bkg_eff_temp)\n if abs(sig_eff_temp-0.5) < abs(sig_eff_50-0.5):\n sig_eff_50 = sig_eff_temp\n bkg_eff_50 = 1/bkg_eff_temp\n plt.semilogy(sig_eff, bkg_eff)\n plt.annotate(model2_name + ' Background rejection @0.5 Signal efficiency = {:.3e}'.format(bkg_eff_50), xy=(0.05, 0.88), xycoords='axes fraction')\n print(sig_eff_50)\n\n plt.legend([model1_name, model2_name])\n plt.xlabel(\"Signal efficiency\")\n plt.ylabel(\"Background rejection\")\n plt.gcf().set_size_inches(8.3, 5.85)\n plt.savefig(analysis_dir+\"ROC\" + model1_name + \"VS\" + model2_name + \".pdf\", format=\"pdf\")\n plt.show()",
"def DontuseThis():\n BCM_outputs = ['phi','rho','theta',\n 'r_probabilityMaps','l_probabilityMaps',\n 'models']\n BCM_Models = pe.Node(interface=nio.DataGrabber(input_names=['structures'],\n outfields=BCM_outputs),\n name='10_BCM_Models')\n BCM_Models.inputs.base_directory = atlas_fname_wpath\n BCM_Models.inputs.template_args['phi'] = [['spatialImages','phi','nii.gz']]\n BCM_Models.inputs.template_args['rho'] = [['spatialImages','rho','nii.gz']]\n BCM_Models.inputs.template_args['theta'] = [['spatialImages','theta','nii.gz']]\n BCM_Models.inputs.template_args['r_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['l_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['models'] = [['structures']]\n\n BRAINSCut_structures = ['caudate','thalamus','putamen','hippocampus']\n #BRAINSCut_structures = ['caudate','thalamus']\n BCM_Models.iterables = ( 'structures', BRAINSCut_structures )\n BCM_Models.inputs.template = '%s/%s.%s'\n BCM_Models.inputs.field_template = dict(\n r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',\n l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',\n models='modelFiles/%sModel*',\n )\n\n \"\"\"\n The xml creation and BRAINSCut need to be their own mini-pipeline that gets\n executed once for each of the structures in BRAINSCut_structures. This can be\n accomplished with a map node and a new pipeline.\n \"\"\"\n \"\"\"\n Create xml file for BRAINSCut\n \"\"\"\n\n\n BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),name=\"BFitAtlasToSubject\")\n BFitAtlasToSubject.inputs.costMetric=\"MMI\"\n BFitAtlasToSubject.inputs.maskProcessingMode=\"ROI\"\n BFitAtlasToSubject.inputs.numberOfSamples=100000\n BFitAtlasToSubject.inputs.numberOfIterations=[1500,1500]\n BFitAtlasToSubject.inputs.numberOfHistogramBins=50\n BFitAtlasToSubject.inputs.maximumStepLength=0.2\n BFitAtlasToSubject.inputs.minimumStepLength=[0.005,0.005]\n BFitAtlasToSubject.inputs.transformType= [\"Affine\",\"BSpline\"]\n BFitAtlasToSubject.inputs.maxBSplineDisplacement= 7\n BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter=65\n BFitAtlasToSubject.inputs.splineGridSize=[28,20,24]\n BFitAtlasToSubject.inputs.outputVolume=\"Trial_Initializer_Output.nii.gz\"\n BFitAtlasToSubject.inputs.outputTransform=\"Trial_Initializer_Output.mat\"\n cutWF.connect(SplitAvgBABC,'avgBABCT1',BFitAtlasToSubject,'fixedVolume')\n cutWF.connect(BABC,'outputLabels',BFitAtlasToSubject,'fixedBinaryVolume')\n cutWF.connect(BAtlas,'template_t1',BFitAtlasToSubject,'movingVolume')\n cutWF.connect(BAtlas,'template_brain',BFitAtlasToSubject,'movingBinaryVolume')\n cutWF.connect(BLI,'outputTransformFilename',BFitAtlasToSubject,'initialTransform')\n\n CreateBRAINSCutXML = pe.Node(Function(input_names=['rho','phi','theta',\n 'model',\n 'r_probabilityMap',\n 'l_probabilityMap',\n 'atlasT1','atlasBrain',\n 'subjT1','subjT2',\n 'subjT1GAD','subjT2GAD',\n 'subjSGGAD','subjBrain',\n 'atlasToSubj','output_dir'],\n output_names=['xml_filename','rl_structure_filename_list'],\n function = create_BRAINSCut_XML),\n overwrite = True,\n name=\"CreateBRAINSCutXML\")\n\n ## HACK Makde better directory\n CreateBRAINSCutXML.inputs.output_dir = \".\" #os.path.join(cutWF.base_dir, \"BRAINSCut_output\")\n cutWF.connect(BCM_Models,'models',CreateBRAINSCutXML,'model')\n cutWF.connect(BCM_Models,'rho',CreateBRAINSCutXML,'rho')\n cutWF.connect(BCM_Models,'phi',CreateBRAINSCutXML,'phi')\n cutWF.connect(BCM_Models,'theta',CreateBRAINSCutXML,'theta')\n 
cutWF.connect(BCM_Models,'r_probabilityMaps',CreateBRAINSCutXML,'r_probabilityMap')\n cutWF.connect(BCM_Models,'l_probabilityMaps',CreateBRAINSCutXML,'l_probabilityMap')\n cutWF.connect(BAtlas,'template_t1',CreateBRAINSCutXML,'atlasT1')\n cutWF.connect(BAtlas,'template_brain',CreateBRAINSCutXML,'atlasBrain')\n cutWF.connect(SplitAvgBABC,'avgBABCT1',CreateBRAINSCutXML,'subjT1')\n cutWF.connect(SplitAvgBABC,'avgBABCT2',CreateBRAINSCutXML,'subjT2')\n cutWF.connect(GADT1,'outputVolume',CreateBRAINSCutXML,'subjT1GAD')\n cutWF.connect(GADT2,'outputVolume',CreateBRAINSCutXML,'subjT2GAD')\n cutWF.connect(SGI,'outputFileName',CreateBRAINSCutXML,'subjSGGAD')\n cutWF.connect(BABC,'outputLabels',CreateBRAINSCutXML,'subjBrain')\n cutWF.connect(BFitAtlasToSubject,'outputTransform',CreateBRAINSCutXML,'atlasToSubj')\n #CreateBRAINSCutXML.inputs.atlasToSubj = \"INTERNAL_REGISTER.mat\"\n #cutWF.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')\n\n \"\"\"\n ResampleNACLabels\n \"\"\"\n ResampleAtlasNACLabels=pe.Node(interface=BRAINSResample(),name=\"ResampleAtlasNACLabels\")\n ResampleAtlasNACLabels.inputs.interpolationMode = \"NearestNeighbor\"\n ResampleAtlasNACLabels.inputs.outputVolume = \"atlasToSubjectNACLabels.nii.gz\"\n\n cutWF.connect(cutWF,'OutputSpec.atlasToSubjectTransform',ResampleAtlasNACLabels,'warpTransform')\n cutWF.connect(cutWF,'OutputSpec.t1_corrected',ResampleAtlasNACLabels,'referenceVolume')\n cutWF.connect(BAtlas,'template_nac_lables',ResampleAtlasNACLabels,'inputVolume')\n\n \"\"\"\n BRAINSMush\n \"\"\"\n BMUSH=pe.Node(interface=BRAINSMush(),name=\"BMUSH\")\n BMUSH.inputs.outputVolume = \"MushImage.nii.gz\"\n BMUSH.inputs.outputMask = \"MushMask.nii.gz\"\n BMUSH.inputs.lowerThresholdFactor = 1.2\n BMUSH.inputs.upperThresholdFactor = 0.55\n\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BMUSH,'inputFirstVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.t2_corrected',BMUSH,'inputSecondVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.outputLabels',BMUSH,'inputMaskVolume')\n\n \"\"\"\n BRAINSROIAuto\n \"\"\"\n BROI = pe.Node(interface=BRAINSROIAuto(), name=\"BRAINSROIAuto\")\n BROI.inputs.closingSize=12\n BROI.inputs.otsuPercentileThreshold=0.01\n BROI.inputs.thresholdCorrectionFactor=1.0\n BROI.inputs.outputROIMaskVolume = \"temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz\"\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BROI,'inputVolume')\n\n \"\"\"\n Split the implicit outputs of BABCext\n \"\"\"\n SplitAvgBABC = pe.Node(Function(input_names=['in_files','T1_count'], output_names=['avgBABCT1','avgBABCT2'],\n function = get_first_T1_and_T2), run_without_submitting=True, name=\"99_SplitAvgBABC\")\n SplitAvgBABC.inputs.T1_count = 1 ## There is only 1 average T1 image.\n\n cutWF.connect(myLocalTCWF,'OutputSpec.outputAverageImages',SplitAvgBABC,'in_files')\n\n\n\n def printFullPath(outFileFullPath):\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"{0}\".format(outFileFullPath))\n return outFileFullPath\n printOutImage = pe.Node( Function(function=printFullPath, input_names = ['outFileFullPath'], output_names = ['genoutFileFullPath']), run_without_submitting=True, name=\"99_printOutImage\")\n cutWF.connect( GADT2, 'outputVolume', printOutImage, 'outFileFullPath' )",
"def __init__(self, args, number_of_labels, number_of_features,adj):\n super(SpGAT, self).__init__()\n self.args=args\n \n self.number_of_labels = number_of_labels\n self.number_of_features = number_of_features\n self.device = args.device\n self.adj= sparse_mx_to_torch_sparse_tensor(adj).to(self.device).to_dense()\n self.attentions = [SpGraphAttentionLayer(number_of_features, \n args.hidden, \n dropout=args.dropout, \n alpha=args.alpha, \n concat=True) for _ in range(args.nheads)]\n for i, attention in enumerate(self.attentions):\n self.add_module('attention_{}'.format(i), attention)\n\n self.out_att = SpGraphAttentionLayer(args.hidden * args.nheads, \n args.Q, \n dropout=args.dropout, \n alpha=args.alpha, \n concat=False)",
"def __init__(self,\n in_node_dim: int = 39,\n hidden_node_dim: int = 64,\n heads: int = 4,\n dropout: float = 0.0,\n num_conv: int = 3,\n predictor_hidden_feats: int = 32,\n n_tasks: int = 1,\n **kwargs):\n model = GAT(\n in_node_dim,\n hidden_node_dim,\n heads,\n dropout,\n num_conv,\n predictor_hidden_feats,\n n_tasks,\n )\n super(GATModel, self).__init__(model, **kwargs)",
"def barabasi_albert_graph(T, m, seed=None, is_directed=False):\n n = m + T - 1\n if m < 1 or m >= n:\n raise nx.NetworkXError(\"Barabási–Albert network must have m >= 1\"\n \" and m < n, m = %d, n = %d\" % (m, n))\n\n # Add m initial nodes (m0 in barabasi-speak)\n time = 0\n starting_graph = nx.path_graph(m) # start with a tree\n G = nx.DiGraph() if is_directed else nx.Graph()\n G.add_edges_from(starting_graph.edges(data=True), t=time, w=1)\n\n # Target nodes for new edges\n targets = list(range(m))\n # List of existing nodes, with nodes repeated once for each adjacent edge\n repeated_nodes = []\n # Start adding the other n-m nodes. The first node is m.\n source = m\n while source < n:\n time += 1 # increase time\n # Add edges to m nodes from the source.\n G.add_edges_from(zip([source] * m, targets), t=time, w=1)\n # Add one node to the list for each new edge just created.\n repeated_nodes.extend(targets)\n # And the new node \"source\" has m edges to add to the list.\n repeated_nodes.extend([source] * m)\n # Now choose m unique nodes from the existing nodes\n # Pick uniformly from repeated_nodes (preferential attachment)\n targets = _random_subset(repeated_nodes, m, seed)\n source += 1\n\n edgelist_path = f'datasets/synthetic/BA_{m}_{T}_raw.g'\n print(f'Weighted edgelist written at {edgelist_path!r}')\n nx.write_edgelist(G, path=edgelist_path, data='t')\n return G",
"def model_and_data(request, hyperparams, estep_conf):\n if tvo.get_run_policy() == \"mpi\":\n init_processes()\n\n precision, N, D, H, batch_size = get(hyperparams, \"precision\", \"N\", \"D\", \"H\", \"batch_size\")\n\n if request.param == \"BSC\":\n W_gt = generate_bars(H, bar_amp=10.0, precision=precision)\n sigma2_gt = to.ones((1,), dtype=precision, device=tvo.get_device())\n pies_gt = to.full((H,), 2.0 / H, dtype=precision, device=tvo.get_device())\n\n to.manual_seed(999)\n W_init = to.rand((D, H), dtype=precision)\n W_init = W_init.to(device=tvo.get_device())\n broadcast(W_init)\n\n sigma2_init = to.tensor([1.0], dtype=precision, device=tvo.get_device())\n pies_init = to.full((H,), 1.0 / H, dtype=precision, device=tvo.get_device())\n\n model = BSC(\n H=H, D=D, W_init=W_gt, sigma2_init=sigma2_gt, pies_init=pies_gt, precision=precision\n )\n\n fname = \"bars_test_data_bsc.h5\"\n\n write_dataset(fname, N, D, np.float32, model)\n\n model.theta[\"W\"] = W_init\n model.theta[\"sigma2\"] = sigma2_init\n model.theta[\"pies\"] = pies_init\n\n elif request.param == \"NoisyOR\":\n W_gt = generate_bars(H, bar_amp=0.8, bg_amp=0.1, precision=precision)\n pies_gt = to.full((H,), 2.0 / H, dtype=precision, device=tvo.get_device())\n\n to.manual_seed(999)\n W_init = to.rand((D, H), dtype=precision)\n W_init = W_init.to(device=tvo.get_device())\n broadcast(W_init)\n pies_init = to.full((H,), 1.0 / H, dtype=precision, device=tvo.get_device())\n\n model = NoisyOR(H=H, D=D, W_init=W_gt, pi_init=pies_gt, precision=precision)\n\n fname = \"bars_test_data_nor.h5\"\n\n write_dataset(fname, N, D, np.uint8, model)\n\n model.theta[\"W\"] = W_init\n model.theta[\"pies\"] = pies_init\n\n if tvo.get_run_policy() == \"mpi\":\n dist.barrier()\n\n return model, fname",
"def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )",
"def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )",
"def test_dual_barabasi_albert(self, m1=1, m2=4, p=0.5):\n seed = 42\n repeats = 2\n\n while repeats:\n repeats -= 1\n\n # This should be BA with m = m1\n BA1 = barabasi_albert_graph(100, m1, seed)\n DBA1 = dual_barabasi_albert_graph(100, m1, m2, 1, seed)\n assert_equal(BA1.size(), DBA1.size())\n\n # This should be BA with m = m2\n BA2 = barabasi_albert_graph(100, m2, seed)\n DBA2 = dual_barabasi_albert_graph(100, m1, m2, 0, seed)\n assert_equal(BA2.size(), DBA2.size())\n\n # Testing exceptions\n dbag = dual_barabasi_albert_graph\n assert_raises(NetworkXError, dbag, m1, m1, m2, 0)\n assert_raises(NetworkXError, dbag, m2, m1, m2, 0)\n assert_raises(NetworkXError, dbag, 100, m1, m2, -0.5)\n assert_raises(NetworkXError, dbag, 100, m1, m2, 1.5)",
"def create_model(\n bert_config,\n is_training,\n input_ids,\n input_mask,\n segment_ids,\n labels,\n num_labels,\n use_one_hot_embeddings,\n):\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n )\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\",\n [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02),\n )\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer()\n )\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n # probabilities = tf.nn.softmax(logits, axis=-1) ### multiclass case\n probabilities = tf.nn.sigmoid(logits) # multi-label case\n\n labels = tf.cast(labels, tf.float32)\n tf.logging.info(\n \"num_labels:{};logits:{};labels:{}\".format(num_labels, logits, labels)\n )\n per_example_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=labels, logits=logits\n )\n loss = tf.reduce_mean(per_example_loss)\n\n # probabilities = tf.nn.softmax(logits, axis=-1)\n # log_probs = tf.nn.log_softmax(logits, axis=-1)\n #\n # one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n #\n # per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n # loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)",
"def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings, is_prediction=False):\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n if is_prediction:\n return tf.constant(0.0, dtype=tf.float32), tf.constant(0.0, dtype=tf.float32), logits, probabilities\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n label_smoothing = tf.constant(FLAGS.label_smoothing, dtype=tf.float32)\n\n one_hot_labels = one_hot_labels*(1 - label_smoothing) + label_smoothing / num_labels\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n\n loss = tf.reduce_mean(per_example_loss)\n\n return loss, per_example_loss, logits, probabilities",
"def bert_score(preds: Union[List[str], Dict[str, Tensor]], target: Union[List[str], Dict[str, Tensor]], model_name_or_path: Optional[str]=None, num_layers: Optional[int]=None, all_layers: bool=False, model: Optional[Module]=None, user_tokenizer: Any=None, user_forward_fn: Callable[[Module, Dict[str, Tensor]], Tensor]=None, verbose: bool=False, idf: bool=False, device: Optional[Union[str, torch.device]]=None, max_length: int=512, batch_size: int=64, num_threads: int=4, return_hash: bool=False, lang: str='en', rescale_with_baseline: bool=False, baseline_path: Optional[str]=None, baseline_url: Optional[str]=None) ->Dict[str, Union[List[float], str]]:\n if len(preds) != len(target):\n raise ValueError('Number of predicted and reference sententes must be the same!')\n if verbose and not _TQDM_AVAILABLE:\n raise ModuleNotFoundError('An argument `verbose = True` requires `tqdm` package be installed. Install with `pip install tqdm`.')\n if model is None:\n if not _TRANSFORMERS_AVAILABLE:\n raise ModuleNotFoundError('`bert_score` metric with default models requires `transformers` package be installed. Either install with `pip install transformers>=4.0` or `pip install torchmetrics[text]`.')\n if model_name_or_path is None:\n warn(f'The argument `model_name_or_path` was not specified while it is required when default `transformers` model are used.It is, therefore, used the default recommended model - {_DEFAULT_MODEL}.')\n tokenizer = AutoTokenizer.from_pretrained(model_name_or_path or _DEFAULT_MODEL)\n model = AutoModel.from_pretrained(model_name_or_path or _DEFAULT_MODEL)\n else:\n tokenizer = user_tokenizer\n model.eval()\n model\n try:\n if num_layers and num_layers > model.config.num_hidden_layers:\n raise ValueError(f'num_layers={num_layers} is forbidden for {model_name_or_path}. 
Please use num_layers <= {model.config.num_hidden_layers}')\n except AttributeError:\n warn('It was not possible to retrieve the parameter `num_layers` from the model specification.')\n _are_empty_lists = all(isinstance(text, list) and len(text) == 0 for text in (preds, target))\n _are_valid_lists = all(isinstance(text, list) and len(text) > 0 and isinstance(text[0], str) for text in (preds, target))\n _are_valid_tensors = all(isinstance(text, dict) and isinstance(text['input_ids'], Tensor) for text in (preds, target))\n if _are_empty_lists:\n warn('Predictions and references are empty.')\n output_dict: Dict[str, Union[List[float], str]] = {'precision': [0.0], 'recall': [0.0], 'f1': [0.0]}\n if return_hash:\n output_dict.update({'hash': _get_hash(model_name_or_path, num_layers, idf)})\n return output_dict\n baseline = _load_baseline(lang, model_name_or_path, baseline_path, baseline_url) if rescale_with_baseline else None\n if _are_valid_lists:\n target_dataset = TextDataset(target, tokenizer, max_length, idf=idf)\n preds_dataset = TextDataset(preds, tokenizer, max_length, idf=idf, tokens_idf=target_dataset.tokens_idf)\n elif _are_valid_tensors:\n target_dataset = TokenizedDataset(**target, idf=idf)\n preds_dataset = TokenizedDataset(**preds, idf=idf, tokens_idf=target_dataset.tokens_idf)\n else:\n raise ValueError('Invalid input provided.')\n target_loader = DataLoader(target_dataset, batch_size=batch_size, num_workers=num_threads)\n preds_loader = DataLoader(preds_dataset, batch_size=batch_size, num_workers=num_threads)\n target_embeddings, target_idf_scale = _get_embeddings_and_idf_scale(target_loader, target_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn)\n preds_embeddings, preds_idf_scale = _get_embeddings_and_idf_scale(preds_loader, preds_dataset.max_length, model, device, num_layers, all_layers, idf, verbose, user_forward_fn)\n precision, recall, f1_score = _get_precision_recall_f1(preds_embeddings, target_embeddings, preds_idf_scale, target_idf_scale)\n if baseline is not None:\n precision, recall, f1_score = _rescale_metrics_with_baseline(precision, recall, f1_score, baseline, num_layers, all_layers)\n output_dict = {'precision': precision.tolist(), 'recall': recall.tolist(), 'f1': f1_score.tolist()}\n if return_hash:\n output_dict.update({'hash': _get_hash(model_name_or_path, num_layers, idf)})\n return output_dict",
"def ta_gen(self, dataset):\n\n model_path = os.path.join(self.check_point, 'model_gaussian.pt')\n if not os.path.exists(model_path):\n raise Exception('Cannot find %s.' % model_path)\n\n self.model = torch.load(model_path)\n _, _, stats, outputs, names = self.align_gen(dataset, is_test=True)\n return stats, outputs, names",
"def transform_output_anaylsis(self):\n\n for ix, curbbattinst in enumerate(self.inputlist):\n # set up files and file names\n source_file_of_interest: str = curbbattinst.source_file\n source_file_of_interest_basename: str = os.path.splitext(source_file_of_interest)[0]\n\n # look at Status Files\n pardir = os.path.dirname(source_file_of_interest)\n statusfiles = [f for f in os.listdir(pardir) if os.path.isfile(os.path.join(pardir, f)) and f.endswith(\"STATUS\")]\n\n if len(statusfiles) > 1:\n raise Exception(\"Multiple Status Files during BBMCTS-Batch\")\n\n if len(statusfiles) == 1:\n statusfile = statusfiles[0].split(\".\")[-1]\n status = int(statusfile.split('_')[0])\n self.mcts_statesequences[ix].status = status\n check_for_errors_index = min(status + 1, self.path_length-1) # the next transformer, but if all were possible, the last one\n else:\n self.mcts_statesequences[ix].status = -1\n check_for_errors_index = 0\n\n # log if we had unexpected errors, if no state, 1st transformer caused problems, otherwise use status ...\n error_file_transf = os.path.join(source_file_of_interest_basename + \".stderr\")\n self.__check_error_file_transformations(err_file=error_file_transf, attinstance=curbbattinst,\n iteration=ix, check_index=check_for_errors_index)\n\n # trim sequence to only valid transformers\n for i in range(len(self.mcts_statesequences[ix].states)-1, self.mcts_statesequences[ix].status, -1):\n del self.mcts_statesequences[ix].states[i]\n assert len(self.mcts_statesequences[ix].states) == self.mcts_statesequences[ix].status+1",
"def create_model(is_predicting, input_ids, input_mask, segment_ids, labels, num_labels):\n BERT_MODEL_HUB = \"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\"\n bert_module = hub.Module(\n BERT_MODEL_HUB,\n trainable=True)\n bert_inputs = dict(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids)\n bert_outputs = bert_module(\n inputs=bert_inputs,\n signature=\"tokens\",\n as_dict=True)\n\n # Use \"pooled_output\" for classification tasks on an entire sentence.\n # Use \"sequence_outputs\" for token-level output.\n output_layer = bert_outputs[\"pooled_output\"]\n\n print(\"output_layer.shape[-1]:\"+str(output_layer.shape[-1]))\n hidden_size = output_layer.shape[-1]\n\n # Create our own layer to tune for politeness data.\n output_weights = tf.compat.v1.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.compat.v1.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.compat.v1.variable_scope(\"loss\"):\n\n # Dropout helps prevent overfitting\n output_layer = tf.nn.dropout(output_layer, rate=.1)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n # Convert labels into one-hot encoding\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n # If we're predicting, we want predicted labels and the probabiltiies.\n if is_predicting:\n return (predicted_labels, log_probs)\n\n # If we're train/eval, compute loss between predicted and actual label\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n return (loss, predicted_labels, log_probs)",
"def process(self):\n coo_adj = sp.load_npz(os.path.join(self.raw_path, \"adj_full.npz\"))\n g = from_scipy(coo_adj)\n\n features = np.load(os.path.join(self.raw_path, \"feats.npy\"))\n features = F.tensor(features, dtype=F.float32)\n\n y = [-1] * features.shape[0]\n with open(os.path.join(self.raw_path, \"class_map.json\")) as f:\n class_map = json.load(f)\n for key, item in class_map.items():\n y[int(key)] = item\n labels = F.tensor(np.array(y), dtype=F.int64)\n\n with open(os.path.join(self.raw_path, \"role.json\")) as f:\n role = json.load(f)\n\n train_mask = np.zeros(features.shape[0], dtype=bool)\n train_mask[role[\"tr\"]] = True\n\n val_mask = np.zeros(features.shape[0], dtype=bool)\n val_mask[role[\"va\"]] = True\n\n test_mask = np.zeros(features.shape[0], dtype=bool)\n test_mask[role[\"te\"]] = True\n\n g.ndata[\"feat\"] = features\n g.ndata[\"label\"] = labels\n g.ndata[\"train_mask\"] = generate_mask_tensor(train_mask)\n g.ndata[\"val_mask\"] = generate_mask_tensor(val_mask)\n g.ndata[\"test_mask\"] = generate_mask_tensor(test_mask)\n\n if self._reorder:\n self._graph = reorder_graph(\n g,\n node_permute_algo=\"rcmk\",\n edge_permute_algo=\"dst\",\n store_ids=False,\n )\n else:\n self._graph = g"
]
| [
"0.56914574",
"0.5504473",
"0.5404608",
"0.52371424",
"0.5136819",
"0.50922596",
"0.5070473",
"0.4993317",
"0.49904677",
"0.49719086",
"0.49631143",
"0.49408865",
"0.49349648",
"0.49195486",
"0.48649365",
"0.48622385",
"0.48500225",
"0.48488343",
"0.4847742",
"0.48362046",
"0.48339167",
"0.48339167",
"0.48152307",
"0.4813411",
"0.48124692",
"0.48124436",
"0.48061204",
"0.48055327",
"0.47990608",
"0.4797252"
]
| 0.71537685 | 0 |
used to count frequency of results in a list, returning a dictionary | def CountFrequency(my_list):
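    # Illustrative usage (not part of the original record): CountFrequency(["a", "b", "a"]) -> {"a": 2, "b": 1}
    # Behaves like collections.Counter for hashable items, but returns a plain dict.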
# Creating an empty dictionary
freq = {}
for item in my_list:
if (item in freq):
freq[item] += 1
else:
freq[item] = 1
return freq | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count(freq_list: list[str]) -> dict[str, int]:\n returned_dict: dict[str, int] = {}\n for item in freq_list:\n if item in returned_dict.keys():\n # increase the value associated with that key \n returned_dict[item] += 1\n else:\n # assign that key the value of 1\n returned_dict[item] = 1\n return returned_dict",
"def count_list_freq(l):\n freq = {}\n for items in l:\n freq[items] = l.count(items)\n return freq",
"def counts(li):\n d={}\n for x in li:\n d[x]=d.get(x,0)+1\n return dict(sorted([x,d[x]] for x in d))",
"def list_count(l):\n return dict((l.count(it), it) for it in l)",
"def count(list: list[str]) -> dict[str, int]:\n result: dict[str, int] = {}\n for keys in list: \n if keys not in result: \n result[keys] = 1 \n else: \n result[keys] += 1\n return result",
"def frequency(lst):\n\n count = dict()\n for word in lst:\n if word in count:\n count[word] += 1\n else:\n count[word] = 1\n return count",
"def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END",
"def count(wrd):\n ltrs = {}\n for i in wrd:\n ltrs[i] = wrd.count(i)\n return ltrs",
"def Counts(dict_of_list):\n return {k: len(v) for k, v in dict_of_list.iteritems()}",
"def construct_ngrams_dict(ngrams_list):\n counts = {}\n\n for t in ngrams_list:\n key = hash_function(t)\n if key in counts:\n counts[key] += 1\n else:\n counts[key] = 1\n return counts",
"def frequencies(self):\n dic = {}\n for word in self.words():\n dic[word] = dic.get(word, 0) + 1\n return dic",
"def freq_count(self):\n #eg: fc = spammy.freq_count()\n count_dict = defaultdict(int)\n for entry in self._train_list:\n if entry in self._vocab_set:\n count_dict[entry] += 1\n return count_dict",
"def dictCount(aList: list) -> dict:\n d = defaultdict(int)\n for elm in aList:\n d[elm] += 1\n\n return d",
"def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 1\n else:\n freq[desc] = 1\n\n return freq",
"def word_freq(self, word_list):\n hist = {}\n for word in word_list:\n hist[word] = hist.get(word, 0) + 1\n return hist",
"def get_freqs(self):\n dictionary = {}\n for word in self.word_list:\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n letter_sorted = sorted(dictionary.items(), key=lambda entry: entry[0]) #sorts dictionary into alphabetized tuples\n count_sorted = sorted(letter_sorted, key=lambda seq: seq[1], reverse=True) #sorts alphabetical tuples into count order\n return count_sorted",
"def wordListToFreqDict(word_list: list) -> dict:\n word_freq = [word_list.count(p) for p in word_list]\n return dict(list(zip(word_list, word_freq)))",
"def get_frequencies(tokens):\n cnt = {}\n\n for word in tokens:\n if word not in cnt:\n cnt[word] = 0\n\n cnt[word] += 1\n\n return cnt",
"def counts(sequence):\n # initialize the countainer\n count = defaultdict(int)\n # iterates through sequence elements\n for item in sequence:\n # if element not in counts add 0\n # else add 1\n count[item] = count.get(item, 0) + 1\n return dict(count)",
"def countby(iteratee, seq):\n return dict(Counter(map(iteratee, seq)))",
"def make_freq_dict(word_list):\n\n\tfreq_dict = {}\n\n\tfor word in word_list: #need to slice each tale into a list of words for this to work\n\t\tif word in freq_dict:\n\t\t\tcurrent_val = freq_dict.get(word)\n\t\t\tval = current_val + 1\n\t\t\tfreq_dict[word] = val #made a dictionary of the string (word, frequnecy)\n\t\telse: #if it isn't in the dictionary\n\t\t\tfreq_dict[word] = 1\n\treturn freq_dict",
"def num_of_sets(l):\r\n distinct_sweets = set(l) #let's find all distinct sweets from input list\r\n dict_of = {} #empty dict to store key:value (sweet:number of occurrences)\r\n\r\n for i in distinct_sweets:\r\n dict_of[i] = l.count(i)\r\n \r\n key_min = min(dict_of.keys(), key=(lambda k: dict_of[k]))\r\n return dict_of[key_min]",
"def countit(objs):\n out = {}\n for el in objs:\n out[el] = 1 + out.get(el, 0)\n out = {k: v for k, v in out.items()}\n return out",
"def count(words):\n word_count = {}\n num_words = 0\n unique_words = 0\n for word in words:\n num_words += 1\n if word_count.has_key(word):\n word_count[word] += 1\n else:\n word_count[word] = 1\n unique_words += 1\n word_count[\"total\"] = num_words\n word_count[\"unique\"] = unique_words\n return word_count",
"def array_occurrences(cmd_out: list) -> defaultdict:\n array_frequency = defaultdict(int) # type: defaultdict\n array_name = 0\n for entry in cmd_out:\n array_frequency[entry[array_name]] += 1\n return array_frequency",
"def frequencies(seq):\n d = dict()\n for item in seq:\n try:\n d[item] += 1\n except KeyError:\n d[item] = 1\n return d",
"def partition(list_of_tokens):\n freq_count = defaultdict(int)\n for word in list_of_tokens:\n freq_count[word] += 1\n\n return freq_count",
"def get_frequency(sequences):\n frequency = Counter()\n for seq in sequences:\n frequency.update({l: seq.count(l) for l in set(seq)})\n\n return dict(frequency)",
"def count_dict(self, lst):\n nos = list(self.digits)\n digit_count = dict([(digit, 0) for digit in nos])\n for item in lst:\n for num in item:\n digit_count[num] += 1\n return digit_count",
"def getFrequencyDict(sequence):\n # freqs: dictionary (element_type -> int)\n freq = {}\n for x in sequence:\n freq[x] = freq.get(x,0) + 1\n return freq"
]
| [
"0.8195011",
"0.8185501",
"0.80188507",
"0.8011016",
"0.7753846",
"0.76451635",
"0.73659384",
"0.73326683",
"0.73188984",
"0.72865754",
"0.72838765",
"0.72762924",
"0.72522473",
"0.72518456",
"0.722965",
"0.72236264",
"0.72004676",
"0.71779644",
"0.71361697",
"0.71243215",
"0.70905995",
"0.7061739",
"0.70464045",
"0.70367396",
"0.70352304",
"0.70315266",
"0.69885",
"0.6964278",
"0.6936341",
"0.6897299"
]
| 0.8210879 | 0 |
Get the latest trades that have occurred for a specific market. | def public_market_history(self, market_symbol):
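        # Unauthenticated request (no auth flag passed): GET markets/{market_symbol}/trades returns the most recent fills.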
return self.get(f'markets/{market_symbol}/trades') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_mytrades(self, symbol):\r\n param = {}\r\n param['symbol'] = self.__transfer_symbol(symbol)\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/history-orders', param, self.timeout)",
"def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n self.load_markets()\n market = None\n request = {}\n if limit is not None:\n request['take'] = limit\n request['take'] = limit\n if since is not None:\n request['toTime'] = self.yyyymmdd(self.milliseconds(), '.')\n request['fromTime'] = self.yyyymmdd(since, '.')\n if symbol is not None:\n market = self.market(symbol)\n request['pair'] = market['id']\n response = self.privateGetOrderOrderHistory(self.extend(request, params))\n #\n # [\n # {\n # \"ticks\":1574767951,\n # \"created\":\"26/11/19 13:32\",\n # \"action\":1,\n # \"price\":\"1000\",\n # \"pair\":\"EthNis\",\n # \"reference\":\"EthNis|10867390|10867377\",\n # \"fee\":\"0.5\",\n # \"feeAmount\":\"0.08\",\n # \"feeCoin\":\"₪\",\n # \"firstAmount\":\"-0.015\",\n # \"firstAmountBalance\":\"9\",\n # \"secondAmount\":\"14.93\",\n # \"secondAmountBalance\":\"130,233.28\",\n # \"firstCoin\":\"ETH\",\n # \"secondCoin\":\"₪\"\n # },\n # {\n # \"ticks\":1574767951,\n # \"created\":\"26/11/19 13:32\",\n # \"action\":0,\n # \"price\":\"1000\",\n # \"pair\":\"EthNis\",\n # \"reference\":\"EthNis|10867390|10867377\",\n # \"fee\":\"0.5\",\n # \"feeAmount\":\"0.08\",\n # \"feeCoin\":\"₪\",\n # \"firstAmount\":\"0.015\",\n # \"firstAmountBalance\":\"9.015\",\n # \"secondAmount\":\"-15.08\",\n # \"secondAmountBalance\":\"130,218.35\",\n # \"firstCoin\":\"ETH\",\n # \"secondCoin\":\"₪\"\n # }\n # ]\n #\n return self.parse_trades(response, market, since, limit)",
"def update_TradeHistory(self, market):\n ##self.marketid is to do!!!\n mid = self.marketid(market)\n history = self.Request.fetch('markettrades',params={'marketid':mid})\n pair = self.Pairs[mid]\n self.TradeHistory[pair] = history\n return 0",
"async def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n if since is not None:\n request['startTime'] = self.iso8601(since)\n else:\n # by default reverse=false, i.e. trades are fetched since the time of market inception(year 2015 for XBTUSD)\n request['reverse'] = True\n if limit is not None:\n request['count'] = limit\n response = await self.publicGetTrade(self.extend(request, params))\n #\n # [\n # {\n # timestamp: '2018-08-28T00:00:02.735Z',\n # symbol: 'XBTUSD',\n # side: 'Buy',\n # size: 2000,\n # price: 6906.5,\n # tickDirection: 'PlusTick',\n # trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',\n # grossValue: 28958000,\n # homeNotional: 0.28958,\n # foreignNotional: 2000\n # },\n # {\n # timestamp: '2018-08-28T00:00:03.778Z',\n # symbol: 'XBTUSD',\n # side: 'Sell',\n # size: 1000,\n # price: 6906,\n # tickDirection: 'MinusTick',\n # trdMatchID: '0d4f1682-5270-a800-569b-4a0eb92db97c',\n # grossValue: 14480000,\n # homeNotional: 0.1448,\n # foreignNotional: 1000\n # },\n # ]\n #\n return self.parse_trades(response, market, since, limit)",
"async def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n sort = '-1'\n request = {\n 'symbol': market['id'],\n }\n if since is not None:\n request['start'] = since\n sort = '1'\n if limit is not None:\n request['limit'] = limit # default 120, max 5000\n request['sort'] = sort\n response = await self.publicGetTradesSymbolHist(self.extend(request, params))\n #\n # [\n # [\n # ID,\n # MTS, # timestamp\n # AMOUNT,\n # PRICE\n # ]\n # ]\n #\n trades = self.sort_by(response, 1)\n return self.parse_trades(trades, market, None, limit)",
"async def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = None\n request = {\n 'end': self.milliseconds(),\n }\n if since is not None:\n request['start'] = since\n if limit is not None:\n request['limit'] = limit # default 25, max 1000\n method = 'privatePostAuthRTradesHist'\n if symbol is not None:\n market = self.market(symbol)\n request['symbol'] = market['id']\n method = 'privatePostAuthRTradesSymbolHist'\n response = await getattr(self, method)(self.extend(request, params))\n return self.parse_trades(response, market, since, limit)",
"async def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n request = {}\n market = None\n if symbol is not None:\n market = self.market(symbol)\n request['pair'] = market['id']\n if limit is not None:\n request['count'] = limit\n if since is not None:\n request['since'] = self.parse_to_int(since / 1000)\n response = await self.privateGetUserSpotTradeHistory(self.extend(request, params))\n data = self.safe_value(response, 'data', {})\n trades = self.safe_value(data, 'trades', [])\n return self.parse_trades(trades, market, since, limit)",
"def get_markets(self, market):\n url = \"{url}/{market}\".format(url=self.MARKET_SERVICE_URL,\n market=market)\n\n return self.make_request(url)",
"async def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = None\n request = {}\n if symbol is not None:\n market = self.market(symbol)\n request['symbol'] = market['id']\n if since is not None:\n request['startTime'] = self.iso8601(since)\n if limit is not None:\n request['count'] = limit\n request = self.deep_extend(request, params)\n # why the hassle? urlencode in python is kinda broken for nested dicts.\n # E.g. self.urlencode({\"filter\": {\"open\": True}}) will return \"filter={'open':+True}\"\n # Bitmex doesn't like that. Hence resorting to self hack.\n if 'filter' in request:\n request['filter'] = self.json(request['filter'])\n response = await self.privateGetExecutionTradeHistory(request)\n #\n # [\n # {\n # \"execID\": \"string\",\n # \"orderID\": \"string\",\n # \"clOrdID\": \"string\",\n # \"clOrdLinkID\": \"string\",\n # \"account\": 0,\n # \"symbol\": \"string\",\n # \"side\": \"string\",\n # \"lastQty\": 0,\n # \"lastPx\": 0,\n # \"underlyingLastPx\": 0,\n # \"lastMkt\": \"string\",\n # \"lastLiquidityInd\": \"string\",\n # \"simpleOrderQty\": 0,\n # \"orderQty\": 0,\n # \"price\": 0,\n # \"displayQty\": 0,\n # \"stopPx\": 0,\n # \"pegOffsetValue\": 0,\n # \"pegPriceType\": \"string\",\n # \"currency\": \"string\",\n # \"settlCurrency\": \"string\",\n # \"execType\": \"string\",\n # \"ordType\": \"string\",\n # \"timeInForce\": \"string\",\n # \"execInst\": \"string\",\n # \"contingencyType\": \"string\",\n # \"exDestination\": \"string\",\n # \"ordStatus\": \"string\",\n # \"triggered\": \"string\",\n # \"workingIndicator\": True,\n # \"ordRejReason\": \"string\",\n # \"simpleLeavesQty\": 0,\n # \"leavesQty\": 0,\n # \"simpleCumQty\": 0,\n # \"cumQty\": 0,\n # \"avgPx\": 0,\n # \"commission\": 0,\n # \"tradePublishIndicator\": \"string\",\n # \"multiLegReportingType\": \"string\",\n # \"text\": \"string\",\n # \"trdMatchID\": \"string\",\n # \"execCost\": 0,\n # \"execComm\": 0,\n # \"homeNotional\": 0,\n # \"foreignNotional\": 0,\n # \"transactTime\": \"2019-03-05T12:47:02.762Z\",\n # \"timestamp\": \"2019-03-05T12:47:02.762Z\"\n # }\n # ]\n #\n return self.parse_trades(response, market, since, limit)",
"def returnTradeHistory(self,\n currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass",
"def returnTradeHistory(self,\n currency_pair=\"all\",\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass",
"def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n self.load_markets()\n market = self.market(symbol)\n method = self.options['fetchTradesMethod'] # public_get_exchanges_pair_trades or public_get_exchanges_pair_lasttrades\n request = {\n 'pair': market['id'],\n }\n if since is not None:\n request['date'] = self.parse_to_int(since)\n if limit is not None:\n request['limit'] = limit # max 100000\n response = getattr(self, method)(self.extend(request, params))\n #\n # [\n # {\"date\":1651785980,\"price\":127975.68,\"amount\":0.3750321,\"isBid\":true,\"tid\":1261018},\n # {\"date\":1651785980,\"price\":127987.70,\"amount\":0.0389527820303982335802581029,\"isBid\":true,\"tid\":1261020},\n # {\"date\":1651786701,\"price\":128084.03,\"amount\":0.0015614749161156156626239821,\"isBid\":true,\"tid\":1261022},\n # ]\n #\n if isinstance(response, str):\n raise ExchangeError(response)\n return self.parse_trades(response, market, since, limit)",
"def get_trades_history(self, symbol, start_time, end_time, limit=1000):\n payload = {'symbol': symbol, 'start': start_time, 'end': end_time, 'limit': limit}\n return self.public_request('GET', '/api/v1/trades', **payload)",
"async def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n response = await self.publicGetPairTransactions(self.extend(request, params))\n data = self.safe_value(response, 'data', {})\n trades = self.safe_value(data, 'transactions', [])\n return self.parse_trades(trades, market, since, limit)",
"def get_historic_data(self):\n\n historic_market_events = []\n\n return historic_market_events",
"def get_trades_for(self, ticker):\n return trade.Trade.all_from_account_id_and_ticker(self.id, ticker)",
"def get_price_history_lookback(access_token,ticker,periodType,period,frequencyType,frequency):\r\n \r\n price_url = 'https://api.tdameritrade.com/v1/marketdata/{}/pricehistory'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Parameters for period of time and frequency of data to get\r\n params = {'periodType':periodType,\r\n 'period': period,\r\n 'frequencyType': frequencyType,\r\n 'frequency': frequency}\r\n \r\n #Make the get request to TD Ameritrade\r\n price_history_json = requests.get(url=price_url,headers=headers,params=params)\r\n return price_history_json.json()",
"def getMarketPrices(market, interval):\n try:\n tf = {3600: '3600Min',\n 60: '60Min',\n 30: '30Min',\n 5: '5Min',\n 1: '1Min'}\n history = API.getmarkethistory(MARKET)\n df = pd.DataFrame(history)\n df.index = pd.to_datetime(df['TimeStamp'])\n prices = df['Price'].resample(tf[interval]).ohlc()\n if BACKTESTFILE != \"\":\n return prices.dropna()\n return prices.dropna().tail(MEAN)\n except Exception as e:\n logging.error(\"failed at getMarketPrices\")\n logging.error(str(e))",
"def retrieve_trades(self):\n trades_file = self.current_trades_path()\n if not trades_file.exists():\n LOGGER.info(f\"no trades stored for league {self.league_id}\")\n return set()\n fo = trades_file.open(\"rb\")\n return pickle.load(fo)",
"def return_trade_history(self, currency_pair):\n return self.api_query('returnTradeHistory', {\"currencyPair\": currency_pair})",
"async def get_trade(self, symbol, limit=500):\n uri = \"/fapi/v1/trades\"\n params = {\n \"symbol\": symbol,\n \"limit\": limit\n }\n success, error = await self.request(\"GET\", uri, params)\n return success, error",
"def trades(Symbol='tBTCUSD', **params):\n endpoint = f'trades/{Symbol}/hist'\n return request(authenticate=False, version=2, endpoint=endpoint, method='GET', query_params=params)",
"def trades(self) -> list[TradeOffer]:\n return self._connection.trades",
"def returnTradeHistory(self, time=1 * 60 * 60, limit=100):\n assert limit <= 100, \"'limit' has to be smaller than 100\"\n return self.dpay.rpc.get_trade_history(\n transactions.formatTimeFromNow(-time),\n transactions.formatTimeFromNow(),\n limit,\n api=\"market_history\"\n )",
"def get_full_history(symbol):\n to_date = int(datetime.datetime.timestamp(datetime.datetime.now()))\n from_date = int(datetime.datetime.timestamp(datetime.datetime(1990, 1, 1, 1, 0, 0)))\n url_base = \"https://query1.finance.yahoo.com/v7/finance/download/\"\n url_params = f\"{symbol}.NS?period1={from_date}&period2={to_date}&interval=1d&events=history\"\n resp = requests.get(url_base + url_params)\n a = csv_to_list(resp)[1:]\n return create_price(symbol, a)",
"async def get_trades(self, symbol, limit=100):\n uri = \"/v3/trades\"\n params = {\n \"symbol\": symbol,\n \"limit\": limit\n }\n success, error = await self.request(\"GET\", uri, params)\n return success, error",
"def returnTicker(self):\n ticker = {}\n t = self.dpay.rpc.get_ticker(api=\"market_history\")\n ticker = {'highest_bid': float(t['highest_bid']),\n 'latest': float(t[\"latest\"]),\n 'lowest_ask': float(t[\"lowest_ask\"]),\n 'percent_change': float(t[\"percent_change\"]),\n 'bbd_volume': t[\"bbd_volume\"],\n 'dpay_volume': t[\"dpay_volume\"]}\n return ticker",
"def returnMarketHistory(\n self,\n bucket_seconds=60 * 5,\n start_age=1 * 60 * 60,\n stop_age=0,\n ):\n return self.dpay.rpc.get_market_history(\n bucket_seconds,\n transactions.formatTimeFromNow(-start_age - stop_age),\n transactions.formatTimeFromNow(-stop_age),\n api=\"market_history\"\n )",
"def get_market_orderbook(self, market):\n return self.__call__('market', 'getmarketorderbook',\n {'marketname': market})",
"def get_open_orders(self, market):\n #{'success': True, 'message': '', 'result': [{'Uuid': None, 'OrderUuid': '7f43f22f-586b-46d8-a4b2-f457cfeb2aac', 'Exchange': 'BTC-GEO', 'OrderType': 'LIMIT_SELL', 'Quantity': 2.03478908, 'QuantityRemaining': 2.03478908, 'Limit': 0.00097503, 'CommissionPaid': 0.0, 'Price': 0.0, 'PricePerUnit': None, 'Opened': '2017-07-03T14:13:20.903', 'Closed': None, 'CancelInitiated': False, 'ImmediateOrCancel': False, 'IsConditional': False, 'Condition': 'NONE', 'ConditionTarget': None}]}\n #{'success': 1, 'return': {'240005185729406': {'pair': 'lsk_btc', 'type': 'sell', 'amount': 1, 'rate': 0.096319, 'timestamp_created': '1499255345', 'status': 0}}}\n result = self.api_query('ActiveOrders', {'pair': market})\n\n openOrder =[]\n if result['success'] == 1:\n try :\n for key, value in result['return'].items():\n openOrder.append({'OrderUuid':key})\n result = {'success': True, 'message': '', 'result' : openOrder}\n except:\n result = {'success': False, 'message': '', 'result': openOrder}\n else:\n result = {'success': False, 'message': '', 'result': openOrder}\n\n return result"
]
| [
"0.65934575",
"0.64599925",
"0.6330147",
"0.6324947",
"0.630509",
"0.628073",
"0.6178016",
"0.6147427",
"0.60577464",
"0.60470587",
"0.60470587",
"0.59924877",
"0.59791535",
"0.597466",
"0.59554976",
"0.5939027",
"0.5874146",
"0.5853247",
"0.5833754",
"0.57954925",
"0.578244",
"0.5779385",
"0.57629824",
"0.57585233",
"0.5755235",
"0.5734202",
"0.5733229",
"0.5728293",
"0.5727494",
"0.57097584"
]
| 0.70258033 | 0 |
Get the balance from your account for a specific currency. | def account_balance(self, currency_symbol):
return self.get(f'balances/{currency_symbol}', auth=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_balance(self, currency=None):\n if currency:\n return self.__call__('balance', 'getbalance',\n {'currencyname': currency})\n return self.__call__('balance', 'getbalances')",
"def getBalance(self, currency=''):\n\n if self.app.getExchange() == 'binance':\n if self.mode == 'live':\n model = BAuthAPI(self.app.getAPIKey(), self.app.getAPISecret())\n df = model.getAccount()\n if isinstance(df, pd.DataFrame):\n if currency == '':\n # retrieve all balances\n return df\n else:\n # retrieve balance of specified currency\n df_filtered = df[df['currency'] == currency]['available']\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR', 'GBP', 'USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n else:\n return 0.0\n else:\n # return dummy balances\n if currency == '':\n # retrieve all balances\n return self.balance\n else:\n if self.app.getExchange() == 'binance':\n self.balance = self.balance.replace('QUOTE', currency)\n else: \n # replace QUOTE and BASE placeholders\n if currency in ['EUR','GBP','USD']:\n self.balance = self.balance.replace('QUOTE', currency)\n else:\n self.balance = self.balance.replace('BASE', currency)\n\n if self.balance.currency[self.balance.currency.isin([currency])].empty:\n self.balance.loc[len(self.balance)] = [currency, 0, 0, 0]\n\n # retrieve balance of specified currency\n df = self.balance\n df_filtered = df[df['currency'] == currency]['available']\n\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR', 'GBP', 'USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n\n else:\n if self.mode == 'live':\n # if config is provided and live connect to Coinbase Pro account portfolio\n model = CBAuthAPI(self.app.getAPIKey(), self.app.getAPISecret(), self.app.getAPIPassphrase(), self.app.getAPIURL())\n if currency == '':\n # retrieve all balances\n return model.getAccounts()[['currency', 'balance', 'hold', 'available']]\n else:\n df = model.getAccounts()\n # retrieve balance of specified currency\n df_filtered = df[df['currency'] == currency]['available']\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR','GBP','USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n \n else:\n # return dummy balances\n\n if currency == '':\n # retrieve all balances\n return self.balance\n else:\n # replace QUOTE and BASE placeholders\n if currency in ['EUR','GBP','USD']:\n self.balance = self.balance.replace('QUOTE', currency)\n elif currency in ['BCH','BTC','ETH','LTC','XLM']:\n self.balance = self.balance.replace('BASE', currency)\n\n if self.balance.currency[self.balance.currency.isin([currency])].empty == True:\n self.balance.loc[len(self.balance)] = [currency,0,0,0]\n\n # retrieve balance of specified currency\n df = self.balance\n df_filtered = df[df['currency'] == currency]['available']\n\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return 
balance of specified currency (if positive)\n if currency in ['EUR','GBP','USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))",
"def get_balance(self, ticker):\n return self.trading_client.account_balance(ticker, 'usd')",
"def get_wallet_balance(self, walletId, currency):\n return",
"def get_balance(self):\n return self._call_account_method(\n 'getBalance'\n )",
"def get_balance(self, currency):\n\n result = self.api_query('getInfo', {'coinName': currency, 'need_new':0})\n\n #{'success': True, 'message': '', 'result': {'Currency': 'NXS', 'Balance': 1.55257461, 'Available': 1.55257461, 'Pending': 0.0, 'CryptoAddress': None}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 2}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255221}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255362}}\n\n #{'success': False, 'message': 'INVALID_CURRENCY', 'result': None}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255600}}\n try:\n result = {'success': True, 'message' :'', 'result':{'Currency': currency, 'Balance': result['return']['funds_incl_orders'][currency], 'Available': result['return']['funds'][currency], 'Pending': 0.0, 'CryptoAddress': None}}\n except:\n result = {'success': False, 'message' :'', 'result':{'Currency': currency, 'Balance': 0.0, 'Available': 0.0, 'Pending': 0.0, 'CryptoAddress': None}}\n return result",
"def get_wallet_balance():\n try:\n if CONF.exchange == 'bitmex':\n return EXCHANGE.fetch_balance()['info'][0]['walletBalance'] * CONF.satoshi_factor\n if CONF.exchange == 'kraken':\n asset = CONF.base if CONF.base != 'BTC' else 'XBt'\n return float(EXCHANGE.private_post_tradebalance({'asset': asset})['result']['tb'])\n if CONF.exchange == 'liquid':\n result = EXCHANGE.private_get_accounts_balance()\n if result is not None:\n for bal in result:\n if bal['currency'] == CONF.base:\n return float(bal['balance'])\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_wallet_balance()",
"def get_balance(self):\n r = requests.get(build_api_call(self.base_url, None, 'balance', ''), auth=HTTPBasicAuth(KEY, SECRET))\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'",
"def balance(self, account_number: int): \n return self._accounts[account_number][1]",
"def get_balance(card):\n data = {\n \"Card.Number\": card[0],\n \"Card.Pin\": card[1],\n }\n\n response = requests.post(BALANCE_URL, data=data, headers=HEADERS)\n if response.status_code == 200:\n match = BALANCE_RE.search(response.text)\n if match:\n return float(match.group(1))",
"def currency_account(self, currency):\r\n param = {}\r\n param['currency'] = currency\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account', param, self.timeout)",
"def account_balance_in_card_currency(self):\n return self._account_balance_in_card_currency",
"def get_account_balance(self):\n return int(self.request('get', 'fort/accounts')['balance'])",
"def get_balance(self, exchange_id, coin):\n self.check_arguments(exchange_id, coin)\n\n try:\n balance = self.get_balance_helper(exchange_id, coin)\n return balance\n except RetryError:\n raise ServerError(exchange_id)",
"def balance(self, card_number):\n database_cursor.execute(f\"SELECT balance FROM card WHERE number = {card_number};\")\n return database_cursor.fetchone()[0]",
"async def get_balance(self) -> int:\n # todo: support both strong and eventual consistency\n try:\n response = await self.storage.get(pk=self.unique_id, fields=\"balance\")\n except storage.exceptions.ObjectNotFoundError:\n raise crud.exceptions.WalletNotFoundError(\n f\"Wallet with {self.wallet_id=} does not exists\"\n )\n\n return int(response[\"balance\"])",
"def balance(self):\n url = self.base_url + 'account/balance'\n self.session.headers.update(self.sign(url))\n resp = self.session.get(url)\n try:\n data = resp.json()\n data['amount'] = float(data['amount'])\n return pd.Series(data)\n except:\n return resp",
"def balance(self) -> float:\n\t\tbalance = 0\n\t\tfor transaction in self.transactions:\n\t\t\tsign = 1 if transaction.receiving_account == self.__number else -1\n\t\t\tbalance += sign*transaction.usd*transaction.completed\n\t\t# The bank has infinite money\n\t\tif self.name == Account.BANK:\n\t\t\tbalance = Decimal('Infinity')\n\t\treturn balance",
"def getBalance(self):\n connection = sqlite3.connect('/home/BorneAgain/Desktop/flasktest/accounts.db')\n\n cursor = connection.cursor()\n\n sql_command = \"\"\"select amount from accounts where name=?;\"\"\"\n\n cursor.execute(sql_command, (self.name, ))\n\n return round(float(re.sub(r'[\\(\\),]', '', str(cursor.fetchone()))), 2)",
"def get_account_balances(self):\n params = clean_locals(locals())\n date_time_sent = datetime.datetime.utcnow()\n response = self.request('GetAccountBalances', params, secure=True)\n data = self.process_response(response, date_time_sent, None)\n return parse_account_balance(data.get('data', {})) if data.get('data') else {}",
"def get_account_balance(self, account_number):\n\n if not isinstance(account_number, str):\n raise ValueError('Invalid type <{}> for account number'.format(\n type(account_number)))\n\n try:\n result = self.di.get(account_number) if self.di is not None \\\n else self.accounts.get(account_number, None)\n if result is not None:\n result = result[\"balance\"]\n\n except DBConnectionError:\n result = \"Connection error occurred. Try Again.\"\n return result",
"def authorized_get_account_balance(self, huid):\n acc = self.request('get', safeformat('fort/accounts/{:hex}', huid))\n return int(acc['balance'])",
"def cash_balance(self):\n cash_transaction = CashTransaction(self.user)\n return cash_transaction.get_balance_amount()",
"def get_account_balance(account):\n balance = 0\n\n for address in get_addresses_by_account(account):\n balance += get_address_balance(address)\n\n return float(balance)",
"def getBalance(self):\n return str(self.account.getBalance())",
"async def fetch_balance(self, params={}):\n # self api call does not return the 'used' amount - use the v1 version instead(which also returns zero balances)\n # there is a difference between self and the v1 api, namely trading wallet is called margin in v2\n await self.load_markets()\n accountsByType = self.safe_value(self.options, 'v2AccountsByType', {})\n requestedType = self.safe_string(params, 'type', 'exchange')\n accountType = self.safe_string(accountsByType, requestedType, requestedType)\n if accountType is None:\n keys = list(accountsByType.keys())\n raise ExchangeError(self.id + ' fetchBalance() type parameter must be one of ' + ', '.join(keys))\n isDerivative = requestedType == 'derivatives'\n query = self.omit(params, 'type')\n response = await self.privatePostAuthRWallets(query)\n result = {'info': response}\n for i in range(0, len(response)):\n balance = response[i]\n type = self.safe_string(balance, 0)\n currencyId = self.safe_string_lower(balance, 1, '')\n start = len(currencyId) - 2\n isDerivativeCode = currencyId[start:] == 'f0'\n # self will only filter the derivative codes if the requestedType is 'derivatives'\n derivativeCondition = (not isDerivative or isDerivativeCode)\n if (accountType == type) and derivativeCondition:\n code = self.safe_currency_code(currencyId)\n account = self.account()\n account['total'] = self.safe_string(balance, 2)\n account['free'] = self.safe_string(balance, 4)\n result[code] = account\n return self.safe_balance(result)",
"def get_balance(self, player_name):\n balance = int(self.db.read_value(player_name, \"money\"))\n return balance",
"def balance(self):\n return self.selectedAccount.balance",
"def get_balance(self):\n balance = 0\n for transaction in self.ledger:\n balance += transaction[\"amount\"]\n return balance",
"def balance(self, date=None):\r\n\r\n qs = self._entries()\r\n if date:\r\n qs = qs.filter(transaction__t_stamp__lt=date)\r\n r = qs.aggregate(b=Sum('amount'))\r\n b = r['b']\r\n\r\n flip = self._DEBIT_IN_DB()\r\n if self._positive_credit():\r\n flip *= -1\r\n\r\n if b == None:\r\n b = Decimal(\"0.00\")\r\n b *= flip\r\n\r\n #print \"returning balance %s for %s\" % (b, self)\r\n return b"
]
| [
"0.8360675",
"0.8266783",
"0.7911432",
"0.7850472",
"0.7594469",
"0.75444335",
"0.7526113",
"0.7363458",
"0.7354129",
"0.7344984",
"0.731404",
"0.72621316",
"0.7212807",
"0.71113855",
"0.7090096",
"0.708783",
"0.70214397",
"0.6956173",
"0.6894783",
"0.6855522",
"0.6837875",
"0.67794347",
"0.6771336",
"0.67708004",
"0.67250925",
"0.6715251",
"0.670284",
"0.6682295",
"0.6655315",
"0.6653243"
]
| 0.8442685 | 0 |
Construct and send an HTTP request to the Bittrex API. | def send_request(self, method, uri, values=None, headers=None, auth=False):
url = BittrexAutoTraderRequest.BASE_URL + '/' + uri
data = ''
if method == 'GET':
if values:
url += BittrexAutoTraderRequest._create_query_str(values)
else:
data = json.dumps(values)
req = None
for _ in range(BittrexAutoTraderRequest.CONNECT_RETRIES):
# Sign authentication requests.
if auth is True:
timestamp = str(round(time.time() * 1000))
content_hash = BittrexAutoTraderRequest._hash_content(data)
signature = BittrexAutoTraderRequest._sign_request(
self.secret, method, url, timestamp, content_hash
)
if headers is None:
headers = {}
headers['Api-Key'] = self.apikey
headers['Api-Timestamp'] = timestamp
headers['Api-Signature'] = signature
headers['Api-Content-Hash'] = content_hash
try:
if method == 'GET':
req = requests.get(url, headers=headers)
else:
req = requests.request(
method, url, json=values, headers=headers
)
except requests.exceptions.ConnectionError:
time.sleep(BittrexAutoTraderRequest.CONNECT_WAIT)
else:
break
res = req.json()
if res is None:
print('Script failure: Connection timeout', file=sys.stderr)
sys.exit(1)
if req.ok is False:
print("Bittex response: %s" % res['code'], file=sys.stderr)
sys.exit(1)
# Return list of dicts.
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _request(http, project, method, data, base_url, client_info):\n user_agent = client_info.to_user_agent()\n headers = {\n \"Content-Type\": \"application/x-protobuf\",\n \"User-Agent\": user_agent,\n connection_module.CLIENT_INFO_HEADER: user_agent,\n }\n api_url = build_api_url(project, method, base_url)\n\n response = http.request(url=api_url, method=\"POST\", headers=headers, data=data)\n\n if response.status_code != 200:\n error_status = status_pb2.Status.FromString(response.content)\n raise exceptions.from_http_status(\n response.status_code, error_status.message, errors=[error_status]\n )\n\n return response.content",
"def tapi(self,method,argc,**kwargs):\n url = self.btce_trade_url + argc + '/'\n kwargs['nonce'] = str(int(time.time()))\n kwargs['method'] = argc\n body = urllib.urlencode(kwargs)\n sign = self.hash_tapi( body )\n headers = dict( Sign = sign, Key = self.trade_key )\n if method == 'POST':\n response = requests.post( url,\n data = body,\n headers = headers,\n )\n elif method == 'GET':\n response = requests.get( url,\n headers = headers,\n )\n return response.text",
"def _make_request(self, url: str, parameters: dict = None,\n method: str = 'GET', *args, **kwargs):\n response = requests.request(\n method=method,\n url=build_url(\n self.BASE_API_URL, url, parameters\n ),\n headers={\n 'Authorization': 'Bearer {}'.format(self._access_token)\n }, **kwargs\n )\n if response.ok:\n return response.json()\n raise MondoApiException(response.json()['message'])",
"def request(self, url, data=None, params={}, files=None):\n params['token'] = self.token\n request = self.make_request(url, data=data, params=params, files=files)\n return request",
"def api( self, method, argc, **kwargs ):\n url = self.btce_url + argc + '/'\n body = urllib.urlencode(kwargs)\n sign = self.hash_hmac( body )\n headers = dict( Sign = sign, Uid = self.uid )\n if method == 'POST':\n response = requests.post( url,\n data = body,\n headers = headers,\n )\n elif method == 'GET':\n response = requests.get( url,\n headers = headers,\n )\n return response.text",
"def _SendRequest(self, method, path, query, content):\n assert path.startswith(\"/\")\n\n curl = self._CreateCurl()\n\n if content is not None:\n encoded_content = self._json_encoder.encode(content)\n else:\n encoded_content = \"\"\n\n # Build URL\n urlparts = [self._base_url, path]\n if query:\n urlparts.append(\"?\")\n urlparts.append(urlencode(self._EncodeQuery(query)))\n\n url = \"\".join(urlparts)\n\n self._logger.debug(\"Sending request %s %s (content=%r)\",\n method, url, encoded_content)\n\n # Buffer for response\n encoded_resp_body = _CompatIO()\n\n # Configure cURL\n curl.setopt(pycurl.CUSTOMREQUEST, str(method))\n curl.setopt(pycurl.URL, str(url))\n curl.setopt(pycurl.POSTFIELDS, str(encoded_content))\n curl.setopt(pycurl.WRITEFUNCTION, encoded_resp_body.write)\n\n try:\n # Send request and wait for response\n try:\n curl.perform()\n except pycurl.error as err:\n if err.args[0] in _CURL_SSL_CERT_ERRORS:\n raise CertificateError(\"SSL certificate error %s\" % err,\n code=err.args[0])\n\n raise GanetiApiError(str(err), code=err.args[0])\n finally:\n # Reset settings to not keep references to large objects in memory\n # between requests\n curl.setopt(pycurl.POSTFIELDS, \"\")\n curl.setopt(pycurl.WRITEFUNCTION, lambda _: None)\n\n # Get HTTP response code\n http_code = curl.getinfo(pycurl.RESPONSE_CODE)\n\n # Was anything written to the response buffer?\n if encoded_resp_body.tell():\n encoded_resp_body.seek(0)\n response_content = simplejson.load(encoded_resp_body)\n else:\n response_content = None\n\n if http_code != HTTP_OK:\n if isinstance(response_content, dict):\n msg = (\"%s %s: %s\" %\n (response_content[\"code\"],\n response_content[\"message\"],\n response_content[\"explain\"]))\n else:\n msg = str(response_content)\n\n raise GanetiApiError(msg, code=http_code)\n\n return response_content",
"def make_request(self, url, action, data='', status_code='', parser=None):\n self._url = self.get_api_path(url)\n headers = {\n 'Content-Type': \"application/json\",\n 'Token': self.token,\n\n }\n kwargs = {}\n if headers:\n kwargs.update(headers=headers)\n if data:\n kwargs.update(data=json.dumps(data))\n\n return getattr(self.http, action.lower())(self._url, **kwargs)",
"def _make_request(self, method, url, post_data=None, body=None):\r\n if not self.connection:\r\n self._connect()\r\n try:\r\n self.connection.close()\r\n except:\r\n pass\r\n self.connection.connect()\r\n headers = {}\r\n if self.auth_header:\r\n headers[\"Authorization\"] = self.auth_header\r\n self.connection.request(method, url, body, headers)\r\n resp = self.connection.getresponse()\r\n return resp",
"def make_request(self, params=None, ignore_response=False, raw_response=False):\n # If we're pinging the TWC engine ensure that integration is disabled,\n # and default channel and BA is provided.\n if self.project == 'TWC':\n if params is not None:\n params['disable_integration'] = 'true'\n else:\n params = {'disable_integration': 'true'}\n\n # Make the request to the endpoint\n if params is None:\n response = urllib2.urlopen(self.endpoint)\n else:\n response = urllib2.urlopen(self.endpoint, urllib.urlencode(params))\n\n if raw_response:\n r = ET.fromstring(response.read())\n return r.iter()\n elif not ignore_response:\n return self.parse_response(response.read())\n else:\n return None",
"def rpc_request(method, params, url=LOCAL):\n client = HTTPClient(url)\n return client.request(method, params)",
"def make_http_request(method, url, params=None, data=None, cookies=None, headers=None, timeout=None):\n\n http = httplib2.Http(timeout=timeout)\n\n #if data is not None:\n # data = urllib.urlencode(data)\n\n if params is not None:\n enc_params = urllib.urlencode(params, doseq=True)\n if urlparse.urlparse(url).query:\n url = '%s&%s' % (url, enc_params)\n else:\n url = '%s?%s' % (url, enc_params)\n\n resp, content = http.request(url, method, headers=headers, body=data)\n\n print content\n\n return DataComResponse(resp, content.decode('utf-8'), url)",
"def call(self):\n # if this is a POST request, process data\n if self.data:\n post_json = json.dumps(self.data)\n values = {'json': post_json, 'apikey': API_KEY}\n post = urllib.parse.urlencode(values)\n\n else:\n post = None\n\n req = urllib.request.Request(self.url, post)\n\n try:\n self.response = urllib.request.urlopen(req, timeout=self.timeout)\n\n except (URLError, HTTPError, timeout) as error:\n self.response = error",
"def _make_request(self, method, url, post_data=None, body=None):\n if not self.connection:\n self._connect()\n try:\n self.connection.close()\n except:\n pass\n self.connection.connect()\n headers = {}\n if self.auth_header:\n headers[\"Authorization\"] = self.auth_header\n self.connection.request(method, url, body, headers)\n resp = self.connection.getresponse()\n return resp",
"def _make_api_call(url, params=None):\n if params is None:\n params = {}\n\n # params['apikey'] = CONFIG.BIOPORTAL_API_KEY\n params['apikey'] = \"8316a8aa-ff8e-4d6e-aa95-faeabfc72d2a\"\n return requests.get(url, params=params)",
"def send_bitbucket_request(req_url, auth_tokens):\n # Success status 200, return JSON\n req = requests.get(req_url, auth=auth_tokens)\n if req.status_code == 200:\n return json.loads(req.content)\n return {}",
"def http_request(self, path=\"/\", method=\"GET\", host=None, port=None, json=False, data=None):\n\n host = host or '127.0.0.1'\n port = port or 8080\n url = get_url(host=host, port=port, path=path)\n\n return self.http_session.request(method, url, json=json, data=data)",
"def send_request(self, url, additional_headers=None):\n\n if not API_KEY:\n raise Exception(\n \"No api key found in environment variables or key is None/empty string\"\n )\n\n headers = {\"X-API-KEY\": API_KEY}\n if additional_headers:\n headers.update(additional_headers)\n\n res = requests.get(url, headers=headers, timeout=ERFPACHT_API_REQUEST_TIMEOUT)\n\n logger.debug(\"Response status: {}, text: {}\".format(res.status_code, res.text))\n\n if res.status_code == 403:\n raise Exception(\n \"Unable to authenticate to source API. Check if the provided api key is correct and if you are making the request through a whitelisted environment (e.g. secure VPN).\"\n )\n\n # Handle 400 range responses\n if str(res.status_code)[0] == \"4\":\n raise Exception(\n \"The source API responded with 4xx status code, saying: {}\".format(\n res.text\n )\n )\n\n return res",
"def _request(self, url, params, base_url=None, first_request_time=None, verbose=False, requests_kwargs=None):\n\n if not first_request_time:\n first_request_time = datetime.now()\n\n if base_url is None:\n base_url = self.base_url\n\n elapsed = datetime.now() - first_request_time\n # TODO: to catch timeouts\n # if elapsed > self.retry_timeout:\n # raise TimeOutException()\n\n # create url :: self._generate_query_url(url, params)\n query_url = url\n\n # url encoding of params\n # TODO: use urlencoding here on params\n\n requests_kwargs = requests_kwargs or {}\n final_requests_kwargs = dict(self.requests_kwargs, **requests_kwargs)\n\n # method\n requests_method = self.session.get\n\n try:\n response = requests_method(\n base_url + query_url,\n params=params,\n **final_requests_kwargs)\n\n # temporary, for logging\n if verbose:\n pretty_print_POST(response.request)\n\n except requests.exceptions.Timeout:\n raise TimeOutException()\n except Exception as e:\n raise TransportError(e)\n\n result = self._get_body(response)\n\n return result",
"def _make_request(self, method: str, params: Dict) -> Dict:\n\n # Define a new session.\n request_session = requests.Session()\n request_session.verify = True\n\n # Define a new request.\n request_request = requests.Request(\n method=method.upper(),\n url=self.bea_url,\n params=params\n ).prepare()\n\n # Send the request.\n response: requests.Response = request_session.send(\n request=request_request\n )\n\n # Close the Session\n request_session.close()\n\n print(response.url)\n\n # If the response is OK then return it.\n if response.ok and self._format == 'JSON':\n return response.json()\n elif response.ok and self._format == 'XML':\n return response.text\n else:\n raise requests.ConnectionError()",
"def _make_request(self):\n response = urllib2.urlopen(\n url=self.api_url,\n data=self._get_request_data()\n )\n content = response.read()\n return json.loads(content.decode('utf8'))",
"def request(self, base_url, path_components, params, method='GET', headers=None, raw_stream=False, retries=0):\n if retries < self.max_retries:\n # Add API version to url path if needed\n if base_url == Mixpanel.IMPORT_API or base_url == Mixpanel.BETA_IMPORT_API:\n base = [base_url]\n else:\n base = [base_url, str(Mixpanel.VERSION)]\n\n request_url = '/'.join(base + path_components)\n\n encoded_params = Mixpanel._unicode_urlencode(params)\n\n # Set up request url and body based on HTTP method and endpoint\n if method == 'GET' or method == 'DELETE':\n data = None\n request_url += '?' + encoded_params\n else:\n data = encoded_params\n if base_url == self.IMPORT_API or 'import-people' in path_components or 'import-events' in path_components:\n data += '&verbose=1'\n # Uncomment the line below to log the request body data\n # Mixpanel.LOGGER.debug(method + ' data: ' + data)\n if isinstance(data, str):\n data = data.encode()\n # Uncomment the line below to log the request body data\n # Mixpanel.LOGGER.debug(data)\n Mixpanel.LOGGER.debug(\"Request Method: \" + method)\n Mixpanel.LOGGER.debug(\"Request URL: \" + request_url)\n\n if headers is None:\n headers = {}\n headers['Authorization'] = 'Basic {encoded_secret}'.format(\n encoded_secret=base64.b64encode((self.api_secret + ':').encode()).decode())\n request = urllib.request.Request(request_url, data, headers)\n Mixpanel.LOGGER.debug(\"Request Headers: \" + json.dumps(headers))\n # This is the only way to use HTTP methods other than GET or POST with urllib2\n if method != 'GET' and method != 'POST':\n request.get_method = lambda: method\n\n try:\n response = urllib.request.urlopen(request, timeout=self.timeout)\n Mixpanel.LOGGER.debug('response')\n Mixpanel.LOGGER.debug(response)\n if raw_stream and base_url == Mixpanel.RAW_API:\n return response\n except urllib.error.HTTPError as e:\n Mixpanel.LOGGER.warning('The server couldn\\'t fulfill the request.')\n Mixpanel.LOGGER.warning('Error code: {}'.format(e.code))\n Mixpanel.LOGGER.warning('Reason: {}'.format(e.reason))\n if hasattr(e, 'read'):\n Mixpanel.LOGGER.warning('Response: {}'.format(e.read()))\n if e.code >= 500:\n # Retry if we get an HTTP 5xx error\n Mixpanel.LOGGER.warning(\"Attempting retry #\" + str(retries + 1))\n self.request(base_url, path_components, params, method=method, headers=headers,\n raw_stream=raw_stream, retries=retries + 1)\n except urllib.error.URLError as e:\n Mixpanel.LOGGER.warning('We failed to reach a server.')\n Mixpanel.LOGGER.warning('Reason: {}'.format(str(e.reason)))\n if hasattr(e, 'read'):\n Mixpanel.LOGGER.warning('Response: {}'.format(e.read()))\n Mixpanel.LOGGER.warning(\"Attempting retry #\" + str(retries + 1))\n self.request(base_url, path_components, params, method=method, headers=headers, raw_stream=raw_stream,\n retries=retries + 1)\n except SSLError as e:\n if e.message == 'The read operation timed out':\n Mixpanel.LOGGER.warning('The read operation timed out.')\n self.timeout = self.timeout + 30\n Mixpanel.LOGGER.warning(\n 'Increasing timeout to ' + str(self.timeout) + ' and attempting retry #' + str(retries + 1))\n self.request(base_url, path_components, params, method=method, headers=headers,\n raw_stream=raw_stream, retries=retries + 1)\n else:\n raise\n\n else:\n try:\n # If the response is gzipped we go ahead and decompress\n if response.info().get('Content-Encoding') == 'gzip':\n buf = io.StringIO(response.read())\n f = gzip.GzipFile(fileobj=buf)\n response_data = f.read()\n else:\n response_data = response.read()\n return 
response_data.decode()\n except IncompleteRead as e:\n Mixpanel.LOGGER.warning(\"Response data is incomplete. Attempting retry #\" + str(retries + 1))\n self.request(base_url, path_components, params, method=method, headers=headers,\n raw_stream=raw_stream, retries=retries + 1)\n else:\n Mixpanel.LOGGER.warning(\"Maximum retries reached. Request failed. Try again later.\")\n raise BaseException",
"def request(self, verb, address, params=None, data=None):\n return BWUser.bare_request(verb=verb, address_root=self.api_url,\n address_suffix=address,\n access_token=self.token,\n params=params or dict(),\n data=data or dict())",
"def make_HTTP_request(self, method, url, body, headers, callback=None):\r\n self.push_HTTP_request(method, url, body, headers, callback)\r\n self.pop_response()",
"def make_request(url, params, auth=None, data=None, contentType=None):\n #print 'make_request'\n\n # Import Gevent and monkey patch\n #import gevent\n from gevent import monkey\n monkey.patch_all()\n\n # Import IO Libraries\n import urllib\n import urllib2\n\n if params:\n url = url + '?' + urllib.urlencode(params)\n\n #print url\n #print data\n #print auth\n #print contentType\n\n req = urllib2.Request(url, data=data)\n\n if auth:\n req.add_header('AUTHORIZATION', 'Basic ' + auth)\n\n if contentType:\n req.add_header('Content-type', contentType)\n else:\n if data:\n req.add_header('Content-type', 'text/xml')\n\n\n return urllib2.urlopen(req)",
"def http_request(self, method, path, data=None, params=None):\n\n s = Session()\n url = urljoin(self.BASE_URL, path)\n full_url = url\n try:\n full_url = full_url + \"?\" + urlencode(params)\n except:\n pass\n\n headers = self.request_headers(method, full_url)\n\n req = Request(\n method,\n url,\n headers=headers,\n data=data,\n params=params\n )\n prepped = req.prepare()\n resp = s.send(prepped, timeout=self.timeout)\n if resp.status_code == 429:\n raise errors.APIRateLimitError(\"Threat Stack API rate limit exceeded\")\n else:\n return self.handle_response(resp)",
"def _make_request(self):\n try:\n self.response = requests.request(\n method=self.method,\n url=self.url,\n params=self.params,\n data=self.data,\n )\n\n logger.debug(f\"Request URL: {self.response.url}\")\n\n self.response.raise_for_status()\n\n # wrap all `requests` library error and serve as custom application error\n except RequestException as e:\n logger.error(e.__str__(), exc_info=True)\n raise ExternalAPIError(\n \"Error while communication with External API\"\n )",
"def send_get(self, api_url, query=None):\n resp = requests.get(self.base_url + api_url, params=query)\n\n return resp",
"def _make_request(self, method, path, **kwargs):\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'User-Agent': USER_AGENT,\r\n }\r\n headers.update(kwargs.get('headers', {}))\r\n kwargs['headers'] = headers\r\n kwargs['auth'] = self.auth\r\n\r\n url = '/'.join((self.endpoint, 'v1', self.account_id, path))\r\n resp = requests.request(method, url, **kwargs)\r\n resp.raise_for_status()\r\n return resp",
"def build_http_request(method: bytes, url: bytes,\n protocol_version: bytes = HTTP_1_1,\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n if headers is None:\n headers = {}\n return build_http_pkt(\n [method, url, protocol_version], headers, body)",
"def build_http_request(method: bytes, url: bytes,\n protocol_version: bytes = b'HTTP/1.1',\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n if headers is None:\n headers = {}\n return build_http_pkt(\n [method, url, protocol_version], headers, body)"
]
| [
"0.6721595",
"0.6096072",
"0.6095601",
"0.60827345",
"0.60781235",
"0.6015352",
"0.5985145",
"0.59460795",
"0.5938586",
"0.5925046",
"0.5918472",
"0.5911465",
"0.5895516",
"0.5888817",
"0.5880682",
"0.5820867",
"0.5807918",
"0.57948893",
"0.57811356",
"0.5774148",
"0.5771573",
"0.5768816",
"0.5768315",
"0.57611936",
"0.57547414",
"0.5750689",
"0.57261395",
"0.5722344",
"0.566236",
"0.56444085"
]
| 0.6212627 | 1 |
Returns a query string of name/value pairs. | def _create_query_str(data):
params = []
for name, value in data.items():
params.append(name + '=' + str(value))
return '?' + '&'.join(params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generate_query_string(self):\n \n query_items = {}\n \n for key, val in self.__dict__.iteritems():\n if not key.startswith('_'):\n query_items[key] = val.encode('utf-8')\n \n return urllib.urlencode(query_items)",
"def get_query_string(self):\r\n pass",
"def query_dict(self):\r\n if self._query_dict is None:\r\n def _split(param):\r\n p = param.split('=')\r\n return (unquote_plus(p[0]),\r\n unquote_plus('='.join(p[1:])))\r\n self._query_dict = dict(_split(p) for p in self.query.split('&')\r\n if p)\r\n return self._query_dict",
"def to_query_str(params):\n if not params:\n return ''\n\n # PERF: This is faster than a list comprehension and join, mainly\n # because it allows us to inline the value transform.\n query_str = '?'\n for k, v in params.items():\n if v is True:\n v = 'true'\n elif v is False:\n v = 'false'\n else:\n v = str(v)\n\n query_str += k + '=' + v + '&'\n\n return query_str[:-1]",
"def get_query_params(query):\n params = {}\n if query:\n delim = \"&\"\n if \"&\" not in query and \";\" in query:\n delim = \";\"\n for k_v in query.split(delim):\n k, v = k_v, \"\"\n if \"=\" in k_v:\n k, v = k_v.split(\"=\")\n params[k] = v\n return params",
"def query_params(self):\n return self.request._request.GET",
"def query(self):\n return self.event.get('queryStringParameters', dict())",
"def _get_query_part(params: dict) -> str:\n params_cleaned = {k: v for k, v in params.items() if v is not None}\n return ('?' + urlencode(params_cleaned, quote_via=quote, safe=\"/,\")) if params_cleaned else \"\"",
"def _GetQueryParams(options):\n values = {'host': options.host,\n 'port': options.port,\n 'user': options.user,\n 'name': options.name\n }\n if options.email:\n values['email'] = options.email\n if options.revision:\n values['revision'] = options.revision\n if options.root:\n values['root'] = options.root\n if options.bot:\n values['bot'] = options.bot\n if options.patch:\n values['patch'] = options.patch\n return values",
"def query_params(self):\n path = self._get_id_path('query_params')\n \n response = self._GET(path)\n self._set_attrs_to_values({'query_params': response})\n return response",
"def build_query_string(params: Optional[Dict[str, Any]] = None) -> str:\n if params is None:\n return ''\n components = []\n for key, value in params.items():\n if isinstance(value, (list, tuple, set)):\n for v in value:\n c = '='.join([key, quote_plus(str(v))])\n components.append(c)\n else:\n c = '='.join([key, quote_plus(str(value))])\n components.append(c)\n if len(components) > 0:\n return '?{}'.format('&'.join(components))\n return ''",
"def parseQueryString():\n\tqs = cgi.FieldStorage()\n\treturn({'char': qs.getvalue('char'), 'zone': qs.getvalue('zone')})",
"def create_query_sting(param_dict):\n params = \"&\".join([f\"{key}={value}\" for key, value in param_dict.items()])\n return params.replace(\"#\", \"%23\")",
"def query_string(source=None, **kwargs):\n q = QueryDict('', True)\n if source:\n q.update(source)\n for k, v in kwargs.items():\n if v not in ['']:\n q.update({k: v})\n if q:\n return \"{}\".format('?' + q.urlencode())\n return q.urlencode()",
"def getParsedQueryString(self):\n return cgi.parse_qs(self.query_string)",
"def queryparams(*args, **kwargs):\n args = dict(args)\n args.update(kwargs)\n return urllib.urlencode(args)",
"def urlencode(query):\n\n if hasattr(query, 'items'):\n # mapping objects\n query = query.items()\n l = []\n for k, v in query:\n k = quote_plus(k)\n if isinstance(v, basestring):\n v = quote_plus(v)\n l.append(k + '=' + v)\n else:\n v = quote_plus(unicode(v))\n l.append(k + '=' + v)\n return '&'.join(l)",
"def encoded_query_str(request):\n return updated_query_str(request)",
"def GetQueryString(self):\n \n rdict = {-1: [], 1: []}\n for c_w_coeff in self.reactants:\n c = numpy.abs(c_w_coeff.coeff)\n s = numpy.sign(c_w_coeff.coeff)\n if s == 0:\n continue\n if c == 1:\n rdict[s].append(c_w_coeff.GetName())\n else:\n rdict[s].append('%g %s' % (c, c_w_coeff.GetName()))\n \n return '%s = %s' % (' + '.join(rdict[-1]), ' + '.join(rdict[1]))",
"def query_string_get(request, name, default_value=_NOT_SET, validator=None):\n return _XXX_get(request.rel_url.query, name, default_value, validator)",
"def get_query_string(p, new_params=None, remove=None):\n if new_params is None:\n new_params = {}\n if remove is None:\n remove = []\n\n for r in remove:\n for k in p.keys():\n if k.startswith(r):\n del p[k]\n for k, v in new_params.items():\n if k in p and v is None:\n del p[k]\n elif v is not None:\n p[k] = v\n return mark_safe(\n '?' + '&'.join(\n [u'%s=%s' % (k, v) for k, v in p.items()]\n ).replace(' ', '%20')\n )",
"def inspect_query(querystring: str) -> dict:\n return _parse_query(querystring)",
"def query_string(context, add=None, remove=None):\n # Written as an inclusion tag to simplify getting the context.\n add = string_to_dict(add)\n remove = string_to_list(remove)\n params = dict(context['request'].GET.items())\n response = get_query_string(params, add, remove)\n return {'response': response}",
"def _build_param_request(self):\n search_params = []\n for param in self.params:\n # print(param)\n if self.params[param] is not None:\n search_params.append(param + '={}'.format(self.params[param]))\n search_params = '&' + '&'.join(search_params)\n return search_params",
"def toQueryString(self):\n return self.__str__()",
"def get_site_info_query_params(self, options={}):\n query_string_values = {}\n\n query_string_values['app_id'] = options['app_id'] if 'app_id' in options else self.app_id\n\n query_string_values['cache_ok'] = options['cache_ok'] if 'cache_ok' in options else self.cache_ok\n\n query_string_values['full_render'] = options['full_render'] if 'full_render' in options else self.full_render\n\n query_string_values['version'] = options['version'] if 'version' in options else self.version\n\n return query_string_values",
"def query_options_to_url(self):\n return '&'.join(['$%s=%s' % (key, value) for (key, value) in self.query_options.items()])",
"def _get_query(request):\n query = request.GET.get(\"query\", \"\")\n date = request.GET.get(\"date\", \"\")\n timestamp = request.GET.get(\"timestamp\", None)\n sort = request.GET.get(\"sort\", \"top\").lower()\n filter = request.GET.get(\"filter\", \"following\").lower()\n\n if timestamp:\n t = parse(timestamp, ignoretz=True)\n timestamp = pytz.utc.localize(t)\n else:\n timestamp = timezone.now()\n\n start_time = ''\n end_time = ''\n\n if date:\n start_time, end_time = DateRangeParser().parse(date)\n\n get_dict = {\n \"query\": query,\n \"filter\": filter,\n \"sort\": sort,\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"username\": request.GET.get(\"username\", \"\"),\n \"orderBy\": request.GET.get(\"orderBy\", \"start_time\"),\n \"direction\": request.GET.get(\"direction\", \"\"),\n \"template\": request.GET.get(\"template\", \"\"),\n \"type\": request.GET.get(\"type\", \"\"),\n \"page\": request.GET.get(\"page\", 1),\n 'timestamp': timestamp,\n }\n\n return get_dict, query, date, sort, filter",
"def make_args(self, args):\n result_str = \"?\"\n for k, v in args.iteritems():\n result_str = result_str + k + \"=\" + v + \"&\"\n return result_str",
"def parse_query_string(self, params):\n results = {}\n\n for key, val in params.items():\n lookup_len = len(self.query_string_lookup) + 1\n\n if key[0:lookup_len] == '{}['.format(self.query_string_lookup) and key[-1] == ']':\n results[key[lookup_len:-1]] = val\n\n return results"
]
| [
"0.6976844",
"0.67801315",
"0.6587585",
"0.6399101",
"0.63933206",
"0.6382786",
"0.6369876",
"0.6252732",
"0.6232739",
"0.6229379",
"0.62207365",
"0.620232",
"0.6188986",
"0.6145918",
"0.6139969",
"0.6119223",
"0.61142987",
"0.6113928",
"0.6108501",
"0.60919887",
"0.60715574",
"0.6034732",
"0.6026599",
"0.59800464",
"0.5970455",
"0.59125566",
"0.58986974",
"0.58819747",
"0.585021",
"0.58439076"
]
| 0.7345456 | 0 |
Returns signed request using the HMAC SHA512 algorithm. | def _sign_request(secret, method, url, timestamp, content_hash=None):
message = f'{timestamp}{url}{method}{content_hash}'
return hmac.new(secret.encode('utf-8'), message.encode('utf-8'), hashlib.sha512).hexdigest() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sign_hmac_sha1(client, request):\n base_string = generate_signature_base_string(request)\n return hmac_sha1_signature(\n base_string, client.client_secret, client.token_secret)",
"def HmacSha512(key: Union[bytes, str],\n data: Union[bytes, str]) -> bytes:\n return hmac.new(AlgoUtils.Encode(key), AlgoUtils.Encode(data), hashlib.sha512).digest()",
"def sha512(key: bytes, buffer: Optional[bytes] = None) -> Hmac:\n return new(key, buffer, \"sha512\")",
"def _sha512(data):\r\n return hashlib.sha512(data).hexdigest()",
"def f(data=None):\n\n hsh = SHA512.new()\n hsh.update(b\"1\")\n hsh.update(data)\n return hsh",
"def get_sha512(src: str) -> str:\n if not isinstance(src, str) or src == \"\":\n raise Exception(\"Invalid src str\")\n i = io.BytesIO(bytearray(src, encoding='utf-8'))\n return get_sha512_from_stream(i)",
"def get_signed(self, **payload):\n param = ''\n for k in payload:\n param += '&' + k + '=' + str(payload[k])\n param = param.lstrip('&')\n signature = hmac.new(self.secret, param, digestmod=hashlib.sha256).hexdigest()\n\n return signature",
"def _gen_api_sig(self, endpoint: str) -> str:\n return hmac.new(self._api_secret.encode(),\n endpoint.encode(),\n hashlib.sha512).hexdigest()",
"def _sha512(message):\n return hashlib.sha512(message).hexdigest()",
"def sign(self, params: Dict[str, Any]) -> str:\n\n assert self.secret is not None, \"A client secret is required to sign requests.\"\n\n query = urlencode(params)\n signature = hmac.new(self.secret.encode(), query.encode(), hashlib.sha512)\n\n return signature.hexdigest()",
"def sign(self, request, consumer, token):\r\n key, raw = self.signing_base(request, consumer, token)\r\n hashed = hmac.new(key, raw, sha)\r\n # Calculate the digest base 64.\r\n return binascii.b2a_base64(hashed.digest())[:-1]",
"def make_hmac(self, msg):\r\n return hmac.new(self.hmacKey, msg, sha256).digest()",
"def sha512(s: str) -> str:\n return hashlib.sha512(s.encode()).hexdigest()",
"def xml_get_sha512(xml, secret):\n xml_string = xml_to_string(xml, encode_base64=False) + secret\n return hashlib.sha512(xml_string).hexdigest()",
"def sign(allocate_quota_request):\n if not isinstance(allocate_quota_request, sc_messages.AllocateQuotaRequest):\n raise ValueError(u'Invalid request')\n op = allocate_quota_request.allocateOperation\n if op is None or op.methodName is None or op.consumerId is None:\n logging.error(u'Bad %s: not initialized => not signed', allocate_quota_request)\n raise ValueError(u'allocate_quota request must be initialized with an operation')\n md5 = hashlib.md5()\n md5.update(op.methodName.encode('utf-8'))\n md5.update(b'\\x00')\n md5.update(op.consumerId.encode('utf-8'))\n if op.labels:\n signing.add_dict_to_hash(md5, encoding.MessageToPyValue(op.labels))\n for value_set in op.quotaMetrics:\n md5.update(b'\\x00')\n md5.update(value_set.metricName.encode('utf-8'))\n for mv in value_set.metricValues:\n metric_value.update_hash(md5, mv)\n\n md5.update(b'\\x00')\n return md5.digest()",
"def Sign(self, msg):\n return hmac.new(self.key_bytes, msg, sha1).digest()",
"def sign_plaintext(client, request):\n return plaintext_signature(client.client_secret, client.token_secret)",
"def hash_hmac( self, msg ):\n result = hmac.new( self.secret, msg, hashlib.sha512 )\n return result.hexdigest()",
"def sign_request(request, key, iv):\n hashed_request = SHA1.new(bytes(json.dumps(request), \"ASCII\"))\n\n cipher = AES.new(b16decode(key), AES.MODE_CBC, b16decode(iv))\n ciphertext = cipher.encrypt(pad(hashed_request.digest(), 16))\n\n return b64encode(ciphertext)",
"def generate_signed_message(method, headers_dict, body_dict, access_key, secret_key):\r\n message = signing_format_message(method, headers_dict, body_dict)\r\n\r\n # hmac needs a byte string for it's starting key, can't be unicode.\r\n hashed = hmac.new(secret_key.encode('utf-8'), message, sha256)\r\n signature = binascii.b2a_base64(hashed.digest()).rstrip('\\n')\r\n authorization_header = \"SSI {}:{}\".format(access_key, signature)\r\n\r\n message += '\\n'\r\n return message, signature, authorization_header",
"def _hmac_create(self, password, shared_key):\n hmac_value = base64.b64encode(hmac.new(\n smart_str(shared_key),\n smart_str(password),\n hashlib.sha512).digest())\n return hmac_value",
"def _generate_signature(self):\n self.logger.debug(f'body payload {self.body_payload}')\n return hmac.new(self.__decrypted_secret, self.body_payload, hashlib.sha1).hexdigest()",
"def _hmac_create(self, password, shared_key):\n hmac_value = base64.b64encode(hmac.new(\n smart_str(shared_key),\n smart_str(password),\n hashlib.sha512).digest())\n return hmac_value",
"def Sign(self):\n return self.hmac.digest()",
"def _sign(self, path, nonce, data):\n url = '/{0}/{1}'.format(self._version, path)\n urlencoded_data = urllib.urlencode(data)\n msg = url + hashlib.sha256(str(nonce) + urlencoded_data).digest()\n signature = hmac.new(base64.b64decode(self._secret), msg,\n hashlib.sha512)\n return base64.b64encode(signature.digest())",
"def bitmex_signature(apiSecret, verb, url, nonce, postdict=None):\n data = ''\n if postdict:\n # separators remove spaces from json\n # BitMEX expects signatures from JSON built without spaces\n data = json.dumps(postdict, separators=(',', ':'))\n parsedURL = urllib.parse.urlparse(url)\n path = parsedURL.path\n if parsedURL.query:\n path = path + '?' + parsedURL.query\n # print(\"Computing HMAC: %s\" % verb + path + str(nonce) + data)\n message = (verb + path + str(nonce) + data).encode('utf-8')\n print(\"Signing: %s\" % str(message))\n\n signature = hmac.new(apiSecret.encode('utf-8'), message, digestmod=hashlib.sha256).hexdigest()\n print(\"Signature: %s\" % signature)\n return signature",
"def get_sha512_from_stream(src: io.IOBase) -> str:\n if not isinstance(src, io.IOBase) or not src.readable():\n raise Exception(\"src is not stream or unreadable\")\n m = hashlib.sha512()\n return calc_hash(src, m)",
"def signed_request(self, method, api_url, **payload):\n\n r_url = self.base_url + api_url\n payload['timestamp'] = self.get_server_time()\n payload['signature'] = self.get_signed(**payload)\n\n headers = {\n 'X-MBX-APIKEY': self.key,\n }\n\n try:\n r = requests.request(method, r_url, headers=headers, params=payload)\n r.raise_for_status()\n except requests.exceptions.HTTPError as err:\n print(err)\n print(err.text)\n sys.exit(1)\n if r.status_code == 200:\n return r.json()",
"def Sign(self, msg):\n return hmac.new(self.key_bytes, msg, sha1).digest()",
"def g(data_1=None, data_2=None):\n\n hsh = SHA512.new()\n hsh.update(b\"2\")\n hsh.update(data_1)\n hsh.update(data_2)\n return hsh"
]
| [
"0.6270291",
"0.6157462",
"0.59586555",
"0.59178203",
"0.5838815",
"0.5823104",
"0.5810403",
"0.57807946",
"0.57741207",
"0.5754222",
"0.57061774",
"0.56298774",
"0.5580467",
"0.55793107",
"0.5564648",
"0.5523409",
"0.5509964",
"0.54881704",
"0.54717016",
"0.5429446",
"0.5428879",
"0.54150784",
"0.5399535",
"0.53772336",
"0.5376097",
"0.5367579",
"0.5347684",
"0.5314167",
"0.5313786",
"0.53131896"
]
| 0.6462081 | 0 |
Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but different orders will return true | def sort_json_policy_dict(policy_dict):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_ikepolicy_sort(self):\r\n resources = \"ikepolicies\"\r\n cmd = ikepolicy.ListIKEPolicy(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])",
"def test_list_ipsecpolicy_sort(self):\r\n resources = \"ipsecpolicies\"\r\n cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])",
"def test__ApplicationCommandPermissionOverwrite__sort():\n entity_0 = ApplicationCommandPermissionOverwrite(\n allow = True,\n target = (ApplicationCommandPermissionOverwriteTargetType.role, 202302210014),\n )\n entity_1 = ApplicationCommandPermissionOverwrite(\n allow = True,\n target = (ApplicationCommandPermissionOverwriteTargetType.user, 202302210015),\n )\n entity_2 = ApplicationCommandPermissionOverwrite(\n allow = True,\n target = (ApplicationCommandPermissionOverwriteTargetType.channel, 202302210016),\n )\n entity_3 = ApplicationCommandPermissionOverwrite(\n allow = True,\n target = (ApplicationCommandPermissionOverwriteTargetType.role, 202302210017),\n )\n entity_4 = ApplicationCommandPermissionOverwrite(\n allow = True,\n target = (ApplicationCommandPermissionOverwriteTargetType.user, 202302210018),\n )\n entity_5 = ApplicationCommandPermissionOverwrite(\n allow = True,\n target = (ApplicationCommandPermissionOverwriteTargetType.channel, 202302210019),\n )\n \n input = [\n entity_0,\n entity_1,\n entity_2,\n entity_3,\n entity_4,\n entity_5,\n ]\n \n expected_output = [\n entity_0,\n entity_3,\n entity_1,\n entity_4,\n entity_2,\n entity_5,\n ]\n \n vampytest.assert_eq(sorted(input), expected_output)",
"def sort_list_by_president_order(pronoun_proportion_list):\n return sorted(pronoun_proportion_list, key=lambda (k,d,v): (d,k,v))",
"def sort_json(dp):\n def get_sort_key(d, ks: list):\n res = []\n for k in ks:\n if isinstance(d[k], (list, tuple)):\n for v in d[k]:\n res.append(v)\n elif d[k] is None:\n res.append('')\n else:\n res.append(d[k])\n return res\n\n def proc(x, ks):\n return sorted(x, key=partial(get_sort_key, ks=ks))\n\n if 'resources' in dp.keys():\n dp['resources'] = proc(dp['resources'], ['path'])\n\n if 'ddfSchema' in dp.keys():\n schema = dp['ddfSchema']\n for t in ['concepts', 'entities', 'datapoints']:\n if t in schema.keys():\n for v in schema[t]:\n v['resources'] = sorted(v['resources'])\n schema[t] = proc(schema[t], ['value', 'primaryKey'])\n\n dp['ddfSchema'] = schema\n\n return dp",
"def check_element_order(context_action_dict):\n for question in context_action_dict:\n elements = context_action_dict[question]\n context_action_dict[question] = sorted(elements, key=lambda x: x['indexes'][0])\n return context_action_dict",
"def checkSort(input_jsonl: str, sort_keys: List[str], depths: List[int], combination: str = \"x*y\") -> None:\n\n assert os.path.exists(input_jsonl)\n\n print(\"Reading input ... \", end=\"\", flush=True)\n\n with open(input_jsonl, \"r\") as f:\n\n lines = f.readlines()\n docDicts = [json.loads(s) for s in lines]\n\n print(\"Checking sort ... \", flush=True)\n\n doc_weights = _getdocweights(docDicts, sort_keys, depths, combination)\n\n reverse_sorteddocs = all(doc_weights[i] >= doc_weights[i + 1] for i in range(len(doc_weights) - 1))\n\n if reverse_sorteddocs:\n print(\"Docs are sorted in decreasing order\")\n return None\n else:\n sorteddocs = all(doc_weights[i] <= doc_weights[i + 1] for i in range(len(doc_weights) - 1))\n if sorteddocs:\n print(\"Docs are sorted in increasing order\")\n return None\n else:\n print(\"Docs are NOT sorted\")\n return None",
"def test_sort_by_similarity(self):\n expected_ids = [id_ for id_, weight in sorted(self.id_weight_map.items(),\n key=lambda item: item[1])\n if weight >= Assessment.similarity_options[\"threshold\"]]\n\n query = [{\n \"object_name\": \"Assessment\",\n \"type\": \"ids\",\n \"order_by\": [{\"name\": \"__similarity__\"}],\n \"filters\": {\n \"expression\": {\n \"op\": {\"name\": \"similar\"},\n \"object_name\": \"Assessment\",\n \"ids\": [str(self.assessment.id)],\n },\n },\n }]\n response = self.client.post(\n \"/query\",\n data=json.dumps(query),\n headers={\"Content-Type\": \"application/json\"},\n )\n\n # note that in our test data every similar object has a different weight;\n # the order of objects with same weight is undefined after sorting\n self.assertListEqual(\n json.loads(response.data)[0][\"Assessment\"][\"ids\"],\n expected_ids,\n )",
"def test_sort(self):\n # Create a new REANATemplate with an empty workflow specification and\n # a list of five parameters\n template = REANATemplate(\n workflow_spec={},\n parameters=[\n pd.parameter_declaration('A', index=1),\n pd.parameter_declaration('B'),\n pd.parameter_declaration('C'),\n pd.parameter_declaration('D', index=2),\n pd.parameter_declaration('E', index=1)\n ],\n validate=True\n )\n # Get list of sorted parameter identifier from listing\n keys = [p.identifier for p in template.list_parameter()]\n self.assertEqual(keys, ['B', 'C', 'A', 'E', 'D'])",
"def compare_json(js1, js2):\n return js1.items() <= js2.items()",
"def _order_params(data):\n data = dict(filter(lambda el: el[1] is not None, data.items()))\n has_signature = False\n params = []\n for key, value in data.items():\n if key == 'signature':\n has_signature = True\n else:\n params.append((key, str(value)))\n # sort parameters by key\n params.sort(key=itemgetter(0))\n if has_signature:\n params.append(('signature', data['signature']))\n return params",
"def _compile_order(self, orderings):\n to_apply = []\n for o in orderings:\n descending = False\n if o.startswith(\"-\"):\n descending = True\n o = o[1:]\n to_apply.append((o, descending))\n\n def compare(res1, res2):\n # res1 and res2 are attribute dictionaries\n # Apply each comparison in order\n # Note that we consider None to be bigger than anything else (i.e.\n # in an ascending sort, None comes after everything else)\n for attr, descending in to_apply:\n if descending:\n x, y = res2.get(attr, []), res1.get(attr, [])\n else:\n x, y = res1.get(attr, []), res2.get(attr, [])\n if x < y:\n return -1\n elif x > y:\n return 1\n return 0\n\n return compare",
"def custom_sort(pseudo):\n # Unpack\n pred = pseudo[\"pred_list\"]\n lab = pseudo[\"lab_list\"]\n name = pseudo[\"name_list\"]\n \n # Sort\n sorted_list = list(zip(pred, lab, name))\n sorted_list.sort(key=lambda x: x[0], reverse=True)\n \n pred_sorted = [row[0] for row in sorted_list]\n lab_sorted = [row[1] for row in sorted_list]\n name_sorted = [row[2] for row in sorted_list]\n \n # Re-pack\n pseudo = {\n \"pred_list\": pred_sorted,\n \"lab_list\": lab_sorted,\n \"name_list\": name_sorted\n }\n \n return pseudo",
"def test_onlyP(self):\n data = {\n 'toSort' :[{'d': '01:06:2015', 'originalOrder': 1, 'p': 250, 'r': 1},\n {'d': '15:06:2015', 'originalOrder': 2, 'p': 200, 'r': 2},\n {'d': '02:06:2015', 'originalOrder': 3, 'p': 100, 'r': 2}],\n 'dPriority': 0,\n 'pPriority': 1,\n 'rPriority': 0\n }\n sortedData = jsonRequest(data)\n self.assertEquals(sortedData[0]['originalOrder'], 3)\n self.assertEquals(sortedData[1]['originalOrder'], 2)\n self.assertEquals(sortedData[2]['originalOrder'], 1)",
"def get_policies():\r\n policy = policies.values()\r\n return policy",
"def dic_sort(list_of_dicts, key):\n for passnum in range(len(list_of_dicts) - 1, 0, -1):\n is_sorted = True\n for idx in range(passnum):\n if list_of_dicts[idx][key] > list_of_dicts[idx + 1][key]:\n temp = list_of_dicts[idx]\n list_of_dicts[idx] = list_of_dicts[idx + 1]\n list_of_dicts[idx + 1] = temp\n is_sorted = False\n if is_sorted:\n return",
"def sort_probs(probs_list):\n return sorted(probs_list, key=lambda x: x[1])",
"def test_key_order_property_sorter(\n properties: list[str], expected: list[str]\n ) -> None:\n result = sorted(properties, key=functools.cmp_to_key(task_property_sorter))\n assert expected == result",
"async def test_txn_list_sorted_by_many_keys(self):\n paging = Mocks.make_paging_response(0, 3)\n transactions = Mocks.make_txns('2', '1', '0')\n self.stream.preset_response(head_id='2', paging=paging, transactions=transactions)\n\n response = await self.get_assert_200(\n '/transactions?sort=-header_signature,payload.length')\n page_controls = Mocks.make_paging_controls()\n sorting = (Mocks.make_sort_controls('header_signature', reverse=True) +\n Mocks.make_sort_controls('payload', compare_length=True))\n self.stream.assert_valid_request_sent(\n paging=page_controls,\n sorting=sorting)\n\n self.assert_has_valid_head(response, '2')\n self.assert_has_valid_link(response,\n '/transactions?head=2&sort=-header_signature,payload.length')\n self.assert_has_valid_paging(response, paging)\n self.assert_has_valid_data_list(response, 3)\n self.assert_txns_well_formed(response['data'], '2', '1', '0')",
"def test__Oauth2User__sort():\n user_id_0 = 202302040027\n user_id_1 = 202302040028\n user_id_2 = 202302040029\n \n user_0 = Oauth2User._create_empty(user_id_0)\n user_1 = Oauth2User._create_empty(user_id_1)\n user_2 = Oauth2User._create_empty(user_id_2)\n \n to_sort = [\n user_1,\n user_2,\n user_0,\n ]\n \n expected_output = [\n user_0,\n user_1,\n user_2,\n ]\n \n vampytest.assert_eq(sorted(to_sort), expected_output)",
"def _order_json(self, json_string):\n\n if isinstance(json_string, dict):\n return sorted((k, self._order_json(v)) for k, v in json_string.items())\n if isinstance(json_string, list):\n return sorted(self._order_json(x) for x in json_string)\n else:\n return json_string",
"def normaliseandsort(slu_hyps):\n result = []\n sorted_hyps = slu_hyps.items()\n sorted_hyps.sort(key=lambda x: -x[1])\n total_score = sum(slu_hyps.values())\n for hyp, score in sorted_hyps:\n if total_score == 0:\n result.append({\"score\": 0, \"slu-hyp\": json.loads(hyp)})\n else:\n result.append({\"score\": min(1.0, score/total_score), \"slu-hyp\": json.loads(hyp)})\n return result",
"async def test_txn_list_sorted_with_nested_keys(self):\n paging = Mocks.make_paging_response(0, 3)\n transactions = Mocks.make_txns('0', '1', '2')\n self.stream.preset_response(head_id='2', paging=paging, transactions=transactions)\n\n response = await self.get_assert_200(\n '/transactions?sort=header.signer_pubkey')\n page_controls = Mocks.make_paging_controls()\n sorting = Mocks.make_sort_controls('header', 'signer_pubkey')\n self.stream.assert_valid_request_sent(\n paging=page_controls,\n sorting=sorting)\n\n self.assert_has_valid_head(response, '2')\n self.assert_has_valid_link(response,\n '/transactions?head=2&sort=header.signer_pubkey')\n self.assert_has_valid_paging(response, paging)\n self.assert_has_valid_data_list(response, 3)\n self.assert_txns_well_formed(response['data'], '0', '1', '2')",
"def sorting(recommendation: List[Tuple[str, int]]) -> None:\n \n for tup in range(len(recommendation)):\n score = recommendation[tup][1]\n alpha = recommendation[tup][0]\n for j in range(tup + 1, len(recommendation)):\n if recommendation[j][1] > score or \\\n (recommendation[j][1] == score and recommendation[j][0] < alpha):\n recommendation[j], recommendation[tup] = recommendation[tup], \\\n recommendation[j]",
"def _sorted_members(injson: dict) -> list:\n members = [AocMember.member_from_json(injson[member]) for member in injson]\n members.sort(key=lambda x: x.local_score, reverse=True)\n\n return members",
"def compare_json_to_json(json1, json2): # , json_object, filters):\n return ordered(json1) == ordered(json2)",
"def policy_json_to_txt(policy_json):\n ret = []\n if policy_json and policy_json.get('version', None) == '1_0':\n for item in policy_json['rules']:\n line = policy_line_format.format(gate=item['gate'], trigger=item['trigger'], action=item['action'])\n if 'params' in item:\n line += ':'\n for param in item['params']:\n line += param_format.format(name=param['name'], value=param['value'])\n ret.append(line)\n\n return ret",
"def test_list_namespaced_policy(self):\n pass",
"def __qualitaetsListeProteins(self):\n rv = []\n pam30_sortierbar = {}\n for key in pam30.keys():\n pam30_sortierbar[str(pam30[key]) + \";\" + ''.join(key)] = pam30[key]\n if key[0] != key[1]:\n pam30_sortierbar[\n str(pam30[key]) + \";\" + ''.join((key[1], key[0]))\n ] = pam30[key]\n sorted_keys = list(pam30_sortierbar.keys())\n sorted_keys.sort(key=lambda k: int(k.split(\";\")[0]), reverse=True)\n # debugging kept for historical reasons\n # for key in iter(sorted_keys):\n # print(key.split(\";\")[1] + \" has score \" + str(pam30_sortierbar[key]))\n for key in iter(sorted_keys):\n rv.append(key.split(\";\")[1])\n return(rv)",
"def inline_policies_json(self):\n policies = {}\n for policy in self.inline_policies:\n policies[policy.policy_id] = policy.json\n return policies"
]
| [
"0.61905175",
"0.6146387",
"0.5959015",
"0.5510478",
"0.54657024",
"0.5459662",
"0.5401011",
"0.5387481",
"0.5371819",
"0.53341407",
"0.5314138",
"0.53106123",
"0.5293855",
"0.5270968",
"0.5247466",
"0.5171823",
"0.51387554",
"0.5132049",
"0.5127369",
"0.5119485",
"0.5113474",
"0.5106762",
"0.50890803",
"0.5084728",
"0.5068791",
"0.50678927",
"0.5051701",
"0.50473446",
"0.50468725",
"0.5043034"
]
| 0.79705065 | 0 |
Rainbow movie theater light style chaser animation. | def theaterChaseRainbow(strip, wait_ms=30):
    for j in range(256):
        for q in range(3):
            for i in range(0, strip.numPixels(), 3):
                strip.setPixelColor(i+q, wheel((i+j) % 255))
            strip.show()
            time.sleep(wait_ms/1000.0)
            for i in range(0, strip.numPixels(), 3):
                strip.setPixelColor(i+q, 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def theaterChaseRainbow(strip, wait_ms=50):\n\tfor j in range(256):\n\t\tfor q in range(3):\n\t\t\tfor i in range(0, strip.numPixels(), 3):\n\t\t\t\tstrip.setPixelColor(i+q, wheel((i+j) % 255))\n\t\t\tstrip.show()\n\t\t\ttime.sleep(wait_ms/1000.0)\n\t\t\tfor i in range(0, strip.numPixels(), 3):\n\t\t\t\tstrip.setPixelColor(i+q, 0)",
"def theaterChaseRainbow(strip, wait_ms=50):\r\n for j in range(256):\r\n for q in range(3):\r\n for i in range(0, strip.numPixels(), 3):\r\n strip.setPixelColor(i+q, wheel((i+j) % 255))\r\n strip.show()\r\n time.sleep(wait_ms/1000.0)\r\n for i in range(0, strip.numPixels(), 3):\r\n strip.setPixelColor(i+q, 0)",
"def led_theaterChaseRainbow(strip, wait_ms=25):\n for j in range(256):\n for q in range(3):\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, color_wheel((i+j) % 255))\n strip.show()\n gevent.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, 0)",
"def theaterChaseRainbow(self, wait_ms=50):\n for j in range(256):\n for q in range(3):\n for i in range(0, self.ring.numPixels(), 3):\n self.ring.setPixelColor(i + q, self.wheel((i + j) % 255))\n self.ring.show()\n time.sleep(wait_ms / 1000.0)\n for i in range(0, self.ring.numPixels(), 3):\n self.ring.setPixelColor(i + q, 0)",
"def rainbowCycle(strip, wait_ms=2, iterations=1):\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))\n strip.show()\n #time.sleep(wait_ms/1000.0)",
"def theaterChaseRainbow(strip, state, maxBrightness, wait_ms=50, bling=True):\n for j in range(256):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i + q, wheel((i + j) % 255))\n if (STATE != state):\n break\n if (STATE != state):\n break\n if bling:\n global CYCLECOUNTER\n CYCLECOUNTER = CYCLECOUNTER + 1\n if CYCLECOUNTER > BLINGDELAY:\n CYCLECOUNTER = 1\n if CYCLECOUNTER == BLINGDELAY:\n strip.setPixelColor(random.randint(0, strip.numPixels()), Color(255, 255, 255))\n brightness = int((LED_BRIGHTNESS * maxBrightness) / 255)\n strip.setBrightness(brightness)\n strip.show()\n time.sleep(wait_ms / 1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i + q, 0)\n if (STATE != state):\n off(strip)\n break",
"def rainbowCycle(strip, wait_ms=20, iterations=5):\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, wheel(((i * 256 / strip.numPixels()) + j) & 255))\n strip.show()\n time.sleep(wait_ms/1000.0)",
"def rainbowCycle(strip, wait_ms=20, iterations=5):\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))\n strip.show()\n time.sleep(wait_ms/1000.0)",
"def rainbowCycle(strip, wait_ms=20, iterations=5):\r\n for j in range(256*iterations):\r\n for i in range(strip.numPixels()):\r\n strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))\r\n strip.show()\r\n time.sleep(wait_ms/1000.0)",
"def rainbowCycle(strip, wait_ms=1, iterations=1):\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))\n strip.show()\n time.sleep(wait_ms/1000.0)",
"def rainbow(strip, wait_ms=20, iterations=1):\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, wheel((i+j) & 255))\n strip.show()\n time.sleep(wait_ms/1000.0)",
"def rainbowCycle(strip, wait_ms=20, iterations=5):\n\tfor j in range(256*iterations):\n\t\tfor i in range(strip.numPixels()):\n\t\t\tstrip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))\n\t\tstrip.show()\n\t\ttime.sleep(wait_ms/1000.0)",
"def rainbowCycle(wait_ms=20, iterations=1):\n while globals.outroEfeitoRainbow:\n for j in range(256*iterations):\n if not globals.outroEfeitoRainbow:\n off()\n break\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, wheel((int(i * 256 / strip.numPixels()) + j) & 255))\n strip.show()\n time.sleep(wait_ms/1000.0)",
"async def rainbow(self, params):\n wait_ms = params.get('wait_ms', 2)\n try:\n while True:\n await self.lights.rainbow_cycle(wait_ms=wait_ms)\n except KeyboardInterrupt:\n pass",
"def rainbow(strip, wait_ms=20, iterations=1):\n\tfor j in range(256*iterations):\n\t\tfor i in range(strip.numPixels()):\n\t\t\tstrip.setPixelColor(i, wheel((i+j) & 255))\n\t\tstrip.show()\n\t\ttime.sleep(wait_ms/1000.0)",
"def rainbow(strip, wait_ms=20, iterations=1):\r\n for j in range(256*iterations):\r\n for i in range(strip.numPixels()):\r\n strip.setPixelColor(i, wheel((i+j) & 255))\r\n strip.show()\r\n time.sleep(wait_ms/1000.0)",
"def rainbowCycle(self, wait_ms=20, iterations=5):\n\t\tstrip = self._strip\n\t\tfor j in range(256*iterations):\n\t\t\tfor i in range(strip.numPixels()):\n\t\t\t\tstrip.setPixelColor(i, self.wheel(((int(i * 256 / strip.numPixels()) + j) & 255)))\n\t\t\tstrip.show()\n\t\t\ttime.sleep(wait_ms/1000.0)",
"def rainbowCycle(strip, state, maxBrightness, wait_ms=20, bling=True):\n for i in range(255):\n for j in range(strip.numPixels()):\n color = ((j * 255) / 300) + i\n while color > 255:\n color -= 255\n strip.setPixelColor(j, wheel(color))\n if (STATE != state):\n break\n if bling:\n global CYCLECOUNTER\n CYCLECOUNTER = CYCLECOUNTER + 1\n if CYCLECOUNTER > BLINGDELAY:\n CYCLECOUNTER = 1\n if CYCLECOUNTER == BLINGDELAY:\n strip.setPixelColor(random.randint(0, strip.numPixels()), Color(255, 255, 255))\n brightness = int((LED_BRIGHTNESS * maxBrightness) / 255)\n strip.setBrightness(brightness)\n strip.show()\n if (STATE != state):\n off(strip)\n break\n time.sleep(wait_ms / 1000.0)",
"def led_rainbowCycle(strip, wait_ms=2, iterations=1):\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, color_wheel((int(i * 256 / strip.numPixels()) + j) & 255))\n strip.show()\n gevent.sleep(wait_ms/1000.0)",
"def led_rainbow(strip, wait_ms=2, iterations=1):\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, color_wheel((i+j) & 255))\n strip.show()\n gevent.sleep(wait_ms/1000.0)",
"def breathingRainbow(strip, state, maxBrightness, wait_ms=50):\n global BRIGHTNESS\n direction = 1\n step = 1\n minBreath = 8\n maxBreath = maxBrightness\n\t\n if BRIGHTNESS < minBreath:\n BRIGHTNESS = minBreath\n for j in range(256):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, wheel((i + j) & 255))\n if (STATE != state):\n break\n BRIGHTNESS = BRIGHTNESS + (direction * step)\n if BRIGHTNESS >= maxBreath or BRIGHTNESS < minBreath:\n direction = direction * -1\n strip.setBrightness(BRIGHTNESS)\n if (STATE != state):\n off(strip)\n break\n strip.show()\n time.sleep(wait_ms / 1000.0)",
"def rainbowCycle(self, wait_ms=20, iterations=5):\n\n for j in range(256 * iterations):\n for i in range(self.ring.numPixels()):\n self.ring.setPixelColor(i, self.wheel((int(i * 256 / self.ring.numPixels()) + j) & 255))\n self.ring.show()\n time.sleep(wait_ms / 1000.0)",
"def rainbow_all(self):\n while True:\n for g in range(0, 255, 1):\n self.BridgeObj.send_rgb_value(255, g, 0)\n time.sleep(self.speed)\n\n for r in range(255, 0, -1):\n self.BridgeObj.send_rgb_value(r, 255, 0)\n time.sleep(self.speed)\n\n for b in range(0, 255, 1):\n self.BridgeObj.send_rgb_value(0, 255, b)\n time.sleep(self.speed)\n\n for g in range(255, 0, -1):\n self.BridgeObj.send_rgb_value(0, g, 255)\n time.sleep(self.speed)\n\n for r in range(0, 255, 1):\n self.BridgeObj.send_rgb_value(r, 0, 255)\n time.sleep(self.speed)\n\n for b in range(255, 0, -1):\n self.BridgeObj.send_rgb_value(255, 0, b)\n time.sleep(self.speed)",
"def rainbow_example(rounds=1, delay_secs=0.01):\n set_color('black') # Start with all LED's \"off\"\n update()\n\n saturation = 100 # 0 (grayer) to 100 (full color)\n brightness = 100 # 0 (darker) to 100 (brighter)\n\n for i in range(0, rounds):\n for hue in tuple(range(0, 360)) + tuple(range(360, -1, -1)): # 0..360..0\n color_str = \"hsb({}, {}%, {}%)\".format(hue, saturation, brightness)\n push_color(color_str)\n update()\n sleep(delay_secs)",
"def led_theaterChase(strip, color, wait_ms=50, iterations=5):\n for j in range(iterations):\n for q in range(3):\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, color)\n strip.show()\n gevent.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, 0)",
"def theaterChase(strip, color, wait_ms=50, iterations=10):\n for j in range(iterations):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, color)\n strip.show()\n time.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, 0)",
"def theaterChase(strip, color, wait_ms=50, iterations=10):\n for j in range(iterations):\n for q in range(3):\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, color)\n strip.show()\n time.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels(), 3):\n strip.setPixelColor(i+q, 0)",
"def rainbow_cycle(self, wait_ms=1, iterations=1):\n for j in range(255*iterations):\n for i in range(self.np.n):\n pixel_index = (i * 256 // self.np.n) + j\n self.np[i] = wheel(self.np, pixel_index & 255)\n self.np.show()\n time.sleep(wait_ms/1000.0)\n\n return True",
"def theaterChase(strip, color, wait_ms=50, iterations=10):\r\n for j in range(iterations):\r\n for q in range(3):\r\n for i in range(0, strip.numPixels(), 3):\r\n strip.setPixelColor(i+q, color)\r\n strip.show()\r\n time.sleep(wait_ms/1000.0)\r\n for i in range(0, strip.numPixels(), 3):\r\n strip.setPixelColor(i+q, 0)",
"def do_animations(self):\n self.animate_bloop(700, 160, 50)"
]
| [
"0.76151276",
"0.7562134",
"0.73810625",
"0.7269356",
"0.70212775",
"0.6984534",
"0.6916089",
"0.68641406",
"0.68477833",
"0.68404734",
"0.68403065",
"0.6827585",
"0.68145055",
"0.6809809",
"0.67955655",
"0.6782512",
"0.6765697",
"0.66468316",
"0.6621634",
"0.65628374",
"0.6544751",
"0.6480361",
"0.6350565",
"0.63374406",
"0.61892927",
"0.6116769",
"0.60881007",
"0.6074554",
"0.6036822",
"0.6036775"
]
| 0.7606191 | 1 |
List png frame files for a video. | def list_pngs(video_title: str) -> list:
    path_to_pngs = os.path.join('frames', video_title)
    files = os.listdir(path_to_pngs)
    png_files = [f for f in files if os.path.splitext(f)[1] == '.png']
    png_files = sorted(png_files, key=lambda s: int(s[5:-4]))
    return png_files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_video_frames(self):\r\n\r\n vid_dir = self._video_dir\r\n vid_frames = [str(img_path) for img_path in\r\n Path(vid_dir).glob('*.jpg')]\r\n if len(vid_frames) == 0:\r\n vid_frames = [str(img_path) for img_path in\r\n Path(vid_dir).glob('*.png')]\r\n list_of_frames = sorted(vid_frames)\r\n\r\n self._vid_frames = [list_of_frames]\r\n\r\n return self._vid_frames",
"def videoFrames(filename, framerate=1):\n vid_file = os.path.join(os.path.dirname(os.getcwd()), \"Database\", \"Video\", filename)\n print(vid_file)\n assert os.path.isfile(vid_file), \"Given path is not a valid file\"\n tmpdir = os.path.join(os.getcwd(), \"tmp\")\n subprocess.run(\n [\n \"ffmpeg\",\n \"-i\",\n vid_file,\n \"-r\",\n f\"{framerate}\",\n os.path.join(tmpdir, \"img_%04d.jpg\"),\n ]\n )\n return [os.path.join(tmpdir, i) for i in os.listdir(tmpdir) if not i.endswith(\".wav\")]",
"def get_video_as_images():\n experiments = ['me1.mp4']\n try:\n if (os.path.isdir(\"dump\")):\n shutil.rmtree('dump')\n except OSError:\n print (\"Deletion of the directory failed\")\n exit()\n os.system('mkdir dump')\n for experiment in experiments:\n exp_no_ext = experiment.split('.')[0]\n subdir_cmd = \"dump/{0}\".format(exp_no_ext)\n os.mkdir(subdir_cmd)\n os.system('ffmpeg -i videos/%s dump/%s/%s%%03d.jpg' % (experiment, exp_no_ext, exp_no_ext))\n run_all(exp_no_ext)",
"def video_files():\n p = parse_cmdline(get_parser=get_parser_files)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.INFO)\n vis.show_video_abi_glm(\n files=p.files,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir)\n print(\"Files written to:\", p.outdir)",
"def test_video(video_path):\n def get_clips(frames_list, sequence_size=11):\n clips = []\n clip = []\n cnt = 0\n sz = len(frames_list)\n for i in range(0, sz-sequence_size):\n for idx in range(i, i+sequence_size):\n clip.append(frames_list[idx])\n clips.append(clip)\n clip = []\n return clips\n \n all_frames = []\n # loop over all the images in the folder (0.png,1.png,..,199.png)\n dir_path = listdir(video_path)\n dir_path = sorted(dir_path, key=lambda name: int(name[0:-4]))\n for i in dir_path:\n if str(join(video_path, i))[-3:] == \"png\":\n img_path = join(video_path, i)\n all_frames.append(img_path)\n clips = get_clips(frames_list=all_frames, sequence_size=11)\n# clips = get_clips_by_stride(stride=1, frames_list=all_frames, sequence_size=11)\n return clips",
"def extract_frames_from_directory(count, source, destination):\n all_videos = os.listdir(source)\n print(all_videos)\n\n for video in all_videos:\n video_file = source + video # Retrieve a video from the OverHeadPress\n cap = cv2.VideoCapture(video_file) # capturing the video from the given path\n dim = (224, 224)\n\n while cap.isOpened():\n frame_id = cap.get(1) # current frame number\n ret, frame = cap.read()\n if not ret:\n break\n\n # We are capturing at 28 frames per second. \n # If we want to capture every 0.2 seconds we will take every 5 frames\n if frame_id % 8 == 0:\n filename =\"frame%d.jpg\" % count\n count+=1\n resized = cv2.resize(frame, dim)\n cv2.imwrite(destination + filename, resized)\n\n cap.release()\n print (\"Finished processing: \" + video + \". Ended at video: \" + str(count))",
"def extract_frames():\n vc = cv2.VideoCapture(INPUT_FILE)\n c=1\n\n if vc.isOpened():\n rval , frame = vc.read()\n else:\n rval, frame = False, False\n\n while rval:\n # cv2.imwrite((MODIFIED_FRAMES_DIR + 'img' + str(c) + '.jpg'),frame)\n cv2.imwrite((MODIFIED_FRAMES_DIR + str(c) + '.jpg'),frame)\n c = c + 1\n cv2.waitKey(1)\n rval, frame = vc.read()\n vc.release()\n print(\"All frames extracted successfully...\")",
"def split_video(filename):\n # Read video\n cap = cv2.VideoCapture(filename)\n # Make sure video is being read\n if cap.isOpened():\n # If video is being read successfully\n success, frame = cap.read()\n images = []\n while success:\n # Append frames to list\n images.append(frame)\n # Read new frame\n success, frame = cap.read()\n return images",
"def video_to_frames(video, output_base_dir):\n # extract frames from a video and save to directory with the name of the video and file name 'video_name_x.jpg' where\n # x is the frame index\n vidcap = cv2.VideoCapture(video)\n count = 0\n filename = os.path.split(video)[1]\n prefix = os.path.splitext(filename)[0]\n frame_sub_dir = os.path.join(output_base_dir, prefix)\n os.mkdir(frame_sub_dir)\n logger.info(\"created {} folder for frames\".format(frame_sub_dir))\n start = time.time()\n while vidcap.isOpened():\n success, image = vidcap.read()\n if success:\n # Add padding to the frame index. e.g. 1 -> 000001, 10 -> 000010 etc.\n image_name = prefix + '_{0:06d}.jpg'.format(count)\n cv2.imwrite(os.path.join(frame_sub_dir, image_name), image)\n count += 1\n if count % REPORT_STATUS == 0:\n logger.info(\"extracted {} frames. \".format(count))\n logger.info(\"took {:10.4f} seconds to extract {} frames\".format(time.time() - start, REPORT_STATUS))\n start = time.time()\n else:\n break\n cv2.destroyAllWindows()\n vidcap.release()\n logger.info(\"written {} frames for {}\".format(count, filename))\n return frame_sub_dir",
"def generate_video(image_folder, video_name, video_frames_path):\n \n try:\n os.stat(video_frames_path)\n except:\n os.makedirs(video_frames_path)\n \n images = [img for img in os.listdir(image_folder)\n if img.endswith(\".jpg\") or\n img.endswith(\".jpeg\") or\n img.endswith(\"png\") or\n img.endswith(\"tif\")]\n\n images.sort()\n\n print(images)\n\n frame = cv2.imread(os.path.join(image_folder, images[0]))\n\n height, width, layers = frame.shape\n\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n video = cv2.VideoWriter(video_frames_path + '/' + video_name, fourcc, 1, (width, height))\n\n # Appending the images to the video one by one\n video_frame = np.zeros((height, width, 3), np.uint8)\n for image in images:\n img = cv2.imread(os.path.join(image_folder, image), cv2.IMREAD_UNCHANGED)\n video_frame = overlay_transparent(video_frame, img)\n cv2.imwrite(os.path.join(video_frames_path, image), video_frame)\n video.write(video_frame)\n\n # Deallocating memories taken for window creation\n cv2.destroyAllWindows()\n video.release() # releasing the video generated",
"def videoFromImages(img_dir, out_dir, fps=30):\n onlyfiles = glob.glob(img_dir + \"/*.png\")\n onlyfiles = natsorted(onlyfiles)\n\n fourcc = cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\")\n\n img = cv2.imread(onlyfiles[0])\n vid_out = cv2.VideoWriter(out_dir, fourcc, fps, (img.shape[1], img.shape[0]))\n\n for i in range(len(onlyfiles)):\n img = cv2.imread(onlyfiles[i])\n vid_out.write(img)\n vid_out.release()",
"def getVideoFrames(videoFilePath, startFrameNumber=-1, endFrameNumber=-1):\n frames=[]\n vidcap = cv2.VideoCapture(videoFilePath)\n fps=vidcap.get(cv2.CAP_PROP_FPS)\n totalFrame=vidcap.get(cv2.CAP_PROP_FRAME_COUNT)\n if startFrameNumber==-1:\n startFrameNumber = 0\n if endFrameNumber == -1:\n endFrameNumber = totalFrame - 1\n success,image = vidcap.read()\n count = 0\n success = True\n while success:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = color.rgb2yiq(image).astype(np.float32)\n \n if count<startFrameNumber:\n success,image = vidcap.read()\n count+=1\n continue\n elif count>=endFrameNumber:\n break\n else: \n frames.append(image)\n success,image = vidcap.read()\n count += 1\n frames=np.array(frames)\n \n return fps, frames",
"def create_video():\n print(\"Generating output video\")\n frame_array = []\n files = [f for f in os.listdir(MODIFIED_FRAMES_DIR) if isfile(join(MODIFIED_FRAMES_DIR, f))]\n #for sorting the file names properly\n # files.sort(key = lambda x: x[3:-4])\n files = sorted(files,key=lambda x: int(os.path.splitext(x)[0]))\n for i in range(len(files)):\n filename= MODIFIED_FRAMES_DIR + files[i]\n # print(filename)\n #reading each files\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width,height)\n \n #inserting the frames into an image array\n frame_array.append(img)\n \n out = cv2.VideoWriter(OUTPUT_FILE,cv2.VideoWriter_fourcc(*'DIVX'), FRAME_RATE, size)\n for i in range(len(frame_array)):\n # writing to a image array\n out.write(frame_array[i])\n out.release()\n print(\"Output video generated successfully...\")\n\n # img_array = []\n # for filename in glob.glob(MODIFIED_FRAMES_DIR+'/*.jpg'):\n # img = cv2.imread(filename)\n # height, width, layers = img.shape\n # size = (width,height)\n # img_array.append(img)\n\n # height, width, layers = img_array[0].shape\n # size = (width,height)\n # out = cv2.VideoWriter('output.mov',cv2.VideoWriter_fourcc(*'DIVX'), 15, size) \n # for i in range(len(img_array)):\n # out.write(img_array[i])\n # out.release()",
"def findpaths(path):\n print('[INFO] Searching for .png images in ', path)\n frame_paths = []\n frame_to_path_dict = {}\n path_to_frame_dict = {}\n for root, dirs, files in os.walk(path, topdown=False):\n for name in files:\n if name.find('.png') != -1:\n frame_path = os.path.join(root, name)\n # NOTE: may want to change to deal with generic file names\n match = re.search(r'(?P<video_id>\\d+)_(?P<frame_id>\\d+).png', name)\n # video_id = int(match.group('video_id'))\n frame_id = int(match.group('frame_id'))\n frame_paths.append(frame_path)\n frame_to_path_dict[frame_id] = frame_path\n path_to_frame_dict[frame_path] = frame_id\n frame_paths_sorted = sorted(frame_paths, key=lambda x: int(path_to_frame_dict[x]))\n print('[INFO] %i frames located ' % (len(frame_paths)))\n return frame_paths_sorted, frame_to_path_dict, path_to_frame_dict",
"def make_images_from_video(video_name, video_dir, out_dir, limit=None):\n video_path = f\"{video_dir}/{video_name}\"\n video_name = os.path.basename(video_path)\n vidcap = cv2.VideoCapture(video_path)\n print(video_path)\n frame = 0\n while True:\n it_worked, img = vidcap.read()\n if not it_worked:\n break\n frame += 1\n # print(frame)\n image_path = f\"{out_dir}/{video_name}\".replace(\".mp4\", f\"_{frame}.png\")\n success = cv2.imwrite(image_path, img)\n if not success:\n raise ValueError(\"couldn't write image successfully\")\n if limit and frame > limit:\n print(f\"Made maximum: {limit} frames\")\n break",
"def download_frames(\n self,\n frame_ids: Sequence[int],\n *,\n outdir: StrPath = \".\",\n quality: str = \"original\",\n filename_pattern: str = \"frame_{frame_id:06d}{frame_ext}\",\n ) -> Optional[List[Image.Image]]:\n # TODO: add arg descriptions in schema\n\n outdir = Path(outdir)\n outdir.mkdir(exist_ok=True)\n\n for frame_id in frame_ids:\n frame_bytes = self.get_frame(frame_id, quality=quality)\n\n im = Image.open(frame_bytes)\n mime_type = im.get_format_mimetype() or \"image/jpg\"\n im_ext = mimetypes.guess_extension(mime_type)\n\n # FIXME It is better to use meta information from the server\n # to determine the extension\n # replace '.jpe' or '.jpeg' with a more used '.jpg'\n if im_ext in (\".jpe\", \".jpeg\", None):\n im_ext = \".jpg\"\n\n outfile = filename_pattern.format(frame_id=frame_id, frame_ext=im_ext)\n im.save(outdir / outfile)",
"def _parse_list(self):\n frame_path = [x.strip().split(' ') for x in open(self._image_set)] \n self.video_list = [VideoRecord(item) for item in frame_path]\n print('Sequence number/ video number:%d' % (len(self.video_list)))",
"def _parse_list(self):\n frame_path = [x.strip().split(' ') for x in open(self._image_set)] \n self.video_list = [VideoRecord(item) for item in frame_path]\n print('Sequence number/ video number:%d' % (len(self.video_list)))",
"def get_frames_and_titles(video_label):\n\n frames = []\n titles = []\n cap = cv2.VideoCapture(video_label)\n frameRate = cap.get(5) # frame rate\n numero = 29\n index = 0\n while (cap.isOpened()):\n frameId = cap.get(1) # current frame number\n ret, frame = cap.read()\n # Apply template Matching\n if (ret != True):\n break\n if (frameId % math.floor(frameRate) == 0):\n frames.append(frame)\n index = index\n titles.append(\"Frame {}\".format(int(index / 30)))\n\n if numero > 30:\n break\n index = index + 1\n cap.release()\n\n return frames, titles",
"def extract_frames(video, out_path, xform=identity):\n if os.path.exists(out_path):\n msg = '[extract_frames] Frames already exist, skipping extraction: {}'\n print(msg.format(out_path))\n return\n\n os.makedirs(out_path)\n msg = \"[extract_frames] Starting on length {}s, at {} fps to {}\"\n print(msg.format(video.duration, video.fps, out_path))\n start_time = time.time()\n\n frame_num = 0\n iterator = video.iter_frames(fps=video.fps)\n for frame in tqdm.tqdm(iterator, total=video.fps * video.duration):\n frame_fd = os.path.join(out_path, 'frame_{:03d}.jpg'.format(frame_num))\n # Apply custom transformation\n frame = xform(frame)\n # Swap RGB to BGR to work with OpenCV\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n cv2.imwrite(frame_fd, frame)\n frame_num += 1\n\n msg = '[extract_frames] Extracted {} frames in {:.0f}s to {}'\n print(msg.format(frame_num-1, time.time() - start_time, out_path))",
"def get_frames(self):\n if not self.video:\n return []\n # We cannot validate shape on construction as that happens inside graph\n # mode as we construct from a tf.data.Dataset, so we validate here.\n self.video[0].validate_shape_and_dtype()\n return self.video",
"def imagesFromVideo(vid_path, out_path):\n if not os.path.exists(vid_path):\n raise ValueError(\"Input video file %s does not exist.\" % vid_path)\n\n cap = skvideo.io.FFmpegReader(vid_path)\n frame_nr, _, _, _ = cap.getShape()\n\n with tqdm.tqdm(total=frame_nr) as pbar:\n for i, frame in enumerate(cap.nextFrame()):\n Image.fromarray(frame).save(out_path + \"/\" + str(i) + \".png\")\n pbar.update()\n\n cap.close()",
"def getFrames():\n\t\tfor cam in Camera.CAMERAS: cam.getFrame()",
"def readFrames(video):\n frames = []\n while True:\n _, frame = video.read()\n\n if frame is None:\n break\n else:\n frames.append(frame)\n video.release()\n return frames",
"def extract_frames(movie_path, max_frames=None, rotate_angle=0, use_grayscale=False):\n if not os.path.exists(movie_path):\n print(\"Input video file is not found\")\n return 1\n\n # load video\n cap = cv2.VideoCapture()\n cap.open(movie_path)\n\n if not cap.isOpened():\n print(\"Failed to open input video\")\n return 1\n\n # get frame count\n frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n\n # get frame delta jump\n skip_delta = 0\n if max_frames and frame_count > max_frames:\n skip_delta = frame_count / max_frames\n\n frame_id = 0\n frames = []\n # while we didn't finish getting frames\n while frame_id < frame_count:\n ret, frame = cap.read()\n if not ret:\n print(\"Failed to get the frame {f}\".format(f=frame_id))\n else:\n # Rotate if needed:\n if rotate_angle > 0:\n if rotate_angle == 90:\n frame = cv2.transpose(frame)\n frame = cv2.flip(frame, 1)\n elif rotate_angle == 180:\n frame = cv2.flip(frame, -1)\n elif rotate_angle == 270:\n frame = cv2.transpose(frame)\n frame = cv2.flip(frame, 0)\n\n if use_grayscale:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)\n else:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\n frames.append(frame)\n print(f\"added frame {frame_id}/{frame_count}\")\n frame_id += int(1 + skip_delta)\n cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id)\n\n return frames",
"def get_frame_list(self):\r\n\r\n logger.debug('Executing frame extraction')\r\n\r\n frames_loaded = False\r\n\r\n # Try to load YAML file with frame list\r\n if os.path.exists(self.frames_file_path):\r\n\r\n print 'Loading YAML file with frame list'\r\n logger.debug('Loading YAML file with frame list')\r\n\r\n f_list = utils.load_YAML_file(self.frames_file_path)\r\n\r\n if f_list:\r\n self.frame_list = f_list\r\n\r\n print 'YAML file with frame_list loaded'\r\n logger.debug('YAML file with frame_list loaded')\r\n\r\n frames_loaded = True\r\n\r\n if not frames_loaded:\r\n\r\n print '\\n\\n### Frame extraction ###\\n'\r\n logger.debug('\\n\\n### Frame extraction ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n if not (os.path.exists(self.frames_path)):\r\n os.makedirs(self.frames_path)\r\n\r\n # Counter for all frames\r\n frame_counter = 0\r\n\r\n # Value of frame_counter for last analyzed frame\r\n last_anal_frame = 0\r\n\r\n # Open video file\r\n capture = cv2.VideoCapture(self.resource_path)\r\n\r\n self.frame_list = []\r\n\r\n # Save parameters for this video\r\n param_dict = {}\r\n\r\n if capture is None or not capture.isOpened():\r\n\r\n error = 'Error in opening video file'\r\n\r\n print error\r\n logger.debug(error)\r\n\r\n return\r\n\r\n else:\r\n\r\n video_fps = capture.get(cv2.cv.CV_CAP_PROP_FPS)\r\n\r\n param_dict[c.VIDEO_FPS_KEY] = video_fps\r\n\r\n # Original number of frames\r\n tot_frames = capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)\r\n\r\n param_dict[c.VIDEO_TOT_FRAMES_KEY] = tot_frames\r\n\r\n self.fps = video_fps\r\n\r\n self.video_frames = float(tot_frames)\r\n\r\n # Saved frames\r\n saved_frames = 0\r\n\r\n while True:\r\n\r\n # Read frame\r\n ret, frame = capture.read()\r\n\r\n # If no frame is read, abort\r\n if not ret:\r\n break\r\n\r\n used_fps = c.USED_FPS\r\n use_or_fps = c.USE_ORIGINAL_FPS\r\n use_or_res = c.USE_ORIGINAL_RES\r\n used_res_scale_factor = c.USED_RES_SCALE_FACTOR\r\n\r\n if self.params is not None:\r\n\r\n if c.USED_FPS_KEY in self.params:\r\n used_fps = self.params[c.USED_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_FPS_KEY in self.params:\r\n use_or_fps = self.params[c.USE_ORIGINAL_FPS_KEY]\r\n\r\n if c.USE_ORIGINAL_RES_KEY in self.params:\r\n use_or_res = self.params[c.USE_ORIGINAL_RES_KEY]\r\n\r\n if c.USED_RES_SCALE_FACTOR_KEY in self.params:\r\n used_res_scale_factor = self.params[\r\n c.USED_RES_SCALE_FACTOR_KEY]\r\n\r\n # Next frame to be analyzed\r\n next_frame = last_anal_frame + (video_fps / used_fps) - 1\r\n\r\n if use_or_fps or (frame_counter > next_frame):\r\n\r\n # Frame position in video in milliseconds\r\n elapsed_ms = capture.get(cv2.cv.CV_CAP_PROP_POS_MSEC)\r\n\r\n # print 'elapsed video s =', elapsed_video_s\r\n\r\n fr_name = '%07d.png' % frame_counter\r\n\r\n frame_path = os.path.join(self.frames_path, fr_name)\r\n\r\n # Resize frame\r\n if not use_or_res:\r\n fx = used_res_scale_factor\r\n\r\n fy = used_res_scale_factor\r\n\r\n interp = cv2.INTER_AREA\r\n\r\n frame = cv2.resize(src=frame, dsize=(0, 0),\r\n fx=fx, fy=fy,\r\n interpolation=interp)\r\n\r\n cv2.imwrite(frame_path, frame,\r\n [cv.CV_IMWRITE_PNG_COMPRESSION, 0])\r\n\r\n frame_dict = {c.SAVED_FRAME_NAME_KEY: fr_name,\r\n c.ELAPSED_VIDEO_TIME_KEY: int(elapsed_ms)}\r\n\r\n self.frame_list.append(frame_dict)\r\n\r\n last_anal_frame = frame_counter\r\n\r\n saved_frames += 1\r\n\r\n frame_counter += 1\r\n\r\n self.progress = 100 * (frame_counter / self.video_frames)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n del 
capture\r\n\r\n self.saved_frames = float(saved_frames)\r\n\r\n param_dict[c.VIDEO_SAVED_FRAMES_KEY] = self.saved_frames\r\n\r\n # Save frame list in YAML file\r\n utils.save_YAML_file(self.frames_file_path, self.frame_list)\r\n\r\n # Save video parameters in YAML file\r\n\r\n utils.save_YAML_file(self.params_file_path, param_dict)\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for frame extraction:', str(time_in_seconds), 's\\n'\r\n logger.debug(\r\n 'Time for frame extraction:', str(time_in_seconds), 's\\n')\r\n\r\n self.anal_times[c.FRAME_EXTRACTION_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)",
"def save_video_frames(video_in, image_dir, image_prefix, frame_offset=1):\n print('Analyzing file: ' + video_in)\n print('Storing in directory: ' + image_dir)\n print('Frame offset: ' + str(frame_offset))\n vidcap = cv2.VideoCapture(video_in)\n success, image = vidcap.read()\n filename_count = 0\n frame_count = 0\n while success:\n success,image = vidcap.read()\n frame_count += 1\n if (frame_count % frame_offset == 0):\n filename = os.path.join(image_dir, '%s_frame%d.jpg' % (image_prefix, filename_count))\n cv2.imwrite(filename, image) # save frame as JPEG file\n filename_count += 1\n print(str(filename_count) + ' frames saved')",
"def readPlayerImageFiles(self):\n currentPath = os.path.dirname(os.path.abspath(__file__))\n listOfFileNames=[]\n for i in os.listdir(currentPath):\n if re.match(\"player\\_\\d+\",i): #i.endswith(\".gif\")\n listOfFileNames.append(currentPath+'/'+i)\n return listOfFileNames",
"def stream_frames(video_capture):",
"def video_by_frame(video):\n cap = cv2.VideoCapture(video)\n\n while True:\n ret, im = cap.read()\n yield im"
]
| [
"0.74903274",
"0.71694463",
"0.69626606",
"0.68849015",
"0.68159395",
"0.6740354",
"0.6646834",
"0.6427418",
"0.63983107",
"0.63968563",
"0.6384971",
"0.63526756",
"0.6332228",
"0.6303974",
"0.62948906",
"0.6294262",
"0.6242945",
"0.6242945",
"0.6237558",
"0.6229151",
"0.6218668",
"0.6218598",
"0.6202301",
"0.61824393",
"0.6163647",
"0.6148077",
"0.61289144",
"0.61043066",
"0.608555",
"0.6082475"
]
| 0.7672686 | 0 |
Test that update_blocks creates 10 blocks | def test_update_blocks(path_last_block, path_get_blocks_iterator, mocked_logger):
    # Method to test
    created_id = BlockchainEthService.update_blocks()
    assert Block.objects.count() == 10
    assert len(created_id) == 10
    mocked_logger.info.assert_called_once_with("10 new blocks created.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_block_batches_order(self):\n pass",
"def test_update_library_blocks(self):\n def commit_library_and_verify(library_key):\n \"\"\"\n Commit library changes, and verify that there are no uncommited changes anymore\n \"\"\"\n last_published = ContentLibraryIndexer.get_items([library_key])[0]['last_published']\n self._commit_library_changes(str(library_key))\n response = ContentLibraryIndexer.get_items([library_key])[0]\n assert response['has_unpublished_changes'] is False\n assert response['has_unpublished_deletes'] is False\n assert response['last_published'] >= last_published\n return response\n\n def verify_uncommitted_libraries(library_key, has_unpublished_changes, has_unpublished_deletes):\n \"\"\"\n Verify uncommitted changes and deletes in the index\n \"\"\"\n response = ContentLibraryIndexer.get_items([library_key])[0]\n assert response['has_unpublished_changes'] == has_unpublished_changes\n assert response['has_unpublished_deletes'] == has_unpublished_deletes\n return response\n\n lib = self._create_library(slug=\"test-lib-update-block\", title=\"Title\", description=\"Description\")\n library_key = LibraryLocatorV2.from_string(lib['id'])\n\n # Verify uncommitted new blocks\n block = self._add_block_to_library(lib['id'], \"problem\", \"problem1\")\n response = verify_uncommitted_libraries(library_key, True, False)\n assert response['last_published'] is None\n assert response['num_blocks'] == 1\n # Verify committed new blocks\n self._commit_library_changes(lib['id'])\n response = verify_uncommitted_libraries(library_key, False, False)\n assert response['num_blocks'] == 1\n # Verify uncommitted deleted blocks\n self._delete_library_block(block['id'])\n response = verify_uncommitted_libraries(library_key, True, True)\n assert response['num_blocks'] == 0\n # Verify committed deleted blocks\n self._commit_library_changes(lib['id'])\n response = verify_uncommitted_libraries(library_key, False, False)\n assert response['num_blocks'] == 0\n\n block = self._add_block_to_library(lib['id'], \"problem\", \"problem1\")\n self._commit_library_changes(lib['id'])\n\n # Verify changes to blocks\n # Verify OLX updates on blocks\n self._set_library_block_olx(block[\"id\"], \"<problem/>\")\n verify_uncommitted_libraries(library_key, True, False)\n commit_library_and_verify(library_key)\n # Verify asset updates on blocks\n self._set_library_block_asset(block[\"id\"], \"whatever.png\", b\"data\")\n verify_uncommitted_libraries(library_key, True, False)\n commit_library_and_verify(library_key)\n self._delete_library_block_asset(block[\"id\"], \"whatever.png\", expect_response=204)\n verify_uncommitted_libraries(library_key, True, False)\n commit_library_and_verify(library_key)\n\n lib2 = self._create_library(slug=\"test-lib-update-block-2\", title=\"Title 2\", description=\"Description\")\n self._add_block_to_library(lib2[\"id\"], \"problem\", \"problem1\")\n self._commit_library_changes(lib2[\"id\"])\n\n #Verify new links on libraries\n self._link_to_library(lib[\"id\"], \"library_2\", lib2[\"id\"])\n verify_uncommitted_libraries(library_key, True, False)\n #Verify reverting uncommitted changes\n self._revert_library_changes(lib[\"id\"])\n verify_uncommitted_libraries(library_key, False, False)",
"def test_update_blocks_no_new_blocks(\n path_last_block, path_get_blocks_iterator, mocked_logger\n):\n # last block number is equal to the last block created\n last_block = BlockFactory(number=1234)\n assert last_block.number == 1234\n\n # Method to test\n created_id = BlockchainEthService.update_blocks()\n\n assert Block.objects.count() == 1\n assert len(created_id) == 0\n mocked_logger.info.assert_called_once_with(\"No news blocks created.\")",
"def test_block_extra_batch(self):\n pass",
"def test_add_block(self):\n txout = TxOut(tx = \"transaction_hash\",\n nout = 1,\n addr = \"bitcoin_address\",\n value = 133)\n\n block = Block(block_hash=\"block_hash\",\n height=100,\n vout=[txout,],)\n \n balance_processor = BalanceProcessor(storage=self.balance_storage)\n balance_processor.add_block(block)\n\n self.assertEqual(balance_processor.height, 100)\n self.assertEqual(balance_processor.get_balance(\"bitcoin_address\"), 133)\n \n # Commit only commits the data already flushed into storage\n balance_processor.commit()\n\n self.assertEqual(balance_processor.get_balance(\"bitcoin_address\"), 133)\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 0)\n\n # Add empty blocks until the first block is flushed into storage\n for x in range(200):\n block = Block(block_hash=\"block_hash_{}\".format(x),\n height=x+100)\n balance_processor.add_block(block)\n\n self.assertEqual(balance_processor.get_balance(\"bitcoin_address\"), 133)\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 133)\n balance_processor.commit()\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 133)\n storage_height = self.balance_storage.height\n\n # Create a new balance_processor and check balance hasn't changed\n new_processor = BalanceProcessor(storage=self.balance_storage)\n self.assertEqual(self.balance_storage.get(\"bitcoin_address\"), 133)\n self.assertEqual(new_processor.get_balance(\"bitcoin_address\"), 133)\n self.assertEqual(new_processor.height, storage_height)",
"def test_block_split(self):\n block1 = self.geographies.find({ 'geoid': '150010210051016' }) \n self.assertEqual(block1.count(), 1)\n block1 = block1[0]\n\n split_block_pop = 448 \n block1_land_pct = float(184458) / 587158 # AREALAND_INT / AREALAND_2000\n block1_pop_2000 = int(block1_land_pct * split_block_pop)\n block1_pop_2010 = 22 \n block1_pop_delta = block1_pop_2010 - block1_pop_2000\n block1_pop_pct_change = float(block1_pop_delta) / block1_pop_2000\n\n self.assertAlmostEqual(block1['xwalk']['150010210011337']['POPPCT00'], block1_land_pct, places=4)\n self.assertAlmostEqual(block1['xwalk']['150010210011337']['HUPCT00'], block1_land_pct, places=4)\n self.assertAlmostEqual(block1['data']['2000']['P1']['P001001'], block1_pop_2000)\n self.assertAlmostEqual(float(block1['data']['2010']['P1']['P001001']), block1_pop_2010)\n self.assertAlmostEqual(float(block1['data']['delta']['P1']['P001001']), block1_pop_delta)\n self.assertAlmostEqual(float(block1['data']['pct_change']['P1']['P001001']), block1_pop_pct_change)",
"def test_update_manifold(self):\r\n locator = BlockUsageLocator(\r\n CourseLocator('testx', 'GreekHero', branch='draft'),\r\n 'problem', block_id='problem1'\r\n )\r\n original = modulestore().get_item(locator)\r\n # first add 2 children to the course for the update to manipulate\r\n locator = BlockUsageLocator(\r\n CourseLocator('guestx', 'contender', branch='draft'),\r\n 'course', block_id=\"head345679\"\r\n )\r\n category = 'problem'\r\n new_payload = \"<problem>empty</problem>\"\r\n modulestore().create_item(\r\n locator, category, 'test_update_manifold',\r\n fields={'display_name': 'problem 1', 'data': new_payload},\r\n )\r\n another_payload = \"<problem>not empty</problem>\"\r\n modulestore().create_item(\r\n locator, category, 'test_update_manifold',\r\n fields={'display_name': 'problem 2', 'data': another_payload},\r\n definition_locator=original.definition_locator,\r\n )\r\n # pylint: disable=W0212\r\n modulestore()._clear_cache()\r\n\r\n # now begin the test\r\n block = modulestore().get_item(locator)\r\n pre_def_id = block.definition_locator.definition_id\r\n pre_version_guid = block.location.version_guid\r\n\r\n self.assertNotEqual(block.grading_policy['GRADER'][0]['min_count'], 13)\r\n block.grading_policy['GRADER'][0]['min_count'] = 13\r\n block.children = block.children[1:] + [block.children[0]]\r\n block.advertised_start = \"Soon\"\r\n\r\n block.save() # decache model changes\r\n updated_block = modulestore().update_item(block, \"**replace_user**\")\r\n self.assertNotEqual(updated_block.definition_locator.definition_id, pre_def_id)\r\n self.assertNotEqual(updated_block.location.version_guid, pre_version_guid)\r\n self.assertEqual(updated_block.grading_policy['GRADER'][0]['min_count'], 13)\r\n self.assertEqual(updated_block.children[0].version_agnostic(), block.children[0].version_agnostic())\r\n self.assertEqual(updated_block.advertised_start, \"Soon\")",
"def blocks(self, blocks: int):\n\n self._blocks = blocks",
"def test_crud_block(self):\n lib = self._create_library(slug=\"test-lib-crud-block\", title=\"Title\", description=\"Description\")\n block = self._add_block_to_library(lib['id'], \"problem\", \"problem1\")\n\n # Update OLX, verify updates in index\n self._set_library_block_olx(block[\"id\"], '<problem display_name=\"new_name\"/>')\n response = LibraryBlockIndexer.get_items([block['id']])[0]\n assert response['display_name'] == 'new_name'\n assert response['has_unpublished_changes'] is True\n\n # Verify has_unpublished_changes after committing library\n self._commit_library_changes(lib['id'])\n response = LibraryBlockIndexer.get_items([block['id']])[0]\n assert response['has_unpublished_changes'] is False\n\n # Verify has_unpublished_changes after reverting library\n self._set_library_block_asset(block[\"id\"], \"whatever.png\", b\"data\")\n response = LibraryBlockIndexer.get_items([block['id']])[0]\n assert response['has_unpublished_changes'] is True\n\n self._revert_library_changes(lib['id'])\n response = LibraryBlockIndexer.get_items([block['id']])[0]\n assert response['has_unpublished_changes'] is False\n\n # Verify that deleting block removes it from index\n self._delete_library_block(block['id'])\n assert LibraryBlockIndexer.get_items([block['id']]) == []\n\n # Verify that deleting a library removes its blocks from index too\n self._add_block_to_library(lib['id'], \"problem\", \"problem1\")\n LibraryBlockIndexer.get_items([block['id']])\n self._delete_library(lib['id'])\n assert LibraryBlockIndexer.get_items([block['id']]) == []",
"def test_blocks_created_with_current_month_start_date(self):\n group = Group.objects.create(name='free_5monthly_blocks')\n user1 = baker.make(User, first_name='Test', last_name='User1')\n user2 = baker.make(User, first_name='Test', last_name='User2')\n user3 = baker.make(User, first_name='Test', last_name='User3')\n\n for user in [user1, user2, user3]:\n user.groups.add(group)\n\n # make blocks on 1st Jan\n with patch(\n 'booking.management.commands.create_free_monthly_blocks'\n '.timezone.now',\n return_value=datetime(2016, 1, 15, tzinfo=dt_timezone.utc)\n ):\n management.call_command('create_free_monthly_blocks')\n\n assert Block.objects.count() == 3\n for block in Block.objects.all():\n assert block.start_date.date() == date(2016, 1, 1)\n\n # make blocks on 1st Feb\n with patch(\n 'booking.management.commands.create_free_monthly_blocks'\n '.timezone.now',\n return_value=datetime(2016, 2, 1, tzinfo=dt_timezone.utc)\n ):\n # blocks are not full, and are paid, so active unless expired\n assert [bl for bl in Block.objects.all() if bl.full] == []\n assert Block.objects.filter(paid=True).count() == 3\n assert [bl for bl in Block.objects.all() if bl.active_block()] == []\n management.call_command('create_free_monthly_blocks')\n # 3 new blocks created\n assert Block.objects.count() == 6",
"def _prepare_blocks():\n\n counter = blocks[0]['freeStart']\n maxBlocks = blocks[0]['maxBlocks']\n while(counter < maxBlocks) :\n try:\n # print (mount['parent'] + '/linddata.' + str(counter))\n f = open(mount['parent'] + '/linddata.' + str(counter), 'r') \n except IOError, e:\n return STATUS['M_BD']\n else :\n fdatastring = f.next()\n fdata = deserializedata(fdatastring)\n blocks[counter] = fdata\n counter += 1\n \n return STATUS['OK']",
"def num_blocks(self): # -> int:\n ...",
"def test_update_blocks_empty_last_created_response(\n path_last_block, path_get_blocks_iterator, mocked_logger\n):\n\n # Method to test\n created_id = BlockchainEthService.update_blocks()\n\n assert Block.objects.count() == 0\n assert len(created_id) == 0\n mocked_logger.info.assert_called_once_with(\"0 new blocks created.\")",
"def test_block_bad_batch(self):\n pass",
"def test_height_mod(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n handler = self.new_handler()\n ret = check_heights(handler.root)\n if ret:\n successes += 1\n else:\n failures += 1\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to correctly modify heights! ' +\n f'{failures}/{iterations} failures! {BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated height adjustment for all nodes in {successes} trees.{BColors.ENDC}\")",
"def generate_test_blocks(self):\n def generate_blocks_for_floor(block_names, floor_name, data):\n item_dict = {}\n for name in block_names:\n item_dict['{}_{}'.format(name, floor_name)] = {\n 'name': name,\n 'floor': floor_name,\n **data,\n }\n return item_dict\n\n block_data = {\n 'pixels_to_m_x': 40,\n 'pixels_to_m_y': 40,\n 'floor_map': self.get_test_floor_map_image(),\n }\n self.bs_f0_l1_o1_dict = \\\n generate_blocks_for_floor(\n ['b1', 'b2', 'b3_del', 'b4_del', 'b5_del', 'b6_del'],\n 'f0_l1_o1',\n block_data)\n\n self.bs_f1_l1_o1_dict = \\\n generate_blocks_for_floor(\n ['b1', 'b2'],\n 'f1_l1_o1',\n block_data)\n\n self.bs_f0_l1_sub1_o1_dict = \\\n generate_blocks_for_floor(\n ['b1', 'b2', 'b3_del', 'b4_del', 'b5_del'],\n 'f0_l1_sub1_o1',\n block_data)\n\n self.bs_f0_l1_o2_dict = \\\n generate_blocks_for_floor(\n ['b1', 'b2_del', 'b3_del'],\n 'f0_l1_o2',\n block_data)\n\n self.bs_f0_l1_sub1_o2_dict = \\\n generate_blocks_for_floor(\n ['b1', 'b2_del', 'b3_del'],\n 'f0_l1_sub1_o2',\n block_data)\n\n self.bs_dict = {\n **self.bs_f0_l1_o1_dict,\n **self.bs_f1_l1_o1_dict,\n **self.bs_f0_l1_sub1_o1_dict,\n **self.bs_f0_l1_o2_dict,\n **self.bs_f0_l1_sub1_o2_dict\n }\n\n # generate blocks in database\n self.blocks = self.create_blocks_from_data(self.bs_dict, self.floors)",
"def test_simple_block(self):\n if config.SUMLEV_BLOCK not in config.SUMLEVS:\n pass\n\n blocks = self.geographies.find({ 'geoid': '150010210053029' })\n\n self.assertEqual(blocks.count(), 1)\n\n block = blocks[0]\n\n self.assertEqual(block['sumlev'], config.SUMLEV_BLOCK)\n self.assertEqual(block['metadata']['NAME'], 'Block 3029')\n self.assertEqual(block['metadata']['STATE'], '15')\n self.assertEqual(block['metadata']['COUNTY'], '001')\n self.assertEqual(block['metadata']['TRACT'], '021005')\n\n pop_2000 = 33 \n pop_2010 = 93 \n self._test_totalpop(block, pop_2000, pop_2010)",
"def populate_blocks_with_blockheights(self):\n for (height, block) in enumerate(self.blocks):\n block[\"height\"] = height",
"def test_schema_updates(self):\n lib = self._create_library(slug=\"test-lib--block-schemaupdates-1\", title=\"Title 1\", description=\"Description\")\n with patch(\"openedx.core.djangoapps.content_libraries.libraries_index.LibraryBlockIndexer.SCHEMA_VERSION\",\n new=0):\n block = self._add_block_to_library(lib['id'], \"problem\", \"problem1\")\n assert len(LibraryBlockIndexer.get_items([block['id']])) == 1\n\n with patch(\"openedx.core.djangoapps.content_libraries.libraries_index.LibraryBlockIndexer.SCHEMA_VERSION\",\n new=1):\n assert len(LibraryBlockIndexer.get_items([block['id']])) == 0\n\n call_command(\"reindex_content_library\", all=True, force=True)\n\n assert len(LibraryBlockIndexer.get_items([block['id']])) == 1",
"def test_adding_multiple_blocks(self, blockchain, genesis, block1, block2, block3):\n assert blockchain.get_depth(hash(block1)) == -float('inf')\n assert blockchain.get_depth(hash(block2)) == -float('inf')\n assert blockchain.get_depth(hash(block3)) == -float('inf')\n\n blockchain.add(block1)\n # graph should look like this:\n # 0 <- 1\n assert hash(block1) in blockchain\n assert blockchain[hash(block1)] == block1\n assert blockchain._leaves == {hash(block1)}\n assert blockchain.get_virtual_block_parents() == {hash(block1)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block1): 1}\n assert blockchain._longest_chain == {hash(genesis), hash(block1)}\n assert blockchain.is_a_before_b(hash(genesis), hash(block1)) is True\n assert blockchain.get_depth(hash(genesis)) == 1\n assert blockchain.get_depth(hash(block1)) == 0\n assert blockchain.get_depth(hash(block2)) == -float('inf')\n assert blockchain.get_depth(hash(block3)) == -float('inf')\n\n blockchain.add(block2)\n # graph should look like this:\n # 0 <- 1\n # 0 <- 2\n assert hash(block2) in blockchain\n assert blockchain[hash(block2)] == block2\n assert blockchain._leaves == {hash(block1), hash(block2)}\n assert blockchain.get_virtual_block_parents() == {min(hash(block1), hash(block2))}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block1): 1}\n assert blockchain._longest_chain == {hash(genesis), hash(block1)}\n assert blockchain.is_a_before_b(hash(genesis), hash(block1)) is True\n assert blockchain.is_a_before_b(hash(block1), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(genesis), hash(block2)) is True\n assert blockchain.get_depth(hash(genesis)) == 1\n assert blockchain.get_depth(hash(block1)) == 0\n assert blockchain.get_depth(hash(block2)) == 0\n assert blockchain.get_depth(hash(block3)) == -float('inf')\n\n blockchain.add(block3)\n # graph should look like this:\n # 0 <- 1 <- 3\n # 0 <- 2\n assert hash(block3) in blockchain\n assert blockchain[hash(block3)] == block3\n assert blockchain._leaves == {hash(block2), hash(block3)}\n assert blockchain.get_virtual_block_parents() == {hash(block3)}\n assert blockchain._G.node[hash(genesis)][Blockchain._CHAIN_LENGTH_KEY] == 1\n assert blockchain._G.node[hash(block1)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block2)][Blockchain._CHAIN_LENGTH_KEY] == 2\n assert blockchain._G.node[hash(block3)][Blockchain._CHAIN_LENGTH_KEY] == 3\n assert blockchain._get_chain() == {hash(genesis): 0, hash(block1): 1, hash(block3): 2}\n assert blockchain._longest_chain == {hash(genesis), hash(block1), hash(block3)}\n assert blockchain.is_a_before_b(hash(genesis), hash(block1)) is True\n assert blockchain.is_a_before_b(hash(block1), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(genesis), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(block3), hash(block2)) is True\n assert blockchain.is_a_before_b(hash(genesis), hash(block3)) is True\n assert blockchain.get_depth(hash(genesis)) == 2\n assert blockchain.get_depth(hash(block1)) == 1\n assert blockchain.get_depth(hash(block2)) == 0\n assert blockchain.get_depth(hash(block3)) == 0",
"def test_index_block(self):\n lib = self._create_library(slug=\"test-lib-index-1\", title=\"Title 1\", description=\"Description\")\n block1 = self._add_block_to_library(lib['id'], \"problem\", \"problem1\")\n block2 = self._add_block_to_library(lib['id'], \"problem\", \"problem2\")\n\n assert len(LibraryBlockIndexer.get_items()) == 2\n\n for block in [block1, block2]:\n usage_key = LibraryUsageLocatorV2.from_string(block['id'])\n response = LibraryBlockIndexer.get_items([usage_key])[0]\n\n assert response['id'] == block['id']\n assert response['def_key'] == block['def_key']\n assert response['block_type'] == block['block_type']\n assert response['display_name'] == block['display_name']\n assert response['has_unpublished_changes'] == block['has_unpublished_changes']",
"def test_01(self):\n assert 'True' == Api.requestBlock('test-01')",
"def test_block_missing_batch(self):\n pass",
"def test_balance_mod(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n handler = self.new_handler()\n ret = check_balance(handler.root)\n if ret == handler.balanced:\n successes += 1\n else:\n failures += 1\n handler.debug_wrapper()\n\n if failures != 0:\n print('\\n=================================================\\n')\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to modify balance factor correctly! ' +\n f'{failures}/{iterations} failures! {BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated that balance bool is updated in {successes} trees.{BColors.ENDC}\")",
"def test_update_game(self): \n for i in range(3):\n for j in range(3):\n self.game.board[i][j] = self.game.patterns[\"Blinker\"][i][j]\n self.game.update_game()\n self.assertEqual(self.game.board[0][1], 1)\n self.assertEqual(self.game.board[1][1], 1)\n self.assertEqual(self.game.board[2][1], 1)",
"def test_31(self):\n assert 'True' == Api.requestBlock('test-31')",
"def test_10(self):\n assert 'False' == Api.requestBlock('test-10')",
"def test_29(self):\n assert 'True' == Api.requestBlock('test-29')",
"def test_05(self):\n assert 'True' == Api.requestBlock('test-05', charOrder=50)",
"def test_single_chain(self):\n self.assertEqual(len(self.genesis_blocks), 1)\n manager = self.create_peer('testnet', tx_storage=self.tx_storage)\n\n # The initial score is the sum of the genesis\n score = self.genesis_blocks[0].weight\n for tx in self.genesis_txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 100 blocks in a row with no transaction but the genesis\n blocks = add_new_blocks(manager, 100, advance_clock=15)\n for i, block in enumerate(blocks):\n meta = block.get_metadata(force_reload=True)\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n\n # Add some transactions between blocks\n txs = add_new_transactions(manager, 30, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n # Mine 50 more blocks in a row with no transactions between them\n blocks = add_new_blocks(manager, 50)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n consensus_context = manager.consensus_algorithm.create_context()\n self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)\n\n # Mine 15 more blocks with 10 transactions between each block\n for _ in range(15):\n txs = add_new_transactions(manager, 10, advance_clock=15)\n for tx in txs:\n score = sum_weights(score, tx.weight)\n\n blocks = add_new_blocks(manager, 1)\n for i, block in enumerate(blocks):\n meta = block.get_metadata()\n score = sum_weights(score, block.weight)\n self.assertAlmostEqual(score, meta.score)\n consensus_context = manager.consensus_algorithm.create_context()\n self.assertAlmostEqual(consensus_context.block_algorithm.calculate_score(block), meta.score)\n\n self.assertConsensusValid(manager)"
]
| [
"0.6776867",
"0.67313904",
"0.6672238",
"0.66562814",
"0.6521418",
"0.6519071",
"0.65140855",
"0.65059924",
"0.6473834",
"0.63975424",
"0.6349964",
"0.6346332",
"0.6339556",
"0.62601537",
"0.61715144",
"0.6147653",
"0.613493",
"0.60646725",
"0.6061347",
"0.60496294",
"0.6046911",
"0.6028176",
"0.6007594",
"0.60075164",
"0.59990716",
"0.5995483",
"0.59704566",
"0.5952398",
"0.59407014",
"0.593888"
]
| 0.77074355 | 0 |
Bind all controls with the facade | def bind_controls(self):
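        # wire the comment and upload buttons, plus the window close event, to their handler methods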
self.add_comment_button.Bind(wx.EVT_BUTTON, self.add_comment)
self.del_comment_button.Bind(wx.EVT_BUTTON, self.remove_comment)
self.upload_button.Bind(wx.EVT_BUTTON, self.upload_change)
self.Bind(wx.EVT_CLOSE, self.on_close) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _bind(self):\n\n pass",
"def bind(self):\n super(QtBaseWidgetComponent, self).bind()",
"def bindControlMenu(self):\r\n self.frames[\"ControlMenu\"].unitBut.configure(\r\n command=self.changeUnits)\r\n self.frames[\"ControlMenu\"].bind(\r\n self.controlKeys[self.frames[\"ControlMenu\"].unitBut], lambda U: self.changeUnits())\r\n self.frames[\"ControlMenu\"].jogContinFBut.configure(\r\n command=self.jogForward)\r\n self.frames[\"ControlMenu\"].bind(\r\n self.controlKeys[self.frames[\"ControlMenu\"].jogContinFBut], lambda F: self.jogForward())\r\n self.frames[\"ControlMenu\"].jogContinRBut.configure(\r\n command=self.jogReverse)\r\n self.frames[\"ControlMenu\"].bind(\r\n self.controlKeys[self.frames[\"ControlMenu\"].jogContinRBut], lambda R: self.jogReverse())\r\n self.frames[\"ControlMenu\"].stopBut.configure(command=self.stop)\r\n self.frames[\"ControlMenu\"].bind(\r\n self.controlKeys[self.frames[\"ControlMenu\"].stopBut], lambda S: self.stop())\r\n self.frames[\"ControlMenu\"].slewBut.configure(command=self.slew)\r\n self.frames[\"ControlMenu\"].bind(\r\n self.controlKeys[self.frames[\"ControlMenu\"].slewBut], lambda L: self.slew())\r\n self.frames[\"ControlMenu\"].back.configure(\r\n command=lambda: self.showFrame(\"MainMenu\"))",
"def fromControls(self,widget):",
"def toControls(self,widget):",
"def _bind_events(self):\n \n slice_slider = self._view_frame._image_control_panel.slider\n slice_slider.Bind(wx.EVT_SLIDER, self._handler_slice_slider)\n \n new_measurement_button = \\\n self._view_frame._measurement_panel.create_button\n new_measurement_button.Bind(wx.EVT_BUTTON, self._handler_new_measurement_button)\n\n rb = self._view_frame._measurement_panel.rename_button\n rb.Bind(wx.EVT_BUTTON,\n self._handler_rename_measurement_button)\n\n db = self._view_frame._measurement_panel.delete_button\n db.Bind(wx.EVT_BUTTON,\n self._handler_delete_measurement_button)\n\n eb = self._view_frame._measurement_panel.enable_button\n eb.Bind(wx.EVT_BUTTON,\n self._handler_enable_measurement_button)\n\n db = self._view_frame._measurement_panel.disable_button\n db.Bind(wx.EVT_BUTTON,\n self._handler_disable_measurement_button)",
"def controls_setup(self):\n pass",
"def setupBinds(self):\n\n\t\tself.menu_window.exitButton.bind(\"<Button-1>\", self.closeGame)\n\t\tself.menu_window.exitButton.bind(\"<Return>\", self.closeGame)\n\n\t\tself.menu_window.playButton.bind(\"<Button-1>\", self.playGame)\n\t\tself.menu_window.playButton.bind(\"<Return>\", self.playGame)\n\n\t\tself.menu_window.scoreButton.bind(\"<Button-1>\", self.openScore)\n\t\tself.menu_window.scoreButton.bind(\"<Return>\", self.openScore)\n\n\t\tself.menu_window.instButton.bind(\"<Button-1>\", self.openInstructions)\n\t\tself.menu_window.instButton.bind(\"<Return>\", self.openInstructions)\n\n\t\tself.menu_window.optionsButton.bind(\"<Button-1>\", self.openOptions)\n\t\tself.menu_window.optionsButton.bind(\"<Return>\", self.openOptions)\n\n\t\tself.score_window.close_score.bind(\"<Button-1>\", self.openScore)\n\t\tself.score_window.close_score.bind(\"<Return>\", self.openScore)\n\n\t\tself.instructions_window.close.bind(\"<Button-1>\", self.openInstructions)\n\t\tself.instructions_window.close.bind(\"<Return>\", self.openInstructions)\n\n\t\tself.menu_window.logout_button.bind(\"<Button-1>\", self.logOut)\n\t\tself.menu_window.logout_button.bind(\"<Return>\", self.logOut)",
"def bind_all(self, sequence=None, func=None, add=None):\n return super().bind_all(sequence, func, add)",
"def set_binds(self,val):\r\n if val:\r\n self.bind(key_codes.EKeyUpArrow, self.up_key)\r\n self.bind(key_codes.EKeyDownArrow, self.down_key)\r\n self.bind(key_codes.EKeyLeftArrow, self.left_key)\r\n self.bind(key_codes.EKeyRightArrow, self.right_key)\r\n else:\r\n self.bind(key_codes.EKeyUpArrow, None)\r\n self.bind(key_codes.EKeyDownArrow, None)\r\n self.bind(key_codes.EKeyLeftArrow, None)\r\n self.bind(key_codes.EKeyRightArrow, None)",
"def bind_clicks(self):\n self.bind(\"<Button-1>\", lambda e: self._handle_left_click((e.x, e.y)))\n self.bind(\"<Button-2>\", lambda e: self._handle_right_click((e.x, e.y)))\n self.bind(\"<Button-3>\", lambda e: self._handle_right_click((e.x, e.y)))",
"def _widget_rebind_externals(self, **kw):\n if not hasattr(self, \"_funcids_d\"):\n setattr(self, \"_funcids_d\", {})\n values_l = self._funcids_d.values() # bypasses seqs\n events_l = (list(v.values())[0] for v in values_l)\n external_events_l = (\n e for e in events_l if e.get(\"internal\") is False\n ) # == 0)\n for e in external_events_l:\n self._widget_bind(\n e[\"sequence\"],\n e[\"func\"],\n e[\"add\"],\n internal=e[\"internal\"],\n kidsonly=e[\"self\"],\n **kw\n )",
"def bind(self,*args):\n if len(args)>2:\n self.tk.call(self._tkname,args[0]._w,'-index',args[1],args[2])\n else:\n self.tk.call(self._tkname,*args)",
"def bind(self, sequence=None, func=None, add=None):\n return self._widget_bind(sequence, func, add, internal=False)",
"def bindKeys(self):\r\n self.c.bind(\"<Button-1>\",self.seek)\r\n self.c.bind(\"<MouseWheel>\",self.app.zoom)\r\n self.c.bind(\"<Button-3>\",self.peek)",
"def bind(self, **kwds):\n # repair convenient usage that breaks my representation constraints: make sure my value\n # processors are iterable\n self.converters = self.listify(self.converters)\n self.normalizers = self.listify(self.normalizers)\n self.validators = self.listify(self.validators)\n\n # chain up\n return super().bind(**kwds)",
"def bind(self):\n super(AbstractButton, self).bind()\n attrs = ('text', 'checkable', 'checked', 'icon_size', 'icon_source')\n self.publish_attributes(*attrs)",
"def controls_setup(self):\n\n raise NotImplemented(\"Override this function by adding elements\")",
"def controls_setup(self):\n\n self.email = element.TextBox(self, dom_id='mailing-list-email', alias='E-mail Textbox')\n self.close = element.Button(self, button_type='button', css_selector='.mailing-list-confirm .btn-close',\n alias='Close Button')\n self.signup = element.Button(self, css_selector='form.slide-left button[type=submit]', alias='Subscribe Button')",
"def containerBind(*args, allNames: bool=True, bindingSet: Union[AnyStr, bool]=\"\",\n bindingSetConditions: bool=True, bindingSetList: bool=True, force: bool=True,\n preview: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass",
"def bind(self, *args, **kwargs):\n self._canvas.bind(*args, **kwargs)",
"def BindEvents(self):\n self.bind(\"<Motion>\",\n lambda e, s=self: s.MouseMoveEvent(e, 0, 0))\n self.bind(\"<Control-Motion>\",\n lambda e, s=self: s.MouseMoveEvent(e, 1, 0))\n self.bind(\"<Shift-Motion>\",\n lambda e, s=self: s.MouseMoveEvent(e, 1, 1))\n self.bind(\"<Control-Shift-Motion>\",\n lambda e, s=self: s.MouseMoveEvent(e, 0, 1))\n\n # Left Button\n self.bind(\"<ButtonPress-1>\",\n lambda e, s=self: s.LeftButtonPressEvent(e, 0, 0))\n self.bind(\"<Control-ButtonPress-1>\",\n lambda e, s=self: s.LeftButtonPressEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonPress-1>\",\n lambda e, s=self: s.LeftButtonPressEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonPress-1>\",\n lambda e, s=self: s.LeftButtonPressEvent(e, 1, 1))\n self.bind(\"<ButtonRelease-1>\",\n lambda e, s=self: s.LeftButtonReleaseEvent(e, 0, 0))\n self.bind(\"<Control-ButtonRelease-1>\",\n lambda e, s=self: s.LeftButtonReleaseEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonRelease-1>\",\n lambda e, s=self: s.LeftButtonReleaseEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonRelease-1>\",\n lambda e, s=self: s.LeftButtonReleaseEvent(e, 1, 1))\n\n # Middle Button\n self.bind(\"<ButtonPress-2>\",\n lambda e, s=self: s.MiddleButtonPressEvent(e, 0, 0))\n self.bind(\"<Control-ButtonPress-2>\",\n lambda e, s=self: s.MiddleButtonPressEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonPress-2>\",\n lambda e, s=self: s.MiddleButtonPressEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonPress-2>\",\n lambda e, s=self: s.MiddleButtonPressEvent(e, 1, 1))\n self.bind(\"<ButtonRelease-2>\",\n lambda e, s=self: s.MiddleButtonReleaseEvent(e, 0, 0))\n self.bind(\"<Control-ButtonRelease-2>\",\n lambda e, s=self: s.MiddleButtonReleaseEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonRelease-2>\",\n lambda e, s=self: s.MiddleButtonReleaseEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonRelease-2>\",\n lambda e, s=self: s.MiddleButtonReleaseEvent(e, 1, 1))\n\n # Right Button\n self.bind(\"<ButtonPress-3>\",\n lambda e, s=self: s.RightButtonPressEvent(e, 0, 0))\n self.bind(\"<Control-ButtonPress-3>\",\n lambda e, s=self: s.RightButtonPressEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonPress-3>\",\n lambda e, s=self: s.RightButtonPressEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonPress-3>\",\n lambda e, s=self: s.RightButtonPressEvent(e, 1, 1))\n self.bind(\"<ButtonRelease-3>\",\n lambda e, s=self: s.RightButtonReleaseEvent(e, 0, 0))\n self.bind(\"<Control-ButtonRelease-3>\",\n lambda e, s=self: s.RightButtonReleaseEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonRelease-3>\",\n lambda e, s=self: s.RightButtonReleaseEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonRelease-3>\",\n lambda e, s=self: s.RightButtonReleaseEvent(e, 1, 1))\n\n if sys.platform == 'win32':\n self.bind(\"<MouseWheel>\",\n lambda e, s=self: s.MouseWheelEvent(e, 0, 0))\n self.bind(\"<Control-MouseWheel>\",\n lambda e, s=self: s.MouseWheelEvent(e, 1, 0))\n self.bind(\"<Shift-MouseWheel>\",\n lambda e, s=self: s.MouseWheelEvent(e, 0, 1))\n self.bind(\"<Control-Shift-MouseWheel>\",\n lambda e, s=self: s.MouseWheelEvent(e, 1, 1))\n else:\n # Mouse wheel forward event\n self.bind(\"<ButtonPress-4>\",\n lambda e, s=self: s.MouseWheelForwardEvent(e, 0, 0))\n self.bind(\"<Control-ButtonPress-4>\",\n lambda e, s=self: s.MouseWheelForwardEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonPress-4>\",\n lambda e, s=self: s.MouseWheelForwardEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonPress-4>\",\n lambda e, s=self: s.MouseWheelForwardEvent(e, 1, 1))\n\n # Mouse wheel backward event\n 
self.bind(\"<ButtonPress-5>\",\n lambda e, s=self: s.MouseWheelBackwardEvent(e, 0, 0))\n self.bind(\"<Control-ButtonPress-5>\",\n lambda e, s=self: s.MouseWheelBackwardEvent(e, 1, 0))\n self.bind(\"<Shift-ButtonPress-5>\",\n lambda e, s=self: s.MouseWheelBackwardEvent(e, 0, 1))\n self.bind(\"<Control-Shift-ButtonPress-5>\",\n lambda e, s=self: s.MouseWheelBackwardEvent(e, 1, 1))\n\n # Key related events\n self.bind(\"<KeyPress>\",\n lambda e, s=self: s.KeyPressEvent(e, 0, 0))\n self.bind(\"<Control-KeyPress>\",\n lambda e, s=self: s.KeyPressEvent(e, 1, 0))\n self.bind(\"<Shift-KeyPress>\",\n lambda e, s=self: s.KeyPressEvent(e, 0, 1))\n self.bind(\"<Control-Shift-KeyPress>\",\n lambda e, s=self: s.KeyPressEvent(e, 1, 1))\n\n self.bind(\"<KeyRelease>\",\n lambda e, s=self: s.KeyReleaseEvent(e, 0, 0))\n self.bind(\"<Control-KeyRelease>\",\n lambda e, s=self: s.KeyReleaseEvent(e, 1, 0))\n self.bind(\"<Shift-KeyRelease>\",\n lambda e, s=self: s.KeyReleaseEvent(e, 0, 1))\n self.bind(\"<Control-Shift-KeyRelease>\",\n lambda e, s=self: s.KeyReleaseEvent(e, 1, 1))\n\n self.bind(\"<Enter>\",\n lambda e, s=self: s.EnterEvent(e, 0, 0))\n self.bind(\"<Control-Enter>\",\n lambda e, s=self: s.EnterEvent(e, 1, 0))\n self.bind(\"<Shift-Enter>\",\n lambda e, s=self: s.EnterEvent(e, 0, 1))\n self.bind(\"<Control-Shift-Enter>\",\n lambda e, s=self: s.EnterEvent(e, 1, 1))\n self.bind(\"<Leave>\",\n lambda e, s=self: s.LeaveEvent(e, 0, 0))\n self.bind(\"<Control-Leave>\",\n lambda e, s=self: s.LeaveEvent(e, 1, 0))\n self.bind(\"<Shift-Leave>\",\n lambda e, s=self: s.LeaveEvent(e, 0, 1))\n self.bind(\"<Control-Shift-Leave>\",\n lambda e, s=self: s.LeaveEvent(e, 1, 1))\n\n self.bind(\"<Configure>\", self.ConfigureEvent)\n self.bind(\"<Expose>\",lambda e,s=self: s.ExposeEvent())",
"def controls_setup(self):\n\n self.subbie_name = element.Link(self, css_selector='th:nth-child(1) > a', alias=\"Admin Subbie Name Link\")\n self.type = element.Caption(self, css_selector='td:nth-child(2)', alias=\"Subbie Type\")\n self.username = element.Caption(self, css_selector='td:nth-child(3)', alias=\"Username\")\n self.email = element.Caption(self, css_selector='td:nth-child(4)', alias=\"Email\")\n self.active_start_date = element.Link(self, css_selector='td:nth-child(5)', alias=\"Active Start Date Text\")\n self.active_end_date = element.Link(self, css_selector='td:nth-child(6)', alias=\"Active End Date Text\")",
"def bind(self, tags=None):\n tags = tags or self.tags\n items = self.canvas.find_withtag(tags)\n for item in items:\n self.canvas.tag_bind(item, \"<Button-1>\", self.down)",
"def controls_setup(self):\n\n self.client_name = element.Link(self, css_selector='th:nth-child(1) > a', alias=\"Admin Client Name Link\")\n self.xero_customer = element.Caption(self, css_selector='td:nth-child(2)', alias=\"Xero Customer\")\n self.send_invoices = element.Image(self, css_selector='td:nth-child(3) img', alias=\"Send Invoices Check Mark\")\n self.part_a_required = element.Image(self, css_selector='td:nth-child(4) img',\n alias=\"Part A Required Check Mark\")\n self.they_supply_pump = element.Image(self, css_selector='td:nth-child(5) img',\n alias=\"They Supply Pump Check Mark\")\n self.active_start_date = element.Link(self, css_selector='td:nth-child(6)', alias=\"Active Start Date Text\")\n self.active_end_date = element.Link(self, css_selector='td:nth-child(6)', alias=\"Active End Date Text\")",
"def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/tvh.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\n\t\t# Label information\n image = pyxbmct.Image(addonfolder+artsfolder+'/users.png')\n self.placeControl(image, 8, 1, rowspan=1, columnspan=14)\n\t\t\n\t\t# Username input\n image = pyxbmct.Image(addonfolder+artsfolder+'/username.png')\n self.placeControl(image, 10, 1, rowspan=1, columnspan=3)\n self.username_input = pyxbmct.Edit('')\n self.placeControl(self.username_input, 10, 4, rowspan=1, columnspan=4)\n\n\t\t# Password input\n image = pyxbmct.Image(addonfolder+artsfolder+'/password.png')\n self.placeControl(image, 11, 1, rowspan=1, columnspan=3)\n self.password_input = pyxbmct.Edit('', isPassword=True)\n self.placeControl(self.password_input, 11, 4, rowspan=1, columnspan=4)\n\n\t\t# Next button\n self.next_button = pyxbmct.Button('Next')\n self.placeControl(self.next_button, 13, 14, rowspan=1, columnspan=1)\n # Connect close button\n self.connect(self.next_button, lambda: self.page())\n\t\t\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())",
"def bind_formset(formset):\n if formset.is_bound:\n # do nothing if the formset is already bound\n return formset\n \n bindData={}\n # the formset.get_default_prefix() and form.add_prefix() methods add in the \n # dict keys that uniquely identify the various form fields with the individual \n # instance data\n \n # add formset management form data\n bindData[formset.get_default_prefix()+\"-TOTAL_FORMS\"]=str(formset.management_form['TOTAL_FORMS'].value())\n bindData[formset.get_default_prefix()+\"-INITIAL_FORMS\"]=str(formset.management_form['INITIAL_FORMS'].value())\n bindData[formset.get_default_prefix()+\"-MIN_NUM_FORMS\"]=str(formset.management_form['MIN_NUM_FORMS'].value())\n bindData[formset.get_default_prefix()+\"-MAX_NUM_FORMS\"]=str(formset.management_form['MAX_NUM_FORMS'].value())\n for form in formset:\n if form.instance:\n # field data, get these values from the instance\n for fieldName,fieldValue in form.fields.iteritems():\n try:\n bindData[form.add_prefix(fieldName)]=getattr(form.instance,\n fieldName)\n except AttributeError:\n # this is an added field (i.e. DELETE), not derived from the\n # model, do nothing with it, since we are only binding instance\n # data to the form\n pass\n # hidden field data, get these from the field initial values set\n # when the form was created\n for field in form.hidden_fields():\n bindData[form.add_prefix(field.name)]=field.field.initial\n # create a new bound formset by passing in the bindData dict, this looks\n # to the formset constructor like a request.POST dict \n newFormset=formset.__class__(bindData,instance=formset.instance,\n error_class=formset.error_class)\n return newFormset",
"def _addWidgets(self):\n self.temperatureToConvertLabel = tkinter.Label(self,\n text='Temperature to Convert',\n height=4).grid(row=0, column=0,\n sticky=tkinter.W) # Add 'Temperature to Convert' Label\n self.temperatureBox = tkinter.Entry(self,\n textvariable=self.controller.temperatureToConvert,\n width=15).grid(row=0, column=1) # Add 'Temperature to Convert' Entry\n\n self.temperatureBoxLabel = tkinter.Label(self,\n textvariable=self.controller.temperatureBoxLabelVar).grid(row=0,\n column=2,\n sticky=tkinter.E) # Add 'Temperature to Convert' Units\n\n self.FtoCRadioButton = tkinter.Radiobutton(self,\n text=self.controller.FAHRENHEIT + ' to ' + self.controller.CELSIUS,\n variable=self.controller.conversionVar,\n command=self.controller.conversionDirectionChanged,\n value=self.controller.FtoC).grid(row=1, column=0,\n sticky=tkinter.W) # Add Fahrenheit to Celsius Conversion Radio Button\n\n self.CtoFRadioButton = tkinter.Radiobutton(self,\n text=self.controller.CELSIUS + ' to ' + self.controller.FAHRENHEIT,\n variable=self.controller.conversionVar,\n command=self.controller.conversionDirectionChanged,\n value=self.controller.CtoF).grid(row=2, column=0,\n sticky=tkinter.W) # Add Celsius to Fahrenheit Conversion Radio Button\n\n self.convertedTemperatureLabel = tkinter.Label(self,\n text='Converted Temperature',\n height=4).grid(row=3, column=0,\n sticky=tkinter.W) # Add 'Converted Temperature' Label\n self.convertedTemperatureBox = tkinter.Entry(self,\n textvariable=self.controller.convertedTemperature,\n width=15).grid(row=3,\n column=1) # Add 'Converted Temperature' Entry\n self.convertedTemperatureBoxLabel = tkinter.Label(self,\n textvariable=self.controller.convertedTemperatureBoxLabelVar).grid(\n row=3, column=2, sticky=tkinter.E) # Add 'Converted Temperature' Units\n\n self.convertButton = tkinter.Button(self,\n text='Convert',\n command=self.controller.convertPressed).grid(row=4, column=0,\n sticky=tkinter.E) # Add 'Convert'Button\n self.quitButton = tkinter.Button(self,\n text='Quit',\n command=self.controller.quit).grid(row=4, column=1,\n sticky=tkinter.E) # Add 'Quit'Button",
"def assignWidgets(self):\n self.buttonBox.accepted.connect(self.runAll)\n self.buttonBox.rejected.connect(self.exitSystem)\n self.FilesButton.clicked.connect(self.FileToRun)\n self.OutputButton.clicked.connect(self.OutPutLocation)",
"def controls_setup(self):\n\n self.login = element.Link(self, class_name='nav-login', alias='Navbar->Login Link')\n self.register = element.Link(self, class_name='nav-register', alias='Navbar->Register Link')\n self.logout = element.Link(self, class_name='nav-logout', alias='Navbar->Logout Link')\n self.be_a_merchant = element.Link(self, class_name='nav-partner-join',\n alias='Navbar->Be A Merchant Link')\n self.wishlist = element.Link(self, class_name='nav-wishlist', alias='Navbar->Wishlist Icon Button')\n self.cart = element.Link(self, class_name='nav-cart', alias='Navbar->Cart Icon Button')\n self.account = element.Link(self, css_selector='a.nav-account', alias='Navbar->Account Link')\n self.messages = element.Link(self, class_name='nav-messages', alias='Navbar->Messages Link')\n self.dashboard = element.Link(self, class_name='nav-dashboard', alias='Navbar->Dashboard Link')\n self.messages_count = element.Element(self, dom_id='postman_unread_count',\n alias='Navbar->Unread Messages Count Label')\n\n self.search_query = element.TextBox(self, name='q', alias='Navbar->Search Box')\n self.search_button = element.Button(self, css_selector='form.search button[type=submit]',\n alias='Navbar->Search Icon Button')"
]
| [
"0.6240202",
"0.6223124",
"0.6148494",
"0.6035581",
"0.59745187",
"0.586234",
"0.5814697",
"0.5779209",
"0.5718157",
"0.569714",
"0.5687127",
"0.56239295",
"0.5562846",
"0.55018246",
"0.5459139",
"0.54509723",
"0.5446135",
"0.5402932",
"0.5399839",
"0.537862",
"0.5360238",
"0.5335858",
"0.52325714",
"0.5232444",
"0.52232796",
"0.5213644",
"0.5209475",
"0.5207131",
"0.52065516",
"0.51839966"
]
| 0.66183096 | 0 |
Evaluate the population with respect to input data X and expected output. | def _evaluate(self, X, Y):
# evaluate all networks
evaluations = torch.zeros(self.population_size, device=self.device)
for i in range(self.population_size):
selected_pheno = self.population[i].cpu()
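            # work on a CPU copy of the candidate's parameter vector (the numpy/scipy IDCT below needs it off the GPU)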
            # if IDCT is to be used, first transform the vector, then use it to assemble the network
if self.IDCT_from is not None:
selected_pheno = torch.tensor(
fftpack.idct(np.array(selected_pheno), n=self.model.total_parameters(), norm="ortho"))
fill_weights(self.model, selected_pheno.to(self.device))
# evaluate
predicted = self.model.forward(X)
evaluations[i] = self.loss_function(predicted, Y)
return evaluations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate(self, X):\n\n raise NotImplementedError(\"not implemented!\")",
"def evaluate(self, X):\n\n\t\tpass",
"def evaluate(self, dataset):\n return self.model.evaluate(dataset.X_val, dataset.y_val)",
"def evaluate(population, context=None):\n for individual in population:\n individual.evaluate()\n\n return population, context",
"def evaluate(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n\n output = self.fct(X, *self.args)\n\n Y = output[0].reshape(X.shape[0], self.ydim)\n C = (\n output[1].reshape(X.shape[0], self.cdim)\n if self.cdim > 0\n else np.zeros((X.shape[0], self.cdim))\n )\n\n # Build penalty function\n Py = Optimizer.boundaryCheck(Y, self.ylb, self.yub)\n Px = Optimizer.boundaryCheck(X, self.xlb, self.xub)\n Pc = Optimizer.boundaryCheck(C, self.clb, self.cub)\n\n # Assemble all penalties\n P = Py + Px + Pc\n\n # callback:\n if self.callback is not None:\n self.callback(X, Y, C, self.currentIteration)\n\n # Return to optimizer\n return Y, C, P",
"def evaluate(self, X_test, X, batch_size=None):\n # TODO: test\n return self.get_repr(X_test, X, batch_size=batch_size)",
"def evaluate(self, batch_x, batch_y):\n raise NotImplementedError()",
"def evaluate(self, X_test, y_test):\n self.run(self)\n self.y_pred = self.pipeline.predict(X_test)\n self.rmse = compute_rmse(self.y_pred, y_test)",
"def evaluate(env, X_data, y_data, batch_size=1):\n print('\\nEvaluating')\n n_sample = X_data.shape[0]\n n_batch = int((n_sample+batch_size-1) / batch_size)\n loss, acc = 0, 0\n for batch in range(n_batch):\n print(' batch {0}/{1}'.format(batch + 1, n_batch))\n print('\\r')\n start = batch * batch_size\n end = min(n_sample, start + batch_size)\n cnt = end - start\n batch_loss, batch_acc = env.sess.run(\n [env.loss, env.acc],\n feed_dict={env.x: X_data[start:end],\n env.y: y_data[start:end]})\n loss += batch_loss * cnt\n acc += batch_acc * cnt\n loss /= n_sample\n acc /= n_sample\n print(' loss: {0:.4f} acc: {1:.4f}'.format(loss, acc))\n return loss, acc",
"def evaluate(self, dataset):\n\t\tpass",
"def evaluate(self, X, y=None, batch_size=64):\n return self.clf.evaluate(X, y, batch_size)",
"def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)",
"def _evaluate_fitness(self, population: Population):\n for n, individual in enumerate(population.individuals):\n\n # Dataset extraction using individual features\n X_data = self._create_dataset(individual, self._X)\n\n # Get scores for each fitness strategy (each objective)\n scores = [fitness_func.eval_fitness(X=X_data, y=self._y, num_feats=len(population.features))\n for fitness_func in self.fitness]\n\n # If the number of features is an objective\n if self.optimize_features:\n scores.append(self.features_function(individual=individual,\n total_feats=len(self._population.features)))\n\n # Create a solution\n individual.fitness = Solution(scores)\n\n return population",
"def evaluate(self, x, y):\n x = np.asanyarray(x, dtype=float)\n y = np.asanyarray(y, dtype=float)\n parvals = self.parvals(x)\n return self._evaluate_y(y, parvals)",
"def evaluate(input_data, output_data):\n transition_scores = get_transitions_scores(input_data, output_data)\n print(transition_scores)\n return True, sum(transition_scores)",
"def evaluate(self, x, y, batch_size=None, **kwargs):\n if not batch_size:\n batch_size = self.batch_size\n return self.model.evaluate(x, y, batch_size, **kwargs)",
"def evaluate(self, test_data):\n result = self.model.run(test_data)\n self._save_result(result)",
"def evaluate(self, X, Y, N):\n num_data = min(len(X),len(Y))\n samples = np.random.randint(num_data,size=N)\n results = [(self.predict(x), np.argmax(y)) for (x,y) in zip(X[samples],Y[samples])]\n return sum(int(x==y) for (x,y) in results)/N",
"def Evaluate(self, input_data: np.ndarray) -> np.ndarray:\n if input_data.shape[0] != self.input_layer_size:\n raise IndexError(f\"Input data length is {input_data.shape[0]}, must match length of input layer size {self.input_layer_size}\")\n\n # Evaulate hidden layer given input values\n hidden_layer_values = np.zeros(self.hidden_layer_size, dtype=np.float32)\n for hidden_node_index in range(self.hidden_layer_size):\n node_value = 0\n for input_node_index in range(self.input_layer_size):\n node_value += input_data[input_node_index] * self.input_to_hidden_weights[input_node_index, hidden_node_index]\n hidden_layer_values[hidden_node_index] = sigmoid(node_value + self.hidden_layer_biases[hidden_node_index])\n\n # Evaulate output layer given hidden layer values\n output_layer_values = np.zeros(self.output_layer_size, dtype=np.float32)\n for output_node_index in range(self.output_layer_size):\n node_value = 0\n for hidden_node_index in range(self.hidden_layer_size):\n node_value += hidden_layer_values[hidden_node_index] * self.hidden_to_output_weights[hidden_node_index, output_node_index]\n output_layer_values[output_node_index] = sigmoid(node_value + self.output_layer_biases[output_node_index])\n\n return output_layer_values",
"def _evaluate(self, x):\n out_bot, out_top = self.out_of_bounds(x)\n\n return self._eval_helper(x, out_bot, out_top)",
"def c_test_eval_inp(self, population, run_locals):\r\n return 1",
"def evaluate(self, X_test, Y_test):\n \n test_data = zip(X_test, Y_test)\n test_results = [(np.argmax(self.feedforward(x)), y)\n for (x, y) in test_data]\n \n# Updated for the testing\n# ========================\n return (sum(int(x == y) for (x, y) in test_results) / 100)",
"def evaluate(self, X1, X2):\r\n raise NotImplementedError()",
"def evaluate(self):\n pass",
"def evaluate(self):\n pass",
"def evaluate(self):\n # initialize delta_weights\n Loss = 0\n for i, x_test in enumerate(self.X_test):\n Loss += (self.sigmoid(np.dot(self.weights,x_test))-self.y_test[i])**2\n return Loss",
"def _evaluate(self, x):\n raise NotImplementedError()",
"def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy",
"def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score",
"def evaluate(self, test_data, test_labels):\n raise NotImplementedError"
]
| [
"0.7122433",
"0.68909365",
"0.65685827",
"0.6454497",
"0.6450034",
"0.64358103",
"0.63450545",
"0.63008964",
"0.62551874",
"0.6255135",
"0.6235048",
"0.62307996",
"0.61885816",
"0.61829305",
"0.61514705",
"0.6123133",
"0.61000067",
"0.6092697",
"0.6065284",
"0.6055261",
"0.60491556",
"0.6023823",
"0.60155517",
"0.59766525",
"0.59766525",
"0.59728354",
"0.59497845",
"0.5947725",
"0.59444875",
"0.5943281"
]
| 0.71402115 | 0 |
Given a list of evaluation scores, selects the best_rate fraction of individuals by applying one of the selection methods passed as a parameter | def _selection(self, evaluations, selection, method="truncated", best_rate=0.2):
if selection:
end_range_for_parents = max(1, int(self.population_size * best_rate))
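            # number of parents to keep: at least one, otherwise the top best_rate share of the population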
evaluations_sorted = torch.sort(evaluations)
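            # torch.sort returns (values, indices); index 1 orders the population from lowest (best) to highest loss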
population_sorted = self.population[evaluations_sorted[1]]
if self.best_individual is None:
self.best_individual = population_sorted[0]
self.best_eval = evaluations_sorted[0][0]
elif self.best_eval > evaluations_sorted[0][0]:
self.best_individual = population_sorted[0]
self.best_eval = evaluations_sorted[0][0]
best_population = torch.zeros([end_range_for_parents, len(self.population[0])], device=self.device)
if method == "truncated":
"""
                keeps only the top-ranked individuals (plain truncation selection)
"""
best_population = population_sorted[:end_range_for_parents]
elif method == "fitness_based":
"""
                the probability of each individual being selected is proportional to its fitness value
"""
tot = sum(evaluations)
probabilities = evaluations / tot
for i in range(end_range_for_parents):
best_idx = torch.distributions.categorical.Categorical(
probabilities.clone().detach()).sample()
best_population[i] = self.population[best_idx]
# avoid repetitions
probabilities[best_idx] = 0
elif method == "rank_based":
"""
                the probability of each individual being selected depends on its rank: better-ranked individuals are more likely to be chosen
"""
tot = ((1 + len(evaluations)) / 2) * len(evaluations)
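                # sum of the ranks 1..N (arithmetic series); used below so that lower ranks get larger selection weights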
ranks = torch.linspace(1, len(evaluations), steps=len(evaluations), device=self.device)
sorted_probabilities = 1 - ranks / tot
for i in range(end_range_for_parents):
best_idx = torch.distributions.categorical.Categorical(
sorted_probabilities).sample()
best_population[i] = population_sorted[best_idx]
# avoid repetitions
sorted_probabilities[best_idx] = 0
if self.elitism:
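                # elitism: reserve the last parent slot for the best individual found so far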
best_population[end_range_for_parents - 1] = self.best_individual
else:
best_population = self.population
return best_population | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\treturn percentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask",
"def aux_best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\tpercentile_mask = build_mask(percentile_mask, used_features)\n\tmask_save_path = os.path.join('feature_selection_masks', 'assin2_percentile_based_mask.txt')\n\tdebug_data(percentile_mask, mask_save_path)\n\n\treturn percentile_train_features_selected, percentile_test_features_selected, percentile_selector",
"def evaluation(self):\n rows_list = []\n for name in self.single_classifier_best.keys():\n row = {}\n row['algorithm'] = name \n row[self.scoring_metric] = self.single_classifier_best[name].best_score_\n rows_list.append(row)\n \n scoring_df = pd.DataFrame(rows_list)\n scoring_sorted = scoring_df.sort_values(self.scoring_metric, ascending=False)\n print()\n print('*'*shutil.get_terminal_size().columns)\n print(scoring_sorted)\n print('*'*shutil.get_terminal_size().columns)\n self.evaluation_scores = scoring_sorted",
"def score(priority_list, totalItemCount, itemUsageDict, threshold):\n scored = list()\n for item in priority_list:\n scored.append((item, itemUsageDict[item][\"winRatio\"] * (itemUsageDict[item][\"totalCount\"]/ totalItemCount) * threshold))\n return scored",
"def _get_best_ratios(self, context, sensitivities, target_ratio):\n _logger.info('_get_best_ratios for pruning ratie: {}'.format(\n target_ratio))\n\n def func(params, x):\n a, b, c, d = params\n return a * x * x * x + b * x * x + c * x + d\n\n def error(params, x, y):\n return func(params, x) - y\n\n def slove_coefficient(x, y):\n init_coefficient = [10, 10, 10, 10]\n coefficient, loss = leastsq(error, init_coefficient, args=(x, y))\n return coefficient\n\n min_loss = 0.\n max_loss = 0.\n\n # step 1: fit curve by sensitivities\n coefficients = {}\n for param in sensitivities:\n losses = np.array([0] * 5 + sensitivities[param]['loss'])\n precents = np.array([0] * 5 + sensitivities[param][\n 'pruned_percent'])\n coefficients[param] = slove_coefficient(precents, losses)\n loss = np.max(losses)\n max_loss = np.max([max_loss, loss])\n\n # step 2: Find a group of ratios by binary searching.\n flops = context.eval_graph.flops()\n model_size = context.eval_graph.numel_params()\n ratios = []\n while min_loss < max_loss:\n loss = (max_loss + min_loss) / 2\n _logger.info(\n '-----------Try pruned ratios while acc loss={:.4f}-----------'.\n format(loss))\n ratios = []\n # step 2.1: Get ratios according to current loss\n for param in sensitivities:\n coefficient = copy.deepcopy(coefficients[param])\n coefficient[-1] = coefficient[-1] - loss\n roots = np.roots(coefficient)\n for root in roots:\n min_root = 1\n if np.isreal(root) and root > 0 and root < 1:\n selected_root = min(root.real, min_root)\n ratios.append(selected_root)\n _logger.info('Pruned ratios={}'.format(\n [round(ratio, 3) for ratio in ratios]))\n # step 2.2: Pruning by current ratios\n param_shape_backup = {}\n self._prune_parameters(\n context.eval_graph,\n context.scope,\n sensitivities.keys(),\n ratios,\n context.place,\n only_graph=True,\n param_shape_backup=param_shape_backup)\n\n pruned_flops = 1 - (float(context.eval_graph.flops()) / flops)\n pruned_size = 1 - (float(context.eval_graph.numel_params()) /\n model_size)\n _logger.info('Pruned flops: {:.4f}'.format(pruned_flops))\n _logger.info('Pruned model size: {:.4f}'.format(pruned_size))\n for param in param_shape_backup.keys():\n context.eval_graph.var(param).set_shape(param_shape_backup[\n param])\n\n # step 2.3: Check whether current ratios is enough\n if abs(pruned_flops - target_ratio) < 0.015:\n break\n if pruned_flops > target_ratio:\n max_loss = loss\n else:\n min_loss = loss\n return sensitivities.keys(), ratios",
"def best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\titerative_based_score = 0\n\t# given that all pairs use the same amount of features, the position 0 was arbitrarily selected to compute the number of features being used\n\tmin_number_features = int(0.15*len(train_features[0]))\n\tmax_number_features = int(0.85*len(train_features[0]))\n\n\t# min_number_features = 19\n\t# max_number_features = 20\n\n\titerative_based_selector = None\n\titerative_based_train_features_selected = None\n\titerative_based_test_features_selected = None\n\n\tfor i in range(min_number_features, max_number_features):\n\t\tprint(i)\n\t\ttemp_iterative_based_selector = RFE(RandomForestRegressor(n_estimators=100), n_features_to_select=i)\n\t\ttemp_iterative_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_iterative_based_train_features_selected = temp_iterative_based_selector.transform(train_features)\n\t\ttemp_iterative_based_test_features_selected = temp_iterative_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_iterative_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_iterative_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Iterative Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > iterative_based_score:\n\t\t\titerative_based_score = temp_score\n\t\t\titerative_based_selector = temp_iterative_based_selector\n\t\t\titerative_based_train_features_selected = temp_iterative_based_train_features_selected\n\t\t\titerative_based_test_features_selected = temp_iterative_based_test_features_selected\n\n\titerative_based_mask = iterative_based_selector.get_support()\n\tprint(\"This is the iterative based mask: \")\n\tprint(iterative_based_mask)\n\n\treturn iterative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask",
"def selection(population, rate):\n\n\tmating_pool = []\n\tscores = []\n\n\tdef fitness(timetable_info):\n\t\t\"\"\" Calculates the fitness of an individual \"\"\"\n\n\t\treturn calc_score(timetable_info[0][0],\n\t\t\t\t\t\t timetable_info[0][1],\n\t\t\t\t\t\t timetable_info[0][2])\n\n\t# choose the fittest individuals\n\tpopulation = sorted(population, key=fitness, reverse=True)\n\n\t# set max and range\n\trate = int(rate * 100)\n\n\tfor i in range(rate):\n\n\t\t# fittest schedules have highest probabilities\n\t\tscores.append(calc_score(population[i][0][0], population[i][0][1], population[i][0][2]))\n\t\tmating_pool.append(population[i])\n\n\treturn mating_pool",
"def predictRating(toPredict, candidateList):\n\n ratingRelevantCandidates = []\n\n #Remove candidates with no rating specified\n for candidate in candidateList:\n currentCandidate = candidate[1]\n\n if float(currentCandidate['vote_avg']) > 0:\n ratingRelevantCandidates.append((float(currentCandidate['vote_avg']), candidate))\n\n #print(\"ratings::::::::\",currentCandidate['vote_avg'])\n\n #Remove outlier candidates based on rating\n ratingMean = np.mean([x[0] for x in ratingRelevantCandidates])\n print(\"ratingMean\", ratingMean)\n ratingSD = np.std([x[0] for x in ratingRelevantCandidates])\n print(\"ratingSD\", ratingSD)\n\n finalRatings = [x for x in ratingRelevantCandidates if (float(x[0]) < ratingMean + ratingSD)]#1.5 *\n finalRatings = [x for x in finalRatings if (float(x[0]) > ratingMean - ratingSD)]#.75 *\n\n finalRatingCandidatesWithWeight = []\n\n #Weight each candidate based on vote count, direct and actor popularity and matching score from part 1\n for candidate in finalRatings:\n directorPoints = compareDirectorPoints(toPredict['director'], candidate[1][1]['director'])\n actorPoints = compareActorPoints(toPredict['cast'], candidate[1][1]['cast'])\n voteCountPoints = int(candidate[1][1]['vote_count'])\n matchPoints = candidate[1][0] / np.max([float(x[1][0]) for x in finalRatings]) * 100\n candidateWeight = PREDICTION_MATCHPOINTS_WEIGHT * matchPoints \\\n + PREDICTION_ACTOR_WEIGHT * actorPoints \\\n + PREDICTION_DIRECTOR_WEIGHT * directorPoints \\\n + PREDICTION_VOTECOUNT_WEIGHT * voteCountPoints\n\n finalRatingCandidatesWithWeight.append((candidateWeight, candidate[0]))\n\n #Calculate the prediction\n sumRatingCandidateWeights = np.sum([float(x[0]) for x in finalRatingCandidatesWithWeight])\n sumRatingTimesCandidateWeight = np.sum([float(x[0]) * float(x[1]) for x in finalRatingCandidatesWithWeight])\n\n ratingPrediction = float(sumRatingTimesCandidateWeight / sumRatingCandidateWeights)\n\n return ratingPrediction",
"def best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tmodel_based_score = 0\n\tscaling_factors = [\"0.25*mean\", \"0.5*mean\", \"median\", \"1.25*mean\", \"1.5*mean\"]\n\t# scaling_factors = [\"0.5*mean\", \"median\"]\n\tmodel_based_selector = None\n\tmodel_based_train_features_selected = None\n\tmodel_based_test_features_selected = None\n\n\tfor factor in scaling_factors:\n\t\tprint(factor)\n\t\ttemp_model_based_selector = SelectFromModel(RandomForestRegressor(n_estimators=100), threshold=factor)\n\t\ttemp_model_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_model_based_train_features_selected = temp_model_based_selector.transform(train_features)\n\t\ttemp_model_based_test_features_selected = temp_model_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_model_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_model_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Model Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > model_based_score:\n\t\t\tmodel_based_score = temp_score\n\t\t\tmodel_based_selector = temp_model_based_selector\n\t\t\tmodel_based_train_features_selected = temp_model_based_train_features_selected\n\t\t\tmodel_based_test_features_selected = temp_model_based_test_features_selected\n\n\tmodel_based_mask = model_based_selector.get_support()\n\tprint(\"This is the model based mask: \")\n\tprint(model_based_mask)\n\n\treturn model_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask",
"def evaluator(evaluate):\r\n @functools.wraps(evaluate)\r\n def ecspy_evaluator(candidates, args):\r\n fitness = []\r\n for candidate in candidates:\r\n fitness.append(evaluate(candidate, args))\r\n return fitness\r\n ecspy_evaluator.single_evaluation = evaluate\r\n return ecspy_evaluator",
"def bestOf(predictorList):\n assert predictorList != [], \"Predictor list is empty!\"\n bestList = []\n bestRate = -1.0\n for p in predictorList:\n if p.successRate > bestRate:\n bestList = [p]\n bestRate = p.successRate\n elif p.successRate == bestRate:\n bestList.append(p)\n return bestList",
"def personal_best(scores):\n return max(scores)",
"def _get_best(self, populations, func):\n best = None\n for population in populations:\n for item in population:\n if not best:\n best = item\n elif func.fit(*item) > func.fit(*best):\n best = item\n return best",
"def multi_score(self, r, s, f):\n return [None] if r is None else [0.0] if s is None else [f(x, y)\n for y in self.ensure_list(s)\n for x in self.ensure_list(r)]",
"def fitness_proportional(population, scores, next_gen_number, random_seed=42):\n\n np.random.seed(random_seed)\n\n score_array = np.array(scores)\n score_array = -score_array + abs(np.max(score_array))\n\n probabilities = score_array / np.sum(score_array)\n\n indices = list(range(len(population)))\n indices_array = np.array(indices)\n\n selected_indices = np.random.choice(\n indices_array, size=next_gen_number, p=probabilities\n )\n\n selected = []\n for indx in selected_indices:\n selected.append(population[indx])\n\n return selected",
"def evaluate(clf, dataset, feature_list, features, labels, num_iter, params):\n\n features_train, features_test, labels_train, labels_test = \\\n train_test_split(features, labels, test_size=0.3, random_state=42)\n\n\n\n precision_values = []\n recall_values = []\n accuracy_values = []\n print clf\n for i in xrange(0, num_iter):\n #print params\n clf = GridSearchCV(clf, params)\n clf.fit(features_train, labels_train)\n print '*****************************'\n print clf.best_estimator_\n print clf.best_params_\n\n clf = clf.best_estimator_\n #test_classifier(clf, dataset, feature_list)\n pred = clf.predict(features_test)\n precision_values.append(precision_score(labels_test, pred))\n recall_values.append(recall_score(labels_test, pred))\n accuracy_values.append(accuracy_score(labels_test, pred))\n print 'Recall score: ', mean(recall_values)\n print 'Precision score: ', mean(precision_values)\n print 'Accuracy score: ' , mean(accuracy_values)",
"def evaluate(train, train_labels, test, test_labels):\n \n # Use the same model for each training set for now\n model = RandomForestClassifier(n_estimators = 100, \n random_state = 50, n_jobs = -1)\n \n train = train.replace({np.inf: np.nan, -np.inf: np.nan})\n test = test.replace({np.inf: np.nan, -np.inf:np.nan})\n \n feature_names = list(train.columns)\n \n # Impute the missing values\n imputer = Imputer(strategy = 'median', axis = 1)\n train = imputer.fit_transform(train)\n test = imputer.transform(test)\n \n cv_score = 1 * cross_val_score(model, train, train_labels, \n scoring = \"f1\", \n cv = 5)\n \n # Fit on the training data and make predictions\n model.fit(train, train_labels)\n preds = model.predict(test)\n \n # Calculate the performance\n f1 = f1_score(test_labels, preds)\n print('5-fold CV F1: {:.2f} with std: {:.2f}'.format(cv_score.mean(),cv_score.std()))\n print('Test F1: {:.2f}.'.format(f1))\n \n feature_importances = pd.DataFrame({'feature': feature_names, \n 'importance': model.feature_importances_})\n \n return preds, feature_importances",
"def tune(self, load_fn, split_fn, train_fn, eval_fn, ratio, **options):\n print('Tuning', train_fn.__name__)\n self._print_params()\n\n ratings = load_fn(self.input_path)\n training, testing = split_fn(ratings, ratio)\n\n results = []\n for param_values in itertools.product(*self.params.values()):\n # equivalent to nested for-loops for all parameter ranges\n selected_params = dict(zip(self.params.keys(), param_values))\n print_dict(selected_params, prefix='Running with: ')\n model = train_fn(training, **{**selected_params, **options})\n rmse = eval_fn(model, testing)\n print('RMSE:', rmse)\n results.append((selected_params, rmse))\n\n best_params, best_rmse = min(results, key=lambda t: t[1])\n print('RMSE:', best_rmse)\n print('Results:')\n for param, val in best_params.items():\n print(' - {param}: {val}'.format(param=param, val=val))",
"def personal_best(scores: list) -> int:\n return max(scores)",
"def feature_selection(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\t# percentile selector\n\tpercentile_selector, percentile_score, percentile_train_features_selected, percentile_test_features_selected, percentile_mask = best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# model based selector\n\tmodel_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask = best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\t# iterative based selector\n\titerative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask = best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor)\n\n\tall_scores = []\n\n\tregressor.fit(train_features, train_similarity_target)\n\tprint(\"The score on all features: %.3f\" % regressor.score(test_features, test_similarity_target))\n\tall_scores.append(regressor.score(test_features, test_similarity_target))\n\n\t# show results for the percentile selector\n\tall_scores.append(percentile_score)\n\n\t# show results for the model based selector\n\tall_scores.append(model_based_score)\n\n\t# show results for the iterative based selector\n\tall_scores.append(iterative_based_score)\n\n\tmax_value_position = all_scores.index(max(all_scores))\n\n\tif max_value_position == 0:\n\t\tprint(\"Returning all features!\\n\")\n\t\treturn train_features, test_features\n\telif max_value_position == 1:\n\t\tpercentile_mask = build_mask(percentile_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'percentile_mask.txt')\n\t\tdebug_data(percentile_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the percentile selector!\\n\")\n\t\treturn percentile_selector, percentile_train_features_selected, percentile_test_features_selected\n\telif max_value_position == 2:\n\t\tmodel_based_mask = build_mask(model_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'model_based_mask.txt')\n\t\tdebug_data(model_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the model based selector!\\n\")\n\t\treturn model_based_selector, model_based_train_features_selected, model_based_test_features_selected\n\telse:\n\t\titerative_based_mask = build_mask(iterative_based_mask, used_features)\n\t\tmask_save_path = os.path.join('feature_selection_masks', 'iterative_based_mask.txt')\n\t\tdebug_data(iterative_based_mask, mask_save_path)\n\n\t\tprint(\"Returning features selected with the iterative based selector!\\n\")\n\t\treturn iterative_based_selector, iterative_based_train_features_selected, iterative_based_test_features_selected",
"def choose_bestnext(self, round):\n board_percentage = []\n \n for i in self.possible_coords:\n iSq = round.getSq(i[0], i[1])\n \n if round.pr_hook(iSq) == ' X ':\n sq_percentage = []\n surroundings = iSq.point_neighbors()\n \n for j in surroundings:\n jSq = round.getSq(j[0], j[1])\n\n if round.as_int(jSq) != None:\n count_X = 0\n count_F = 0\n check = jSq.point_neighbors()\n\n for k in check:\n kSq = round.getSq(k[0], k[1])\n if round.pr_hook(kSq) == ' X ':\n count_X += 1\n elif round.pr_hook(kSq) == ' f ':\n count_F += 1 \n if count_X != 0:\n sq_percentage.append((jSq.mine_neighbors() - count_F)/ count_X)\n\n avg_percent = 0\n if len(sq_percentage) == 0:\n avg_percent = 0.8\n elif sq_percentage.count(1) != 0:\n avg_percent = 1\n round.flagSq(i[0], i[1])\n else:\n sum_so_far = 0\n for p in sq_percentage:\n sum_so_far += p\n avg_percent = sum_so_far / len(sq_percentage)\n \n board_percentage.append(avg_percent)\n\n else:\n board_percentage.append(100)\n\n sorted_percentages = board_percentage.copy()\n sorted_percentages.sort()\n\n best_choice = board_percentage.index(sorted_percentages[0])\n\n return self.possible_coords[best_choice]",
"def score(self,*val):\n if len(val):\n self._score = val[0]\n self.evaluated = 1\n else: self.evaluate()\n return self._score",
"def compute(self, idx, input_scores, input_names):\n title = self._legends[idx] if self._legends is not None else None\n headers = [\"\" or title, \"Dev. %s\" % input_names[0]]\n if self._eval and input_scores[1] is not None:\n headers.append(\"eval % s\" % input_names[1])\n if self._criterion == \"rr\":\n rr = bob.measure.recognition_rate(input_scores[0], self._thres[idx])\n dev_rr = \"%.1f%%\" % (100 * rr)\n raws = [[\"RR\", dev_rr]]\n if self._eval and input_scores[1] is not None:\n rr = bob.measure.recognition_rate(\n input_scores[1], self._thres[idx]\n )\n eval_rr = \"%.1f%%\" % (100 * rr)\n raws[0].append(eval_rr)\n click.echo(\n tabulate(raws, headers, self._tablefmt), file=self.log_file\n )\n elif self._criterion == \"mindcf\":\n if \"cost\" in self._ctx.meta:\n cost = self._ctx.meta.get(\"cost\", 0.99)\n threshold = (\n bob.measure.min_weighted_error_rate_threshold(\n input_scores[0][0], input_scores[0][1], cost\n )\n if self._thres is None\n else self._thres[idx]\n )\n if self._thres is None:\n click.echo(\n \"[minDCF - Cost:%f] Threshold on Development set `%s`: %e\"\n % (cost, input_names[0], threshold),\n file=self.log_file,\n )\n else:\n click.echo(\n \"[minDCF] User defined Threshold: %e\" % threshold,\n file=self.log_file,\n )\n # apply threshold to development set\n far, frr = bob.measure.farfrr(\n input_scores[0][0], input_scores[0][1], threshold\n )\n dev_far_str = \"%.1f%%\" % (100 * far)\n dev_frr_str = \"%.1f%%\" % (100 * frr)\n dev_mindcf_str = \"%.1f%%\" % (\n (cost * far + (1 - cost) * frr) * 100.0\n )\n raws = [\n [\"FAR\", dev_far_str],\n [\"FRR\", dev_frr_str],\n [\"minDCF\", dev_mindcf_str],\n ]\n if self._eval and input_scores[1] is not None:\n # apply threshold to development set\n far, frr = bob.measure.farfrr(\n input_scores[1][0], input_scores[1][1], threshold\n )\n eval_far_str = \"%.1f%%\" % (100 * far)\n eval_frr_str = \"%.1f%%\" % (100 * frr)\n eval_mindcf_str = \"%.1f%%\" % (\n (cost * far + (1 - cost) * frr) * 100.0\n )\n raws[0].append(eval_far_str)\n raws[1].append(eval_frr_str)\n raws[2].append(eval_mindcf_str)\n click.echo(\n tabulate(raws, headers, self._tablefmt), file=self.log_file\n )\n elif self._criterion == \"cllr\":\n cllr = bob.measure.calibration.cllr(\n input_scores[0][0], input_scores[0][1]\n )\n min_cllr = bob.measure.calibration.min_cllr(\n input_scores[0][0], input_scores[0][1]\n )\n dev_cllr_str = \"%.1f%%\" % cllr\n dev_min_cllr_str = \"%.1f%%\" % min_cllr\n raws = [[\"Cllr\", dev_cllr_str], [\"minCllr\", dev_min_cllr_str]]\n if self._eval and input_scores[1] is not None:\n cllr = bob.measure.calibration.cllr(\n input_scores[1][0], input_scores[1][1]\n )\n min_cllr = bob.measure.calibration.min_cllr(\n input_scores[1][0], input_scores[1][1]\n )\n eval_cllr_str = \"%.1f%%\" % cllr\n eval_min_cllr_str = \"%.1f%%\" % min_cllr\n raws[0].append(eval_cllr_str)\n raws[1].append(eval_min_cllr_str)\n click.echo(\n tabulate(raws, headers, self._tablefmt), file=self.log_file\n )\n else:\n title = self._legends[idx] if self._legends is not None else None\n all_metrics = self._get_all_metrics(idx, input_scores, input_names)\n headers = [\" \" or title, \"Development\"]\n rows = [\n [self.names[0], all_metrics[0][0]],\n [self.names[1], all_metrics[0][1]],\n [self.names[2], all_metrics[0][2]],\n [self.names[3], all_metrics[0][3]],\n [self.names[4], all_metrics[0][4]],\n [self.names[5], all_metrics[0][5]],\n ]\n\n if self._eval:\n # computes statistics for the eval set based on the threshold a\n # priori\n headers.append(\"Evaluation\")\n 
rows[0].append(all_metrics[1][0])\n rows[1].append(all_metrics[1][1])\n rows[2].append(all_metrics[1][2])\n rows[3].append(all_metrics[1][3])\n rows[4].append(all_metrics[1][4])\n rows[5].append(all_metrics[1][5])\n\n click.echo(\n tabulate(rows, headers, self._tablefmt), file=self.log_file\n )",
"def fitness_proportionate_selection(random, population, args):\r\n num_selected = args.setdefault('num_selected', 1)\r\n len_pop = len(population)\r\n psum = [i for i in range(len_pop)]\r\n pop_max_fit = (max(population)).fitness\r\n pop_min_fit = (min(population)).fitness\r\n \r\n # If we're actually doing minimimization,\r\n # fitness proportionate selection is not defined.\r\n if pop_max_fit < pop_min_fit:\r\n raise ValueError('Fitness proportionate selection is not valid for minimization.')\r\n \r\n # Set up the roulette wheel\r\n if pop_max_fit == pop_min_fit:\r\n psum = [(index + 1) / float(len_pop) for index in range(len_pop)]\r\n elif (pop_max_fit > 0 and pop_min_fit >= 0) or (pop_max_fit <= 0 and pop_min_fit < 0):\r\n population.sort(reverse=True)\r\n psum[0] = population[0].fitness\r\n for i in range(1, len_pop):\r\n psum[i] = population[i].fitness + psum[i-1]\r\n for i in range(len_pop):\r\n psum[i] /= float(psum[len_pop-1])\r\n \r\n # Select the individuals\r\n selected = []\r\n for _ in range(num_selected):\r\n cutoff = random.random()\r\n lower = 0\r\n upper = len_pop - 1\r\n while(upper >= lower):\r\n mid = (lower + upper) // 2\r\n if psum[mid] > cutoff: \r\n upper = mid - 1\r\n else: \r\n lower = mid + 1\r\n lower = max(0, min(len_pop-1, lower))\r\n selected.append(population[lower])\r\n return selected",
"def evaluator(self, candidates, args):\r\n fitness = []\r\n if self._use_ants:\r\n for candidate in candidates:\r\n total = 0\r\n for c in candidate:\r\n total += self.weights[c.element[0]][c.element[1]]\r\n last = (candidate[-1].element[1], candidate[0].element[0])\r\n total += self.weights[last[0]][last[1]]\r\n fitness.append(1 / total)\r\n else:\r\n for candidate in candidates:\r\n total = 0\r\n for src, dst in zip(candidate, candidate[1:] + [candidate[0]]):\r\n total += self.weights[src][dst]\r\n fitness.append(1 / total)\r\n return fitness",
"def _resolve_objective_function(self) -> Scorer:\n\n objective = self.cfg_.objective\n if objective == 'accuracy':\n return make_scorer(ex.accuracy_score_round_inputs)\n if objective.startswith('precision'):\n if objective.endswith('macro'):\n return make_scorer(ex.precision_score_round_inputs,\n average='macro')\n elif objective.endswith('weighted'):\n return make_scorer(ex.precision_score_round_inputs,\n average='weighted')\n if objective.startswith('f1'):\n if objective.endswith('macro'):\n return make_scorer(ex.f1_score_round_inputs,\n average='macro')\n elif objective.endswith('weighted'):\n return make_scorer(ex.f1_score_round_inputs,\n average='weighted')\n elif objective.endswith('least_frequent'):\n return make_scorer(ex.f1_score_least_frequent_round_inputs)\n if objective == 'pearson_r':\n return make_scorer(pearson)\n if objective == 'spearman':\n return make_scorer(spearman)\n if objective == 'kendall_tau':\n return make_scorer(kendall_tau)\n if objective.startswith('uwk'):\n if objective == 'uwk':\n return make_scorer(ex.kappa_round_inputs)\n return make_scorer(ex.kappa_round_inputs,\n allow_off_by_one=True)\n if objective.startswith('lwk'):\n if objective == 'lwk':\n return make_scorer(ex.kappa_round_inputs,\n weights='linear')\n return make_scorer(ex.kappa_round_inputs,\n weights='linear',\n allow_off_by_one=True)\n if objective.startswith('qwk'):\n if objective == 'qwk':\n return make_scorer(ex.kappa_round_inputs,\n weights='quadratic')\n return make_scorer(ex.kappa_round_inputs,\n weights='quadratic',\n allow_off_by_one=True)\n return objective",
"def fitness(individual, divider, target_sum, target_multiply):\n\n sum_val = reduce(operator.add, individual[:divider], 0)\n multiply_val = reduce(operator.mul, individual[divider:], 1)\n \n sum_error = abs(target_sum - sum_val)\n sum_error = sum_error / target_sum\n\n multiply_error = abs(target_multiply - multiply_val)\n multiply_error = multiply_error / target_multiply\n\n #print(multiply_error, sum_error)\n #print(sum_error, multiply_error)\n return (multiply_error + sum_error)/2 * 100",
"def testDriver():\n exam1=90\n exam2=85\n assignmentScores = [50, 60, 70, 80, ]\n computeGrades(exam1, exam2, assignmentScores)",
"def get_score(submissions, problem, due_date, options):\r\n best_score = None\r\n best_percent = 0\r\n best_ts = None\r\n bonus = None\r\n partners = set()\r\n for i, s in enumerate(submissions):\r\n timestamp = s['date']\r\n submission_num = s['submission_num']\r\n detail = s['detail']\r\n if 'correct_map' in detail and not options.is_clicker:\r\n try:\r\n autograder_out = detail['correct_map']['i4x-BerkeleyX-CS188x-17-problem-' + problem + '_2_1']['msg']\r\n except Exception, e:\r\n logging.error(\"Error getting output: %s. Submission:\\n%s\" % (e, s))\r\n continue\r\n\r\n autograder_out = unescape(autograder_out)\r\n if options.output:\r\n print(\"Autograder output for submission %s on %s:\\n%s\" % (submission_num, timestamp, autograder_out))\r\n\r\n get_bonus(autograder_out)\r\n extra = bonus_pattern.findall(autograder_out)\r\n if extra:\r\n bonus = extra[0]\r\n num, denom = s['score'].split(' / ')\r\n percent = float(num) / float(denom) if 'None' not in num else 0\r\n if percent > best_percent:\r\n best_score = s['score']\r\n best_percent = percent\r\n best_ts = timestamp\r\n if 'student_answers' in detail and not options.is_clicker:\r\n partner = detail['student_answers'].get('i4x-BerkeleyX-CS188x-17-problem-' + problem + '_3_1', '')\r\n if partner:\r\n partners.add(partner)\r\n logging.info(\"Partners: %s\" % (partners))\r\n\r\n if best_ts is not None and due_date is not None:\r\n submitted = dt.datetime.strptime(best_ts.split('+')[0], '%Y-%m-%d %H:%M:%S')\r\n slip_days_used = str((submitted - due_date).days + 1)\r\n else:\r\n slip_days_used = 'N/A'\r\n return best_score, bonus, partners, slip_days_used",
"def findBestScore():\n resultList = []\n BestScore = 0\n # iterate through different max_depths from 1 to 19\n for max_depth in range(1,20):\n dtree = tree.DecisionTreeClassifier(max_depth=max_depth)\n trainng_score = []\n testing_score = []\n # run 10 different cross-validation\n for index in range(10):\n # split into cross-validation sets.\n cv_data_train, cv_data_test, cv_target_train, cv_target_test = \\\n cross_validation.train_test_split(X_train, y_train, test_size=0.1)\n # fit the model using the cross-validation data\n # and tune parameter, such as max_depth here\n dtree = dtree.fit(cv_data_train, cv_target_train)\n dtree.feature_importances_\n trainng_score += [dtree.score(cv_data_train,cv_target_train)]\n testing_score += [dtree.score(cv_data_test,cv_target_test)]\n\n # Compute the average score for both traning and testing data\n trainng_avgScore = 1.0 * sum(trainng_score)/len(trainng_score)\n testing_avgScore = 1.0 * sum(testing_score)/len(testing_score)\n\n # find the best score\n if testing_avgScore > BestScore:\n BestScore = testing_avgScore\n best_depth = max_depth\n resultList += [[best_depth, trainng_avgScore, testing_avgScore]]\n print ('The best average score and the corresponding max_depth is: ')\n return BestScore, best_depth"
]
| [
"0.626115",
"0.6206762",
"0.6100408",
"0.6076012",
"0.6060828",
"0.60530967",
"0.6026658",
"0.59901446",
"0.5987794",
"0.5902537",
"0.5900069",
"0.5790178",
"0.5772556",
"0.5743284",
"0.5731502",
"0.5718924",
"0.5696296",
"0.56870556",
"0.5678375",
"0.56745076",
"0.56602734",
"0.5653601",
"0.56291986",
"0.56174713",
"0.5589805",
"0.55843943",
"0.5582366",
"0.5582315",
"0.5577215",
"0.5553266"
]
| 0.7054224 | 0 |
Given the best population, applies crossover with the given probability, method, and number of parents. | def _crossover(self, best_population, crossover, n_parents=2, method="uniform_swap"):
if crossover:
# randomly select parents
parents_indexes = torch.randint(0, len(best_population), (self.population_size, n_parents),
device=self.device)
new_population = torch.zeros(self.population.shape, device=self.device)
i = 0
for p_idx in parents_indexes:
new_population[i] = self._produce_child(best_population[p_idx], method=method)
i += 1
else:
# randomly repeat best individuals
new_pop_indexes = torch.randint(0, len(best_population), (self.population_size,), device=self.device)
new_population = best_population[new_pop_indexes]
return new_population | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def selection(self,parents,popSize):\n for i in range(popSize):\n idx1 = np.random.randint(0,popSize)\n idx2 = np.random.randint(0,popSize)\n if parents.individuals[idx1].violationSum < parents.individuals[idx2].violationSum:\n self.individuals[i] = parents.individuals[idx1]\n elif parents.individuals[idx1].violationSum > parents.individuals[idx2].violationSum:\n self.individuals[i] = parents.individuals[idx2]\n elif parents.individuals[idx1].objectiveFunction[0] < parents.individuals[idx2].objectiveFunction[0]:\n self.individuals[i] = parents.individuals[idx1]\n else:\n self.individuals[i] = parents.individuals[idx2]\n \"\"\"\n print(\"Offsprings(self) Impresso dentro de selection (FIM).\")\n self.printPopulation(popSize)\n print(\"Parents Impresso dentro de selection (FIM).\")\n parents.printPopulation(popSize)\n \"\"\"",
"def heuristic_crossover(random, candidates, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n \r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n \r\n # Since we don't have fitness information in the candidates, we need \r\n # to make a dictionary containing the candidate and its corresponding \r\n # individual in the population.\r\n population = list(args['_ec'].population)\r\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\r\n \r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for mom, dad in zip(moms, dads):\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n negpos = 1 if mom_is_better else -1\r\n val = d if mom_is_better else m\r\n bro[i] = val + random.random() * negpos * (m - d)\r\n sis[i] = val + random.random() * negpos * (m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def heuristic_crossover(random, candidates, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n \n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n \n # Since we don't have fitness information in the candidates, we need \n # to make a dictionary containing the candidate and its corresponding \n # individual in the population.\n population = list(args['_ec'].population)\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\n \n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for mom, dad in zip(moms, dads):\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\n for i, (m, d) in enumerate(zip(mom, dad)):\n negpos = 1 if mom_is_better else -1\n val = d if mom_is_better else m\n bro[i] = val + random.random() * negpos * (m - d)\n sis[i] = val + random.random() * negpos * (m - d)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def run(self):\n population_p = self.create_population()\n population_p = self.sort_population(population_p)\n best_x = population_p[0]\n for k in range(self.iteration):\n population_r = []\n # random.shuffle(population_p)\n for i in range(0, self.population_length, 2):\n mother = 0\n father = 1\n children = [self.random_chromosome(), self.random_chromosome()]\n while (mother == father) or (children[0] in population_p) or (children[1] in\n population_p):\n mother = random.randint(0, self.population_length - 1)\n father = random.randint(0, self.population_length - 1)\n children = self.cross(population_p[mother], population_p[father])\n children[0] = self.mutate(children[0])\n children[1] = self.mutate(children[1])\n\n population_r.append(children[0])\n population_r.append(children[1])\n\n population_p = self.new_population(population_p, population_r)\n if self.fitness(population_p[0]) < self.fitness(best_x):\n best_x = population_p[0]\n\n # print(population_p)\n return best_x",
"def Genetic_Algorithm(Population, Lambda, l, data):\n if Population.Population_size == 1: # Used in case of different population sizes\n picked_individuals = np.random.permutation(Population.Population_size)[:4].tolist()*4\n else:\n # Selecting 4 different individuals from the population\n picked_individuals = np.random.permutation(Population.Population_size)[:4].tolist()\n\n # Initializing child of the selected individuals\n child_assets = []\n child_proportions = []\n child_weights = np.zeros(N)\n l = 0\n\n #Pool_1\n pair_1_assets = [Population.population_assets[picked_individuals[0]], Population.population_assets[picked_individuals[1]]]\n pair_1_fitness = [Population.fitness[picked_individuals[0]], Population.fitness[picked_individuals[1]]]\n pair_1_proportions = [Population.population_proportions[picked_individuals[0]], Population.population_proportions[picked_individuals[1]]]\n\n # Pool_2\n pair_2_assets = [Population.population_assets[picked_individuals[2]], Population.population_assets[picked_individuals[3]]]\n pair_2_fitness = [Population.fitness[picked_individuals[2]], Population.fitness[picked_individuals[3]]]\n pair_2_proportions = [Population.population_proportions[picked_individuals[2]], Population.population_proportions[picked_individuals[3]]]\n\n # Selecting parents for the uniform crossover\n parent_1_assets = pair_1_assets[pair_1_fitness.index(min(pair_1_fitness))]\n parent_1_proportions = pair_1_proportions[pair_1_fitness.index(min(pair_1_fitness))]\n\n parent_2_assets = pair_2_assets[pair_2_fitness.index(min(pair_2_fitness))]\n parent_2_proportions = pair_2_proportions[pair_2_fitness.index(min(pair_2_fitness))]\n\n # Looking for same assets in parents and inputting them into child\n common_assets = []\n for i in parent_1_assets:\n if i in parent_2_assets:\n common_assets.append(i)\n child_assets += common_assets\n\n # Finding out what are the indexes of those assets in parents\n indexes_1 = []\n indexes_2 = []\n for i in common_assets:\n indexes_1.append(parent_1_assets.index(i))\n indexes_2.append(parent_2_assets.index(i))\n\n # Adding the proportions of same assets to child with 50% chance\n for m, h in zip(indexes_1, indexes_2):\n rand_1 = np.random.rand()\n if rand_1 > 0.5:\n child_proportions.append(parent_1_proportions[m])\n else:\n child_proportions.append(parent_2_proportions[h])\n\n # Creating new lists with assets that each parent don't have in common\n temp_parent_1_assets = []\n temp_parent_2_assets = []\n for m, h in zip(parent_1_assets, parent_2_assets):\n temp_parent_1_assets.append(m)\n temp_parent_2_assets.append(h)\n\n for i in common_assets:\n if i in temp_parent_1_assets:\n temp_parent_1_assets.remove(i)\n\n for i in common_assets:\n if i in temp_parent_2_assets:\n temp_parent_2_assets.remove(i)\n\n # Adding other assets and their corresponding proportions to the child\n for m, h in zip(temp_parent_1_assets, temp_parent_2_assets):\n rand_2 = np.random.rand()\n if rand_2 > 0.5:\n child_assets.append(m)\n child_proportions.append(parent_1_proportions[parent_1_assets.index(m)])\n else:\n child_assets.append(h)\n child_proportions.append(parent_2_proportions[parent_2_assets.index(h)])\n\n # Creating A*\n # A* is a set of assets that are in the parents, but are not in the child (together with their associated values)\n parent_minus_child_assets = []\n parent_minus_child_proportions = []\n for m, h in zip(parent_1_assets, parent_2_assets):\n if m not in child_assets:\n parent_minus_child_assets.append(m)\n 
parent_minus_child_proportions.append(parent_1_proportions[parent_1_assets.index(m)])\n if h not in child_assets:\n parent_minus_child_assets.append(h)\n parent_minus_child_proportions.append(parent_2_proportions[parent_2_assets.index(h)])\n\n # Assets that can be potentially added to the child in case parent_minus_child assets (A*) are empty\n other_assets = np.random.permutation(N).tolist()\n for i in other_assets:\n if i in child_assets:\n other_assets.remove(i)\n\n # Mutation\n mutated_asset = np.random.choice(child_proportions)\n rand_3 = np.random.rand()\n if rand_3 > 0.5:\n child_proportions[child_proportions.index(mutated_asset)] = (0.9 * (data.epsilon + mutated_asset) - data.epsilon) # m=1\n else:\n child_proportions[child_proportions.index(mutated_asset)] = (1.1 * (data.epsilon + mutated_asset) - data.epsilon) # m=2\n mutated_child_proportions = child_proportions\n\n # Making sure the child does not have two identical assets\n for i in child_assets:\n if child_assets.count(i) > 1:\n mutated_child_proportions.remove(mutated_child_proportions[child_assets.index(i)])\n child_assets.remove(i)\n\n # Making sure all child proportion are between 0 and 1 (if not they get excluded)\n for i in mutated_child_proportions:\n if i < 0 or i > 1:\n child_assets.remove(child_assets[mutated_child_proportions.index(i)])\n mutated_child_proportions.remove(i)\n\n # Ensure that child has exactly 10 assets and proportions\n while len(child_assets) > data.K and len(mutated_child_proportions) > data.K:\n child_assets.remove(child_assets.index(min(mutated_child_proportions)))\n mutated_child_proportions.remove(min(mutated_child_proportions))\n\n # Add assets from A* to child\n while len(child_assets) < data.K and len(mutated_child_proportions) < data.K:\n if len(parent_minus_child_assets) != 0:\n rand_4 = np.random.choice(parent_minus_child_assets)\n child_assets.append(rand_4)\n mutated_child_proportions.append(parent_minus_child_proportions[parent_minus_child_assets.index(rand_4)])\n parent_minus_child_proportions.remove(parent_minus_child_proportions[parent_minus_child_assets.index(rand_4)])\n parent_minus_child_assets.remove(rand_4)\n for i in mutated_child_proportions:\n if i < 0 or i > 1:\n child_assets.remove(child_assets[mutated_child_proportions.index(i)])\n mutated_child_proportions.remove(i)\n for i in child_assets:\n if child_assets.count(i) > 1:\n mutated_child_proportions.remove(mutated_child_proportions[child_assets.index(i)])\n child_assets.remove(i)\n\n else: #In case A* is empty\n rand_5=np.random.choice(other_assets)\n child_assets.append(rand_5)\n other_assets.remove(rand_5)\n mutated_child_proportions.append(0)\n for i in mutated_child_proportions:\n if i < 0 or i > 1:\n child_assets.remove(child_assets[mutated_child_proportions.index(i)])\n mutated_child_proportions.remove(i)\n for i in child_assets:\n if child_assets.count(i) > 1:\n mutated_child_proportions.remove(mutated_child_proportions[child_assets.index(i)])\n child_assets.remove(i)\n\n # Given large amount of iterations and randomness all child proportions could be 0 hence set 1 at random to 0.01\n # Does not influence the overall result as it ist immediately replaced by a stronger individual\n if sum(mutated_child_proportions) == 0:\n mutated_child_proportions[mutated_child_proportions.index(np.random.choice(mutated_child_proportions))]= 0.01\n\n # Evaluating child\n mutated_child_proportions = np.array(mutated_child_proportions)\n L = mutated_child_proportions.sum()\n w_temp = data.epsilon + mutated_child_proportions * 
data.F / L\n is_too_large = (w_temp > data.delta)\n while is_too_large.sum() > 0:\n is_not_too_large = np.logical_not(is_too_large)\n L = mutated_child_proportions[is_not_too_large].sum()\n F_temp = 1.0 - (data.epsilon * is_not_too_large.sum() + data.delta * is_too_large.sum())\n w_temp = data.epsilon + mutated_child_proportions * F_temp / L\n w_temp[is_too_large] = data.delta\n is_too_large = (w_temp > data.delta)\n\n # Assigning weights to child\n child_weights[:] = 0\n child_weights[child_assets] = w_temp\n mutated_child_proportions = w_temp - data.epsilon\n\n # Calculating child fitness\n obj1 = np.sum((child_weights * child_weights.reshape((child_weights.shape[0], 1))) * data.sigma)\n obj2 = np.sum(child_weights * data.mu)\n child_fitness = Lambda[l] * obj1 - (1 - Lambda[l]) * obj2\n\n # Checking whether child is valid\n Population.check_valid_solution(child_weights, mutated_child_proportions, child_assets, data)\n\n # Substituting child into the population and removing the weakest member\n index_worst_member = np.argmax(Population.fitness)\n Population.fitness[index_worst_member] = child_fitness\n Population.population_proportions[index_worst_member] = mutated_child_proportions\n Population.population_weights[index_worst_member] = child_weights\n Population.population_assets[index_worst_member] = child_assets\n Population.Obj1[index_worst_member] = obj1\n Population.Obj2[index_worst_member] = obj2\n\n # Finding the best member of the population\n index_best_member = np.argmin(Population.fitness)\n Population.best_fitness = Population.fitness[index_best_member]\n Population.best_proportions = Population.population_proportions[index_best_member]\n Population.best_weights = Population.population_weights[index_best_member]\n Population.best_assets = Population.population_assets[index_best_member]\n Population.best_covariance = Population.Obj1[index_best_member]\n Population.best_return = Population.Obj2[index_best_member]\n\n\n return Population.best_fitness, Population.best_proportions, Population.best_assets, Population.best_weights, Population.best_covariance, Population.best_return",
"def doCrossover(parentPop, parSize, rosterSize):\n\n firstPar = random.randint(0, parSize - 1)\n secondPar = random.randint(0, parSize - 1)\n while secondPar == firstPar:\n secondPar = random.randint(0, parSize - 1)\n\n crossOverPt = random.randint(1, rosterSize - 2) # random num between second and second-to-last entry\n\n # debugging code\n # for i in range(rosterSize):\n # parentPop[firstPar].roster[i] = 2*i\n # parentPop[secondPar].roster[i] = 2*i + 1\n\n # first parent mapping\n chromosome = [parentPop[firstPar].roster[i] for i in range(crossOverPt)]\n\n # second parent mapping\n remainingLoops = rosterSize - len(chromosome)\n for i in range(remainingLoops):\n chromosome.append(parentPop[secondPar].roster[crossOverPt + i])\n return chromosome",
"def cross(self):\n\n for i in range(self.pop_num): # Put in the first pop_num elements of the \"Parents and Sons\" array our entire input population.\n self.par_and_sons[i].A=self.population[i].A.copy()\n\n random.shuffle(self.population) # Shuffle population.\n\n tt=0 # The counter that is needed to implement a non-trivial crossing.\n for s in range(0,self.pop_num,2): # From 0 to pop_num with step 2. That is. here we take pop_num / 2 pairs of parents.\n self.mother.A=self.population[tt+int(self.pop_num/2)].A # Let the last pop_num / 2 individuals of our population be our mothers.\n self.father.A=self.population[tt].A # And let first pop_num / 2 individuals of our population be dads.\n \n tt=tt+1 \n ran=random.random()\n\n for n in range(self.length): # Crossover.\n if random.random()>0.5:\n self.son1.A[n] = self.father.A[n]\n self.son2.A[self.length-1-n] = self.father.A[n]\n self.son3.A[n] = self.mother.A[n]\n self.son4.A[self.length-1-n] = self.mother.A[n]\n else:\n self.son1.A[n] = self.mother.A[n]\n self.son2.A[self.length-1-n] = self.mother.A[n]\n self.son3.A[n] = self.father.A[n]\n self.son4.A[self.length-1-n] = self.father.A[n]\n\n self.par_and_sons[self.pop_num+2*s].A = self.son1.A.copy()\n self.par_and_sons[self.pop_num+2*s+1].A = self.son2.A.copy()\n self.par_and_sons[self.pop_num+2*s+2].A = self.son3.A.copy()\n self.par_and_sons[self.pop_num+2*s+3].A = self.son4.A.copy()",
"def _apply_crossover(pop, op, pb):\n for i in range(1, len(pop), 2):\n if random.random() < pb:\n pop[i - 1], pop[i] = op(pop[i - 1], pop[i])\n del pop[i - 1].fitness.values\n del pop[i].fitness.values\n return pop",
"def evolve_generation(pop, probs, best_member, p_c, p_m):\n if best_member is None:\n new_pop = []\n else:\n new_pop = [best_member]\n while len(new_pop) < len(pop):\n NN1, NN2 = np.random.choice(pop, size=2, p=probs)\n new_pop.append(crossover(NN1, NN2, p_c, p_m))\n return new_pop",
"def _selection(self) -> None:\n # The size of the new population must be the same as the prev. one\n max_size_of_pop = self._pop_size\n\n # Copy 50% of best chromosomes to the next generation\n num_of_pop_to_next_gen = round(self._pop_size / 2)\n max_size_of_pop -= num_of_pop_to_next_gen\n self._population = self._population[0:num_of_pop_to_next_gen]\n\n # Mutate 25% of the prev. population and add to the next generation\n num_of_mutated_to_next_gen = round(max_size_of_pop / 2)\n max_size_of_pop -= num_of_mutated_to_next_gen\n for i in range(num_of_mutated_to_next_gen):\n # Mutate one member from the prev. generation\n img, _ = self._population[i]\n new_mutated_member = self._mutate(img)\n\n # Apply more mutation to one chromosome(from 0 to 100)\n for i in range(rand.randint(0, 100)):\n new_mutated_member = self._mutate(new_mutated_member)\n\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the mutated chromosome to the next generation\n self._population.append((new_mutated_member, fitval))\n\n # For remaining 25% of the prev. population do crossing overs\n num_of_crossing_overs_to_next_gen = max_size_of_pop\n max_size_of_pop -= num_of_crossing_overs_to_next_gen\n\n for i in range(num_of_crossing_overs_to_next_gen):\n # Choose 2 chromosomes, then do one crossing over\n img_ext_1, _ = self._population[i]\n img_ext_2, _ = self._population[rand.randint(0, num_of_pop_to_next_gen)]\n\n new_mutated_member = self._crossing_over(img_ext_1, img_ext_2)\n # Evaluate the goodness of obtained chromosome\n fitval = self._fit_test(new_mutated_member)\n # Add the derived chromosome to the next generation.\n # Form of 1 element of the population: (member, fitness value)\n self._population.append((new_mutated_member, fitval))\n\n # Sort the new generation in increasing order based on the fitness value of each chromosome\n self._population.sort(key=lambda x: x[1])\n print(f'Best chromosome fit value: {self._population[0][1]}')",
"def crossover(p1, p2):\n genotype = []\n \n #Your code here\n \n return {'genotype': genotype, 'fitness': None}",
"def crossoverFunc(parents, size, bits):\n\tchildren = np.zeros((size), np.dtype('a6'))\n\n\tfor i in range(0, int(size/2)):\n\t\tx_site = np.random.randint(0, bits - 1)\n\t\tx1 = parents[i]\n\t\tx2 = parents[size - i - 1]\n\t\tif (np.random.randint(0, 100)) > 40 :\t# Crossover Probability = 60 percent\n\t\t\tch1 = x1[0:x_site] + x2[x_site:bits]\n\t\t\tch2 = x2[0:x_site] + x1[x_site:bits]\n\n\t\t\tchildren[i] = ch1\n\t\t\tchildren[size - i - 1] = ch2\n\t\t\n\t\telse:\n\t\t\tchildren[i] = x1\n\t\t\tchildren[size - i - 1] = x2\n\n\treturn children",
"def uniform_crossover(\n self, mating_pop_dict, test=False, pairs=[], crossover_prob={}\n ):\n\n print('Performing crossovers')\n\n # Initialises dictionary of child networks\n crossover_pop_dict = OrderedDict()\n\n if test is False:\n # Selects pairs of networks at random to crossover with each other\n network_num = list(mating_pop_dict.keys())\n random.shuffle(network_num)\n network_num = iter(network_num) # Do not merge with line below,\n # and do not introduce any lines of code between them!\n network_num = list(zip(network_num, network_num))\n else:\n network_num = pairs\n\n # Performs uniform crossover\n for index, network_pair in enumerate(network_num):\n network_num_1 = network_pair[0]\n network_num_2 = network_pair[1]\n mate_1 = copy.deepcopy(mating_pop_dict[network_num_1])\n mate_2 = copy.deepcopy(mating_pop_dict[network_num_2])\n\n for node in list(mate_1.nodes):\n type_1 = mate_1.nodes()[node]['type']\n type_2 = mate_2.nodes()[node]['type']\n if type_1 != type_2:\n raise TypeError(\n 'Difference between type of {} in {} ({} = {}; {} ='\n ' {}) - should be identical'.format(node, network_pair,\n network_num_1, type_1, network_num_2, type_2)\n )\n if type_1 == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = crossover_prob[index][node]\n\n if random_number <= self.crossover_prob:\n # Copy to prevent these dictionaries from updating when the\n # node attributes are updated in the code below (otherwise\n # both nodes will be assigned the same identity as the node\n # in mate_1, instead of the node identities being crossed\n # over)\n mate_1_node_attributes = copy.deepcopy(mate_1.nodes()[node])\n mate_2_node_attributes = copy.deepcopy(mate_2.nodes()[node])\n # mate_1.nodes()[node] = {} does not work, get\n # TypeError: 'NodeView' object does not support item assignment\n for attribute in list(mate_1.nodes()[node].keys()):\n del mate_1.nodes()[node][attribute]\n for attribute in list(mate_2.nodes()[node].keys()):\n del mate_2.nodes()[node][attribute]\n nx.set_node_attributes(mate_1, values={node: mate_2_node_attributes})\n nx.set_node_attributes(mate_2, values={node: mate_1_node_attributes})\n\n crossover_pop_dict[network_num_1] = mate_1\n crossover_pop_dict[network_num_2] = mate_2\n\n return crossover_pop_dict",
"def crossover (self, p1, p2, p_pop, c1, c2, c_pop) :\n assert self.crossover_count < self.pop_size\n assert self.get_iteration () == self.last_gen\n self.parents.append (p1)\n self.parents.append (p2)\n self.crossover_count += 2\n if self.crossover_count == self.pop_size :\n assert (self.get_iteration () == self.last_gen)\n print (self.get_iteration ())\n sys.stdout.flush ()\n self.build_model (p_pop)\n self.sample_model (c1, c2, c_pop)\n self.crossover_count = 0\n self.parents = []\n self.children = {}\n self.last_gen += 1\n self.clear_cache ()",
"def _cross_parents(self):\n while len(self.children_population) < self.children_count:\n parent_1, parent_2 = random.sample(self.population, k=2)\n self.children_population.extend(self.crossover.cross(parent_1, parent_2))",
"def simulated_annealing_replacement(random, population, parents, offspring, args):\r\n try:\r\n temp = args['temperature']\r\n cooling_rate = args['cooling_rate']\r\n temp = temp * cooling_rate\r\n args['temperature'] = temp\r\n except KeyError:\r\n try:\r\n num_evals = args['_ec'].num_evaluations\r\n max_evals = args['max_evaluations']\r\n temp = float(max_evals - num_evals) / float(max_evals)\r\n except KeyError:\r\n num_gens = args['_ec'].num_generations\r\n max_gens = args['max_generations']\r\n temp = 1 - float(max_gens - num_gens) / float(max_gens)\r\n \r\n new_pop = []\r\n for p, o in zip(parents, offspring):\r\n if o >= p:\r\n new_pop.append(o)\r\n elif temp > 0 and random.random() < math.exp(-abs(p.fitness - o.fitness) / float(temp)):\r\n new_pop.append(o)\r\n else:\r\n new_pop.append(p)\r\n \r\n return new_pop",
"def segmented_crossover(\n self, mating_pop_dict, test=False, pairs=[], crossover_prob={}\n ):\n\n print('Performing crossovers')\n\n # Initialises dictionary of child networks\n crossover_pop_dict = OrderedDict()\n\n if test is False:\n # Selects pairs of networks at random to crossover with each other\n network_num = list(mating_pop_dict.keys())\n random.shuffle(network_num)\n network_num = iter(network_num) # Do not merge with line below,\n # and do not introduce any lines of code between them!\n network_num = list(zip(network_num, network_num))\n else:\n network_num = pairs\n\n # Performs segmented crossover\n for index, network_pair in enumerate(network_num):\n network_num_1 = network_pair[0]\n network_num_2 = network_pair[1]\n mate_1 = copy.deepcopy(mating_pop_dict[network_num_1])\n mate_2 = copy.deepcopy(mating_pop_dict[network_num_2])\n\n swap = False\n for node in list(mate_1.nodes):\n type_1 = mate_1.nodes()[node]['type']\n type_2 = mate_2.nodes()[node]['type']\n if type_1 != type_2:\n raise TypeError(\n 'Difference between type of {} in {} ({} = {}; {} ='\n ' {}) - should be identical'.format(node, network_pair,\n network_num_1, type_1, network_num_2, type_2)\n )\n if type_1 == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = crossover_prob[index][node]\n\n if swap is False:\n if random_number <= self.swap_start_prob:\n swap = True\n else:\n swap = False\n elif swap is True:\n if random_number <= self.swap_stop_prob:\n swap = False\n else:\n swap = True\n\n if swap is True:\n # Copy to prevent these dictionaries from updating when the\n # node attributes are updated in the code below (otherwise\n # both nodes will be assigned the same identity as the node\n # in mate_1, instead of the node identities being crossed\n # over)\n mate_1_attributes = copy.deepcopy(mate_1.nodes()[node])\n mate_2_attributes = copy.deepcopy(mate_2.nodes()[node])\n # mate_1.nodes()[node] = {} does not work, get\n # TypeError: 'NodeView' object does not support item assignment\n for attribute in list(mate_1.nodes()[node].keys()):\n del mate_1.nodes()[node][attribute]\n for attribute in list(mate_2.nodes()[node].keys()):\n del mate_2.nodes()[node][attribute]\n nx.set_node_attributes(mate_1, values={node: mate_2_attributes})\n nx.set_node_attributes(mate_2, values={node: mate_1_attributes})\n\n crossover_pop_dict[network_num_1] = mate_1\n crossover_pop_dict[network_num_2] = mate_2\n\n return crossover_pop_dict",
"def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population",
"def crossover(self, pop):\n intDiscID = self.iID + self.dID\n varID = self.cID\n goldenRatio = (1.0 + sqrt(5)) / 2.0\n dx = np.zeros_like(pop[0])\n children = []\n used = []\n for i in range(0, int(self.fracElite * len(pop)), 1):\n r = int(rand() * self.population)\n while r in used or r == i:\n r = int(rand() * self.population)\n\n used.append(i)\n children.append(cp.deepcopy(pop[r]))\n dx = abs(pop[i] - children[i]) / goldenRatio\n children[i] = children[i] + dx * varID + np.round(dx * intDiscID)\n children[i] = simple_bounds(children[i], self.lb, self.ub)\n\n return (\n children, used)",
"def simulated_annealing_replacement(random, population, parents, offspring, args):\n try:\n temp = args['temperature']\n cooling_rate = args['cooling_rate']\n temp = temp * cooling_rate\n args['temperature'] = temp\n except KeyError:\n try:\n num_evals = args['_ec'].num_evaluations\n max_evals = args['max_evaluations']\n temp = float(max_evals - num_evals) / float(max_evals)\n except KeyError:\n num_gens = args['_ec'].num_generations\n max_gens = args['max_generations']\n temp = 1 - float(max_gens - num_gens) / float(max_gens)\n \n new_pop = []\n for p, o in zip(parents, offspring):\n if o >= p:\n new_pop.append(o)\n elif temp > 0 and random.random() < math.exp(-abs(p.fitness - o.fitness) / float(temp)):\n new_pop.append(o)\n else:\n new_pop.append(p)\n \n return new_pop",
"def selection(self):\n\n for i in range(self.pop_num*3): # It is important. Next, we will rank the array of parents and children in ascending order of survivability (sum (fit)).\n self.par_and_sons[i].fit = SimpleSegmentationGA.fitness_function(self.gray, self.delta_x, self.length, self.par_and_sons[i].A)\n\n # Sort.\n self.par_and_sons = sorted(self.par_and_sons, key=lambda individ: individ.fit) \n self.population=self.par_and_sons[:self.pop_num].copy()",
"def parent_selection(pop, fit_pop, rounds):\n\n # get the list of worst to best of the population\n worst_to_best = np.argsort(fit_pop)\n\n # select the parents based on which round, first 2 parents are sampled from top 40%\n p1 = pop[worst_to_best[pop.shape[0] - rounds - 1]]\n p2 = pop[worst_to_best[pop.shape[0] - rounds - 2]]\n\n # last 3 parents are randomly chosen\n p3, p4, p5 = pop[np.random.randint(0, pop.shape[0]-1, 3)]\n\n return np.array([p1, p2, p3, p4, p5])",
"def GTreeGPCrossoverSinglePoint(genome, **args):\n # print \"CrossoverAAAAAAAAAAA\"\n sister = None\n brother = None\n\n gMom = args[\"mom\"].clone()\n gDad = args[\"dad\"].clone()\n\n gMom.resetStats()\n gDad.resetStats()\n\n max_depth = gMom.getParam(\"max_depth\", None)\n max_attempt = gMom.getParam(\"max_attempt\", 15)\n\n if max_depth is None:\n Util.raiseException(\"You must specify the max_depth genome parameter !\", ValueError)\n\n if max_depth < 0:\n Util.raiseException(\n \"The max_depth must be >= 1, if you want to use GTreeCrossoverSinglePointStrict crossover !\", ValueError)\n\n momRandom = None\n dadRandom = None\n\n for i in xrange(max_attempt):\n\n dadRandom = gDad.getRandomNode()\n\n if dadRandom.getType() == Consts.nodeType[\"TERMINAL\"]:\n momRandom = gMom.getRandomNode(1)\n elif dadRandom.getType() == Consts.nodeType[\"NONTERMINAL\"]:\n momRandom = gMom.getRandomNode(2)\n\n mD = gMom.getNodeDepth(momRandom)\n dD = gDad.getNodeDepth(dadRandom)\n\n # Two nodes are root\n if mD == 0 and dD == 0:\n continue\n\n mH = gMom.getNodeHeight(momRandom)\n if dD + mH > max_depth:\n continue\n\n dH = gDad.getNodeHeight(dadRandom)\n if mD + dH > max_depth:\n continue\n\n break\n\n if i == (max_attempt - 1):\n assert gMom.getHeight() <= max_depth\n return gMom, gDad\n else:\n nodeMom, nodeDad = momRandom, dadRandom\n\n nodeMom_parent = nodeMom.getParent()\n nodeDad_parent = nodeDad.getParent()\n\n # Sister\n if args[\"count\"] >= 1:\n sister = gMom\n nodeDad.setParent(nodeMom_parent)\n\n if nodeMom_parent is None:\n sister.setRoot(nodeDad)\n else:\n nodeMom_parent.replaceChild(nodeMom, nodeDad)\n sister.processNodes()\n assert sister.getHeight() <= max_depth\n\n # Brother\n if args[\"count\"] == 2:\n brother = gDad\n nodeMom.setParent(nodeDad_parent)\n\n if nodeDad_parent is None:\n brother.setRoot(nodeMom)\n else:\n nodeDad_parent.replaceChild(nodeDad, nodeMom)\n brother.processNodes()\n assert brother.getHeight() <= max_depth\n\n return sister, brother",
"def genetic_algorithm(population, lamda):\n maxGenerations = 5000\n generations_count = 0\n while generations_count <= maxGenerations:\n new_population = []\n generations_count += 1\n for i in range(0, len(population)):\n x = random_select(population, lamda)\n y = random_select(population, lamda)\n child = cross_over(x, y)\n child = mutate(child)\n new_population.append(child)\n population = new_population\n # Test for result\n conflicts = find_conflicts(population[i])\n if conflicts == 0:\n return True, population[i], generations_count\n return False, None, maxGenerations",
"def cross_curr2pbest1(pop, ic, f, cr, rng, p_num, archive, arc_ind_cnt, task, **_kwargs):\n # Note: the population passed in the argument must be sorted by fitness!\n x_pbest = rng.integers(p_num)\n # a random individual is selected from the best p_num individuals of the population rng.integers\n p = [1 / (len(pop) - 1.0) if i != ic else 0 for i in range(len(pop))]\n r1 = rng.choice(len(pop), p=p) # a random individual != to the current individual is selected from the population\n p = [1 / (len(pop) + arc_ind_cnt - 2.0) if i != ic and i != r1 else 0 for i in range(len(pop) + arc_ind_cnt)]\n r2 = rng.choice(len(pop) + arc_ind_cnt, p=p)\n # a second random individual != to the current individual and r1 is selected from the population U archive\n j = rng.integers(task.dimension)\n if r2 >= len(pop):\n r2 -= len(pop)\n v = [pop[ic][i] + f * (pop[x_pbest][i] - pop[ic][i]) + f * (pop[r1][i] - archive[r2][i])\n if rng.random() < cr or i == j else pop[ic][i] for i in range(task.dimension)]\n return parent_medium(np.asarray(v), pop[ic].x, task.lower, task.upper)\n # the mutant vector is repaired if needed\n\n else:\n v = [pop[ic][i] + f * (pop[x_pbest][i] - pop[ic][i]) + f * (pop[r1][i] - pop[r2][i])\n if rng.random() < cr or i == j else pop[ic][i] for i in range(task.dimension)]\n return parent_medium(np.asarray(v), pop[ic].x, task.lower, task.upper)\n # the mutant vector is repaired if needed",
"def stochastic_universal_selection(self, fitness, num_parents):\n\n fitness_sum = numpy.sum(fitness)\n if fitness_sum == 0:\n self.logger.error(\"Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.\")\n raise ZeroDivisionError(\"Cannot proceed because the sum of fitness values is zero. Cannot divide by zero.\")\n probs = fitness / fitness_sum\n probs_start = numpy.zeros(probs.shape, dtype=float) # An array holding the start values of the ranges of probabilities.\n probs_end = numpy.zeros(probs.shape, dtype=float) # An array holding the end values of the ranges of probabilities.\n\n curr = 0.0\n\n # Calculating the probabilities of the solutions to form a roulette wheel.\n for _ in range(probs.shape[0]):\n min_probs_idx = numpy.where(probs == numpy.min(probs))[0][0]\n probs_start[min_probs_idx] = curr\n curr = curr + probs[min_probs_idx]\n probs_end[min_probs_idx] = curr\n probs[min_probs_idx] = 99999999999\n\n pointers_distance = 1.0 / self.num_parents_mating # Distance between different pointers.\n first_pointer = numpy.random.uniform(low=0.0, \n high=pointers_distance, \n size=1)[0] # Location of the first pointer.\n\n # Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.\n if self.gene_type_single == True:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=self.gene_type[0])\n else:\n parents = numpy.empty((num_parents, self.population.shape[1]), dtype=object)\n\n parents_indices = []\n\n for parent_num in range(num_parents):\n rand_pointer = first_pointer + parent_num*pointers_distance\n for idx in range(probs.shape[0]):\n if (rand_pointer >= probs_start[idx] and rand_pointer < probs_end[idx]):\n parents[parent_num, :] = self.population[idx, :].copy()\n parents_indices.append(idx)\n break\n\n return parents, numpy.array(parents_indices)",
"def nsga_replacement(random, population, parents, offspring, args):\n survivors = []\n combined = list(population)\n combined.extend(offspring)\n \n # Perform the non-dominated sorting to determine the fronts.\n fronts = []\n pop = set(range(len(combined)))\n while len(pop) > 0:\n front = []\n for p in pop:\n dominated = False\n for q in pop:\n if combined[p] < combined[q]:\n dominated = True\n break\n if not dominated:\n front.append(p)\n fronts.append([dict(individual=combined[f], index=f) for f in front])\n pop = pop - set(front)\n \n # Go through each front and add all the elements until doing so\n # would put you above the population limit. At that point, fall\n # back to the crowding distance to determine who to put into the\n # next population. Individuals with higher crowding distances\n # (i.e., more distance between neighbors) are preferred.\n for i, front in enumerate(fronts):\n if len(survivors) + len(front) > len(population):\n # Determine the crowding distance.\n distance = [0 for _ in range(len(combined))]\n individuals = list(front)\n num_individuals = len(individuals)\n num_objectives = len(individuals[0]['individual'].fitness)\n for obj in range(num_objectives):\n individuals.sort(key=lambda x: x['individual'].fitness[obj])\n distance[individuals[0]['index']] = float('inf')\n distance[individuals[-1]['index']] = float('inf')\n for i in range(1, num_individuals-1):\n distance[individuals[i]['index']] = (distance[individuals[i]['index']] + \n (individuals[i+1]['individual'].fitness[obj] - \n individuals[i-1]['individual'].fitness[obj]))\n \n crowd = [dict(dist=distance[f['index']], index=f['index']) for f in front]\n crowd.sort(key=lambda x: x['dist'], reverse=True)\n last_rank = [combined[c['index']] for c in crowd]\n r = 0\n num_added = 0\n num_left_to_add = len(population) - len(survivors)\n while r < len(last_rank) and num_added < num_left_to_add:\n if last_rank[r] not in survivors:\n survivors.append(last_rank[r])\n num_added += 1\n r += 1\n # If we've filled out our survivor list, then stop.\n # Otherwise, process the next front in the list.\n if len(survivors) == len(population):\n break\n else:\n for f in front:\n if f['individual'] not in survivors:\n survivors.append(f['individual'])\n return survivors",
"def crowding_replacement(random, population, parents, offspring, args):\r\n def distance(x, y):\r\n return math.sqrt(sum([(a - b)**2 for a, b in zip(x, y)]))\r\n try:\r\n distance_function = args['distance_function']\r\n except KeyError:\r\n distance_function = distance\r\n args['distance_function'] = distance_function\r\n crowding_distance = args.setdefault('crowding_distance', 2)\r\n survivors = population\r\n for o in offspring:\r\n pool = random.sample(survivors, crowding_distance)\r\n closest = min(pool, key=lambda x: distance_function(o.candidate, x.candidate))\r\n if o > closest:\r\n survivors.remove(closest)\r\n survivors.append(o)\r\n return survivors",
"def crowding_replacement(random, population, parents, offspring, args):\n def distance(x, y):\n return math.sqrt(sum([(a - b)**2 for a, b in zip(x, y)]))\n try:\n distance_function = args['distance_function']\n except KeyError:\n distance_function = distance\n args['distance_function'] = distance_function\n crowding_distance = args.setdefault('crowding_distance', 2)\n survivors = population\n for o in offspring:\n pool = random.sample(survivors, crowding_distance)\n closest = min(pool, key=lambda x: distance_function(o.candidate, x.candidate))\n if o > closest:\n survivors.remove(closest)\n survivors.append(o)\n return survivors",
"def procreate(self, parents: List[Chromosome]) -> List[Chromosome]:\r\n super(UniformCrossoverProcreator, self).procreate(parents)\r\n # TODO: cleanup to a single clean block within the 80 margins\r\n # generate the left index as a series of 1s and 0s with the 1s\r\n # distributed with probability P = probability\r\n left_index = random.choice(2, p=[self.probability, 1 - self.probability],\r\n size=len(parents[0].genes))\r\n # the right index is the inverse (probablity) of the left index\r\n right_index = 1 - left_index\r\n # multiplying the indecies 0s out the removed genes from either side\r\n # then adding these two vectors gives the child\r\n return [parents[0].copy(genes=(parents[0].genes * left_index) + (parents[1].genes * right_index))]"
]
| [
"0.67263",
"0.67171764",
"0.66393787",
"0.6588233",
"0.65817004",
"0.6564607",
"0.6547238",
"0.65291065",
"0.6482871",
"0.6474727",
"0.64088905",
"0.6387641",
"0.6386324",
"0.6346811",
"0.63191617",
"0.63031244",
"0.6301391",
"0.6278659",
"0.6244412",
"0.6227246",
"0.62252873",
"0.6206629",
"0.61704355",
"0.6152776",
"0.6134481",
"0.61289096",
"0.61070913",
"0.6085397",
"0.6082992",
"0.6067164"
]
| 0.7517525 | 0 |
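Note on the crossover row above: the _produce_child helper that its document calls is not included in the row, so the gene-mixing step itself is not shown. Below is a minimal, hypothetical sketch of what a "uniform_swap" child production could look like for tensor-encoded individuals; the function name produce_child_uniform_swap and the (n_parents, genome_length) parent layout are illustrative assumptions, not taken from the dataset.

import torch

def produce_child_uniform_swap(parents: torch.Tensor) -> torch.Tensor:
    # Hypothetical sketch: parents is a (n_parents, genome_length) tensor holding the
    # selected parent genomes. For every gene position, copy the value from one
    # uniformly chosen parent (uniform crossover).
    n_parents, genome_length = parents.shape
    pick = torch.randint(0, n_parents, (genome_length,), device=parents.device)
    return parents[pick, torch.arange(genome_length, device=parents.device)]

With the parents stacked as best_population[p_idx] (one row per parent), calling a helper like this once per child mirrors the loop body in the row's document.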
Returns a dict of reasonable random parameters that can be used to do parameter search. | def random_parameters():
res = dict()
res["population_size"] = random.randrange(2, 21)
res["mutation_prob"] = random.choice([0.02, 0.05, 0.10, 0.20, 0.30, 0.40, 0.50])
res["crossover"] = random.choice([True, False])
res["selection"] = random.choice([True, False])
res["sigma"] = random.choice([0.1, 0.25, 0.5, 1])
res["crossover_method"] = random.choice(["single_swap", "uniform_swap", "arithmetic"])
res["selection_method"] = random.choice(["truncated", "fitness_based", "rank_based"])
res["best_rate"] = random.choice([0.2, 0.3, 0.5])
res["n_parents"] = random.choice([2, 3, 4])
res["elitism"] = random.choice([True, False])
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_params(self, randomize=True):\n pass",
"def set_rand_params(self) -> Dict:\n new_params: Dict = self.gen_params()\n self.set_params(new_params)\n return new_params",
"def random():\n pars = dict(\n scale=10**np.random.uniform(1, 3),\n gamma=np.random.uniform(0, 6),\n q_0=10**np.random.uniform(-3, -1),\n )\n return pars",
"def get_parameters(self):\n d = Algorithm.get_parameters(self)\n d.update({\n 'M': d.pop('population_size', self.population_size),\n 'num_tests': self.num_tests,\n 'num_searches': self.num_searches,\n 'num_searches_best': self.num_searches_best,\n 'bonus1': self.bonus1,\n 'bonus2': self.bonus2,\n 'num_enabled': self.num_enabled,\n 'local_searches': self.local_searches\n })\n return d",
"def get_params_from_dict(param_dict, random_number):\n temp = list(itertools.product(*param_dict.values()))\n param_choose = temp[int(random_number*len(temp))]\n param = {}\n for i in range(len(param_choose)):\n param[param_dict.keys()[i]] = param_choose[i]\n return param",
"def get_params(self):\n return {'classifier': self.classifier,\n 'grid_param': self.grid_param,\n 'n_param_comb': self.n_param_comb,\n 'top_bagging': self.bagging,\n 'bagging_param': self.bagging_param,\n 'comb_seed': self.comb_seed}",
"def sample(params):\n\n config = {}\n\n for param, value in params.items():\n if hasattr(value, 'rvs'):\n # this is a scipy.stats distribution\n config[param] = value.rvs()\n else:\n # this is a tuple\n config[param] = random.choice(value)\n\n return config",
"def good_params():\n from scraper import PARAMS\n params = PARAMS.copy()\n params['Business_Name'] = 'CodeFellows'\n params['City'] = 'Seattle'\n return params",
"def get_random_params(self, excludes: Set[int]) -> OrderedDict:\n return self.space.get_categorical_params(self.get_random_index(excludes))",
"def get_prob_params():\n prob = Namespace()\n prob.study_name = STUDY_NAME\n if IS_DEBUG:\n prob.num_trials = 3\n prob.max_capital = 10\n else:\n prob.num_trials = NUM_TRIALS\n prob.max_capital = MAX_CAPITAL\n # Common\n prob.time_distro = TIME_DISTRO\n prob.num_workers = NUM_WORKERS\n _study_params = {\n 'branin': ('synthetic/branin/config_mf.json',\n branin_mf, cost_branin_mf, 0.1, 0, 1),\n 'hartmann3_2': ('synthetic/hartmann3_2/config_mf.json',\n hartmann3_2_mf, cost_hartmann3_2_mf, 0.1, 0, 1),\n 'hartmann6_4': ('synthetic/hartmann6_4/config_mf.json',\n hartmann6_4_mf, cost_hartmann6_4_mf, 0.1, 0, 1),\n 'borehole_6': ('synthetic/borehole_6/config_mf.json',\n borehole_6_mf, cost_borehole_6_mf, 1, 0, 1),\n 'park2_4': ('synthetic/park2_4/config_mf.json',\n park2_4_mf, cost_park2_4_mf, 0.3, 0, 1),\n 'park2_3': ('synthetic/park2_3/config_mf.json',\n park2_3_mf, cost_park2_3_mf, 0.1, 0, 1),\n 'park1_3': ('synthetic/park1_3/config_mf.json',\n park1_3_mf, cost_park1_3_mf, 0.5, 0, 1),\n }\n (domain_config_file_suffix, raw_func, raw_fidel_cost_func, _fc_noise_scale,\n _initial_pool_size, _) = _study_params[prob.study_name]\n domain_config_file = os.path.join(DRAGONFLY_EXPERIMENTS_DIR, domain_config_file_suffix)\n # noisy\n prob.noisy_evals = NOISY_EVALS\n if NOISY_EVALS:\n noise_type = 'gauss'\n noise_scale = _fc_noise_scale\n else:\n noise_type = 'no_noise'\n noise_scale = None\n # Create domain, function_caller and worker_manager\n config = load_config_file(domain_config_file)\n func_caller = get_multifunction_caller_from_config(raw_func, config,\n raw_fidel_cost_func=raw_fidel_cost_func, noise_type=noise_type,\n noise_scale=noise_scale)\n # Set max_capital\n if hasattr(func_caller, 'fidel_cost_func'):\n prob.max_capital = prob.max_capital * \\\n func_caller.fidel_cost_func(func_caller.fidel_to_opt)\n else:\n prob.max_capital = prob.max_capital\n # Store everything in prob\n prob.func_caller = func_caller\n prob.worker_manager = SyntheticWorkerManager(prob.num_workers,\n time_distro='caller_eval_cost')\n prob.save_file_prefix = prob.study_name + ('-debug' if IS_DEBUG else '')\n prob.methods = METHODS\n prob.save_results_dir = SAVE_RESULTS_DIR\n prob.reporter = get_reporter('default')\n # evaluation options\n prob.evaluation_options = Namespace(prev_eval_points='none',\n initial_pool_size=_initial_pool_size)\n return prob",
"def dictOfRandomVariables(self):\n return dict()",
"def get_random_fit_parameters(self):\n all_random = self.par_obj.get_random_all()\n return np.asarray([all_random[p] for p in self.fit_parameters])",
"def get_random_individual():\r\n return [ random.random() for _ in range(PARAMETERS_COUNT) ]",
"def _sample_hyperparameters(self):\n\t\tconfig = {}\n\t\tfor attr, option in self._config_options.items():\n\t\t\tprint('Sampling', attr)\n\t\t\tconfig[attr] = option.sample()\n\t\treturn config",
"def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low' ] = self.low\n paramDict['high' ] = self.high\n paramDict['alpha'] = self.alpha\n paramDict['beta' ] = self.beta\n return paramDict",
"def get_test_params(cls, parameter_set=\"default\"):\n params1 = {\n \"n_clusters\": 3,\n \"n_init\": 2,\n \"max_iter\": 2,\n \"tol\": 1e-3,\n \"verbose\": False,\n \"random_state\": 2,\n }\n params2 = {\n \"n_clusters\": 2,\n \"init_algorithm\": \"random\",\n \"n_init\": 1,\n \"max_iter\": 1,\n \"tol\": 1e-4,\n \"verbose\": False,\n \"random_state\": 1,\n }\n return [params1, params2]",
"def get_params(self, deep=False):\n sampling_params = {'n_dim': self.n_dim,\n 'simplex_sampling': self.simplex_sampling,\n 'within_simplex_sampling': self.within_simplex_sampling,\n 'gaussian_component': self.gaussian_component}\n return {'ss_params': sampling_params,\n **RandomStateMixin.get_params(self, deep)}",
"def getInitParams(self):\n paramDict = Distribution.getInitParams(self)\n paramDict['mapping'] = self.mapping\n paramDict['values'] = self.values\n return paramDict",
"def random_configuration(self):\n q = {}\n for key in self.structure:\n if key != \"p0\":\n q[key] = [\n -pi + 2 * pi * np.random.rand(),\n np.abs(\n wraptopi(\n self.lb[key]\n + (self.ub[key] - self.lb[key]) * np.random.rand()\n )\n ),\n ]\n return q",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['low'] = self.low\n paramDict['alpha'] = self.alpha\n paramDict['beta'] = self.beta\n return paramDict",
"def get_random_nuisance_parameters(self):\n all_random = self.par_obj.get_random_all()\n return np.asarray([all_random[p] for p in self.nuisance_parameters])",
"def sample_parameters_given_hyper(self, gen_seed=0):\n if type(gen_seed) is not int:\n raise TypeError(\"gen_seed should be an int\")\n\n rng = random.Random(gen_seed)\n\n hypers = self.get_hypers()\n s = hypers[b's']\n r = hypers[b'r']\n nu = hypers[b'nu']\n m = hypers[b'mu']\n\n rho = rng.gammavariate(nu/2.0, s)\n mu = rng.normalvariate(m, (r/rho)**.5)\n\n assert(rho > 0)\n\n params = {'mu': mu, 'rho': rho}\n\n return params",
"def getInitParams(self):\n paramDict = BoostDistribution.getInitParams(self)\n paramDict['n' ] = self.n\n paramDict['p' ] = self.p\n return paramDict",
"def get_params(self):\n return {'k': self.k, 'q': self.q, 'sigma_s': self.sigma_s, 'm': self.m}",
"def get_params(self):\n return {'threshold': self.threshold,\n 'subsample': self.subsample,\n 'estimator': self.estimator,\n 'n_folds': self.n_folds,\n 'stratify': self.stratify,\n 'random_state': self.random_state,\n 'n_jobs': self.n_jobs}",
"def _default_params(self) -> dict[str, Any]:\n return {\n \"max_tokens\": self.max_tokens,\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"logprobs\": self.logprobs,\n \"echo\": self.echo,\n \"stop_sequences\": self.stop_sequences,\n \"repeat_penalty\": self.repeat_penalty,\n \"top_k\": self.top_k,\n \"n_threads\": self.n_threads,\n \"n_ctx\": self.n_ctx,\n \"n_gpu_layers\": self.n_gpu_layers,\n \"n_gqa\": self.n_gqa if self.n_gqa else None,\n \"n_parts\": self.n_parts,\n \"seed\": self.seed,\n \"f16_kv\": self.f16_kv,\n \"logits_all\": self.logits_all,\n \"vocab_only\": self.vocab_only,\n \"use_mlock\": self.use_mlock,\n \"n_batch\": self.n_batch,\n \"last_n_tokens_size\": self.last_n_tokens_size,\n \"streaming\": self.streaming,\n }",
"def random():\n # only care about the value of second_moment:\n # curve = scale * e**(-second_moment^2 q^2)/q^2\n # scale = 6 pi/100 (contrast/density*absorbed_amount)^2 * Vf/radius\n # the remaining parameters can be randomly generated from zero to\n # twice the default value as done by default in compare.py\n pars = dict(\n scale=1,\n second_moment=10**np.random.uniform(1, 3),\n )\n return pars",
"def get_params(self, deep=False):\n return {\"alpha\": self.alpha, \"beta\": self.beta, \"gamma\": self.gamma, \"W\": self.W, \"bias\": self.bias, \"add_bias\": self.add_bias, \"opts\": self.opts}",
"def getDefaultParameterValues(self):\r\n dct = {}\r\n self.initializeRoadRunnerModel()\r\n self.roadrunnerModel.reset()\r\n for parameterName in self.parametersToFit:\r\n dct[parameterName] = self.roadrunnerModel.model[parameterName]\r\n return dct"
]
| [
"0.70701885",
"0.706165",
"0.7013774",
"0.6762008",
"0.6735659",
"0.6637217",
"0.6519411",
"0.6440548",
"0.63888425",
"0.6324186",
"0.63205343",
"0.63086057",
"0.62929904",
"0.6286846",
"0.6242564",
"0.6231981",
"0.6220546",
"0.6217865",
"0.6199372",
"0.61903065",
"0.6187879",
"0.6181536",
"0.6181286",
"0.61754304",
"0.61739594",
"0.61694765",
"0.6151806",
"0.6147126",
"0.61401415",
"0.6133146"
]
| 0.8022944 | 0 |
This function takes two equal-sized sets and returns a set of tuples. Each tuple is made up of one element from each of the two starting sets. >>> male={'mgerbil1','mgerbil2','mgerbil3','mgerbil4','mgerbil5'} >>> female={'fgerbil1','fgerbil2','fgerbil3','fgerbil4','fgerbil5'} >>> mating_pairs(male, female) {('mgerbil2', 'fgerbil4'), ('mgerbil1', 'fgerbil2'), ('mgerbil4', 'fgerbil3'), ('mgerbil5', 'fgerbil5'), ('mgerbil3', 'fgerbil1')} | def mating_pairs(male: set, female: set) -> Compoundset:
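    # Pop one gerbil from each set until either set is empty, adding each pair.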
    set_of_pairs = set()
    found = True
    while found:
        if len(male) > 0 and len(female) > 0:
            malegerbil = male.pop()
            femalegerbil = female.pop()
            pairs = (malegerbil, femalegerbil)
            set_of_pairs.add(pairs)
        else:
            found = False
return set_of_pairs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def friend_pairs_and_other_friends(friend_tuple): \n x=friend_tuple[0]\n y=friend_tuple[1]\n def auxfun(w):\n return (frozenset({x,w}),y)\n return list(map(auxfun,y))#returns [({x,y[0]},y),...]",
"def create_pairs(listA, listB):\n\n pairs = []\n labels = []\n for i in range(0,len(listA)):\n pairs += [[listA[i],listB[i]]] # same\n\n compare_to = i\n while compare_to == i: #not comparing to itself\n compare_to = random.randint(0,len(listA)-1)\n\n pairs += [[listA[i], listB[compare_to]]] # different\n\n labels += [1, 0]\n return np.array(pairs), np.array(labels)",
"def combine_permutations(p1, p2):\n p = tuple(map(p2.__getitem__, p1))\n return p",
"def gen_comparison_pairs(self, a, b, subset=None):\n # union of the keys of the two records\n # the ordering of the first record takes precedence\n # an alternative option would be to sort them, lexicographically or with a custom criteria\n keys_union = {**a, **b}.keys()\n\n if subset:\n keys_comp_a_b = self.gen_comparison_keys_subset(subset)\n else:\n keys_comp_a_b = self.gen_comparison_keys_common(keys_union)\n\n for key_comp, key_a, key_b in keys_comp_a_b:\n yield key_comp, (a.get(key_a, {}), b.get(key_b, {}))",
"def get_all_possible_pairs(self, a, b):\n return itertools.product(a, b)",
"def make_pairs(innames):\n names = copy.deepcopy(innames)\n pairs = []\n if len(names) % 2 == 1:\n m = random.randint(0, len(names) - 1)\n singleton = names[m]\n del names[m]\n else:\n singleton = None\n while len(names) > 0:\n i = 0\n j = 0\n while i == j:\n i = random.randint(0, len(names) - 1)\n j = random.randint(0, len(names) - 1)\n # print(\"i is\", i, \"j is\", j)\n k = names[i]\n l = names[j]\n # print(\"k is\", k, \"l is\", l)\n if i > j:\n del names[i]\n del names[j]\n else:\n del names[j]\n del names[i]\n # print(\"names is\", repr(names))\n if singleton is None:\n pairs.append(set([k, l]))\n else:\n pairs.append(set([k, l, singleton]))\n singleton = None\n return pairs",
"def unigram_pairs(id1, id2):\n uni_pairs = []\n for word1 in id1.tolist():\n for word2 in id2.tolist():\n uni_pairs.append([word1, word2])\n # print(\"uni_pair is {}\".format(uni_pairs))\n # print(\"the # of uni pair is {}\".format(len(uni_pairs)))\n return uni_pairs",
"def find_pairs(corpus, a, b):\n pairs = []\n it1 = CorpusIterator(corpus, a)\n it2 = CorpusIterator(corpus, b)\n\n def _is_pair(x, y):\n return x.document_id == y.document_id and x.index == y.index - 1\n\n def _cmp(x, y):\n if x.document_id == y.document_id:\n if x.index == y.index:\n return 0\n elif x.index > y.index:\n return 1\n else:\n return -1\n elif x.document_id > y.document_id:\n return 1\n else:\n return -1\n\n w1 = it1.get_next()\n w2 = it2.get_next()\n while w1 is not None and w2 is not None:\n cmp_value = _cmp(w1, w2)\n if cmp_value == -1:\n # w1 behind w2\n if _is_pair(w1, w2):\n pairs.append(w1)\n w1 = it1.get_next()\n w2 = it2.get_next()\n else:\n if w2.index == 0:\n it1.skip(w2)\n else:\n it1.skip(CorpusPosition(w2.document_id, w2.index - 1))\n w1 = it1.get_next()\n elif cmp_value == 1:\n # w1 ahead of w2\n it2.skip(w1)\n w2 = it2.get_next()\n else:\n raise ValueError('Iterators are tracking same token')\n\n return pairs",
"def mpairs(seq1, seq2, key1, key2=None):\n key2 = key2 or key1\n\n seq1, seq2 = iter(seq1), iter(seq2)\n\n s1, s2 = next(seq1), next(seq2)\n k1, k2 = key1(s1), key2(s2)\n\n while True:\n try:\n if k1 == k2:\n yield (s1, s2)\n s1, s2 = next(seq1), next(seq2)\n k1, k2 = key1(s1), key2(s2)\n elif k1 < k2:\n s1 = next(dropwhile(lambda x: key1(x) < k2, seq1))\n k1 = key1(s1)\n else:\n s2 = next(dropwhile(lambda x: key2(x) < k1, seq2))\n k2 = key2(s2)\n\n except StopIteration:\n break",
"def get_pairs_to_compare(id_token_pairs):\n token_sku_id_pairs = dict()\n for sku_id, tokens in id_token_pairs.items():\n for token in tokens:\n token_sku_id_pairs[token] = token_sku_id_pairs.get(token, []) + [sku_id]\n\n pairs_to_compare = set()\n for token, sku_ids in token_sku_id_pairs.items():\n id_combinations = combinations(sku_ids, 2)\n for (id1, id2) in id_combinations:\n if (id2, id1) not in pairs_to_compare:\n pairs_to_compare.add((id1, id2))\n\n return pairs_to_compare",
"def _iter_pairs(graph):\n for u, v in set(graph.edges_iter()):\n yield u, v",
"def get_context_pairs(tokens):\n data = set()\n ngrams = get_ngrams(tokens, 4)\n if not ngrams:\n ngrams = [tokens]\n for ngrams_batch in ngrams:\n for pair in combinations(ngrams_batch, 2):\n diff_index = abs(tokens.index(pair[0]) - abs(tokens.index(pair[1])))\n if len(pair[0]) < 2 or len(pair[1]) < 2:\n continue\n data.add((pair, diff_index))\n return data",
"def generate_pairs(names, emails, seed=123):\n gift_givers = names\n gift_receivers = names\n reqs_met = False\n random.seed(seed)\n count = 0\n while not reqs_met:\n count += 1\n gift_receivers = random.sample(gift_receivers, len(gift_receivers))\n reqs_met = requirements(gift_givers, gift_receivers)\n if count > 100:\n print(\n \"*\" * 70,\n \"\\nTried over 100 times... Could not find a suitable match.\"\n \"\\nExiting ... Try again with a different seed?\",\n )\n sys.exit()\n break\n return gift_givers, emails, gift_receivers",
"def draw_bs_pairs(x, y):\n inds = np.arange(len(x))\n bs_inds = draw_bs_sample(inds)\n\n return x[bs_inds], y[bs_inds]",
"def swissPairings():\n standings = playerStandings()\n if not standings:\n raise Exception(\"no players have registered\")\n elif len(standings)%2 != 0:\n raise Exception(\"there are an odd number of players registered.\" +\n \"Please register an even number\")\n evenStandings = standings[::2]\n oddStandings = standings[1::2]\n pairings = zip(evenStandings, oddStandings)\n # remove unnecessary info from standings and create necessary pairings formatting\n pairings = map(lambda x: (x[0][0], x[0][1], x[1][0], x[1][1]),pairings)\n return pairings",
"def joint_pairs(self):\n return ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13))",
"def get_pairs(terms):\n return itertools.combinations(terms, 2)",
"def find_pairs(words): \n pass",
"def generate_pairs(self, all_walks):\n logging.info(['edge_types before generate pairs', self.edge_types])\n\n pairs = []\n skip_window = self.config['win_size'] // 2\n for layer_id, e_type in enumerate(self.edge_types):\n walks = all_walks[e_type]\n for walk in tqdm.tqdm(walks):\n for i in range(len(walk)):\n for j in range(1, skip_window + 1):\n if i - j >= 0 and walk[i] != walk[i - j]:\n neg_nodes = self.graph[e_type].sample_nodes(\n self.config['neg_num'])\n pairs.append(\n (walk[i], walk[i - j], *neg_nodes, layer_id))\n if i + j < len(walk) and walk[i] != walk[i + j]:\n neg_nodes = self.graph[e_type].sample_nodes(\n self.config['neg_num'])\n pairs.append(\n (walk[i], walk[i + j], *neg_nodes, layer_id))\n return pairs",
"def get_mutual_friends(person1_friends, person2_friends):\n return list(set(person1_friends) & set(person2_friends))",
"def associate(first_list, second_list, offset, max_difference):\n ## obatin all keys\n first_keys = list(first_list)\n second_keys = list(second_list)\n potential_matches = [(abs(a - (b + offset)), a, b)\n for a in first_keys\n for b in second_keys\n if abs(a - (b + offset)) < max_difference]\n potential_matches.sort()\n matches = []\n for diff, a, b in potential_matches:\n if a in first_keys and b in second_keys:\n first_keys.remove(a)\n second_keys.remove(b)\n matches.append((a, b))\n \n matches.sort()\n return matches",
"def shuffle_pair(x,y):\n xy = list(zip(x,y))\n np.random.shuffle(xy)\n x, y = zip(*xy)\n x = np.array(x)\n y = np.array(y)",
"def get_2pairs():\n\n done = 0\n while not done:\n r0 = int(random(GRID_CELLS))\n c0 = int(random(GRID_CELLS))\n\n r1 = int(random(GRID_CELLS))\n c1 = int(random(GRID_CELLS))\n done = 1\n\n if random(1) < 0.5:\n # move one cell right\n ra1 = r0 + 1\n rb1 = r1 + 1\n ra0, rb0 = r0, r1\n ca0, cb0 = c0, c1\n ca1, cb1 = c0, c1\n\n if ra1 >= GRID_CELLS or rb1 >= GRID_CELLS:\n done = 0\n else: # move down:\n ca1 = c0 + 1\n cb1 = c1 + 1\n ca0, cb0 = c0, c1\n ra0, rb0 = r0, r1\n ra1, rb1 = r0, r1\n if ca1 >= GRID_CELLS or cb1 >= GRID_CELLS:\n done = 0\n\n return [((ra0, ca0), (rb0, cb0)), ((ra1, ca1), (rb1, cb1))]",
"def pick_pairs(amount):\n return [(i,i+1,2) for i in range(0, amount, 2)]",
"def get_all_pairs(G):\n # list all (start,dest) pairs between which the route must be computed\n pairs_list = [(start, dest) for dest in G.nodes for start in G.nodes]\n\n # shuffle all elements in-place\n random.shuffle(pairs_list)\n\n # generate a set from the list\n pairs_set = set(pairs_list)\n\n return pairs_list, pairs_set",
"def interleave(iter1, iter2):\n for pairs in zip(iter1, iter2):\n yield from pairs",
"def two_pair(ranks):\n pairlist = ()\n for r in ranks:\n if ranks.count(r) == 2: pairlist = pairlist +(r, )\n set(pairlist)\n pairlist = tuple(set(pairlist))\n if len(pairlist) == 2:\n return pairlist\n else:\n return None",
"def setSplit (set1, set2):\n common = []\n rem1 = []\n rem2 = []\n for elem in set1:\n if elem in set2:\n common.append (elem)\n else:\n rem1.append (elem)\n for elem in set2:\n if elem in set1:\n pass\n else:\n rem2.append (elem)\n return rem1, common, rem2",
"def get_synset_pairs(synset: Synset) -> list:\n # Remove phrasal expressions from the literals\n literals = remove_phrases(synset.literals)\n\n # Generate a list of unique pairs representing the cartesian product of the list of literals of the single synset\n pairs = unique([tuple(sorted((w1, w2), key=itemgetter(0))) for w1 in literals for w2 in literals if not w1 == w2])\n return pairs",
"def generate_pairs(self, _list_d):\n\n length = len(_list_d)\n result_list = {}\n\n for i in range(length):\n for j in xrange(i+1,length):\n l = len(result_list)\n result_list[l] = ((i, _list_d[i]),(j, _list_d[j]))\n\n return result_list"
]
| [
"0.62168926",
"0.5791811",
"0.57299113",
"0.5724014",
"0.56990707",
"0.56952417",
"0.56083345",
"0.55963457",
"0.55837464",
"0.54992604",
"0.5494936",
"0.54810447",
"0.54719776",
"0.5393596",
"0.5345833",
"0.5294748",
"0.5250942",
"0.5230622",
"0.5226189",
"0.51821125",
"0.5147406",
"0.5127146",
"0.5126933",
"0.51256025",
"0.51039803",
"0.51029253",
"0.5091622",
"0.5082537",
"0.5080831",
"0.50693786"
]
| 0.78777766 | 0 |
This method updates the model's attributes with the values passed via kwargs. | def update(self, **kwargs):
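    # Set every keyword argument as an attribute on this instance.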
print("Updating model")
print(kwargs)
for key in kwargs:
setattr(self, key, kwargs[key]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self, model, **kwargs):\n self._isinstance(model)\n for k, v in self._preprocess_params(kwargs).items():\n setattr(model, k, v)\n self.save(model)\n return model",
"def update(self, datastore, model, **kwargs):\n for k, v in self._preprocess_params(kwargs).items():\n setattr(model, k, v)\n self.save(datastore, model)\n return model",
"def update(self, **kwargs):\n for k, v in kwargs.iteritems():\n if hasattr(self, k):\n setattr(self, k, v)",
"def updateAttrs(self, kwargs):\n for k, v in kwargs.iteritems():\n setattr(self, k, v)",
"def update(self, **kwargs):\n return self._object.update(meta=kwargs)",
"def update(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def update(self, *args, **kwargs):\n if kwargs is not None:\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def update(self, **kwargs):\n for key, value in sorted(kwargs.items()):\n if value:\n if hasattr(self, key):\n setattr(self, key, value)",
"def updateModel(self):\n pass",
"def update(self, *args, **kwargs):\n if args is not () and args is not None:\n attr_names = [\"id\", \"size\", \"x\", \"y\"]\n for index, attr in enumerate(args):\n setattr(self, attr_names[index], attr)\n else:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)",
"def update(self, commit=True, **kwargs):\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n return commit and self.save() or self",
"def update(self, commit=True, **kwargs):\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n return commit and self.save() or self",
"def update(self, commit=True, **kwargs):\n for attr, value in kwargs.items():\n setattr(self, attr, value)\n return commit and self.save() or self",
"def update(self, commit=True, **kwargs):\n for attr, value in kwargs.iteritems():\n setattr(self, attr, value)\n return commit and self.save() or self",
"def update_from_kwargs(self, **kwargs):\n for (key, value) in kwargs.items():\n setattr(self, key, value)",
"def update_model(self):\n pass",
"def update(self, *args, **kwargs):\n if args:\n self.__update(*args)\n elif kwargs:\n self.__update(**kwargs)",
"def update(self, **kwargs):\n return self.parent.update_instance(self.name, kwargs)",
"def update(self, *args, **kwargs):\n pass",
"def update(self, *args, **kwargs):\n pass",
"def update(self, *args, **kwargs):\n pass",
"def update(self, *args, **kwargs):\n if args:\n arg_order = [\"id\", \"size\", \"x\", \"y\"]\n for index, arg in enumerate(args):\n setattr(self, arg_order[index], arg)\n elif kwargs:\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)",
"def update(self, **kwargs):\n self.manager.update(self, **kwargs)",
"def update(self, **kwargs):\n self.manager.update(self, **kwargs)",
"def update_model(self, **kwargs):\n self.__dict__.update(kwargs)\n opt_params = ['optimizer_params', 'optimizer']\n if any(item in kwargs.keys() for item in opt_params):\n self.get_unet_model()",
"def update_model(database, model, **kwargs):\n with get_model_lock(model[\"_id\"]):\n model = database.get('model',model[\"_id\"])\n for name, value in list(kwargs.items()):\n if name in [\"state\", \"result\", \"started\", \"finished\", \"progress\", \"message\"]:\n model[name] = value\n database.save(model)",
"def update(self, **kwargs):\n for key, value in kwargs.items():\n if key not in self.VALID_NAMES:\n continue\n\n setattr(self, key, value)",
"def update_model(self):\n pass # TODO: Implement this.",
"def update(self, *args, **kwargs):\n raise NotImplementedError",
"def _set_attributes(self, model):\n\n if model:\n self._get_dict(model)"
]
| [
"0.8240944",
"0.80810755",
"0.7585011",
"0.7517154",
"0.748801",
"0.7450906",
"0.7448534",
"0.7121363",
"0.71153945",
"0.71095514",
"0.7042544",
"0.7042544",
"0.7035119",
"0.70347744",
"0.70154786",
"0.6976838",
"0.69683146",
"0.69545335",
"0.6943849",
"0.6943849",
"0.6943849",
"0.6935972",
"0.69101936",
"0.69101936",
"0.6864296",
"0.6857524",
"0.68561006",
"0.6801239",
"0.6800853",
"0.6781329"
]
| 0.8798378 | 0 |
Extracts features from chunk, concatenates feature_train (onpower_train, offpower_train and duration_train) with new features and retrains feature models. Updates stats attribute. | def train_on_chunk(self, chunk, meter):
# EXTRACT FEATURES:
# find units:
self.__setattr__('units', chunk.columns[0])
        # Loading threshold for getting events:
thDelta = getattr(self, 'thDelta')
chunk.index.name = 'date_time'
        # To prevent learning many samples in the middle of an edge:
chunk.ix[:, 0][chunk.ix[:, 0] < thDelta] = 0
# Learning edges
chunk['delta'] = chunk.ix[:, 0].diff()
chunk.delta.fillna(0, inplace=True)
edges = chunk[np.abs(chunk['delta']) > thDelta].delta
# Pairing on/off events
#print(chunk)
if len(edges) > 1:
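            # Pair each rising edge (on) with the falling edge (off) that
            # immediately follows it, using the sign change of consecutive deltas.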
offpower = edges[edges.apply(np.sign).diff() == -2]
onpower = edges[edges.apply(np.sign).diff(-1) == 2]
duration = offpower.reset_index().date_time - \
onpower.reset_index().date_time
duration = duration.astype('timedelta64[s]')
# Set consistent index for concatenation:
onpower = pd.DataFrame(onpower).reset_index(drop=True)
onpower.columns = ['onpower']
offpower = pd.DataFrame(offpower).reset_index(drop=True)
offpower.columns = ['offpower']
duration = pd.DataFrame(duration).reset_index(drop=True)
duration.columns = ['duration']
# Len of samples:
print("Samples of onpower: " + str(len(onpower)))
print("Samples of offpower: " + str(len(offpower)))
print("Samples of duration: " + str(len(duration)))
number_of_events = len(onpower)
# Features (concatenation)
self.onpower_train = pd.concat(
[self.onpower_train, onpower]).reset_index(drop=True)
self.offpower_train = pd.concat(
[self.offpower_train, offpower]).reset_index(drop=True)
self.duration_train = pd.concat(
[self.duration_train, duration]).reset_index(drop=True)
else:
number_of_events = 0
print("""WARNING: No paired events found on this chunk.
Is it thDelta too high?""")
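        # Drop events lasting 400 s or more before re-training the duration model.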
self.duration_train = self.duration_train[self.duration_train.duration<400]
# RE-TRAIN FEATURE MODELS:
self.__retrain(self.onpower, self.onpower_train)
self.__retrain(self.offpower, self.offpower_train)
self.__retrain(self.duration, self.duration_train)
# UPDATE STATS:
stat_dict = {'appliance': meter.identifier[
0], 'instance': meter.identifier[1], 'Nevents': number_of_events}
instanceFound = False
if len(self.stats) == 0:
self.stats.append(stat_dict)
else:
for stat in self.stats:
if ((stat['appliance'] == stat_dict['appliance']) and
(stat['instance'] == stat_dict['instance'])):
index = self.stats.index(stat)
self.stats[index]['Nevents'] = self.stats[
index]['Nevents'] + number_of_events
instanceFound = True
if not instanceFound:
self.stats.append(stat_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def train(self, metergroup):\n # Inizialise stats and training data:\n self.stats = []\n self.onpower_train = pd.DataFrame(columns=['onpower'])\n self.offpower_train = pd.DataFrame(columns=['offpower'])\n self.duration_train = pd.DataFrame(columns=['duration'])\n\n # Calling train_on_chunk by meter:\n instance = 1 # initial instance.\n for meter in metergroup.meters:\n for chunk in meter.power_series():\n if chunk.empty:\n print(\"Chunk empty\")\n else:\n print(\"Training on chunk\")\n if self.sampling_method is not None:\n how = lambda df: getattr(df, self.sampling_method)()\n else:\n how = lambda df: df.mean()\n \n self.train_on_chunk(how(pd.DataFrame(chunk.resample(\n self.sample_period))),\n meter\n )\n\n instance += 1",
"def new_features(train, gby_feat, name, is_listen_type_feature, context_features, flow_features, fillna):\n \n # count and ratio on the all train\n count = gby_feat['is_listened'].transform('count')\n train[name + '_count'] = count\n train[name + '_count_bis'] = count\n train[name + '_ratio'] = gby_feat['is_listened'].transform('mean')\n \n if context_features:\n # Count and ratio for context observations\n count = gby_feat['is_context'].transform('sum')\n train[name + '_context_count'] = count\n train[name + '_context_count_bis'] = count\n train[name + '_context_ratio'] = gby_feat['is_listened_context'].transform('sum')/(1.*count)\n # Note that there should be NaN values if count=0.\n if fillna:\n train[name + '_context_ratio'].fillna(0.5, inplace=True)\n \n # Count and ration fot the flow observations\n if is_listen_type_feature:\n if flow_features:\n count = gby_feat['listen_type'].transform('sum')\n train[name + '_flow_count'] = count\n train[name + '_flow_count_bis'] = count\n train[name + '_flow_ratio'] = gby_feat['is_listened_flow'].transform('sum')/(1.*count)\n if fillna:\n train[name + '_flow_ratio'].fillna(0.5, inplace=True)\n \n count = gby_feat['is_context_flow'].transform('sum')\n train[name + '_context_flow_count'] = count\n train[name + '_context_flow_count_bis'] = count\n train[name + '_context_flow_ratio'] = gby_feat['is_listened_context_flow'].transform('sum')/(1.*count)\n if fillna:\n train[name + '_context_flow_ratio'].fillna(0.5, inplace=True)",
"def prepare_features(self, wavs, stage):\n wavs, lens = wavs\n if stage == sb.Stage.TRAIN:\n if hasattr(self.modules, \"env_corrupt\"):\n wavs_noise = self.modules.env_corrupt(wavs, lens)\n wavs = torch.cat([wavs, wavs_noise], dim=0)\n lens = torch.cat([lens, lens])\n\n if hasattr(self.hparams, \"augmentation\"):\n wavs = self.hparams.augmentation(wavs, lens)\n\n # Choose what features we want to use\n # todo: support multiple features and feature concat\n target_feats = self.hparams.embedding_features\n\n FEATURE_EXTRACTOR = {\n # 'cqt': self.modules.cqt,\n # 'fbanks': self.modules.fbanks\n 'fastaudiogauss': self.modules.fastaudiogauss\n # 'ifr': self.modules.ifr\n # 'mag': self.modules.mag\n # 'mfcc': self.modules.mfcc\n # 'leaf': self.modules.leaf\n # 'tdfbanks': self.modules.tdfbanks\n # 'pcen': self.modules.pcen\n # 'sincnet': self.modules.sincnet\n # 'trainable_fbanks': self.modules.trainable_fbanks\n }\n\n if len(target_feats) == 1:\n # wavs = wavs.unsqueeze(1).cuda()\n feats = FEATURE_EXTRACTOR[target_feats[0]](wavs)\n # feats = torch.unsqueeze(feats, 1)\n # feats = torch.transpose(feats, 1,2)\n if target_feats[0]=='cqt':\n log_spec = 10.0 * torch.log10(torch.clamp(feats, min=1e-30))\n log_spec -= 10.0\n feats=log_spec\n feats = torch.transpose(feats, 1,2)\n else:\n feats = []\n for target in target_feats:\n temp = FEATURE_EXTRACTOR[target](wavs)\n if target=='cqt':\n temp = torch.transpose(temp, 1,2)\n feats.append(temp)\n f =feats[0]\n for i in range(1, len(feats)):\n f = torch.cat((f, feats[i]), dim=2)\n feats = f\n feats = self.modules.mean_var_norm(feats, lens)\n return feats, lens",
"def _update(self, features: DataFrameLike) -> None:\n # add features\n self._features = (\n pd.concat([self._features, features], axis=1, sort=True)\n # fill nans resulting from concatenation where features does not\n # contain neighborless nodes (out-degree=0) on its axis\n .fillna(0)\n )\n # prune redundant features\n pruner = FeaturePruner(self._final_features, self._feature_group_thresh)\n features_to_drop = pruner.prune_features(self._features)\n self._features = self._features.drop(features_to_drop, axis=1)\n # save features that remain after pruning and that\n # have not previously been saved as final features\n retained = features.columns.difference(features_to_drop)\n feature_dict = as_frame(self._features[retained]).to_dict()\n self._final_features[self.generation_count] = feature_dict",
"def _extract_features(self, times):\n times[1] = time()\n data = {n:self._extract_feature(f) for (n,f) in self.features.items()} \n times[2] = time()\n return (data, times, os.getpid())",
"def no_overfitting(self):\n\n # Instance with minimun length should be the maximum length\n train_len = []\n [train_len.append(st['Nevents']) for st in self.stats]\n train_len = np.array(train_len)\n max_len = train_len[train_len != 0].min()\n\n # CROPS FEATURE SAMPLES\n onpower_train = pd.DataFrame()\n offpower_train = pd.DataFrame()\n duration_train = pd.DataFrame()\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n\n aux = self.onpower_train[start:end]\n aux = aux[:max_len]\n onpower_train = pd.concat([onpower_train, aux])\n\n aux = self.offpower_train[start:end]\n aux = aux[:max_len]\n offpower_train = pd.concat([offpower_train, aux])\n\n aux = self.duration_train[start:end]\n aux = aux[:max_len]\n duration_train = pd.concat([duration_train, aux])\n\n # udating stats:\n self.stats[ind]['Nevents'] = max_len\n\n self.onpower_train = onpower_train\n self.offpower_train = offpower_train\n self.duration_train = duration_train\n\n # RE-TRAINS FEATURES:\n self.__retrain(self.onpower, self.onpower_train)\n self.__retrain(self.offpower, self.offpower_train)\n self.__retrain(self.duration, self.duration_train)",
"def prepare_data(self):\n if not os.path.exists(self.hparams.data_cache_dir):\n os.mkdir(self.hparams.data_cache_dir)\n for mode, filepath in zip(['train', 'val', 'test'],\n [self.hparams.train_path, self.hparams.val_path, self.hparams.test_path]):\n if mode == 'train':\n label_mode = self.hparams.label_mode\n else:\n label_mode = 'major'\n cached_features_file = self._feature_file(mode, label_mode)\n\n if not os.path.exists(cached_features_file):\n logger.info('Creating features from dataset file at %s', filepath)\n examples = read_examples_from_file(filepath, mode, label_mode)\n features = convert_examples_to_features(\n examples,\n self.labels,\n self.hparams.max_seq_length,\n self.tokenizer,\n cls_token_at_end=bool(self.hparams.model_type in ['xlnet']),\n cls_token=self.tokenizer.cls_token,\n cls_token_segment_id=2 if self.hparams.model_type in ['xlnet'] else 0,\n sep_token=self.tokenizer.sep_token,\n sep_token_extra=bool(self.hparams.model_type in ['roberta']),\n pad_on_left=bool(self.hparams.model_type in ['xlnet']),\n pad_token=self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0],\n pad_token_segment_id=4 if self.hparams.model_type in ['xlnet'] else 0,\n pad_token_label_id=self.pad_token_label_id,\n )\n logger.info('Saving features into cached file %s', cached_features_file)\n torch.save(features, cached_features_file)",
"def _extract_features(self, all_batches, patch_size, train=True):\n # manually derive basic intensities features\n # takes 20 sec / 1048 images batch on my laptop in 4 cores //\n p = patch_size\n r = 512 // p\n labels = np.empty(0)\n feats = np.empty(0)\n for counter, tmp in enumerate(all_batches):\n # if counter == 2:\n # break\n if train:\n batch_img, batch_label = tmp\n else:\n batch_img = tmp\n batch_label = np.empty(0)\n # just for testing just use 20 batch as training set\n print('processing batch {}'.format(counter))\n t1 = time.time()\n batch_feats = np.asarray(\n parmap.map(\n self._get_features_from_batch_images,\n batch_img,\n r,\n p,\n pm_pbar=True))\n print(time.time() - t1)\n labels = np.concatenate(\n (labels, batch_label)) if labels.size else batch_label\n feats = np.concatenate(\n (feats, batch_feats)) if feats.size else batch_feats\n if train:\n return feats, labels\n else:\n return feats",
"def load_features(self, features):\n pass\n # self.features = features",
"def collect_data(self):\n categories = []\n list_of_feature_lists = []\n feature_sets = set()\n with open(self.csv_path, \"r\") as f:\n reader = csv.DictReader(f)\n # collecting some stats for FDR calculation:\n self.PSM_count = 0\n self.decoy_count = 0\n\n if self[\"dump_svm_matrix\"]:\n self.init_svm_matrix_dump()\n additional_matrix_info = []\n\n for i, row in enumerate(\n sorted(\n reader,\n reverse=self[\"bigger_scores_better\"],\n key=lambda d: float(d[self.col_for_sorting]),\n )\n ):\n\n features = self.row_to_features(row)\n\n if tuple(features) in feature_sets:\n continue\n feature_sets.add(tuple(features))\n\n category, psm_FDR = self.get_psm_category(row)\n\n list_of_feature_lists.append(features)\n categories.append(category)\n\n if self[\"dump_svm_matrix\"]:\n label = -1 if row_is_decoy(row) else 1\n sequence = \"{0}.{1}#{2}.{3}\".format(\n row[\"Sequence Pre AA\"].strip(),\n row[\"Sequence\"].strip(),\n row[\"Modifications\"].strip(),\n row[\"Sequence Post AA\"].strip(),\n )\n additional_matrix_info.append(\n {\n \"psm_id\": row[\"Spectrum Title\"].strip(),\n \"label\": label,\n \"scannr\": row[\"Spectrum Title\"].strip().split(\".\")[-2],\n \"peptide\": sequence,\n \"proteins\": self.parse_protein_ids(row[\"Protein ID\"]),\n }\n )\n\n if i % 1000 == 0:\n score_val = float(row[self.col_for_sorting])\n msg = (\n \"Generating feature matrix from input csv \"\n \"(line ~{0}) with score {1} and FDR \"\n \"{2}\".format(i, score_val, psm_FDR)\n )\n print(msg, end=\"\\r\")\n\n # All data points are collected in one big matrix, to make standardization possible\n print(\"\\nConverting feature matrix to NumPy array...\")\n X_raw = np.array(list_of_feature_lists, dtype=float)\n\n print(\"Replacing empty/NaN values with the mean of each column...\")\n self.nan_replacer = Imputer()\n self.nan_replacer.fit(X_raw)\n X_raw = self.nan_replacer.transform(X_raw)\n # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance\n print(\"Standardizing input matrix...\")\n self.scaler = SCALER.fit(X_raw)\n self.X = self.scaler.transform(X_raw)\n self.categories = np.array(categories)\n print()\n\n if self[\"dump_svm_matrix\"]:\n print(\"Dumping SVM matrix to\", self[\"dump_svm_matrix\"])\n\n for i, matrix_row in enumerate(self.X):\n matrix_row_info = additional_matrix_info[i]\n self.dump_svm_matrix_row(\n row=list(matrix_row),\n psm_id=matrix_row_info[\"psm_id\"],\n label=matrix_row_info[\"label\"],\n scannr=matrix_row_info[\"scannr\"],\n peptide=matrix_row_info[\"peptide\"],\n proteins=matrix_row_info[\"proteins\"],\n )\n\n print(\"Dumped SVM matrix to\", self[\"dump_svm_matrix\"])\n return",
"def create_new_features(self):\n train = self.train\n \n train['is_context'] = train['context_type'].isin(CONTEXT_TYPE_TEST)\n train['is_context_flow'] = train['listen_type'] * train['is_context']\n \n train['is_listened_context'] = train['is_listened'] * train['is_context']\n train['is_listened_flow'] = train['is_listened'] * train['listen_type']\n train['is_listened_context_flow'] = train['is_listened'] * train['is_context_flow']\n \n for feature in self.categorize_features:\n gby_feat = train.groupby(feature)\n new_features(train, gby_feat, feature, feature in self.listen_type_features, self.context_features, self.flow_features, self.fillna)\n \n # Variable combinations\n for feat1 in self.combo_features1:\n for feat2 in self.combo_features2:\n gby_feat = train.groupby([feat1, feat2])\n name = feat1 + '_' + feat2\n new_features(train, gby_feat, name, feat1 in self.listen_type_features, self.context_features, self.flow_features, self.fillna)",
"def combine_high_level_instance_features(crawl_dir):\n crawl_name = basename(normpath(crawl_dir))\n hi_level_feats_csv = join(dirname(dirname((dirname(realpath(__file__))))),\n \"data\",\n \"%s_hi_level_feats.csv\" % crawl_name)\n fability_scores_csv = join(dirname(dirname((dirname(realpath(__file__))))),\n \"data\",\n \"%s_fp_regression_labels.csv\" % crawl_name)\n # headers: url avg_f1 max_f1 avg_tpr max_tpr\n fability_df = pd.read_csv(fability_scores_csv, sep=',')\n df = pd.read_csv(hi_level_feats_csv, sep='\\t')\n # list of unique .onion domains\n domains = fability_df.url.unique()\n aggreage_feats = defaultdict(list)\n for domain in domains:\n instance_feats = df[df.i_site_domain == domain]\n # print domain, \"fability\", fability_df[fability_df.url == domain]\n for feat_name in instance_feats.columns:\n # Ignore features that starts with i_\n if feat_name.startswith(\"i_\"):\n continue\n feat_var_name = feat_name.replace(\"mo_\", \"var_\").replace(\"med_\", \"var_\")\n feat_std_dev_name = feat_name.replace(\"mo_\", \"stddev_\").replace(\"med_\", \"stddev_\")\n # print feat_name, \"STDDEV\", domain, instance_feats[feat_name].std()\n # add the variance\n # aggreage_feats[feat_var_name].append(instance_feats[feat_name].var())\n aggreage_feats[feat_std_dev_name].append(instance_feats[feat_name].std())\n if feat_name.startswith(\"mo_\"): # mode of the feature\n feat_mode = stats.mode(instance_feats[feat_name])[0][0]\n aggreage_feats[feat_name].append(feat_mode)\n elif feat_name.startswith(\"med_\"): # median of the feature\n aggreage_feats[feat_name].append(instance_feats[feat_name].median())\n else:\n print \"ERROR: Unrecognized high level feature name\", feat_name\n sys.exit(1)\n # add aggregate features to fability dataframe\n for feat in sorted(aggreage_feats.keys()):\n assert len(aggreage_feats[feat]) == 482\n fability_df[feat] = aggreage_feats[feat]\n # write the _aggregated high level feature file csv\n fability_df.to_csv(hi_level_feats_csv.replace(\".csv\", \"_aggregated.csv\"),\n sep=\"\\t\", index=False, index_label=False)",
"def generate_features(self):\n\n # For each STFT timebin, divide data into three bins and get mean power\n data_array = np.array([])\n bl_array = np.array([])\n\n for trial in range(self.data_stft_norm.shape[-1]): # Each trial\n for tbin in range(self.data_stft_norm.shape[-2]): # Each timebin\n for ch in range(self.data_stft_norm.shape[0]):\n data_array = np.append(data_array,[\n np.mean(self.data_stft_norm[ch, :2, tbin, trial]),\n np.mean(self.data_stft_norm[ch, 3:8, tbin, trial]),\n np.mean(self.data_stft_norm[ch, 9:27, tbin, trial])])\n\n data_array = np.reshape(data_array, (-1, 18))\n\n for trial in range(self.bl_stft_norm.shape[-1]): # Each trial\n for tbin in range(self.bl_stft_norm.shape[-2]): # Each timebin\n for ch in range(self.bl_stft_norm.shape[0]):\n bl_array = np.append(bl_array, [\n np.mean(self.bl_stft_norm[ch, :2, tbin, trial]),\n np.mean(self.bl_stft_norm[ch, 3:8, tbin, trial]),\n np.mean(self.bl_stft_norm[ch, 9:27, tbin, trial])])\n bl_array = np.reshape(bl_array, (-1, 18))\n\n X = np.append(data_array, bl_array, axis=0)\n y = np.append(np.ones(data_array.shape[0]), np.zeros(bl_array.shape[0]))\n\n return X, y",
"def main():\n \n # The following 5 command lines can be outcommented if the features are already created.\n # There is no need to process the data every single time.\n # Fine tuning the learning algorythm is much faster without that extra step.\n \n # by reading the train dataset the feature index is created.\n # First calling of the processdata function\n # Data limited to 300000\n featureIndexes = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000)\n print \"featureIndex generated!\"\n print len(featureIndexes)\n\n # Trainfeature is created using the indexfeatures...\n # Second calling of the processdata function\n trainFeatures, trainTargets, trainItemIds, trainPrices, trainUrls, trainPhones, trainEmails, trainLength = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000) # Original itemsLimit=300000\n\n # Building the test dataset... just like the training...\n testFeatures, testItemIds, testPrices, testUrls, testPhones, testEmails, testLength = processData(os.path.join(dataFolder,\"avito_test.tsv\"), featureIndexes)\n\n # Dumping data into file...\n # joblib.dump((trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds), os.path.join(dataFolder,\"train_data.pkl\"))\n joblib.dump((trainFeatures,trainTargets,trainItemIds,trainPrices,trainUrls,trainPhones,trainEmails,trainLength,\n testFeatures, testItemIds,testPrices,testUrls,testPhones,testEmails,testLength), os.path.join(dataFolder,\"SeparatedByCategory.pkl\"))\n\n\n # loading data pack...\n # trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds = joblib.load(os.path.join(dataFolder,\"train_data.pkl\"))\n\n #logging.info(\"Feature preparation done, fitting model...\")\n\n # Stochastic gradient model",
"def update(self) -> None:\n self.faithful = self.is_faithful()\n if self.faithful:\n old_class_names = self.class_names\n old_training_image_names = self.training_image_names\n self.class_names = self.find_class_names()\n self.training_image_names = self.find_training_image_names()\n self.extracted_features = list()\n if old_class_names != self.class_names or old_training_image_names != self.training_image_names:\n self.generate_csv_dictionary()\n return",
"def compute_features(loader, model, dataset_type, pooled_output,\n batch_size, num_workers, \n shuffle=False, device='cuda', \n filename=None, chunk_threshold=20000, balance=False):\n \n if filename is None or not os.path.exists(os.path.join(filename, f'0_features.npy')):\n\n all_latents, all_targets = [], []\n Nsamples, chunk_id = 0, 0 \n \n for batch_idx, batch in tqdm(enumerate(loader), total=len(loader)): \n \n with ch.no_grad():\n latents, targets = get_features_batch(batch, model, dataset_type, \n pooled_output=pooled_output, \n device=device)\n \n if batch_idx == 0:\n print(\"Latents shape\", latents.shape)\n \n \n Nsamples += latents.size(0)\n\n all_latents.append(latents.cpu())\n all_targets.append(targets.cpu())\n\n if filename is not None and Nsamples > chunk_threshold: \n if not os.path.exists(filename): os.makedirs(filename)\n np.save(os.path.join(filename, f'{chunk_id}_features.npy'), ch.cat(all_latents).numpy())\n np.save(os.path.join(filename, f'{chunk_id}_labels.npy'), ch.cat(all_targets).numpy())\n all_latents, all_targets, Nsamples = [], [], 0\n chunk_id += 1\n \n if filename is not None and Nsamples > 0:\n if not os.path.exists(filename): os.makedirs(filename)\n np.save(os.path.join(filename, f'{chunk_id}_features.npy'), ch.cat(all_latents).numpy())\n np.save(os.path.join(filename, f'{chunk_id}_labels.npy'), ch.cat(all_targets).numpy())\n\n\n feature_dataset = load_features(filename) if filename is not None else \\\n ch.utils.data.TensorDataset(ch.cat(all_latents), ch.cat(all_targets))\n if balance: \n feature_dataset = balance_dataset(feature_dataset)\n \n feature_loader = ch.utils.data.DataLoader(feature_dataset, \n num_workers=num_workers,\n batch_size=batch_size, \n shuffle=shuffle)\n \n return feature_dataset, feature_loader",
"def _featurize_data(train_examples, dev_examples, train_featurizer, dev_featuarizer):\n tic = time.time()\n print('Train examples [{}] featurization started.'.format(len(train_examples)))\n train_ready = [train_featurizer.build_features(example)\n for example in tqdm.tqdm(train_examples, total=len(train_examples))]\n print('Train examples featurized [{}] in {:.3f} sec'.format(len(train_examples),\n time.time() - tic))\n tic = time.time()\n print('Dev examples [{}] featurization started.'.format(len(dev_examples)))\n dev_ready = [dev_featuarizer.build_features(example)\n for example in tqdm.tqdm(dev_examples, total=len(dev_examples))]\n print('Dev examples featurized [{}] in {:.3f} sec'.format(len(dev_examples),\n time.time() - tic))\n return train_ready, dev_ready",
"def feature_extraction(self) -> None:\n # Add the hour, minute, and x column to the data\n self.df_poly[\"hour\"] = self.df_poly[\"time\"].apply(lambda y: y.hour)\n self.df_poly[\"minute\"] = self.df_poly[\"time\"].apply(lambda y: y.minute)\n self.df_poly[\"x\"] = self.df_poly[\"hour\"] * 60 + self.df_poly[\"minute\"]\n\n # Empty list to hold the feature names\n poly_feature_names = []\n\n # Add the poly columns to the df_poly\n for degree in [0, 1, 2, 3, 4, 5]:\n self.df_poly = poly(self.df_poly, degree)\n poly_feature_names.append(\"poly_\" + str(degree))\n\n # filterout + - inf, nan\n self.df_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ]\n\n # Save the poly feature name\n self.poly_feature_names = poly_feature_names\n feature_names = []\n\n #########################################################################################\n train_index_poly = self.df_poly[\n ~self.df_poly.isin([np.nan, np.inf, -np.inf]).any(1)\n ].index\n X_train_poly, y_train_poly = (\n self.df_poly[self.poly_feature_names].loc[train_index_poly],\n self.df_poly[\"y\"].loc[train_index_poly],\n )\n\n # Build the Polynomial Regression Model\n lin_reg = LinearRegression()\n lin_reg.fit(X_train_poly, y_train_poly)\n self.poly_model = lin_reg\n y_train_season = lin_reg.predict(X_train_poly)\n self.y_train_season_obj = y_train_season\n #########################################################################################\n\n for n in [10, 15, 20, 25, 30]:\n self.df = MOM(self.df, n)\n feature_names.append(\"MOM_\" + str(n))\n for n in [10, 15, 20, 25, 30]:\n self.df = ROC(self.df, n)\n feature_names.append(\"ROC_\" + str(n))\n for n in [1, 2, 3, 4, 5]:\n self.df = LAG(self.df, n)\n feature_names.append(\"LAG_\" + str(n))\n for n in [10, 20, 30]:\n self.df = MA(self.df, n)\n feature_names.append(\"MA_\" + str(n))\n\n self.df = self.df[\n ~self.df.isin([np.nan, np.inf, -np.inf]).any(1)\n ] # filterout + - inf, nan\n self.feature_names = feature_names",
"def feature_list(user_id: str, session: str, tap_feature: str, task_name: str, window: DataFrame):\n if window.shape[0] == 0:\n return None\n #Add user ID, session, task name\n features = [user_id, session, task_name]\n\n #Add orientation\n orientation = mode(window['Phone_orientation_accel'])\n features.append(orientation)\n\n #Add tap type\n features.append(tap_feature)\n\n lead_file = 'Accelerometer.csv'\n\n time_col = x_columns[lead_file]\n\n before_start = window[window[tap_feature] == 4].index[0]\n during_start = window[window[tap_feature] == 2].index[0]\n after_start = window[window[tap_feature] == 3].index[0] + 1\n after_end = window[window[tap_feature] == 5].index[0]\n\n before = window.loc[before_start : during_start]\n during = window.loc[during_start : after_start]\n after = window.loc[after_start : after_end + 1]\n\n if during.shape[0] < 2:\n # If there were none or one measurements during the tap,\n # add the closest ones\n during = window[during_start - 1 : after_start + 1]\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n # Feature 1: Mean during\n mean_during = mean(during[y])\n\n # Feature 2: SD during\n sd_during = sd(during[y])\n\n # Feature 3: Difference before/after\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n difference_before_after = mean_after - mean_before\n\n # Feature 4: Net change from tap\n net_change_due_to_tap = mean_during - mean_before\n\n # Feature 5: Maximal change from tap\n max_tap = max(during[y])\n max_change = max_tap - mean_before\n\n # Feature 6: Restoration time\n avgDiffs = []\n for j in range(after[y].shape[0]):\n subsequentValues = after[y].iloc[j:]\n subsequentDistances = subsequentValues.map(lambda x: abs(x - mean_before))\n averageDistance = mean(subsequentDistances)\n avgDiffs.append(averageDistance)\n time_of_earliest_restoration = min(avgDiffs)\n restoration_time = time_of_earliest_restoration - during[time_col].iloc[-1]\n\n # Feature 7: Normalized duration\n t_before_center = (before[time_col].iloc[0] + before[time_col].iloc[-1]) / 2 \n t_after_center = (after[time_col].iloc[0] + after[time_col].iloc[-1]) / 2\n normalized_duration = (t_after_center - t_before_center) / (mean_after - mean_before)\n \n # Feature 8: Ndormalized duration max\n t_max_in_tap = during[during[y] == max_tap][time_col].iloc[0]\n normalized_duration_max = (t_after_center - t_max_in_tap) / (mean_after - max_tap)\n\n\n features += [mean_during, sd_during, difference_before_after,\n net_change_due_to_tap, max_change, restoration_time,\n normalized_duration, normalized_duration_max]\n\n if random.choice(range(100))== 0:\n plot_tap('Plots/Project/' + session, before, during, after, time_col)\n \n return features",
"def _extract_features(self, row):\n ncep_data = self.ncep_data\n ncep_sfc_data = self.ncep_sfc_data\n date = row['date']\n features = dict(row)\n #reduce the dimensions of ncep_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_data = ncep_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['level','time'])\n #reduce the dimensions of ncep_sfc_data(xarray dataset) by fixing coordinates(lon,lat)\n #and then convert it to dataframe\n ncep_sfc_data = ncep_sfc_data[date.year] \\\n .sel(lon=row['longitude'], lat=row['latitude'], method='nearest') \\\n .to_dask_dataframe() \\\n .compute() \\\n .set_index(['time'])\n\n for level in self.levels:\n #features at different pressure level\n point = ncep_data.loc[level]\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_data_vars:\n features[\"{0}_0w_lvl_{1}\".format(data_var,level)] = v0w[data_var]\n features[\"{0}_1w_lvl_{1}\".format(data_var,level)] = v1w[data_var]\n features[\"{0}_2w_lvl_{1}\".format(data_var,level)] = v2w[data_var]\n features[\"{0}_3w_lvl_{1}\".format(data_var,level)] = v3w[data_var]\n #features at surface level\n point = ncep_sfc_data\n p1w = point.rolling(7).mean() # 1 Week mean\n p2w = point.rolling(14).mean() # 2 Week mean\n p3w = point.rolling(21).mean() # 3 Week mean\n # \n v0w = point.loc[date]\n v1w = p1w.loc[date]\n v2w = p2w.loc[date]\n v3w = p3w.loc[date]\n #\n for data_var in self.ncep_sfc_data_vars:\n features[\"{0}_0w\".format(data_var)] = v0w[data_var]\n features[\"{0}_1w\".format(data_var)] = v1w[data_var]\n features[\"{0}_2w\".format(data_var)] = v2w[data_var]\n features[\"{0}_3w\".format(data_var)] = v3w[data_var] \n\n return features",
"def mutation(self, base_offsprings, model_features_count) :",
"def compute_features_one_round(\n train_base_df,\n train_delta_df,\n test_df,\n df_config,\n feature_config_list,\n feature_map,\n filter_by_month,\n compute_load_ratio=False,\n):\n\n train_round_df = pd.concat([train_base_df, train_delta_df])\n max_train_timestamp = train_round_df[df_config[\"time_col_name\"]].max()\n max_test_timestamp = test_df[df_config[\"time_col_name\"]].max()\n train_test_diff = max_test_timestamp - max_train_timestamp\n max_horizon = ceil(train_test_diff.days * 24 + train_test_diff.seconds / 3600)\n train_features, feature_pipeline = compute_training_features(\n train_round_df, df_config, feature_config_list, feature_map, max_horizon,\n )\n\n test_features = compute_testing_features(test_df, feature_pipeline, feature_config_list, train_round_df)\n\n if compute_load_ratio:\n rolling_window_args = LOAD_RATIO_CONFIG[\"same_day_of_week_rolling_args\"]\n previous_years_lag_args = LOAD_RATIO_CONFIG[\"same_week_of_year_lag_args\"]\n same_week_day_hour_rolling_featurizer = SameDayOfWeekRollingWindowFeaturizer(\n df_config, input_col_names=df_config[\"target_col_name\"], max_horizon=max_horizon, **rolling_window_args\n )\n train_df_with_recent_load = same_week_day_hour_rolling_featurizer.transform(train_round_df)\n same_week_day_hour_rolling_featurizer.train_df = train_round_df\n test_df_with_recent_load = same_week_day_hour_rolling_featurizer.transform(test_df)\n\n time_col_name = df_config[\"time_col_name\"]\n ts_id_col_names = df_config[\"ts_id_col_names\"]\n keep_col_names = [time_col_name]\n if ts_id_col_names is not None:\n if isinstance(ts_id_col_names, list):\n keep_col_names = keep_col_names + ts_id_col_names\n else:\n keep_col_names.append(ts_id_col_names)\n lag_df_list = []\n start_week = rolling_window_args[\"start_week\"]\n end_week = start_week + rolling_window_args[\"agg_count\"]\n for i in range(start_week, end_week):\n col_old = df_config[\"target_col_name\"] + \"_\" + rolling_window_args[\"output_col_suffix\"] + \"_\" + str(i)\n col_new = col_old + \"_\" + previous_years_lag_args[\"output_col_suffix\"]\n col_ratio = \"recent_load_ratio_\" + str(i)\n\n same_week_day_hour_lag_featurizer = SameWeekOfYearLagFeaturizer(\n df_config,\n input_col_names=col_old,\n train_df=train_df_with_recent_load,\n max_horizon=max_horizon,\n **previous_years_lag_args\n )\n\n lag_df = same_week_day_hour_lag_featurizer.transform(test_df_with_recent_load)\n lag_df[col_ratio] = lag_df[col_old] / lag_df[col_new]\n lag_df_list.append(lag_df[keep_col_names + [col_ratio]].copy())\n\n test_features = reduce(\n lambda left, right: pd.merge(left, right, on=keep_col_names), [test_features] + lag_df_list,\n )\n\n if filter_by_month:\n test_month = test_features[\"month_of_year\"].values[0]\n train_features = train_features.loc[train_features[\"month_of_year\"] == test_month,].copy()\n\n train_features.dropna(inplace=True)\n\n return train_features, test_features",
"def _extract_features(self, ti, tf):\n makedir(self.featdir)\n\n # number of windows in feature request\n Nw = int(np.floor(((tf-ti)/self.dt)/(self.iw-self.io)))\n\n # features to compute\n cfp = ComprehensiveFCParameters()\n if self.compute_only_features:\n cfp = dict([(k, cfp[k]) for k in cfp.keys() if k in self.compute_only_features])\n else:\n # drop features if relevant\n _ = [cfp.pop(df) for df in self.drop_features if df in list(cfp.keys())]\n\n # check if feature matrix already exists and what it contains\n if os.path.isfile(self.featfile):\n t = pd.to_datetime(pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], usecols=['time'], infer_datetime_format=True).index.values)\n ti0,tf0 = t[0],t[-1]\n Nw0 = len(t)\n hds = pd.read_csv(self.featfile, index_col=0, nrows=1)\n hds = list(set([hd.split('__')[1] for hd in hds]))\n\n # option 1, expand rows\n pad_left = int((ti0-ti)/self.dto)# if ti < ti0 else 0\n pad_right = int(((ti+(Nw-1)*self.dto)-tf0)/self.dto)# if tf > tf0 else 0\n i0 = abs(pad_left) if pad_left<0 else 0\n i1 = Nw0 + max([pad_left,0]) + pad_right\n \n # option 2, expand columns\n existing_cols = set(hds) # these features already calculated, in file\n new_cols = set(cfp.keys()) - existing_cols # these features to be added\n more_cols = bool(new_cols)\n all_cols = existing_cols|new_cols\n cfp = ComprehensiveFCParameters()\n cfp = dict([(k, cfp[k]) for k in cfp.keys() if k in all_cols])\n\n # option 3, expand both\n if any([more_cols, pad_left > 0, pad_right > 0]) and self.update_feature_matrix:\n fm = pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], infer_datetime_format=True)\n if more_cols:\n # expand columns now\n df0, wd = self._construct_windows(Nw0, ti0)\n cfp0 = ComprehensiveFCParameters()\n cfp0 = dict([(k, cfp0[k]) for k in cfp0.keys() if k in new_cols])\n fm2 = extract_features(df0, column_id='id', n_jobs=self.n_jobs, default_fc_parameters=cfp0, impute_function=impute)\n fm2.index = pd.Series(wd)\n \n fm = pd.concat([fm,fm2], axis=1, sort=False)\n\n # check if updates required because training period expanded\n # expanded earlier\n if pad_left > 0:\n df, wd = self._construct_windows(Nw, ti, i1=pad_left)\n fm2 = extract_features(df, column_id='id', n_jobs=self.n_jobs, default_fc_parameters=cfp, impute_function=impute)\n fm2.index = pd.Series(wd)\n fm = pd.concat([fm2,fm], sort=False)\n # expanded later\n if pad_right > 0:\n df, wd = self._construct_windows(Nw, ti, i0=Nw - pad_right)\n fm2 = extract_features(df, column_id='id', n_jobs=self.n_jobs, default_fc_parameters=cfp, impute_function=impute)\n fm2.index = pd.Series(wd)\n fm = pd.concat([fm,fm2], sort=False)\n \n # write updated file output\n fm.to_csv(self.featfile, index=True, index_label='time')\n # trim output\n fm = fm.iloc[i0:i1] \n else:\n # read relevant part of matrix\n fm = pd.read_csv(self.featfile, index_col=0, parse_dates=['time'], infer_datetime_format=True, header=0, skiprows=range(1,i0+1), nrows=i1-i0)\n else:\n # create feature matrix from scratch \n df, wd = self._construct_windows(Nw, ti)\n fm = extract_features(df, column_id='id', n_jobs=self.n_jobs, default_fc_parameters=cfp, impute_function=impute)\n fm.index = pd.Series(wd)\n fm.to_csv(self.featfile, index=True, index_label='time')\n \n ys = pd.DataFrame(self._get_label(fm.index.values), columns=['label'], index=fm.index)\n return fm, ys",
"def _train(self):\n epoch_training_time = 0\n epoch_metrics_time = 0\n self.epoch_ += 1\n for i_batch, sample_batched in enumerate(self.dataloader):\n self.global_step_ += 1\n batch_start_time = time.time()\n data_sample = sample_batched[0].to(self.device)\n\n # Get model samples, either from replay buffer or noise.\n if self.model_samples_ is None:\n self.model_samples_ = deque(\n [\n self.net_.sample_from_prior(\n data_sample.shape[0], device=self.device\n ).detach()\n ]\n )\n elif len(self.model_samples_) > self.max_replay:\n self.model_samples_.popleft()\n replay_sample = random.choices(\n self.model_samples_,\n # favor more recent samples:\n weights=list(range(1, len(self.model_samples_) + 1)),\n )[0]\n noise_sample = self.net_.sample_from_prior(\n replay_sample.shape[0], device=self.device\n )\n mask = torch.rand(replay_sample.shape[0]) < self.replay_prob\n while len(mask.shape) < len(replay_sample.shape):\n # Add extra feature-dims\n mask.unsqueeze_(dim=-1)\n\n model_sample = torch.where(\n mask.to(self.device), replay_sample, noise_sample\n )\n\n self.net_.eval()\n # Run at least one iteration\n model_sample = self.net_.sample_fantasy(\n model_sample,\n num_mc_steps=self.num_mc_steps,\n mc_dynamics=self.sampler,\n ).detach()\n\n self.model_samples_.append(model_sample)\n\n # Sanity checks:\n assert (\n data_sample.shape[1:] == self.net_.input_shape\n ), \"Data is incompatible with network.\"\n assert (\n model_sample.shape[1:] == data_sample.shape[1:]\n ), \"Model and data samples are incompatible.\"\n\n # Forward gradient:\n self.net_.train()\n self.net_.zero_grad()\n data_energy_mean = self.net_(data_sample).mean()\n model_energy = self.net_(model_sample)\n model_energy_mean = model_energy.mean()\n\n # Estimate the odds of the data's energy based on a normal fitted to\n # model samples:\n data_erf = torch.erf(\n (data_energy_mean - model_energy_mean) / model_energy.std()\n )\n\n objective = data_energy_mean - model_energy_mean\n objective.backward()\n torch.nn.utils.clip_grad.clip_grad_value_(self.net_.parameters(), 1e2)\n self.optimizer_.step()\n\n batch_training_time = time.time() - batch_start_time\n epoch_training_time += batch_training_time\n self.logger_(energy_diff=float(objective))\n self.logger_(data_erf=float(data_erf))\n\n tr_metrics_start_time = time.time()\n for callback in self.step_callbacks:\n callback(\n net=self.net_,\n data_sample=data_sample,\n model_sample=model_sample,\n epoch=self.epoch_,\n global_step=self.global_step_,\n validation=False,\n )\n tr_metrics_time = time.time() - tr_metrics_start_time\n epoch_metrics_time += tr_metrics_time\n if self.verbose:\n print(\n f\"on epoch {self.epoch_}, batch {i_batch}, data erf: {data_erf}, objective: {objective}\"\n )\n print(f\"model energy: {model_energy_mean} +- {model_energy.std()}\")\n print(f\"data energy: {data_energy_mean}\")\n print(\n f\"training time: {batch_training_time:0.3f}s, metrics time: {tr_metrics_time:0.3f}s\"\n )\n means = self.logger_.means()\n if self.verbose:\n print(f\"on epoch {self.epoch_}\")\n for k, v in means.items():\n print(f\"{k}: {v}\")\n self.logger_.flush()\n means[\"loss\"] = energy_model.utils.constraints.add_soft_constraint(\n means[\"loss_ais\"], means[\"data_erf\"], lower_bound=-1\n )\n return means",
"def set_training_data(self):\n # Optional training data period\n # TODO: add training data period feature to training data query\n if not self.training_period == None:\n training_period_date = (datetime.datetime.utcnow() - timedelta(minutes=self.training_period)).strftime(\"%Y-%m-%d\")\n print(f\"Training data start date: {training_period_date}\")\n # Extract queried data from Athena\n #athena = athena_connect.Athena()\n #features_df = athena.pandas_read_athena(self.training_data_sql)\n with open('feature_sql.txt', 'w') as f:\n print(self.training_data_sql, file=f) \n features_df = pd.read_sql(self.training_data_sql, self.logic_db_engine())\n features_df.fillna(0, inplace=True)\n print(features_df.shape)\n features_df = features_df[max(self.feature_minutes_list):]\n print(features_df.shape)\n # Remove infinity string\n features_df.replace({'Infinity': 0}, inplace=True)\n # Convert all object fields to numeric except date fields\n object_col_list = features_df.columns[features_df.dtypes.eq('object')]\n object_col_list = [col for col in object_col_list if 'trade_date' not in col]\n features_df[object_col_list] = features_df[object_col_list].apply(pd.to_numeric, errors='coerce')\n self.training_df = features_df",
"def _process_datasets_all_frames(self):\n datasets = os.listdir(self.separated_root)\n for dataset in datasets:\n dataset_path = join(self.separated_root, dataset)\n\n for model in self.models:\n\n attacks_list = os.listdir(dataset_path)\n\n for attack in attacks_list:\n attack_path = join(dataset_path, attack)\n\n for prop in self.properties:\n property_alias = prop.get_property_alias()\n\n if os.path.exists(\n join(self.output_features, dataset, attack, property_alias, model.alias)):\n print('%s already extracted features' % dataset)\n continue\n\n path_train = join(attack_path, self.train_alias)\n path_test = join(attack_path, self.test_alias)\n\n X_train, y_train, indexes_train, samples_train = self._get_dataset_contents(path_train,\n property_alias)\n X_test, y_test, indexes_test, samples_test = self._get_dataset_contents(path_test,\n property_alias)\n\n output_features = join(self.output_features, dataset, attack, property_alias, model.alias)\n\n features_train = self._fetch_features(X_train, model, output_features, self.train_alias)\n features_test = self._fetch_features(X_test, model, output_features, self.test_alias)\n\n # saving features\n np.save(join(output_features, (NAME_FEATURES % self.train_alias)), features_train)\n np.save(join(output_features, (NAME_FEATURES % self.test_alias)), features_test)\n\n # saving targets\n np.save(join(output_features, (NAME_TARGETS % self.train_alias)), y_train)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n\n # saving samples names\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.train_alias)), samples_train)\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.test_alias)), samples_test)",
"def add_all_features(df):\n df.reset_index(drop=True, inplace=True)\n df = target_indicators(df)\n df = momentum_indicators(df)\n df = trend_indicators(df)\n df = volatility_indicators(df)\n df = volume_indicators(df)\n df = special_indicators(df)\n return df",
"def data_transform_timeFeature(self):\n #-------------------------------------------------------------------------\n # All new features are built into separate dataframes \n # and each of them are dumped into a separate file.\n #-------------------------------------------------------------------------\n self.strprint(\"self.df_invoice_line : \"+str(self.df_invoice_line.shape))\n \n self._dict_timeFeature_encoder, df_customers_timeFeature \\\n = p5_util.time_list_feature_build(self.df_invoice_line\\\n , self._list_new_feature, dict_encoder = self._dict_timeFeature_encoder\\\n ,is_verbose=self.is_verbose)\n \n #-------------------------------------------------------------------------\n # New time features are aggregated into a single dataframe.\n # Values are scaled.\n #-------------------------------------------------------------------------\n df_customers_timeFeature, self._std_scaler_timeFeature \\\n = p5_util.time_list_feature_restore(self._list_new_feature \\\n , std_scale = self._std_scaler_timeFeature\\\n , df_timeFeature = df_customers_timeFeature, is_verbose = self.is_verbose)\n\n self.strprint(\"df_customers_timeFeature : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #-------------------------------------------------------------------------\n n_dim=30\n root_name = 'time_pca_'\n # Column CustomerID is used into df_pca_reduce\n df_customers_timeFeature['CustomerID'] = df_customers_timeFeature.index\n \n df_customers_timeFeature, pca_timeFeature \\\n = p5_util.df_pca_reduce(df_customers_timeFeature, n_dim, root_name\\\n , p_is_scale=False, pca = self._pca_timeFeature)\n\n self.strprint(df_customers_timeFeature.shape)\n \n if self._pca_timeFeature is None:\n #----------------------------------------------------------------------\n # Data-model is in built process with part of data-set.\n #----------------------------------------------------------------------\n self._pca_timeFeature = pca_timeFeature\n p5_util.object_dump(df_customers_timeFeature\\\n , self._df_customers_timeFeature_fileName)\n else:\n #----------------------------------------------------------------------\n # Data-model is already built and this method is called \n # for a customer classification.\n #----------------------------------------------------------------------\n self._df_customers_timeFeature = df_customers_timeFeature.copy()\n return",
"def trainModel( self, featureTrain, classTrain):",
"def compute_means(opts, train_data, sampler):\n exp_names = train_data[\"exp_names\"].value\n means = []\n stds = []\n if opts[\"flags\"].normalize is True:\n running_stats = []\n # a running stat for each channel\n running_stats = RunningStats(3)\n # loop over the experiments\n\n # for exp_name in exp_names:\n for j in range(0, len(exp_names), 2):\n batch = sampler.get_minibatch()\n exp_name = batch[2][0]\n print(exp_name)\n # loop over the keys\n\n seq_len = train_data[\"exps\"][exp_name][\"labels\"].shape[0]\n temp_feat = batch[0].cpu().numpy()\n temp_feat = temp_feat[:seq_len, :, :, :]\n\n channel_feats = []\n for i in range(3):\n # channel_feat = temp_feat[0, :, i, :]\n # sample frames\n channel_feat = temp_feat[::100, i, :]\n channel_feat = channel_feat.reshape(-1, 1)\n channel_feats.append(channel_feat)\n\n channel_feats = np.concatenate(channel_feats, axis=1)\n running_stats.add_data(\n channel_feat\n )\n\n means = running_stats.mean.tolist()\n stds = running_stats.compute_std().tolist()\n else:\n means = [.5, .5, .5]\n stds = [1, 1, 1]\n # for key in opts[\"flags\"].feat_keys:\n # temp_feat = train_data[\"exps\"][exp_names[0]][key].value\n # mean = np.zeros((temp_feat.shape[2], ))\n # std = np.ones((temp_feat.shape[2], ))\n # means.append(mean)\n # stds.append(std)\n normalize = transforms.Normalize(mean=means,\n std=stds)\n\n return normalize"
]
| [
"0.5968119",
"0.5825508",
"0.5797949",
"0.57781553",
"0.57452273",
"0.56985676",
"0.5671647",
"0.56669515",
"0.56569123",
"0.56368",
"0.56169164",
"0.56151366",
"0.56011236",
"0.55730826",
"0.5531826",
"0.55290115",
"0.54975414",
"0.5486447",
"0.5474995",
"0.5474167",
"0.5437403",
"0.542742",
"0.53977853",
"0.53938204",
"0.53912413",
"0.5378955",
"0.5349944",
"0.5340591",
"0.5338673",
"0.5334134"
]
| 0.64443403 | 0 |
Passes each chunk from the mains generator to disaggregate_chunk() and passes the output to _write_disaggregated_chunk_to_datastore(). Will have a default implementation in the superclass. Can be overridden for simpler in-memory disaggregation, or more complex out-of-core disaggregation. | def disaggregate(self, mains, output_datastore):
building_path = '/building{}'.format(mains.building())
# only writes one appliance and meter per building
meter_instance = 2
mains_data_location = '{}/elec/meter1'.format(building_path)
#dis_main = pd.DataFrame()
chunk_number = 0
timeframes = []
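    # Disaggregate each mains chunk independently and append the result to
    # the output datastore as soon as it is produced, keeping memory use low.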
for chunk in mains.power_series():
# Record metadata
timeframes.append(chunk.timeframe)
measurement = chunk.name
cols = pd.MultiIndex.from_tuples([chunk.name])
dis_chunk = self.disaggregate_chunk(
            pd.DataFrame(chunk.resample(self.sample_period).agg(self.sampling_method)))
#dis_main = pd.concat([dis_main, dis_chunk])
chunk_number += 1
print(str(chunk_number) + " chunks disaggregated")
# Write appliance data to disag output
key = '{}/elec/meter{}'.format(building_path, meter_instance)
df = pd.DataFrame(
dis_chunk.values, index=dis_chunk.index,
columns=cols)
output_datastore.append(key, df)
# Copy mains data to disag output
output_datastore.append(key=mains_data_location,
value=pd.DataFrame(chunk, columns=cols))
# Saving output datastore:
#output_datastore.append(key=mains.key, value=dis_main)
##################################
# Add metadata to output_datastore
# TODO: `preprocessing_applied` for all meters
# TODO: split this metadata code into a separate function
# TODO: submeter measurement should probably be the mains
# measurement we used to train on, not the mains measurement.
date_now = datetime.now().isoformat().split('.')[0]
output_name = 'NILMTK_MLE_' + date_now
resample_seconds = 10
mains_data_location = '{}/elec/meter1'.format(building_path)
# DataSet and MeterDevice metadata:
meter_devices = {
'MLE': {
'model': 'MLE',
'sample_period': resample_seconds,
'max_sample_period': resample_seconds,
'measurements': [{
'physical_quantity': measurement[0],
'type': measurement[1]
}]
},
'mains': {
'model': 'mains',
'sample_period': resample_seconds,
'max_sample_period': resample_seconds,
'measurements': [{
'physical_quantity': measurement[0],
'type': measurement[1]
}]
}
}
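    # Collapse the per-chunk timeframes into a single overall timeframe for
    # the dataset metadata.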
merged_timeframes = merge_timeframes(timeframes, gap=resample_seconds)
total_timeframe = TimeFrame(merged_timeframes[0].start,
merged_timeframes[-1].end)
dataset_metadata = {'name': output_name, 'date': date_now,
'meter_devices': meter_devices,
'timeframe': total_timeframe.to_dict()}
output_datastore.save_metadata('/', dataset_metadata)
# Building metadata
# Mains meter:
elec_meters = {
1: {
'device_model': 'mains',
'site_meter': True,
'data_location': mains_data_location,
'preprocessing_applied': {}, # TODO
'statistics': {
'timeframe': total_timeframe.to_dict()
}
}
}
# Appliances and submeters:
appliances = []
appliance = {
'meters': [meter_instance],
'type': 'kettle',
'instance': 1
# TODO this `instance` will only be correct when the
# model is trained on the same house as it is tested on.
# https://github.com/nilmtk/nilmtk/issues/194
}
appliances.append(appliance)
elec_meters.update({
meter_instance: {
'device_model': 'MLE',
'submeter_of': 1,
'data_location': ('{}/elec/meter{}'
.format(building_path, meter_instance)),
'preprocessing_applied': {}, # TODO
'statistics': {
'timeframe': total_timeframe.to_dict()
}
}
})
elec_meters[meter_instance]['name'] = 'kettle'
building_metadata = {
'instance': mains.building(),
'elec_meters': elec_meters,
'appliances': appliances
}
output_datastore.save_metadata(building_path, building_metadata) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disaggregate_chunk(self, test_mains):\n raise NotImplementedError()",
"def in_memory_rechunk(\n inputs: List[Tuple[core.ChunkKey, xarray.Dataset]],\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n key, dataset = consolidate_chunks(inputs)\n yield from split_chunks(key, dataset, target_chunks)",
"def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch",
"def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]",
"def dedisperse(self, dm, gulp=10000, **kwargs):\n chan_delays = self.header.getDMdelays(dm)\n max_delay = int(chan_delays.max())\n gulp = max(2 * max_delay, gulp)\n tim_len = self.header.nsamples - max_delay\n tim_ar = np.zeros(tim_len, dtype=\"float32\")\n for nsamps, ii, data in self.readPlan(gulp, skipback=max_delay, **kwargs):\n lib.dedisperse(\n data,\n tim_ar,\n chan_delays,\n max_delay,\n self.header.nchans,\n nsamps,\n ii * (gulp - max_delay),\n )\n return TimeSeries(tim_ar, self.header.newHeader({\"nchans\": 1, \"refdm\": dm}))",
"def __iter__(self):\n\n # collector will fetch chunksize array for each 'get' call\n collector = FIFOArray(self.chunksize, self.axis)\n\n # make tmp array to hold generated subarrs\n tmp = []\n tmp_size = 0\n for subarr in self.data(**self.kwargs):\n\n tmp.append(subarr)\n tmp_size += subarr.shape[self.axis]\n\n # if tmp exceeds chunksize put in collector\n if tmp_size >= self.chunksize:\n arr = np.concatenate(tmp, axis=self.axis)\n collector.put(arr)\n\n # fetch chunksize till not full\n while collector.full():\n yield collector.get()\n\n # place leftover back into tmp and empty collector\n tmp = [collector.queue]\n tmp_size = collector.qsize()\n collector.queue = np.array([])\n\n else:\n\n # append to tmp again\n continue\n\n # else runs after normal loop exit -- required here\n else: #pylint: disable=useless-else-on-loop\n\n # yield whatever is left in tmp (its below chunksize)\n remaining = np.concatenate(tmp, axis=self.axis)\n if remaining.size > 0:\n yield remaining",
"def unchunk():\n\n @filters\n def _dagpype_internal_fn_act(target):\n try:\n while True:\n a = (yield)\n if len(a) == 0:\n continue\n if a.ndim == 1:\n for i in range(a.shape[0]):\n target.send(a[i]) \n else:\n for i in range(a.shape[0]):\n target.send(tuple(a[i]))\n except GeneratorExit:\n if len(l) > 0:\n target.send(numpy.array(l, dtype = dtype_)) \n \n return _dagpype_internal_fn_act",
"def disaggregate_chunk(self, chunk):\n\n # An resistive element has active power equal to apparent power.\n # Checking power units.\n units = self.__physical_quantity(chunk)\n\n # EVENTS OUT OF THE CHUNK:\n # Delta values:\n column_name = 'diff_' + units[1]\n chunk[column_name] = chunk.loc[:, units].diff()\n\n # Filter the noise.\n chunk['onpower'] = (chunk[column_name] > self.powerNoise)\n chunk['offpower'] = (chunk[column_name] < -self.powerNoise)\n events = chunk[(chunk.onpower == True) | (chunk.offpower == True)]\n\n detection_list = []\n singleOnevent = 0\n # Max Likelihood algorithm (optimized):\n for onevent in events[events.onpower == True].iterrows():\n # onTime = onevent[0]\n # deltaOn = onevent[1][1]\n # windowning:\n offevents = events[(events.offpower == True) & (events.index > onevent[0]) & (\n events.index < onevent[0] + timedelta(seconds=self.timeWindow))]\n # Filter paired events:\n offevents = offevents[\n abs(onevent[1][1] - offevents[column_name].abs()) < self.powerPair]\n\n # Max likelihood computation:\n if not offevents.empty:\n # pon = self.__pdf(self.onpower, onevent[1][1])\n for offevent in offevents.iterrows():\n # offTime = offevent[0]\n # deltaOff = offevent[1][1]\n # poff = self.__pdf(self.offpower, offevent[1][1])\n # duration = offevent[0] - onTime\n # pduration = self.__pdf(self.duration, (offevent[0] - onTime).total_seconds())\n likelihood = self.__pdf(self.onpower, onevent[1][1]) * \\\n self.__pdf(self.offpower, offevent[1][1]) * \\\n self.__pdf(self.duration, (offevent[0] - \\\n onevent[0]).total_seconds())\n detection_list.append(\n {'likelihood': likelihood, 'onTime': onevent[0], \n 'offTime': offevent[0], 'deltaOn': onevent[1][1]})\n else:\n singleOnevent += 1\n\n # Passing detections to a pandas.DataFrame\n detections = pd.DataFrame(\n columns=('onTime', 'offTime', 'likelihood', 'deltaOn'))\n\n for i in range(len(detection_list)):\n detections.loc[i] = [detection_list[i]['onTime'], detection_list[i][\n 'offTime'], detection_list[i]['likelihood'], detection_list[i]['deltaOn']]\n\n detections = detections[detections.likelihood >= self.thLikelihood]\n\n # Constructing dis_chunk (power of disaggregated appliance)\n dis_chunk = pd.DataFrame(\n index=chunk.index, columns=[str(units[0]) + '_' + str(units[1])])\n dis_chunk.fillna(0, inplace=True)\n\n # Ruling out overlapped detecttions ordering by likelihood value.\n detections = detections.sort('likelihood', ascending=False)\n for row in detections.iterrows():\n # onTime = row[1][0] offTime = row[1][1] deltaOn = row[1][3]\n #import ipdb\n #ipdb.set_trace()\n if ((dis_chunk[(dis_chunk.index >= row[1][0]) &\n (dis_chunk.index < row[1][1])].sum().values[0]) == 0):\n # delta = chunk[chunk.index == onTime][column_name].values[0]\n dis_chunk[(dis_chunk.index >= row[1][0]) & (\n dis_chunk.index < row[1][1])] = row[1][3]\n\n # Stat information:\n print(str(len(events)) + \" events found.\")\n print(str(len(events[events.onpower == True])) + \" onEvents found\")\n print(str(singleOnevent) + \" onEvents no paired.\")\n\n return dis_chunk",
"def runingest(sdms):\n\n NotImplementedError",
"def generator(self):\n\n # generates speech turns long enough to contain at least one segment\n speech_turns = super(SpeechTurnSubSegmentGenerator, self).generator()\n\n # number of speech turns per \"speech turn batch\"\n if self.per_fold is not None:\n n_speech_turns = self.per_label * self.per_fold\n else:\n n_speech_turns = self.per_label * len(self.data_)\n\n endOfBatch = EndOfBatch()\n while True:\n\n # for each speech turn in batch\n for z in range(n_speech_turns):\n speech_turn = next(speech_turns)\n\n # for each segment in speech turn\n for X in self.iter_segments_(speech_turn['X']):\n\n # all but 'X' fields are left unchanged\n segment = dict(speech_turn)\n segment['X'] = X\n\n # remember that this segment belongs to this speech turn\n segment['z'] = z\n\n yield segment\n\n # let `batchify` know that the \"segment batch\" is complete\n yield endOfBatch",
"def as_generator(self, shuffle=False, n_workers=0):\n\n data_loader = DataLoader(\n dataset=self, shuffle=shuffle, num_workers=n_workers\n )\n for sample in cycle(data_loader):\n sample_batch_dim_removed = {}\n for key, val in sample.items():\n sample_batch_dim_removed[key] = val[0]\n yield sample_batch_dim_removed",
"def flow(self, batch_size=32, output='both', crops=0):\n while True:\n for dataset in self.input_sets:\n X = self.training_set['input/'+dataset]\n y = self.training_set['target/'+dataset]\n y_seg = self.training_set['seg_map/'+dataset]\n\n for i in range(int(math.ceil(X.shape[0]/2000))):\n index = list(range(0,X.shape[0]))\n sample = random.sample(index, batch_size)\n sample.sort()\n X_batch = X[sample, ...]\n y_batch = y[sample, ...]\n y_seg_batch = y_seg[sample, ...]\n X_batch = self.augment(X_batch)\n\n if crops > 0:\n (X_batch, y_batch,\n y_seg_batch) = _augmentors.random_crops(\n X_batch, y_batch, y_seg_batch, n_crops=crops, crop_dim=20)\n\n if output=='both':\n yield (X_batch, [y_batch, y_seg_batch])\n elif output=='seg':\n yield (X_batch, y_seg)\n elif output=='density':\n yield (X_batch, y_batch)\n else:\n raise Exception('output must be \"density\", \"seg\" or \"both\"')",
"def __iter__(self):\n for b in self.dl: \n yield to_device(b, self.device) # yield pauses the execution, not store values in memory, forgets about them once iterated\n # no need to remove batch of data from device, done automatically",
"def collect(self):\n while self.proc is not None:\n self.read()\n if not len(self.datalines):\n return\n while len(self.datalines):\n # pop the first node of list\n yield self.datalines.pop(0)",
"def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]",
"def _dataset_split_generators(self):\n raise NotImplementedError()",
"def _process(self):\n export_collect_medias(self.kwargs[\"collect\"])",
"def chunks(X, y, batch_size=32, augmentation_times=4, thickness=0,\n data_generator=ImageDataGenerator(dim_ordering=\"th\"), is_training=True):\n while 1:\n prct_pop, prct1 = 0.2, 0.2 # (1) of all the training set, how much we keep (2) % of 1's\n idx_1 = [i for i in range(len(y)) if y[i] == 1]\n idx_1 = random.sample(idx_1, int(prct_pop * len(idx_1)))\n idx_0 = [i for i in range(len(y)) if y[i] == 0]\n idx_0 = random.sample(idx_0, int(len(idx_1) / prct1))\n selected_samples = idx_0 + idx_1\n random.shuffle(selected_samples)\n logging.info(\"Final downsampled dataset stats: TP:%d, FP:%d\" % (\n sum(y[selected_samples]), len(y[selected_samples]) - sum(y[selected_samples])))\n\n i, good = 0, 0\n lenX = len(selected_samples)\n for X_batch, y_batch in data_generator.flow(X[selected_samples], y[selected_samples], batch_size=batch_size,\n shuffle=is_training):\n i += 1\n if good * batch_size > lenX * augmentation_times or i > 100: # stop when we have augmented enough the batch\n break\n if X_batch.shape[0] != batch_size: # ensure correct batch size\n continue\n good += 1\n yield X_batch, y_batch",
"def _process(self):\n export_collect_data(self.kwargs[\"collect\"])",
"def rechunk_bgen(\n ds: Dataset,\n output: Union[PathType, MutableMapping[str, bytes]],\n *,\n chunk_length: int = 10_000,\n chunk_width: int = 1_000,\n compressor: Optional[Any] = zarr.Blosc(cname=\"zstd\", clevel=7, shuffle=2),\n probability_dtype: Optional[DType] = \"uint8\",\n max_mem: str = \"4GB\",\n pack: bool = True,\n tempdir: Optional[PathType] = None,\n) -> Dataset:\n if isinstance(output, Path):\n output = str(output)\n\n chunk_length = min(chunk_length, ds.dims[\"variants\"])\n chunk_width = min(chunk_width, ds.dims[\"samples\"])\n\n if pack:\n ds = pack_variables(ds)\n\n encoding = encode_variables(\n ds,\n chunk_length=chunk_length,\n chunk_width=chunk_width,\n compressor=compressor,\n probability_dtype=probability_dtype,\n )\n target_chunks = {\n var: encoding[var][\"chunks\"] for var in encoding if \"chunks\" in encoding[var]\n }\n target_options = {\n var: {k: v for k, v in encoding[var].items() if k != \"chunks\"}\n for var in encoding\n }\n with tempfile.TemporaryDirectory(\n prefix=\"bgen_to_zarr_\", suffix=\".zarr\", dir=tempdir\n ) as tmpdir:\n rechunked = rechunker_api.rechunk(\n ds,\n max_mem=max_mem,\n target_chunks=target_chunks,\n target_store=output,\n target_options=target_options,\n temp_store=tmpdir,\n executor=\"dask\",\n )\n rechunked.execute()\n\n ds: Dataset = xr.open_zarr(output, concat_characters=False) # type: ignore[no-untyped-call]\n if pack:\n ds = unpack_variables(ds)\n\n return ds",
"def unshift(self, num_chunks):\n for _ in xrange(num_chunks):\n self.probability /= self.graph.ftp(self[-2], self[-1])\n num_leaves = len(self[-1].leaves)\n del self.leaves[-num_leaves:]\n del self[-1]",
"def batch(self, lo=None, hi=None, max_recs=None, max_bytes=None,\n preserve=True, packer=None, txn=None, max_phys=None,\n grouper=None):\n assert max_bytes or max_recs, 'max_bytes and/or max_recs is required.'\n txn = txn or self.engine\n packer = packer or self.packer\n it = self._iter(txn, None, lo, hi, False, None, True, max_phys)\n groupval = None\n items = []\n\n for batch, key, data in it:\n if preserve and batch:\n self._write_batch(txn, items, packer)\n else:\n txn.delete(encode_keys(self.prefix, key))\n items.append((key, data))\n if max_bytes:\n _, encoded = self._prepare_batch(items, packer)\n if len(encoded) > max_bytes:\n items.pop()\n self._write_batch(txn, items, packer)\n items.append((key, data))\n done = max_recs and len(items) == max_recs\n if (not done) and grouper:\n val = grouper(self.encoder.unpack(data))\n done = val != groupval\n groupval = val\n if done:\n self._write_batch(txn, items, packer)\n self._write_batch(txn, items, packer)",
"def postprocess_chunk(self, chunk):\n\n # Apply the current season to the chunk.\n if self.season:\n self.season.transform(chunk)\n\n # Since this chunk hasn't been given to any player yet, there's no\n # conceivable way that any meaningful damage has been accumulated;\n # anybody loading any part of this chunk will want the entire thing.\n # Thus, it should start out undamaged.\n chunk.clear_damage()\n\n # Register the chunk's entities with our parent factory.\n for entity in chunk.entities:\n self.factory.register_entity(entity)\n\n # Return the chunk, in case we are in a Deferred chain.\n return chunk",
"def __call__(self, d):\n try:\n with self.statistics_lock:\n self.statistics_data['input']+=1\n if self.batch_size<=1:\n try:\n self.output(self._work(d))\n except Discard:\n with self.statistics_lock:\n self.statistics_data['discarded']+=1\n except Exception, e:\n logging.exception(\"Caught exception, Data discarded\")\n with self.statistics_lock:\n self.statistics_data['discarded']+=1\n finally:\n self.task_done()\n else:\n self.batch_repo.append(d)\n if len(self.batch_repo)>=self.batch_size:\n self._batch_work()\n except:\n pass",
"def update_mapping(self, flush=False, override_chunks=[]):\n\n # get set of filenames from File objects that have already been mapped\n already_mapped_inputs = set(map(lambda x: x.get_name(),self.get_inputs(flatten=True)))\n already_mapped_outputs = map(lambda x: x.get_index(),self.get_outputs())\n nextidx = 1\n if already_mapped_outputs:\n nextidx = max(already_mapped_outputs)+1\n original_nextidx = nextidx+0\n # if dataset is \"closed\" and we already have some inputs, then\n # don't bother doing get_files() again (wastes a DBS query)\n if (len(already_mapped_inputs) > 0 and not self.open_dataset):\n files = []\n else:\n files = [f for f in self.sample.get_files() if f.get_name() not in already_mapped_inputs]\n self.queried_nevents = self.sample.get_nevents()\n\n flush = (not self.open_dataset) or flush\n prefix, suffix = self.output_name.rsplit(\".\",1)\n if self.split_within_files:\n if self.total_nevents < 1 or self.events_per_output < 1:\n raise Exception(\"If splitting within files (presumably for LHE), need to specify total_nevents and events_per_output\")\n nchunks = int(self.total_nevents / self.events_per_output)\n chunks = [files for _ in range(nchunks)]\n leftoverchunk = []\n else:\n chunks, leftoverchunk = Utils.file_chunker(files, events_per_output=self.events_per_output, files_per_output=self.files_per_output, flush=flush)\n if len(override_chunks) > 0:\n self.logger.info(\"Manual override to have {0} chunks\".format(len(override_chunks)))\n chunks = override_chunks\n leftoverchunk = []\n for chunk in chunks:\n if not chunk: continue\n output_path = \"{0}/{1}_{2}.{3}\".format(self.get_outputdir(),prefix,nextidx,suffix)\n output_file = EventsFile(output_path)\n nevents_in_output = sum(map(lambda x: x.get_nevents(), chunk))\n output_file.set_nevents(nevents_in_output)\n self.io_mapping.append([chunk, output_file])\n nextidx += 1\n if (nextidx-original_nextidx > 0):\n self.logger.info(\"Updated mapping to have {0} more entries\".format(nextidx-original_nextidx))",
"def _group_hook_memory_cleanup(\n self,\n accumulation,\n group,\n keep_gram_mat,\n keep_gram_evals,\n keep_gram_evecs,\n keep_gammas,\n keep_lambdas,\n keep_batch_size,\n ):\n buffers = []\n\n if not keep_gram_mat:\n buffers.append(\"_gram_mat\")\n if not keep_gram_evals:\n buffers.append(\"_gram_evals\")\n if not keep_gram_evecs:\n buffers.append(\"_gram_evecs\")\n if not keep_gammas and self._compute_gammas:\n buffers.append(\"_gammas\")\n if not keep_lambdas and self._compute_lambdas:\n buffers.append(\"_lambdas\")\n if not keep_batch_size:\n buffers.append(\"_batch_size\")\n\n group_id = id(group)\n for b in buffers:\n\n if self._verbose:\n print(f\"Group {group_id}: Delete '{b}'\")\n\n getattr(self, b).pop(group_id)",
"def change_chunks(\n self,\n before: typing.Union[typing.Tuple[int, int], None],\n after: typing.Union[typing.Tuple[int, int], None],\n generate_chunks=True,\n load_immediate=True,\n dimension=None,\n ):\n if shared.IS_CLIENT and self.get_active_dimension() is None:\n return\n\n if dimension is None:\n dimension = self.get_active_dimension()\n\n before_set = set()\n after_set = set()\n pad = 4\n for dx in range(-pad, pad + 1):\n for dz in range(-pad, pad + 1):\n if before is not None:\n x, z = before\n if (dx + x) ** 2 + (dz + z) ** 2 <= (pad + 1) ** 2:\n before_set.add((x + dx, z + dz))\n if after is not None:\n x, z = after\n if (dx + x) ** 2 + (dz + z) ** 2 <= (pad + 1) ** 2:\n after_set.add((x + dx, z + dz))\n\n # show = after_set - before_set\n hide = before_set - after_set\n for chunk in hide:\n # todo: fix this, this was previously hiding chunks randomly....\n pyglet.clock.schedule_once(wrap_method(dimension.hide_chunk, chunk), 0.1)\n c = dimension.get_chunk(*chunk, generate=False, create=False)\n\n if c and c.is_loaded() and not shared.IS_NETWORKING:\n shared.tick_handler.schedule_once(\n shared.world.save_file.dump_async(\n None,\n \"minecraft:chunk\",\n dimension=self.active_dimension,\n chunk=chunk,\n )\n )\n\n for chunk in after_set:\n c = dimension.get_chunk(*chunk, generate=False, create=False)\n\n if c and c.is_visible():\n continue\n\n c = dimension.get_chunk(*chunk, generate=False)\n pyglet.clock.schedule_once(wrap_method(dimension.show_chunk, c), 0.1)\n\n if not shared.IS_NETWORKING and shared.world.save_file:\n if not load_immediate:\n pyglet.clock.schedule_once(\n lambda _: shared.world.save_file.read(\n \"minecraft:chunk\",\n dimension=self.active_dimension,\n chunk=chunk,\n ),\n 0.1,\n )\n else:\n shared.world.save_file.read(\n \"minecraft:chunk\", dimension=self.active_dimension, chunk=chunk\n )\n else:\n dimension.get_chunk(*chunk, generate=False)\n\n if not after or shared.IS_NETWORKING:\n return\n\n for dx in range(-pad, pad + 1):\n for dz in range(-pad, pad + 1):\n if (\n generate_chunks\n and abs(dx) <= mcpython.common.config.CHUNK_GENERATION_RANGE\n and abs(dz) <= mcpython.common.config.CHUNK_GENERATION_RANGE\n and self.config[\"enable_auto_gen\"]\n ):\n chunk = dimension.get_chunk(\n dx + after[0], dz + after[1], generate=False\n )\n if not chunk.is_generated():\n shared.world_generation_handler.add_chunk_to_generation_list(\n chunk\n )",
"def __iter__(self):\n batch = []\n for sample in self.dataset:\n batch.append(sample)\n if len(batch) == self.size:\n yield self.transform(batch)\n batch = []\n if batch:\n # the last batch may be less then batch size.\n yield self.transform(batch)",
"def run(self, year):\r\n cache_directory = self.config['cache_directory']\r\n simulation_state = SimulationState()\r\n simulation_state.set_cache_directory(cache_directory)\r\n simulation_state.set_current_time(year)\r\n attribute_cache = AttributeCache()\r\n sc = SessionConfiguration(new_instance=True,\r\n package_order=self.config['dataset_pool_configuration'].package_order,\r\n in_storage=attribute_cache)\r\n dataset_pool = sc.get_dataset_pool()\r\n\r\n hh_set = dataset_pool.get_dataset('household')\r\n zone_set = dataset_pool.get_dataset('zone')\r\n job_set = dataset_pool.get_dataset('job')\r\n locations_to_disaggregate = self.config['travel_model_configuration']['locations_to_disaggregate']\r\n len_locations_to_disaggregate = len(locations_to_disaggregate)\r\n if len_locations_to_disaggregate > 0:\r\n primary_location = locations_to_disaggregate[0]\r\n if len_locations_to_disaggregate > 1:\r\n intermediates_string = \", intermediates=[\"\r\n for i in range(1, len_locations_to_disaggregate):\r\n intermediates_string = \"%s%s, \" % (intermediates_string, locations_to_disaggregate[i])\r\n intermediates_string = \"%s]\" % intermediates_string\r\n else:\r\n intermediates_string = \"\"\r\n hh_set.compute_variables(['%s = household.disaggregate(%s.%s %s)' % (zone_set.get_id_name()[0],\r\n primary_location, zone_set.get_id_name()[0],\r\n intermediates_string)], \r\n dataset_pool=dataset_pool)\r\n job_set.compute_variables(['%s = job.disaggregate(%s.%s %s)' % (zone_set.get_id_name()[0],\r\n primary_location, zone_set.get_id_name()[0],\r\n intermediates_string)], \r\n dataset_pool=dataset_pool)\r\n \r\n return self._call_input_file_writer(year, dataset_pool)",
"def process_dis_batch(config, shared, batch_size, device, dis, hnet, hnet_theta,\n dist=None):\n\n if dist is not None:\n samples = dist.sample([batch_size])\n if hnet is not None:\n assert np.all(np.equal(samples.shape,\n [batch_size, hnet.num_outputs]))\n else:\n assert hnet is not None\n\n z = torch.normal(torch.zeros(batch_size, shared.noise_dim),\n config.latent_std).to(device)\n\n samples = hnet.forward(uncond_input=z, weights=hnet_theta,\n ret_format='flattened')\n\n if config.use_batchstats:\n samples = gan.concat_mean_stats(samples)\n\n return dis.forward(samples), samples"
]
| [
"0.66142166",
"0.5519045",
"0.5402537",
"0.5224618",
"0.5013061",
"0.5001216",
"0.49794587",
"0.49451426",
"0.49277872",
"0.49046662",
"0.49005345",
"0.48834217",
"0.4878524",
"0.48638603",
"0.4815047",
"0.4813966",
"0.47849753",
"0.47823474",
"0.4755759",
"0.47323096",
"0.4712639",
"0.47003555",
"0.46894693",
"0.46238944",
"0.46106938",
"0.45889136",
"0.45796865",
"0.4563584",
"0.45564273",
"0.45563784"
]
| 0.7225137 | 0 |
Checks units. Disaggregates "chunk" with the Maximum Likelihood algorithm. | def disaggregate_chunk(self, chunk):
    # A resistive element has active power equal to apparent power.
# Checking power units.
units = self.__physical_quantity(chunk)
# EVENTS OUT OF THE CHUNK:
# Delta values:
column_name = 'diff_' + units[1]
chunk[column_name] = chunk.loc[:, units].diff()
# Filter the noise.
chunk['onpower'] = (chunk[column_name] > self.powerNoise)
chunk['offpower'] = (chunk[column_name] < -self.powerNoise)
events = chunk[(chunk.onpower == True) | (chunk.offpower == True)]
detection_list = []
singleOnevent = 0
# Max Likelihood algorithm (optimized):
for onevent in events[events.onpower == True].iterrows():
# onTime = onevent[0]
# deltaOn = onevent[1][1]
        # windowing:
offevents = events[(events.offpower == True) & (events.index > onevent[0]) & (
events.index < onevent[0] + timedelta(seconds=self.timeWindow))]
# Filter paired events:
offevents = offevents[
abs(onevent[1][1] - offevents[column_name].abs()) < self.powerPair]
# Max likelihood computation:
if not offevents.empty:
# pon = self.__pdf(self.onpower, onevent[1][1])
for offevent in offevents.iterrows():
# offTime = offevent[0]
# deltaOff = offevent[1][1]
# poff = self.__pdf(self.offpower, offevent[1][1])
# duration = offevent[0] - onTime
# pduration = self.__pdf(self.duration, (offevent[0] - onTime).total_seconds())
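                    # Likelihood of this on/off pair: product of the on-power,
                    # off-power and duration probability densities.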
likelihood = self.__pdf(self.onpower, onevent[1][1]) * \
self.__pdf(self.offpower, offevent[1][1]) * \
self.__pdf(self.duration, (offevent[0] - \
onevent[0]).total_seconds())
detection_list.append(
{'likelihood': likelihood, 'onTime': onevent[0],
'offTime': offevent[0], 'deltaOn': onevent[1][1]})
else:
singleOnevent += 1
# Passing detections to a pandas.DataFrame
detections = pd.DataFrame(
columns=('onTime', 'offTime', 'likelihood', 'deltaOn'))
for i in range(len(detection_list)):
detections.loc[i] = [detection_list[i]['onTime'], detection_list[i][
'offTime'], detection_list[i]['likelihood'], detection_list[i]['deltaOn']]
detections = detections[detections.likelihood >= self.thLikelihood]
# Constructing dis_chunk (power of disaggregated appliance)
dis_chunk = pd.DataFrame(
index=chunk.index, columns=[str(units[0]) + '_' + str(units[1])])
dis_chunk.fillna(0, inplace=True)
    # Ruling out overlapped detections, ordering by likelihood value.
    detections = detections.sort_values('likelihood', ascending=False)
for row in detections.iterrows():
# onTime = row[1][0] offTime = row[1][1] deltaOn = row[1][3]
#import ipdb
#ipdb.set_trace()
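        # Only keep this detection if no higher-likelihood detection already
        # covers any part of this on/off interval.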
if ((dis_chunk[(dis_chunk.index >= row[1][0]) &
(dis_chunk.index < row[1][1])].sum().values[0]) == 0):
# delta = chunk[chunk.index == onTime][column_name].values[0]
dis_chunk[(dis_chunk.index >= row[1][0]) & (
dis_chunk.index < row[1][1])] = row[1][3]
# Stat information:
print(str(len(events)) + " events found.")
print(str(len(events[events.onpower == True])) + " onEvents found")
    print(str(singleOnevent) + " onEvents not paired.")
return dis_chunk | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def consume_units(self, units):\n pass",
"def consume_units_unconditionally(self, units):\n pass",
"def try_consume_units(self, units):\n pass",
"def disaggregate_chunk(self, test_mains):\n raise NotImplementedError()",
"def test_process_chunk_trimmed(self):\n foo = None\n chunk, order = self.chunk_procesor.process(foo, 1, 1, 1)\n expected = (12, 2, 14, 1)\n self.assertEqual(expected, chunk.shape)",
"def disaggregate(self, mains, output_datastore):\n \n building_path = '/building{}'.format(mains.building())\n # only writes one appliance and meter per building\n meter_instance = 2\n mains_data_location = '{}/elec/meter1'.format(building_path)\n \n #dis_main = pd.DataFrame()\n chunk_number = 0\n timeframes = []\n\n for chunk in mains.power_series():\n \n # Record metadata\n timeframes.append(chunk.timeframe)\n measurement = chunk.name\n cols = pd.MultiIndex.from_tuples([chunk.name])\n \n dis_chunk = self.disaggregate_chunk(\n pd.DataFrame(chunk.resample(self.sample_period, how=self.sampling_method)))\n #dis_main = pd.concat([dis_main, dis_chunk])\n chunk_number += 1\n print(str(chunk_number) + \" chunks disaggregated\")\n \n # Write appliance data to disag output\n key = '{}/elec/meter{}'.format(building_path, meter_instance)\n df = pd.DataFrame(\n dis_chunk.values, index=dis_chunk.index,\n columns=cols)\n output_datastore.append(key, df)\n\n # Copy mains data to disag output\n output_datastore.append(key=mains_data_location,\n value=pd.DataFrame(chunk, columns=cols))\n\n # Saving output datastore:\n #output_datastore.append(key=mains.key, value=dis_main)\n \n ##################################\n # Add metadata to output_datastore\n\n # TODO: `preprocessing_applied` for all meters\n # TODO: split this metadata code into a separate function\n # TODO: submeter measurement should probably be the mains\n # measurement we used to train on, not the mains measurement.\n \n date_now = datetime.now().isoformat().split('.')[0]\n output_name = 'NILMTK_MLE_' + date_now\n resample_seconds = 10\n mains_data_location = '{}/elec/meter1'.format(building_path)\n\n # DataSet and MeterDevice metadata:\n meter_devices = {\n 'MLE': {\n 'model': 'MLE',\n 'sample_period': resample_seconds,\n 'max_sample_period': resample_seconds,\n 'measurements': [{\n 'physical_quantity': measurement[0],\n 'type': measurement[1]\n }]\n },\n 'mains': {\n 'model': 'mains',\n 'sample_period': resample_seconds,\n 'max_sample_period': resample_seconds,\n 'measurements': [{\n 'physical_quantity': measurement[0],\n 'type': measurement[1]\n }]\n }\n }\n\n merged_timeframes = merge_timeframes(timeframes, gap=resample_seconds)\n total_timeframe = TimeFrame(merged_timeframes[0].start,\n merged_timeframes[-1].end)\n\n dataset_metadata = {'name': output_name, 'date': date_now,\n 'meter_devices': meter_devices,\n 'timeframe': total_timeframe.to_dict()}\n output_datastore.save_metadata('/', dataset_metadata)\n\n # Building metadata\n\n # Mains meter:\n elec_meters = {\n 1: {\n 'device_model': 'mains',\n 'site_meter': True,\n 'data_location': mains_data_location,\n 'preprocessing_applied': {}, # TODO\n 'statistics': {\n 'timeframe': total_timeframe.to_dict()\n }\n }\n }\n\n # Appliances and submeters:\n appliances = []\n appliance = {\n 'meters': [meter_instance],\n 'type': 'kettle',\n 'instance': 1\n # TODO this `instance` will only be correct when the\n # model is trained on the same house as it is tested on.\n # https://github.com/nilmtk/nilmtk/issues/194\n }\n appliances.append(appliance)\n\n elec_meters.update({\n meter_instance: {\n 'device_model': 'MLE',\n 'submeter_of': 1,\n 'data_location': ('{}/elec/meter{}'\n .format(building_path, meter_instance)),\n 'preprocessing_applied': {}, # TODO\n 'statistics': {\n 'timeframe': total_timeframe.to_dict()\n }\n }\n })\n elec_meters[meter_instance]['name'] = 'kettle'\n\n building_metadata = {\n 'instance': mains.building(),\n 'elec_meters': elec_meters,\n 'appliances': appliances\n }\n\n 
output_datastore.save_metadata(building_path, building_metadata)",
"def normalize_units(data):\n for obj in data:\n obj['unit'] = normalize_units_function(obj.get('unit', ''))\n # for param in ds.get('parameters', {}).values():\n # if 'unit' in param:\n # param['unit'] = normalize_units_function(param['unit'])\n return data",
"def clean_units(units):\n return [clean_unit(unit) for unit in units]",
"def get_unit(self, data, max=1.0):\n self.unitval = max\n for item in data:\n if isinstance(item, (list, tuple)):\n self.get_unit(item, self.unitval)\n elif item < max and item > 0.0:\n self.unitval = item\n return self.unitval",
"def pressures_in_mb( pressures ):\n if not hasattr( pressures, 'units' ): return None\n if pressures.units=='mb':\n pressures.units = 'mbar' # udunits uses mb for something else\n return pressures[:]\n tmp = udunits(1.0,pressures.units)\n s,i = tmp.how('mbar')\n pressmb = s*pressures[:] + i\n return pressmb",
"def si_mass_units(kg_unit):\r\n if (\r\n kg_unit.scale.name != 'kilogram' and \r\n kg_unit.scale.symbol != 'kg'\r\n ):\r\n raise RuntimeError(\r\n \"conventional name required, got {0.name} and{0.symbol}\".format(\r\n kg_unit.scale\r\n )\r\n )\r\n \r\n register = kg_unit.register\r\n \r\n gram = register.unit( \r\n proportional_unit(kg_unit,\r\n 'gram',\r\n 'g', \r\n 1.0 / 1000.0,\r\n )\r\n )\r\n \r\n for p_i in metric_prefixes:\r\n if p_i.value != 1E3: \r\n register.unit( \r\n proportional_unit(kg_unit,\r\n p_i.name+'gram',\r\n p_i.symbol+'g', \r\n p_i.value / 1000.0\r\n )\r\n )",
"def _getunits(x):\n if pb.units.has_units(x):\n \n units = x.units\n \n else:\n \n units = None\n \n return units",
"def chunker(self, w):\n# print('call chunker')\n w = self.input_word\n w=w.lower()\n initials=self.table.columns.values.tolist()\n finals=self.table['Unnamed: 0'].tolist()\n w = self.cleaner(w)\n w = self.geminates_checker(w)\n# print('now go mb mp')\n w = self.mp_mb_checker(w)\n #print('the result is: '+w)\n w = self.gk_g_checker(w)\n w = self.ch_t_checker(w)\n w = self.yotated_checker(w)\n# print('the result is: '+w)\n syls = []\n counter = True\n while len(w) > 0 and counter:\n s_len = len(syls)\n initial, len_init = self.initial_finder(w, initials)\n final, len_fin = self.final_finder(w, len_init, finals)\n len_syllable = len_init+len_fin\n final_idx=finals.index(final)\n syllable = [initial, final, final_idx]\n w_old = w\n w = w[len_syllable:]\n syls.append(syllable)\n if len(w_old) == len(w):\n# print('we got into a hole')\n counter = False\n if counter == False:\n syls = []\n return syls",
"def total_chunks(self) -> global___Expression:",
"def _unit_lems_validator(self, value_in_unit):\n if is_dimensionless(value_in_unit):\n return str(value_in_unit)\n value, unit = value_in_unit.in_best_unit().split(' ')\n lemsunit = _to_lems_unit(unit)\n if lemsunit in nml_units:\n return \"{} {}\".format(value, lemsunit)\n else:\n self._model.add(make_lems_unit(name_to_unit[unit]))\n return \"{} {}\".format(value, lemsunit)",
"def unit(x):\n\tl = sum([i**2 for i in x])**0.5\n\treturn [xi/l for xi in x]",
"def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]",
"def preprocess_sub_units(self):\n if self.unit == \"char\":\n self.preprocess_char()\n elif self.unit == \"char-ngram\":\n self.preprocess_char_ngram()\n elif self.unit == \"morpheme\":\n self.preprocess_morpheme()\n elif self.unit == \"oracle\":\n self.preprocess_oracle()\n else:\n sys.exit(\"Unknown unit\")",
"def analyse_HM_obs(obs):\n empty, failed = 0, 0\n cum_space_decrease = np.zeros(2)\n if os.path.isdir(obs.results_dir): # check there are results\n accepted_space = helper.last_wave(obs.results_dir)\n if len(accepted_space) == 0:\n #print(obs.results_dir, 'Empty plausible space.')\n empty = 1\n else:\n target = helper.split(obs.parameters)\n bounds = helper.get_bounds(accepted_space)\n for d in range(DIMENSIONS):\n cum_space_decrease[d] = \\\n (helper.bound_len(bounds[PARAMETERS[d]]) /\n helper.bound_len(ORIG_BOUNDS[PARAMETERS[d]]))\n if not helper.in_bound(target[d], bounds[PARAMETERS[d]]):\n failed = 1\n #print(obs.results_dir, 'Target parameter discarded')\n return empty, failed, cum_space_decrease",
"def units(self):\n pass",
"def _M_step(self, stats):\n new_model = super()._M_step(stats)\n\n if 'e' in self.tr_params:\n new_model['B'] = [\n (stats['B']['numer'][i] / stats['B']['denom'][i])\n for i in range(self.n_emissions)\n ]\n\n return new_model",
"def _fix_units(cube, definition):\n\n if cube.var_name != 'pr':\n cube.convert_units(definition.units)",
"def _parse_units(self, model, comp, node):\n node = dom_child(node, 'unitDefinition')\n while node:\n name = node.getAttribute('id')\n self.log('Parsing unit definition for \"' + name + '\".')\n unit = myokit.units.dimensionless\n node2 = dom_child(node, 'listOfUnits')\n node2 = dom_child(node2, 'unit')\n while node2:\n kind = str(node2.getAttribute('kind')).strip()\n u2 = self._convert_unit(kind)\n if node2.hasAttribute('multiplier'):\n m = float(node2.getAttribute('multiplier'))\n else:\n m = 1.0\n if node2.hasAttribute('scale'):\n m *= 10 ** float(node2.getAttribute('scale'))\n u2 *= m\n if node2.hasAttribute('exponent'):\n u2 **= float(node2.getAttribute('exponent'))\n unit *= u2\n node2 = dom_next(node2, 'unit')\n self.units[name] = unit\n node = dom_next(node, 'unitDefinition')",
"def test_run_mantel_test_partial_mantel_too_small(self):\r\n exp = '# A sample comment.\\nDM\\tDM\\tCDM\\tNumber of entries\\t' + \\\r\n 'Mantel r statistic\\tp-value\\tNumber of permutations\\t' + \\\r\n 'Tail type\\nfoo.txt\\tbar.txt\\tbaz.txt\\t\\tToo few samples\\n'\r\n obs = run_mantel_test('partial_mantel', [self.fp1, self.fp2],\r\n [self.dm1,\r\n self.dm2], self.num_perms, self.tail_type,\r\n self.comment, self.fp3, self.dm4)\r\n self.assertEqual(self.remove_nums(obs), exp)",
"def base_units(self, context):\n units = []\n\n logger.info('Determining base units')\n\n for input in context.inputs:\n with input.open(context.user) as var:\n time = var.getTime()\n\n if time is not None:\n input.first = time[0]\n\n input.units = time.units\n\n units.append(input.units)\n\n logger.info('%r units: %r first: %r', input.filename, input.units,\n input.first)\n else:\n logger.info('Skipping %r', input.filename)\n\n try:\n context.units = sorted(units)[0]\n except IndexError:\n pass\n\n self.status('Setting units to {!r}', context.units)\n\n return context",
"def test_milsplit(self):\n convert5 = cnv()\n # normal case\n self.assertEqual(\n convert5.milsplit('einemilliondreihundertvierundzwanzigtausendsiebenhundertneunundachtzig'),\n 1324789)\n # case nothing behind\n self.assertEqual(convert5.milsplit('fünfundzwanzigmillionen'), 25000000)\n # case nothing in front\n self.assertEqual(convert5.milsplit('millionundzwei'), 1000002)\n # case nothing in front nothing behind\n self.assertEqual(convert5.milsplit('million'), 1000000)",
"def _validate_mass(mass):\n if not isinstance(mass, u.unyt_array):\n warnings.warn(\"Masses are assumed to be g/mol\")\n mass *= u.gram / u.mol\n elif mass.units.dimensions != (u.gram / u.mol).units.dimensions:\n warnings.warn(\"Masses are assumed to be g/mol\")\n mass = mass.value * u.gram / u.mol\n else:\n pass\n\n return mass",
"def _adaptively_modify_eval_chunk(self, t):\n if t > self._EVAL_DEMON_MAX and self._EVAL_CHUNK > 5:\n self._EVAL_CHUNK = min(\n self._EVAL_CHUNK - 1,\n max(\n int(self._EVAL_CHUNK * (self._EVAL_DEMON_MAX / t)),\n self._EVAL_CHUNK - 10,\n ),\n )\n elif t < self._EVAL_DEMON_MIN:\n self._EVAL_CHUNK = max(\n self._EVAL_CHUNK + 1,\n min(\n int(self._EVAL_CHUNK * (self._EVAL_DEMON_MIN / t)),\n self._EVAL_CHUNK + 10,\n ),\n )",
"def loudness(chunk):\n data = numpy.array(chunk, dtype=float) / 32768.0\n ms = math.sqrt(numpy.sum(data ** 2.0) / len(data))\n if ms < 10e-8: ms = 10e-8\n return 10.0 * math.log(ms, 10.0)",
"def abs_units(wb_run,sample_run,mono_van,wb_mono,samp_rmm,samp_mass,ei_guess,rebin,map_file,monovan_mapfile,**kwargs): \n #available keywords\n #abs_units_van_range\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n print 'Output will be in absolute units of mb/str/mev/fu'\n\n #reducer.van_rmm =50.94\n reducer.van_mass=van_mass\n #sample info\n reducer.sample_mass=samp_mass\n reducer.sample_rmm =samp_rmm\n print 'Using vanadium mass: ',van_mass\n print ' sample mass: ',samp_mass \n print ' sample_rmm : ',samp_rmm \n # check if mono-vanadium is provided as multiple files list or just put in brackets ocasionally\n if isinstance(mono_van,list):\n if len(mono_van)>1:\n raise IOError(' Can currently work only with single monovan file but list supplied')\n else:\n mono_van = mono_van[0];\n\n \n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=str(sample_run)+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n if kwargs.has_key('norm_method'):\n reducer.normalise_method = kwargs.get('norm_method')\n print 'Setting normalisation method to ', kwargs.get('norm_method')\n else:\n reducer.normalise_method = 'monitor-1'\n \n if kwargs.has_key('mask_run'):\n mask_run = kwargs.get('mask_run')\n print 'Using run ', kwargs.get('mask_run'),' for diag'\n else:\n mask_run=sample_run\n \n if kwargs.has_key('background'):\n reducer.background = kwargs.get('background')\n print 'Setting background option to ', kwargs.get('background')\n else:\n reducer.background = False\n \n if kwargs.has_key('fixei'):\n reducer.fix_ei = kwargs.get('fixei')\n print 'Setting fixei to ', kwargs.get('fixei')\n else:\n reducer.fix_ei = False\n \n if kwargs.has_key('save_format'):\n reducer.save_formats = kwargs.get('save_format')\n print 'Setting save format to ', kwargs.get('save_format')\n else:\n reducer.save_formats = ['.spe']\n #Set parameters for the run\n \n if kwargs.has_key('detector_van_range'):\n reducer.wb_integr_range = kwargs.get('detector_van_range')\n print 'Setting detector van int range to ', kwargs.get('detector_van_range')\n else:\n reducer.wb_integr_range=[20,100]\n \n #######DIAG###########\n if kwargs.has_key('bkgd_range'):\n background_range = kwargs.get('bkgd_range')\n print 'Setting background intergration to ', kwargs.get('bkgd_range')\n else:\n background_range=[15000,19000]\n \n if kwargs.has_key('tiny'):\n tinyval = kwargs.get('tiny')\n print 'Setting tiny ratelimit to ', kwargs.get('tiny')\n else:\n tinyval=1e-10\n \n if kwargs.has_key('large'):\n largeval = kwargs.get('large')\n print 'Setting large limit to ', kwargs.get('large')\n else:\n largeval=1e10\n \n if kwargs.has_key('diag_remove_zero'):\n sampzero = kwargs.get('diag_remove_zero')\n print 'Setting diag to reject zero backgrounds '\n else:\n sampzero =False\n \n if kwargs.has_key('diag_van_median_rate_limit_hi'):\n vanouthi = kwargs.get('diag_van_median_rate_limit_hi')\n print 'Setting diag_van_median_rate_limit_hi to ', kwargs.get('diag_van_median_rate_limit_hi')\n else:\n vanouthi=100\n \n if kwargs.has_key('diag_van_median_rate_limit_lo'):\n vanoutlo = kwargs.get('diag_van_median_rate_limit_lo')\n print 'Setting diag_van_median_rate_limit_lo to ', 
kwargs.get('diag_van_median_rate_limit_lo')\n else:\n vanoutlo=0.01\n \n if kwargs.has_key('diag_van_median_sigma_lo'):\n vanlo = kwargs.get('diag_van_median_sigma_lo')\n print 'Setting diag_van_median_sigma_lo to ', kwargs.get('diag_van_median_sigma_lo')\n else:\n vanlo=0.1\n \n if kwargs.has_key('diag_van_median_sigma_hi'):\n vanhi = kwargs.get('diag_van_median_sigma_hi')\n print 'Setting diag_van_median_sigma_hi to ', kwargs.get('diag_van_median_sigma_hi')\n else:\n vanhi=1.5\n \n if kwargs.has_key('diag_van_median_sigma'):\n vansig = kwargs.get('diag_van_median_sigma')\n print 'Setting diag_van_median_sigma to ', kwargs.get('diag_van_median_sigma')\n else:\n vansig=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_lo'):\n samplo = kwargs.get('diag_samp_median_sigma_lo')\n print 'Setting diag_samp_median_sigma_lo to ', kwargs.get('diag_samp_median_sigma_lo')\n else:\n samplo=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_hi'):\n samphi = kwargs.get('diag_samp_median_sigma_hi')\n print 'Setting diag_samp_median_sigma_hi to ', kwargs.get('diag_samp_median_sigma_hi')\n else:\n samphi=2.0\n \n if kwargs.has_key('diag_samp_median_sigma'):\n sampsig = kwargs.get('diag_samp_median_sigma')\n print 'Setting diag_samp_median_sigma to ', kwargs.get('diag_samp_median_sigma')\n else:\n sampsig=3.0\n \n if kwargs.has_key('bleed'):\n bleed_switch = kwargs.get('bleed')\n print 'Setting bleed ', kwargs.get('bleed')\n else:\n print 'bleed set to default'\n #####diad end########\n \n \n if kwargs.has_key('det_cal_file'):\n reducer.det_cal_file = kwargs.get('det_cal_file')\n reducer.relocate_dets = True\n print 'Setting detector calibration file to ', kwargs.get('det_cal_file')\n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n reducer.det_cal_file =None\n reducer.relocate_dets = False\n \n if mtd.doesExist(str(sample_run))==True and kwargs.has_key('det_cal_file')==False:\n print 'For data input type: workspace detector calibration must be specified'\n print 'use Keyword det_cal_file with a valid detctor file or run number'\n return\n \n \n if kwargs.has_key('one2one'):\n reducer.map_file =None\n map_file = \"\"\n print 'one2one selected'\n else:\n fileName, fileExtension = os.path.splitext(map_file)\n if (not fileExtension):\n map_file = map_file+'.map'\n reducer.map_file = map_file;\n \n if kwargs.has_key('hardmaskPlus'):\n HardMaskFile = kwargs.get('hardmaskPlus')\n print 'Use hardmask from ', HardMaskFile\n #hardMaskSpec=common.load_mask(HardMaskFile)\n #MaskDetectors(Workspace='masking',SpectraList=hardMaskSpec)\n else:\n HardMaskFile=None\n \n reducer.energy_bins = rebin\n #monovan info\n fileName, fileExtension = os.path.splitext(monovan_mapfile)\n if (not fileExtension):\n monovan_mapfile=monovan_mapfile+'.map'\n reducer.abs_map_file =monovan_mapfile \n\n if kwargs.has_key('abs_units_van_range'):\n reducer.monovan_integr_range = kwargs.get('abs_units_van_range')\n print 'Setting absolute units vanadium integration range to: ', kwargs.get('abs_units_van_range')\n else:\n reducer.monovan_integr_range=[-40,40]\n\n \n \n print 'output will be normalised to', reducer.normalise_method\n if (numpy.size(sample_run)) > 1 and kwargs.has_key('sum') and kwargs.get('sum')==True:\n #this sums the runs together before passing the summed file to the rest of the reduction\n #this circumvents the inbuilt method of summing which fails to sum the files for diag\n \n sumfilename=str(sample_run[0])+'sum'\n accum=sum_files(sumfilename, sample_run)\n #the D.E.C. 
tries to be too clever so we have to fool it into thinking the raw file is already exists as a workpsace\n RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')\n sample_run=sample_run[0]\n \n if kwargs.has_key('hardmaskOnly'):\n if (kwargs.get('hardmaskOnly')): \n totalmask = kwargs.get('hardmaskOnly')\n print 'Using hardmask from ', totalmask\n #next stable version can replace this with loadmask algoritum\n specs=diag_load_mask(totalmask)\n else:\n specs=\"\"\n \n CloneWorkspace(InputWorkspace=sample_run,OutputWorkspace='mask_wksp')\n MaskDetectors(Workspace='mask_wksp',SpectraList=specs)\n masking =mtd['mask_wksp']\n else:\n print '########### Run diagnose for sample run ##############'\n masking = reducer.diagnose(wb_run, \n sample=mask_run,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(masking) \n print 'first Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n if kwargs.has_key('use_sam_msk_on_monovan') and kwargs.get('use_sam_msk_on_monovan')==True:\n print 'applying sample run mask to mono van'\n reducer.spectra_masks=masking\n fail_list=get_failed_spectra_list(masking) \n else:\n print '########### Run diagnose for monochromatic vanadium run ##############'\n masking2 = reducer.diagnose(wb_mono, \n sample=mono_van,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n total_mask=masking+masking2 \n reducer.spectra_masks=total_mask \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(total_mask)\n #fail_list=get_failed_spectra_list('total_mask')\n \n \n print 'Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n \n \n #Run the conversion first on the sample\n deltaE_wkspace_sample = reducer.convert_to_energy(sample_run, ei_guess, wb_run)\n\n \n if kwargs.has_key('mono_correction_factor'):\n absnorm_factor=kwargs.get('mono_correction_factor')\n print 'Using supplied correction factor for absolute units'\n else:\n print '##### Evaluate the integral from the monovan run and calculate the correction factor ######'\n print ' Using absolute units vanadion integration range : ', reducer.monovan_integr_range \n #now on the mono_vanadium run swap the mapping file\n reducer.map_file = monovan_mapfile \n deltaE_wkspace_monovan = reducer.convert_to_energy(mono_van, ei_guess, wb_mono)\n \n (absnorm_factorL,absnorm_factorSS,absnorm_factorP,absnorm_factTGP) = getAbsNormalizationFactor(deltaE_wkspace_monovan.getName(),str(reducer.monovan_integr_range[0]),str(reducer.monovan_integr_range[1])) \n \n print 'Absolute correction factor S^2 =',absnorm_factorSS,' Libisis: ',absnorm_factorL,' Puasonian: ',absnorm_factorP, ' TGP : ',absnorm_factTGP\n 
CreateSingleValuedWorkspace(OutputWorkspace='AbsFactor',DataValue=absnorm_factTGP)\n end_time=time.time()\n results_name=str(sample_run)+'.spe'\n ei= (deltaE_wkspace_sample.getRun().getLogData(\"Ei\").value)\n \n if mtd.doesExist('_wksp.spe-white')==True:\n DeleteWorkspace(Workspace='_wksp.spe-white')\n \n \n print 'Incident energy found for sample run ',ei,' meV'\n print 'Incident energy found for mono vanadium run ',ei,' meV'\n print 'Elapsed time =',end_time-start_time, 's'\n #get the name that convert to energy will use\n \n if mtd.doesExist(results_name)==False:\n RenameWorkspace(InputWorkspace=deltaE_wkspace_sample,OutputWorkspace=results_name)\n if results_name != wksp_out:\n RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)\n Divide(LHSWorkspace=wksp_out,RHSWorkspace='AbsFactor',OutputWorkspace=wksp_out)\n DeleteWorkspace(Workspace='AbsFactor')\n return mtd[wksp_out]"
]
| [
"0.5595163",
"0.55166554",
"0.5493909",
"0.54604405",
"0.5417519",
"0.53858703",
"0.5327096",
"0.52271676",
"0.52093446",
"0.52073175",
"0.5189472",
"0.518666",
"0.51688814",
"0.5151618",
"0.51342285",
"0.5129267",
"0.50638485",
"0.5045443",
"0.5035866",
"0.5003558",
"0.496898",
"0.49628028",
"0.4947326",
"0.4918724",
"0.49119222",
"0.48999077",
"0.489971",
"0.4881531",
"0.48690018",
"0.4867549"
]
| 0.5635728 | 0 |
Crops feature_train (onpower_train, offpower_train and duration_train) to take the same number of samples from each appliance instance of the same appliance model, avoiding overfitting to an appliance with many samples. Updates the stats attribute. Retrains the feature models. | def no_overfitting(self):
# The instance with the minimum (non-zero) number of events sets the maximum length
train_len = [st['Nevents'] for st in self.stats]
train_len = np.array(train_len)
max_len = train_len[train_len != 0].min()
# CROPS FEATURE SAMPLES
onpower_train = pd.DataFrame()
offpower_train = pd.DataFrame()
duration_train = pd.DataFrame()
start = 0
end = 0
for ind in np.arange(len(self.stats)):
if self.stats[ind]['Nevents'] != 0:
if ind == 0:
start = 0
else:
start = end
end += self.stats[ind]['Nevents']
aux = self.onpower_train[start:end]
aux = aux[:max_len]
onpower_train = pd.concat([onpower_train, aux])
aux = self.offpower_train[start:end]
aux = aux[:max_len]
offpower_train = pd.concat([offpower_train, aux])
aux = self.duration_train[start:end]
aux = aux[:max_len]
duration_train = pd.concat([duration_train, aux])
# updating stats with the cropped number of events:
self.stats[ind]['Nevents'] = max_len
self.onpower_train = onpower_train
self.offpower_train = offpower_train
self.duration_train = duration_train
# RE-TRAINS FEATURES:
self.__retrain(self.onpower, self.onpower_train)
self.__retrain(self.offpower, self.offpower_train)
self.__retrain(self.duration, self.duration_train) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n #print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")",
"def main():\n \n # The following 5 command lines can be outcommented if the features are already created.\n # There is no need to process the data every single time.\n # Fine tuning the learning algorythm is much faster without that extra step.\n \n # by reading the train dataset the feature index is created.\n # First calling of the processdata function\n # Data limited to 300000\n featureIndexes = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000)\n print \"featureIndex generated!\"\n print len(featureIndexes)\n\n # Trainfeature is created using the indexfeatures...\n # Second calling of the processdata function\n trainFeatures, trainTargets, trainItemIds, trainPrices, trainUrls, trainPhones, trainEmails, trainLength = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000) # Original itemsLimit=300000\n\n # Building the test dataset... just like the training...\n testFeatures, testItemIds, testPrices, testUrls, testPhones, testEmails, testLength = processData(os.path.join(dataFolder,\"avito_test.tsv\"), featureIndexes)\n\n # Dumping data into file...\n # joblib.dump((trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds), os.path.join(dataFolder,\"train_data.pkl\"))\n joblib.dump((trainFeatures,trainTargets,trainItemIds,trainPrices,trainUrls,trainPhones,trainEmails,trainLength,\n testFeatures, testItemIds,testPrices,testUrls,testPhones,testEmails,testLength), os.path.join(dataFolder,\"SeparatedByCategory.pkl\"))\n\n\n # loading data pack...\n # trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds = joblib.load(os.path.join(dataFolder,\"train_data.pkl\"))\n\n #logging.info(\"Feature preparation done, fitting model...\")\n\n # Stochastic gradient model",
"def train( self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.features = trainingData[0].keys() # could be useful later\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n for iteration in range(self.max_iterations):\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n print (\"Starting iteration \", iteration, \"...\")\n for i in range(len(trainingData)):#training data\n max = -10000000\n for j in range(len(self.weights)):\n prod = np.dot(self.weights[j], trainingData[i]) #este sería x0 (en la primera vuelta) (xj)\n if (prod > max):\n max=prod #en max guardamos la distancia a la instancia que más cerca está de la que estamos recorriendo\n indclase=j #guardas el índice de la clase a la que predices que pertenece\n\n if(indclase != trainingLabels[i]):\n # recalcular pesos\n self.weights[trainingLabels[i]].__radd__(trainingData[i]) #honek jarraian egiten du gehiketa pisu guztientzat\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n self.weights[indclase].__sub__(trainingData[i]) #honek jarraian egiten du kenketa pisu guztientzat\n\n\n\n\n\n ########################################################################################\n # 1. i es el indice de un ejemplo (un item, f(x) de un ejemplo) del conjunto de entrenamiento.\n # 2. Asi pues, en cada vuelta de este loop se trata un solo ejemplo\n # por cada ejemplo calculareis el producto punto (dotProduct) w*item\n # NOTAS: Recordad que cada ejemplo viene representado por varios rasgos (o features), es decir, es un vector de rasgos, tantos como nos marca el atributo self.features.\n # Asi cada ejemplo es de dimension 1 filas y self.features).\n # La dimension del vector w tambien es self.features, es decir, habra tantos pesos en w_rasgo dentro de w como rasgos haya en cada item de ejemplo\n # Recordad tambien que es una clasificacion multiclase en este caso. Hay tantas clases como nos marca el atributo self.legalLabels\n #########################################################################################",
"def train():\r\n print('Loading and compiling models...')\r\n model_systole = get_model()\r\n model_diastole = get_model()\r\n\r\n # load the preprocessed data with the heart cut-out\r\n print('Loading data...')\r\n X_train, scaling_train, ids_train, y_train = load_train_data()\r\n X_test, scaling_test, ids_test, y_test = load_test_data()\r\n\r\n nb_iter = 200 # a higher number seems to give rise to overfitting\r\n epochs_per_iter = 3 # reduces overfitting\r\n batch_size = 32 # not tuned - potential improvement\r\n calc_crps = 2 # calculate CRPS every n-th iteration (set to 0 if CRPS estimation is not needed)\r\n\r\n # remember min val. losses (best iterations), used as sigmas for submission\r\n min_val_loss_systole = sys.float_info.max\r\n min_val_loss_diastole = sys.float_info.max\r\n\r\n print('-'*50)\r\n print('Training...')\r\n print('-'*50)\r\n\r\n for i in range(nb_iter):\r\n print('-'*50)\r\n print('Iteration {0}/{1}'.format(i + 1, nb_iter))\r\n print('-'*50)\r\n\r\n # augment data to make up for low number of samples\r\n print('Augmenting images - rotations')\r\n X_train_aug = rotation_augmentation(X_train, 15)\r\n print('Augmenting images - shifts')\r\n X_train_aug = shift_augmentation(X_train_aug, 0.1, 0.1)\r\n\r\n print('Fitting systole model...')\r\n hist_systole = model_systole.fit([X_train_aug, scaling_train], y_train[:, 0], shuffle=True, nb_epoch=epochs_per_iter,\r\n batch_size=batch_size, validation_data=([X_test, scaling_test], y_test[:, 0]))\r\n\r\n print('Fitting diastole model...')\r\n hist_diastole = model_diastole.fit([X_train_aug, scaling_train], y_train[:, 1], shuffle=True, nb_epoch=epochs_per_iter,\r\n batch_size=batch_size, validation_data=([X_test, scaling_test], y_test[:, 1]))\r\n\r\n # sigmas for predicted data, actually loss function values (RMSE)\r\n loss_systole = hist_systole.history['loss'][-1]\r\n loss_diastole = hist_diastole.history['loss'][-1]\r\n val_loss_systole = hist_systole.history['val_loss'][-1]\r\n val_loss_diastole = hist_diastole.history['val_loss'][-1]\r\n\r\n if calc_crps > 0 and i % calc_crps == 0:\r\n print('Evaluating CRPS...')\r\n pred_systole = model_systole.predict([X_train, scaling_train], batch_size=batch_size, verbose=1)\r\n pred_diastole = model_diastole.predict([X_train, scaling_train], batch_size=batch_size, verbose=1)\r\n val_pred_systole = model_systole.predict([X_test, scaling_test], batch_size=batch_size, verbose=1)\r\n val_pred_diastole = model_diastole.predict([X_test, scaling_test], batch_size=batch_size, verbose=1)\r\n\r\n # CDF for train and test data (actually a step function)\r\n cdf_train = real_to_cdf(np.concatenate((y_train[:, 0], y_train[:, 1])))\r\n cdf_test = real_to_cdf(np.concatenate((y_test[:, 0], y_test[:, 1])))\r\n\r\n # CDF for predicted data\r\n cdf_pred_systole = real_to_cdf(pred_systole, loss_systole)\r\n cdf_pred_diastole = real_to_cdf(pred_diastole, loss_diastole)\r\n cdf_val_pred_systole = real_to_cdf(val_pred_systole, val_loss_systole)\r\n cdf_val_pred_diastole = real_to_cdf(val_pred_diastole, val_loss_diastole)\r\n\r\n # evaluate CRPS on training data\r\n crps_train = crps(cdf_train, np.concatenate((cdf_pred_systole, cdf_pred_diastole)))\r\n print('CRPS(train) = {0}'.format(crps_train))\r\n\r\n # evaluate CRPS on test data\r\n crps_test = crps(cdf_test, np.concatenate((cdf_val_pred_systole, cdf_val_pred_diastole)))\r\n print('CRPS(test) = {0}'.format(crps_test))\r\n\r\n print('Saving weights...')\r\n # save weights so they can be loaded later\r\n model_systole.save_weights('weights_systole.hdf5', 
overwrite=True)\r\n model_diastole.save_weights('weights_diastole.hdf5', overwrite=True)\r\n\r\n # for best (lowest) val losses, save weights\r\n if val_loss_systole < min_val_loss_systole:\r\n min_val_loss_systole = val_loss_systole\r\n model_systole.save_weights('weights_systole_best.hdf5', overwrite=True)\r\n\r\n if val_loss_diastole < min_val_loss_diastole:\r\n min_val_loss_diastole = val_loss_diastole\r\n model_diastole.save_weights('weights_diastole_best.hdf5', overwrite=True)\r\n\r\n # save best (lowest) val losses in file (to be later used for generating submission)\r\n with open('val_loss.txt', mode='w+') as f:\r\n f.write(str(min_val_loss_systole))\r\n f.write('\\n')\r\n f.write(str(min_val_loss_diastole))",
"def one_experiment():\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'overfit_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n\n # define the changing parameter and its value\n changing_param_name = 'class_weights'\n changing_param_value = [{0: 15, 1: 85}]\n # {0:15, 1:85}]#, {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n features_to_use = ['user', 'countries', 'session', 'format', 'token']\n # set constant parameters\n set_params(use_word_emb=1)\n set_params(epochs=40)\n set_params(features_to_use=features_to_use)\n\n # save constant parameters to a new \"experiment_..\" filgithx+P@2ub\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**3)), \"KB\")\n\n # update the parameter value\n set_params(class_weights_1=value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n\n set_params(model_id=new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name,\n new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()",
"def train(self):\n\n # Step 1 - Obtain optimized weights for final model ------------------------------------------------------------\n\n t0 = time()\n\n # Check the training data for potential hazardous problems\n self.check_training_samples()\n\n opt_results = pd.DataFrame()\n kf_opt = StratifiedKFold(n_splits=self.kfold_cv, shuffle=True)\n rep_str, opt_str = '', ''\n\n if self.verbose:\n print('\\n\\n__ TRAINING STEP 1/2 \\_______________________________')\n print(' \\ Train with reverse %d-fold CV - %d time(s) /\\n' % (self.kfold_cv, self.n_repeat))\n\n for i_rep in range(self.n_repeat):\n\n if self.verbose:\n rep_str = '\\n_/--- Rep %d/%d' % (i_rep + 1, self.n_repeat)\n\n # Sample clf-net parameters to test\n param = [\n np.random.normal(loc=self.n_estimators,\n scale=self.n_estimators*self.param_tune_scale,\n size=self.kfold_cv),\n np.random.normal(loc=self.min_impurity_decrease,\n scale=self.min_impurity_decrease*self.param_tune_scale,\n size=self.kfold_cv),\n np.random.normal(loc=self.min_sample_leaf,\n scale=np.ceil(self.min_sample_leaf*self.param_tune_scale),\n size=self.kfold_cv),\n ]\n scores = list()\n\n for j_fold, (opt_idxs, cv_train_idxs) in enumerate(kf_opt.split(\n X=self.datas[self.train_idx].nidx_train,\n y=self.datas[self.train_idx].gen_labels(condense_labels=True))):\n\n if self.verbose:\n print(rep_str + ' - CV %d/%d ---\\_____\\n' % (j_fold + 1, self.kfold_cv))\n\n # set clf-net parameters\n self.n_estimators = param[0][j_fold]\n self.min_impurity_decrease = param[1][j_fold]\n self.min_sample_leaf = param[2][j_fold]\n self.clf_net = self.gen_rfc()\n\n # Split data\n opt_nidxs = np.array([self.datas[self.train_idx].nidx_train[i] for i in opt_idxs])\n cv_train_nidxs = np.array([self.datas[self.train_idx].nidx_train[i] for i in cv_train_idxs])\n\n # Partition train/eval nidx for reverse k-fold CV training\n _, _, opt_eval_nidxs, opt_train_nidxs = train_test_split(\n np.zeros(len(opt_nidxs)),\n opt_nidxs,\n test_size=1/(self.kfold_cv - 1),\n shuffle=True,\n stratify=self.datas[self.train_idx].gen_labels(nidxs=opt_nidxs, condense_labels=True))\n\n # Train clfs\n if self.verbose:\n print('\\n> Training base classifiers ...')\n self._train_clfs(train_nidxs=cv_train_nidxs)\n\n # Evaluate train with cv_train data\n if self.verbose:\n print('\\n> Evaluating base classifiers with cv_train partition ...')\n self.clfs_predict(nidxs_target=cv_train_nidxs, data=self.datas[self.train_idx], to_eval=True,\n eval_idx=self.train_idx)\n\n # Evaluate pre-optimization with opt_train data\n if self.verbose:\n print('\\n> Evaluating base classifiers with cv_eval partition ...')\n cv_res = self.clfs_predict(nidxs_target=opt_train_nidxs, data=self.datas[self.train_idx], to_eval=True,\n nidxs_train=cv_train_nidxs, eval_idx=self.train_idx)\n\n # Train clf-opt with opt_train partition results\n if self.verbose:\n print('\\n> Training clf-opt ...')\n self._train_clf_opt(predictions=cv_res)\n\n # Evaluate clf-opt with opt_eval partition\n if self.verbose:\n print('\\n> Evaluating optimized classifier with opt_test partition ...')\n opt_res = self.clfs_predict(nidxs_target=opt_eval_nidxs, data=self.datas[self.train_idx], to_eval=True,\n nidxs_train=cv_train_nidxs, eval_idx=self.train_idx)\n opt_results = opt_results.append(opt_res, ignore_index=True)\n\n # Append score to optimize clf-net parameter\n r = self.scores(opt_res['ytruth'], opt_res['ynet'])\n if not self.aim:\n scores.append(r['aucroc'])\n else:\n aim = self.aim.replace('hard', '')\n scores.append(r[aim])\n\n # reset link2featidx\n 
self.datas[self.train_idx].link2featidx = {}\n\n # Aggregate results from clf-net parameter search\n self._set_clf_net_param(param, scores)\n\n # STEP 2 - Train final model -----------------------------------------------------------------------------------\n # .clf_opt is already trained through previous iterations by using warm_start\n\n if self.verbose:\n print('\\n__ TRAINING STEP 2/2 \\_______________________________')\n print(' \\ Train final model with all train data /\\n')\n\n # Train clfs with all the data\n self._train_clfs()\n\n # Evaluate final clf-opt with all data\n print('\\n> Evaluating final classifier ...')\n self.clfs_predict(nidxs_target=self.datas[self.train_idx].nidx_train, to_eval=True, eval_idx=self.train_idx)\n print('** Because this is evaluating with the training data, classifier performances should be very high.')\n\n # Assign model ID - this is here so that if retrained, it would be known that it is not the same model anymore\n self.id = 'm_%s' % gen_id()\n\n if self.verbose:\n te = (time() - t0) / 60\n print('\\n Training took %.1f minutes on %d processors' % (te, os.cpu_count()))\n print('\\n__ __________')\n print(' \\ Training complete! /\\n')\n\n return opt_results",
"def split_data_metrics_learning(cfg):\n actual_pose = cfg['actual_pose']\n target = cfg['target']\n person_ids = cfg['person_ids']\n \n # Split train and val data based on the person ids.\n all_ids = np.arange(1, 21)\n val_ids = cfg['val_ids']\n train_ids = set(all_ids).symmetric_difference(val_ids)\n \n anchor_gallery_split_size = cfg['anchor_gallery_split_size']\n window_width = cfg['window_width']\n overlap = cfg['overlap']\n random_state = cfg['random_state']\n \n # Get only the training set data and the label.\n X_train, y_train = get_req_ids(actual_pose, target, train_ids, person_ids)\n \n # Select the evaluation data that measures the performance of the model on the training set.\n train_accuracy_ids = random.sample(train_ids, len(val_ids))\n X_train_acc, y_train_acc = get_req_ids(actual_pose, target, train_accuracy_ids, person_ids)\n \n # Anchor/Gallery set split for the training set.\n X_train_gal, X_train_anchor, y_train_gal, y_train_anchor = train_test(X_train = X_train_acc, y_train = y_train_acc, \n test_size=anchor_gallery_split_size, \n random_state=random_state, stratify=y_train_acc)\n \n # Subsample the gait sequences of the anchor/gallery set of the training set based on the window width and the overlap.\n X_train_gal, y_train_gal = subsample(cfg, X_train_gal, y_train_gal, window_width=window_width, overlap=overlap)\n X_train_anchor, y_train_anchor = subsample(cfg, X_train_anchor, y_train_anchor, window_width=window_width, overlap=overlap)\n \n # Get only the validation set data and the label.\n X_val, y_val = get_req_ids(actual_pose, target, val_ids, person_ids)\n \n # Anchor/Gallery set split for the validation set.\n X_val_gal, X_val_anchor, y_val_gal, y_val_anchor = train_test(X_train = X_val, \n y_train = y_val, \n test_size=anchor_gallery_split_size, \n random_state=random_state, \n stratify=y_val)\n \n \n # If data augmentation parameter is set to True in the configuration dictionary, data augmentation is done for the training set.\n if cfg['augment_data']:\n X_train, y_train = augment_data(X_train, y_train)\n \n # Subsample the gait sequences of the whole training set based on the window width and the overlap.\n X_train, y_train = subsample(cfg, X_train, y_train, window_width=window_width, overlap=overlap)\n \n # Subsample the gait sequences of the anchor/gallery set of the validation set based on the window width and the overlap.\n X_val_gal, y_val_gal = subsample(cfg, X_val_gal, y_val_gal, window_width=window_width, overlap=overlap)\n X_val_anchor, y_val_anchor = subsample(cfg, X_val_anchor, y_val_anchor, window_width=window_width, overlap=overlap)\n \n # Concatenate the gallery and anchor set of the validation data and label as a whole. This is just to maintain the train-val uniformity and \n # is not used anywhere in the project.\n X_val, y_val = np.concatenate((X_val_gal, X_val_anchor)), np.concatenate((y_val_gal, y_val_anchor))\n \n return X_train, X_val, X_train_gal, X_train_anchor, X_val_gal, X_val_anchor, y_train, y_val, y_train_gal, y_train_anchor, y_val_gal, y_val_anchor",
"def agents_train(self, game_step, episode_now, args):\n # update all trainers, if not in display or benchmark mode\n if episode_now < args.learning_start_episode: return \n if self.update_cnt > 0 and self.var >= self.min_var: self.var *= args.var_discount\n #if episode_now > self.last_update_episode and (episode_now - args.learning_start_episode) % args.learning_fre == 0:\n if game_step % args.learning_fre_step == 0:\n if self.update_cnt == 0: print('\\r=start training ...'+' '*100)\n self.last_update_episode = episode_now\n self.update_cnt += 1\n\n # update every agent in different memory batch\n for agent_idx, (actor_c, actor_t, critic_c, critic_t, opt_a, opt_c) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, \\\n self.critics_tar, self.optimizers_a, self.optimizers_c)):\n # del if opt_c == None: continue # jump to the next model update\n\n # sample the experience\n _obs_n_o, _action_n, _rew_n, _obs_n_n, _done_n = self.memory.sample( \\\n args.batch_size, agent_idx) # Note_The func is not the same as others\n \n # --use the date to update the CRITIC\n rew = torch.tensor(_rew_n, device=args.device, dtype=torch.float) # set the rew to gpu\n done_n = torch.tensor(~_done_n, dtype=torch.float, device=args.device) # set the rew to gpu\n action_cur_o = torch.from_numpy(_action_n).to(args.device, torch.float)\n obs_n_o = torch.from_numpy(_obs_n_o).to(args.device, torch.float)\n obs_n_n = torch.from_numpy(_obs_n_n).to(args.device, torch.float)\n\n action_tar = torch.cat([a_t(obs_n_n[:, self.obs_size[idx][0]:self.obs_size[idx][1]]).detach() \\\n for idx, a_t in enumerate(self.actors_tar)], dim=1)\n q = critic_c(obs_n_o, action_cur_o).reshape(-1) # q \n q_ = critic_t(obs_n_n, action_tar).reshape(-1) # q_ \n q_ = q_*args.gamma*done_n + rew*torch.tensor(args.reward_scale_par, device=args.device) # q_*gamma*done + reward\n loss_c = torch.nn.MSELoss()(q, q_.detach()) # bellman equation\n opt_c.zero_grad()\n loss_c.backward()\n nn.utils.clip_grad_norm_(critic_c.parameters(), args.max_grad_norm)\n opt_c.step()\n\n # --use the data to update the ACTOR\n # There is no need to cal other agent's action\n opt_c.zero_grad()\n model_out, policy_c_new = actor_c( \\\n obs_n_o[:, self.obs_size[agent_idx][0]:self.obs_size[agent_idx][1]], model_original_out=True)\n # update the aciton of this agent\n action_cur_o[:, self.action_size[agent_idx][0]:self.action_size[agent_idx][1]] = policy_c_new \n loss_pse = torch.mean(torch.pow(model_out, 2))\n loss_a = torch.mul(torch.tensor(-1.0, device=args.device), torch.mean(critic_c(obs_n_o, action_cur_o)))\n\n opt_a.zero_grad()\n (2e-3*loss_pse+loss_a).backward()\n #loss_a.backward()\n nn.utils.clip_grad_norm_(actor_c.parameters(), args.max_grad_norm)\n opt_a.step()\n\n # save the model to the path_dir ---cnt by update number\n #if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model == 0:\n if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model_step == 0:\n time_now = time.strftime('%y%m_%d%H%M')\n print('=time:{} step:{} save'.format(time_now, game_step))\n model_file_dir = os.path.join(args.save_dir, '{}_{}_{}'.format( \\\n args.scenario_name, time_now, game_step))\n if not os.path.exists(model_file_dir): # make the path\n os.mkdir(model_file_dir)\n for agent_idx, (a_c, a_t, c_c, c_t) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, self.critics_tar)):\n torch.save(a_c, os.path.join(model_file_dir, 'a_c_{}.pt'.format(agent_idx)))\n torch.save(a_t, 
os.path.join(model_file_dir, 'a_t_{}.pt'.format(agent_idx)))\n torch.save(c_c, os.path.join(model_file_dir, 'c_c_{}.pt'.format(agent_idx)))\n torch.save(c_t, os.path.join(model_file_dir, 'c_t_{}.pt'.format(agent_idx)))\n\n # update the tar par\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, args.tao) \n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, args.tao)",
"def retrain(model, train_data, test_data, dag_folder, opt, metrics_callback, plotting_callback):\n # Prepare path for saving results\n stage_name = \"retrain_{}\".format(dag_folder)\n save_path = os.path.join(opt.exp_path, stage_name)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n # Check if already computed\n if os.path.exists(os.path.join(save_path, \"best-model.pkl\")):\n print(stage_name, \"already computed. Loading result from disk.\")\n return load(save_path, \"best-model.pkl\")\n\n time0 = time.time()\n\n # initialize stuff for learning loop\n nlls = []\n nlls_val = []\n losses = []\n losses_val = []\n grad_norms = []\n grad_norm_ma = [0.0] * (opt.num_train_iter + 1)\n\n # early stopping stuff\n best_model = copy.deepcopy(model)\n best_nll_val = np.inf\n patience = opt.patience\n\n if opt.optimizer == \"sgd\":\n optimizer = torch.optim.SGD(model.parameters(), lr=opt.lr)\n elif opt.optimizer == \"rmsprop\":\n # This allows the optimizer to return the learning rates for each parameters\n monkey_patch_RMSprop(torch.optim.RMSprop)\n optimizer = torch.optim.RMSprop(model.parameters(), lr=opt.lr)\n else:\n raise NotImplementedError(\"optimizer {} is not implemented\".format(opt.optimizer))\n\n # Learning loop:\n for iter in range(opt.num_train_iter):\n # compute loss\n model.train()\n x, mask, regime = train_data.sample(opt.train_batch_size)\n weights, biases, extra_params = model.get_parameters(mode=\"wbx\")\n nll = compute_loss(x, mask, regime, model, weights, biases, extra_params,\n opt.intervention, opt.intervention_type,\n opt.intervention_knowledge)\n\n nlls.append(nll.item())\n model.eval()\n\n # compute regularizer\n # w_adj = model.get_w_adj()\n # reg = opt.reg_coeff * compute_penalty([w_adj], p=1)\n # reg /= w_adj.shape[0]**2\n\n # if opt.coeff_interv_sparsity > 0 and opt.intervention_knowledge == \"unknown\" :\n # interv_w = 1 - model.gumbel_interv_w.get_proba()\n # group_norm = torch.norm(interv_w, p=1, dim=1, keepdim=True)\n # reg_interv = opt.coeff_interv_sparsity * (group_norm).sum()\n # else:\n # reg_interv = torch.tensor(0)\n\n reg = torch.tensor(0)\n reg_interv = torch.tensor(0)\n\n # compute augmented langrangian\n loss = nll\n\n # optimization step on augmented lagrangian\n optimizer.zero_grad()\n loss.backward()\n _, lr = optimizer.step() if opt.optimizer == \"rmsprop\" else optimizer.step(), opt.lr\n\n # compute augmented lagrangian moving average\n losses.append(loss.item())\n grad_norms.append(model.get_grad_norm(\"wbx\").item())\n grad_norm_ma[iter + 1] = grad_norm_ma[iter] + 0.01 * (grad_norms[-1] - grad_norm_ma[iter])\n\n # compute loss on whole validation set\n if iter % 1000 == 0:\n with torch.no_grad():\n x, mask, regime = test_data.sample(test_data.num_samples)\n nll_val = compute_loss(x, mask, regime, model, weights, biases,\n extra_params, opt.intervention,\n opt.intervention_type,\n opt.intervention_knowledge)\n # nll_val = - torch.mean(model.compute_log_likelihood(x, weights, biases, extra_params)).item()\n nlls_val.append(nll_val)\n losses_val.append([iter, nll_val + reg.item()])\n\n # nll_val the best?\n if nll_val < best_nll_val:\n best_nll_val = nll_val\n patience = opt.patience\n best_model = copy.deepcopy(model)\n else:\n patience -= 1\n\n # log metrics\n if iter % 100 == 0:\n print(\"Iteration:\", iter)\n metrics_callback(stage=stage_name, step=iter,\n metrics={\"loss\": loss.item(),\n \"loss-val\": losses_val[-1][1],\n \"nll\": nlls[-1],\n \"nll-val\": nlls_val[-1],\n \"grad-norm-moving-average\": grad_norm_ma[iter + 1],\n 
\"w_prop_0\": sum([(w == 0).long().sum().item() for w in weights]) /\n model.numel_weights,\n \"patience\": patience,\n \"best-nll-val\": best_nll_val})\n\n # plot\n if iter % opt.plot_freq == 0:\n plot_learning_curves_retrain(losses, losses_val, nlls, nlls_val, save_path)\n\n # Have we converged?\n if patience == 0:\n timing = time.time() - time0\n\n # save\n dump(best_nll_val, save_path, 'best-nll-val', txt=True)\n dump(opt.__dict__, save_path, 'opt')\n dump(nlls, save_path, 'nlls-train')\n dump(nlls_val, save_path, 'nlls-val')\n dump(losses, save_path, 'losses')\n dump(losses_val, save_path, 'losses-val')\n dump(grad_norms, save_path, 'grad-norms')\n dump(grad_norm_ma[:iter], save_path, 'grad-norm-ma')\n dump(timing, save_path, 'timing')\n\n # plot\n plot_learning_curves_retrain(losses, losses_val, nlls, nlls_val, save_path)\n\n return model",
"def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):",
"def train_client(self,fvects,no_features): \n # print train statistics. \n self.N = len(fvects)\n self.initialize_weights()\n if self.n == 2:\n print \"Binary Logistic Regression\"\n else:\n print \"Multi-class (%d classes) Logistic Regression\" % self.n\n print \"L2 regularization coefficient = %f\" % self.c\n print \"Total iterations = %d\" % self.total_iterations\n print \"Initial learning rate = %f\" % self.eta0\n print \"Total number of instances = %d\" % self.N\n self.k = 1\n self.s = 0\n RA = ROLLING_AVERAGER(self.L2_rolling, self.L2_bound)\n # Iterate over the training dataset.\n for i in range(1, self.total_iterations+1):\n print \"\\nIteration #%d\" % i,\n startTime = time.time()\n self.loss = 0\n count = 0\n for fv in fvects:\n count += 1\n eta = float(self.eta0) / (1. + (float(count)/self.N))\n self.update(fv,eta)\n self.k += 1\n endTime = time.time()\n print \"time taken (sec)=\", (endTime-startTime)\n # Show the value of the bias term.\n if self.verbose:\n for lbl in self.bias:\n print \"Bias Term %d = %f\" % (lbl,self.bias[lbl])\n (L1_norm, L2_norm, actives) = self.get_norms()\n self.active_features = actives\n print \"Active Features = %d/%d\" % (actives,no_features) \n print \"L1 norm = %f\" % L1_norm\n print \"L2 norm = %f\" % L2_norm\n if RA.add(L2_norm) == 1:\n print \"Terminating...L2 norm does not change\"\n break\n if self.verbose:\n self.display_training_error(fvects)\n if self.heldoutVects:\n self.display_heldout_error(self.heldoutVects) \n # if not in the verbose mode then print the final results.\n if not self.verbose:\n trainError = self.display_training_error(fvects)\n if self.heldoutVects:\n self.display_heldout_error(self.heldoutVects) \n pass",
"def fit(self):\n for i in range(self.current_epoch, self.max_epoch):\n self.current_epoch += 1\n # train\n train_dataloader = self.data_module.get_train_dataloader(\n batch_size=self.train_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers,\n pin_memory=True\n )\n neptune.log_metric(\"learning_rate_vs_epoch\", self.optimizer.param_groups[0]['lr'])\n self.train_one_epoch(train_dataloader)\n\n # validate \n if self.validate_after == 'epoch' and self.train_on_all_data == False and self.run_lr_range_test == False:\n validation_dataloader = self.data_module.get_valid_dataloader(\n batch_size=self.valid_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers, \n pin_memory=True\n )\n self.validate_one_epoch(validation_dataloader)\n\n if self.scheduler:\n if self.step_scheduler_after == 'epoch': \n if self.step_scheduler_metric == 'val_auc':\n self.scheduler.step(self.metrics['valid'][-1]['auc_score'])\n else:\n self.scheduler.step()\n\n if self.run_lr_range_test:\n neptune.log_metric('validation_epoch_end_AUC_vs_LR', \n self.scheduler.get_last_lr()[0], y=self.metrics['valid'][-1]['auc_score'])\n\n # checkpoint model for resuming model\n if (self.current_epoch % self.checkpoint_epochs) == 0:\n self.save_checkpoint()\n\n # sleep the training process\n if self.current_epoch % self.sleep_in_epochs == 0:\n print(f\"SLEEPING FOR {self.sleep_time} at epoch={self.current_epoch}\")\n for i in range(int(self.sleep_time/30)):\n time.sleep(i)\n neptune.log_metric(\"sleeping_status\", y=1)\n\n stop_training = self.stopping_criteria()\n if stop_training:\n if self.fp16:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer.zero_grad()\n else:\n self.optimizer.step()\n self.optimizer.zero_grad()\n # backward all the accumulate gradients\n print(f\"stopped training at {self.current_epoch} epoch\")\n break",
"def train(self):\n loss_func = torch.nn.MSELoss()\n training_done = False\n total_loss_array = []\n while not training_done:\n # sample a timestep before the cutoff for cross_validation\n rand_timestep_within_sched = np.random.randint(len(self.X_train_naive))\n input_nn = self.X_train_naive[rand_timestep_within_sched]\n\n # iterate over pairwise comparisons\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n truth_nn = input_nn.clone()\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n truth_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n self.opt.zero_grad()\n output = self.model.forward(input_nn)\n\n loss = loss_func(output, truth_nn)\n\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n\n total_loss_array.append(loss.item())\n\n total_iterations = len(total_loss_array)\n\n if total_iterations % 1000 == 999:\n print('current timestep:', total_iterations, 'avg loss for last 500: ', np.mean(total_loss_array[-500:]))\n torch.save({'nn_state_dict': self.model.state_dict()},\n '/home/rohanpaleja/PycharmProjects/bayesian_prolo/scheduling_env/additions_for_HRI/models/Autoencoder' + str(self.num_schedules) + '.tar')\n\n if total_iterations > 2000000:\n training_done = True\n torch.save({'nn_state_dict': self.model.state_dict()},\n '/home/rohanpaleja/PycharmProjects/bayesian_prolo/scheduling_env/additions_for_HRI/models/Autoencoder' + str(self.num_schedules) + '.tar')",
"def train(ensemble, train_x, train_y, eval_x, eval_y, train_mean, train_sd, validation=False, minibatch_size=None, no_epochs=None, subsampling=None, optimizer=None, early_stopping=False): \n # if validation is true, expect eval_x and eval_y to be normalised as well\n if early_stopping == True:\n no_epochs_range = deepcopy(no_epochs)\n no_epochs = max(no_epochs_range)\n\n results_dict_list = [] \n\n trainset_size = train_y.size()[0]\n # train networks in ensemble\n no_epochs = int(no_epochs)\n with trange(no_epochs) as epochs:\n for epoch in epochs: # loop over epochs\n for model_no, model in enumerate(ensemble.models):\n # calculate the number of batches in this epoch\n no_batches = int(np.floor(trainset_size/minibatch_size))\n #print('Beginning epoch {}'.format(epoch))\n # loop over trainset\n # shuffle the dataset\n idx = torch.randperm(trainset_size)\n x_train_normalised = train_x[idx,:] \n y_train_normalised = train_y[idx] \n for i in range(no_batches):\n # clear previous gradients\n ensemble.optimizers[model_no].zero_grad()\n \n # fetch the batch, but only if there are enough datapoints left\n if (i+1)*minibatch_size <= trainset_size - 1:\n x_train_batch = x_train_normalised[i*minibatch_size:(i+1)*minibatch_size,:]\n y_train_batch = y_train_normalised[i*minibatch_size:(i+1)*minibatch_size]\n \n # forward pass and calculate loss\n loss = model.get_U(x_train_batch, y_train_batch, trainset_size=trainset_size)\n \n # compute gradients of all variables wrt loss \n loss.backward()\n\n # perform updates using calculated gradients\n ensemble.optimizers[model_no].step()\n\n if early_stopping == True: # early stopping should be done over an entire ensemble\n if (epoch + 1) in no_epochs_range:\n ensemble_MSE, ensemble_LL = evaluate(ensemble, eval_x, eval_y, train_mean, train_sd, validation=True) \n results_dict = {'ensemble_MSE':ensemble_MSE, 'ensemble_LL':ensemble_LL, 'no_epochs':epoch + 1}\n results_dict_list.append(results_dict)\n\n if early_stopping == True:\n return results_dict_list",
"def train(self, trainingData, trainingLabels, validationData, validationLabels): \n \n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n \n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n \n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)",
"def train(self, df, feature, max_range, extra=False, defender=False):\n\n df2 = self._train_preprocess(df, feature, extra)\n\n # No need for names anymore\n if defender:\n df2 = df2.drop([\"Player Id\"], axis=1)\n\n # Instantiate the models\n self.rfrg = RandomForestRegressor(n_estimators=1000, n_jobs=-1, random_state=69420)\n\n if not defender:\n self.gbrg = LGBMRegressor(n_estimators=1000, learning_rate=0.01)\n\n # Then, perform regression -> This is to see how it performs over weeks\n mean_error1 = []\n mean_error2 = []\n\n for week in range(max_range - 5, max_range):\n train = df2[df2['week'] < week]\n val = df2[df2['week'] == week]\n\n x_train, x_test = train.drop([feature], axis=1), val.drop([feature], axis=1)\n y_train, y_test = train[feature].values, val[feature].values\n\n self.rfrg.fit(x_train, y_train)\n preds1 = self.rfrg.predict(x_test)\n error1 = rmsle(y_test, preds1)\n print('Week %d - Error for Random Forest %.5f' % (week, error1))\n\n mean_error1.append(error1)\n if not defender:\n self.gbrg.fit(x_train, np.log1p(y_train))\n preds2 = np.expm1(self.gbrg.predict(x_test))\n error2 = rmsle(y_test, preds2)\n print('Week %d - Error for Gradient Boosting %.5f' % (week, error2))\n mean_error2.append(error2)\n\n print()\n print()\n print(\"Feature statistics:\")\n print(f\"Min value for feature {feature}: {df[feature].min()}\")\n print(f\"Max value for feature {feature}: {df[feature].max()}\")\n print(f\"Mean value for feature {feature}: {df[feature].mean()}\")\n print(f\"Standard deviation for feature {feature}: {df[feature].std()}\")\n print()\n print(\"Results\")\n print('Mean Error for Random Forest = %.5f' % np.mean(mean_error1))\n\n # Note: the final model is trained on every week and stored in self.model!\n final_xtrain = df2.drop([feature], axis=1)\n final_ytrain = df2[feature].values\n self.rfrg.fit(final_xtrain, final_ytrain)\n\n if not defender:\n print('Mean Error for Gradient Boosting = %.5f' % np.mean(mean_error2))\n self.gbrg.fit(final_xtrain, np.log1p(final_ytrain))",
"def compute_metrics(self, train_data, test_data, criterion):\n m = self.metrics\n warnings.filterwarnings('ignore','Mean of empty slice')\n\n ## load data\n trn, trn_labs = train_data\n tst, tst_labs = test_data\n\n # trn = trn.transpose(1,0)\n tst = tst.transpose(1,0)\n\n t_final = -(np.flipud(trn!=self.padding).argmax(0)+1)\n test_tfinal = -(np.flipud(tst!=self.padding).argmax(0)+1)\n\n ntest = tst.size(1)\n P = self.decoder.out_features\n\n ## training data ###########################################################\n # hidden = self.init_hidden(trn.size(1))\n # out, hidden = self.transparent_forward(trn, hidden)\n # # output = out[t_final, np.arange(trn.size(1)), :]\n # output = out.squeeze()\n # # compute orthogonality\n # mem_act = np.array([np.cumsum(trn==p,axis=0).int().detach().numpy() % 2 \\\n # for p in range(self.q_)]).transpose((1,2,0))\n\n # ps_clf = LinearDecoder(self, 2**(self.q_-1), MeanClassifier)\n # ps = []\n # for d in Dichotomies(mem_act, 'simple'):\n # np.warnings.filterwarnings('ignore',message='invalid value encountered in')\n # ps_clf.fit(hidden.detach().numpy(), d)\n # new_ps = ps_clf.orthogonality()\n # ps.append(new_ps)\n # # if new_ps > ps:\n # # ps = new_ps\n # m['train_parallelism'] = np.append(m['train_parallelism'], np.array(ps).T, axis=0)\n\n # # print(mem_act.shape)\n # # print(hidden.shape)\n # # self.orth_clf.fit(hidden.detach().numpy(), mem_act)\n # # orth_score = self.orth_clf.orthogonality()\n # # m['train_orthogonality'] = np.append(m['train_orthogonality'], orth_score)\n\n ## test data ##############################################################\n hidden = self.init_hidden(tst.size(1))\n out, hidden = self.transparent_forward(tst, hidden)\n # output = out.squeeze()\n # print(hidden.shape)\n # print(out.shape)\n # print(test_tfinal)\n output = out[test_tfinal, np.arange(tst.size(1)), :]\n # raise Exception\n\n # compute loss\n test_loss = criterion(output.squeeze(0),tst_labs.squeeze())\n\n m['test_loss'] = np.append(m['test_loss'], test_loss.item())\n\n # compute orthogonality\n # mem_act = np.array([np.cumsum(tst==p,axis=0).int().detach().numpy() % 2 \\\n # for p in range(self.q_)]).transpose((1,2,0))\n\n # # self.orth_clf.fit(hidden.detach().numpy(), mem_act)\n # # orth_score = self.orth_clf.orthogonality()\n # # m['test_orthogonality'] = np.append(m['test_orthogonality'], orth_score)\n\n # # compute parallelism\n # ps_clf = LinearDecoder(self, 2**(self.q_-1), MeanClassifier)\n # ps = []\n # for d in Dichotomies(mem_act, 'simple'):\n # np.warnings.filterwarnings('ignore',message='invalid value encountered in')\n # ps_clf.fit(hidden.detach().numpy(), d)\n # new_ps = ps_clf.orthogonality()\n # ps.append(new_ps)\n # # if new_ps > ps:\n # # ps = new_ps\n # m['test_parallelism'] = np.append(m['test_parallelism'], np.array(ps).T, axis=0)\n\n ## package #################################################################\n self.metrics = m\n warnings.filterwarnings('default')",
"def buildAndTrain(trainingData):\n\tname = trainingData.drop(['count', 'casual', 'registered'], axis=1).columns\n\ttarget = trainingData['count'].values\n\tfeature = trainingData.drop(['count', 'casual', 'registered'], axis=1).values\n\t# feature scaling\n\tfeature_scaled = preprocessing.scale(feature)\n\t# 0.5 cross validate\n\tcv = cross_validation.ShuffleSplit(len(feature_scaled), n_iter=5, test_size=0.2, random_state=0)\n\t# build model, then training and get accuracy of it\n\tprint('\\n---------岭回归结果--------\\n')\n\tfor train, test in cv:\n\t\tregLR = linear_model.Ridge().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregLR.score(feature_scaled[train], target[train]),\n\t\t regLR.score(feature_scaled[test], target[test])))\n\tprint('\\n---------svm结果--------\\n')\n\tfor train, test in cv:\n\t\tregSvm = svm.SVR().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[test], target[test])))\n\tprint('\\n---------随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRF = RandomForestRegressor(n_estimators=100).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[test], target[test])))\n\t# reduce some low correction feature\n\tfeatureReduced = trainingData.drop(['count', 'casual', 'registered', 'holiday', 'workingday', 'day'], axis=1).values\n\tfeatureReduced_scaled = preprocessing.scale(featureReduced)\n\tprint('\\n---------减少特征维度以避免过拟合后的随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRFImpr = RandomForestRegressor(n_estimators=100).fit(featureReduced_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[test], target[test])))\n\t# use grid search algorithm to improve random forest regression\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_scaled, target, test_size=0.2, random_state=0)\n\ttuned_parameters = [{'n_estimators': [10,100,500], 'max_depth': [2,3,4,5,6,7,8,9,10]}]\n\tscores = ['r2']\n\n\tfor score in scores:\n\t\tprint(score)\n\t\tclf = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=5, scoring=score)\n\t\tclf.fit(X_train, y_train)\n\t\tprint(clf.best_estimator_)\n\t\tprint('each parameter combination is ')\n\t\tfor params, mean_score, scores in clf.grid_scores_:\n\t\t\tprint('{0:.3f} (+/-{1:.03f}) for {2}'.format(mean_score, scores.std()/2, params))\n\n\tprint('--------最优参数下的随机森林结果--------')\n\tfor train, test in cv:\n\t\tregRFBest = RandomForestRegressor(n_estimators=100, max_depth=10).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[test], target[test])))\n\treturn regRFBest, feature_scaled, target",
"def evaluate(self, train_set, test_set, shuffle_batch=True,\n epochs=25, lr_decay=0.95, sqr_norm_lim=9,labels=None,model=None): \n cost = self.negative_log_likelihood(self.y) \n dropout_cost = self.dropout_negative_log_likelihood(self.y)\n # adadelta upgrades: dict of variable:delta\n grad_updates = self.sgd_updates_adadelta(dropout_cost, lr_decay, 1e-6, sqr_norm_lim)\n # shuffle dataset and assign to mini batches.\n # if dataset size is not a multiple of batch size, replicate \n # extra data (at random)\n np.random.seed(3435)\n batch_size = self.batch_size\n if train_set.shape[0] % batch_size > 0:\n extra_data_num = batch_size - train_set.shape[0] % batch_size\n #extra_data = train_set[np.random.choice(train_set.shape[0], extra_data_num)]\n perm_set = np.random.permutation(train_set) \n extra_data = perm_set[:extra_data_num]\n new_data = np.append(train_set, extra_data, axis=0)\n else:\n new_data = train_set\n \n shuffled_data = np.random.permutation(new_data) # Attardi\n n_batches = shuffled_data.shape[0]/batch_size\n # divide train set into 90% train, 10% validation sets\n n_train_batches = int(np.round(n_batches*0.8))\n n_val_batches = n_batches - n_train_batches\n train_set = shuffled_data[:n_train_batches*batch_size,:]\n val_set = shuffled_data[n_train_batches*batch_size:,:] \n # push data to gpu \n # the dataset has the format [word_indices,padding,user,label]\n train_set_x, train_set_y = shared_dataset(train_set[:,:-2], train_set[:,-1]) \n train_set_u = theano.shared(np.asarray(train_set[:,-2],dtype='int32')) \n # val_set_x = val_set[:,:-2]\n # val_set_u = val_set[:,-2]\n # val_set_y = val_set[:,-1]\n val_set_x, val_set_y = shared_dataset(val_set[:,:-2], val_set[:,-1])\n val_set_u = theano.shared(np.asarray(val_set[:,-2],dtype='int32')) \n test_set_x = test_set[:,:-2]\n test_set_u = test_set[:,-2]\n test_set_y = test_set[:,-1] \n batch_start = self.index * batch_size\n batch_end = batch_start + batch_size\n\n # compile Theano functions to get train/val/test errors\n \n \n test_y_pred = self.predict(test_set_x,test_set_u)\n test_error = T.mean(T.neq(test_y_pred, self.y))\n # errors on train set\n if self.Users is not None:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]\n },\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]},\n allow_input_downcast=True)\n val_model = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: val_set_x[batch_start:batch_end],\n self.y: val_set_y[batch_start:batch_end], \n self.u: val_set_u[batch_start:batch_end]},\n allow_input_downcast=True)\n test_model = theano.function([self.x, self.u, self.y], test_error, allow_input_downcast=True)\n else:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast=True)\n\n val_model = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: val_set_x[batch_start:batch_end],\n 
self.y: val_set_y[batch_start:batch_end]},\n allow_input_downcast=True)\n test_model = theano.function([self.x, self.y], test_error, allow_input_downcast=True)\n\n # start training over mini-batches\n print 'training...' \n best_val_perf = 0\n test_perf = 0 \n patience = 5\n drops = 0\n prev_val_perf = 0 \n for epoch in xrange(epochs):\n start_time = time.time()\n # FIXME: should permute whole set rather than minibatch indexes\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n cost_epoch = train_model(minibatch_index)\n self.set_zero(self.zero_vec) # CHECKME: Why?\n else:\n for minibatch_index in xrange(n_train_batches):\n cost_epoch = train_model(minibatch_index) \n self.set_zero(self.zero_vec)\n train_losses = [train_error(i) for i in xrange(n_train_batches)]\n train_perf = 1 - np.mean(train_losses)\n val_losses = [val_model(i) for i in xrange(n_val_batches)]\n val_perf = 1 - np.mean(val_losses) \n info = 'epoch: %i\\%i (%.2f secs) train acc: %.2f %% | val acc: %.2f %%' % (\n epoch,epochs, time.time()-start_time, train_perf * 100., val_perf*100.) \n # from ipdb import set_trace; set_trace()\n if val_perf > prev_val_perf: \n drops=0\n if val_perf >= best_val_perf:\n best_val_perf = val_perf\n info+= \" **\"\n if model:\n # print \"save model\"\n self.save(model)\n if self.Users is not None:\n test_loss = test_model(test_set_x, test_set_u, test_set_y)\n else:\n test_loss = test_model(test_set_x, test_set_y)\n test_perf = 1 - test_loss \n else: \n drops+=1\n if drops >= patience:\n print \"Ran out of patience...\"\n break\n prev_val_perf = val_perf\n print info\n # set_trace() \n return test_perf",
"def main(debug=False):\n num_rows = 10000 if debug else None\n df = application_train_test(num_rows)\n print('df shape:', df.shape, '- After app process')\n with timer(\"Process bureau and bureau_balance\"):\n bureau = bureau_and_balance(num_rows)\n print('---------')\n print(\"df shape:\", bureau.shape, '- just bureau')\n df = df.join(bureau, how='left', on='SK_ID_CURR')\n print('df shape:', df.shape, '- After bureau')\n del bureau\n gc.collect()\n\n with timer(\"Process previous_applications\"):\n prev = previous_applications(num_rows)\n print('---------')\n print(\"df shape:\", prev.shape, '- Previous applications')\n df = df.join(prev, how='left', on='SK_ID_CURR')\n print(\"df shape:\", df.shape, '- Joined Previous applications')\n del prev\n gc.collect()\n\n with timer(\"Process POS-CASH balance\"):\n pos = pos_cash(num_rows)\n print('---------')\n print(\"df shape:\", pos.shape, \"- just Pos-cash balance\")\n df = df.join(pos, how='left', on='SK_ID_CURR')\n print(\"df shape:\", df.shape, '- Joined pos-cash')\n del pos\n gc.collect()\n\n with timer(\"Process installments payments\"):\n ins = installments_payments(num_rows)\n print(\"df shape:\", ins.shape, \"just Installments payments\")\n df = df.join(ins, how='left', on='SK_ID_CURR')\n print(\"df shape:\", df.shape, '- Joined Installments')\n del ins\n gc.collect()\n with timer(\"Process credit card balance\"):\n cc = credit_card_balance(num_rows)\n print(\"df shape:\", cc.shape, \"- just Credit card balance \")\n df = df.join(cc, how='left', on='SK_ID_CURR')\n print(\"df shape:\", df.shape, '- Joined cc balance')\n del cc\n gc.collect()\n\n with timer(\"Run LightGBM with kfold\"):\n feature_importance_df, oof_preds, y = kfold_lightgbm(df, num_rows, num_folds=10, stratified=False, debug=debug)\n\n return feature_importance_df, oof_preds, y",
"def _train(self, features: pd.DataFrame, labels: pd.DataFrame,\n output_folder: str, n_iter: int=3, n_epoch: int=100,\n train_size: float=0.8,\n out_features: int=None, weight_class: bool=False,\n balanced_sampling: bool=False,\n base_net: Net=None, train_last: bool=False,\n refit: bool=False, refit_n_epoch: int=100, verbose: bool=True):\n\n # weight_class and balanced_sample cannot be True at the same time.\n # if weight_class and balanced_sample:\n # raise ValueError('weight_class and balanced_sample cannot be '\n # '\"True\" at the same time.')\n\n # Make an output folder if not exist.\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n # apply log10 to some features.\n # TODO: fine this code.\n features['period'], min_period = apply_log10(features['period'])\n features['amplitude'], min_amplitude = apply_log10(features['amplitude'])\n features['hl_amp_ratio'], min_hl_amp_ratio = \\\n apply_log10(features['hl_amp_ratio'])\n features['kurtosis'], min_kurtosis = apply_log10(features['kurtosis'])\n features['phase_cusum'], min_phase_cusum = \\\n apply_log10(features['phase_cusum'])\n features['phase_eta'], min_phase_eta = \\\n apply_log10(features['phase_eta'])\n features['quartile31'], min_quartile31 = \\\n apply_log10(features['quartile31'])\n features['skewness'], min_skewness = apply_log10(features['skewness'])\n features['slope_per90'], min_slope_per90 = \\\n apply_log10(features['slope_per90'])\n\n min_values = {\n 'min_period': min_period,\n 'min_amplitude': min_amplitude,\n 'min_hl_amp_ratio': min_hl_amp_ratio,\n 'min_kurtosis': min_kurtosis,\n 'min_phase_cusum': min_phase_cusum,\n 'min_phase_eta': min_phase_eta,\n 'min_quartile31': min_quartile31,\n 'min_skewness': min_skewness,\n 'min_slope_per90': min_slope_per90\n }\n\n self.min_values = min_values\n # Save for later usage.\n pickle.dump(self.min_values, open(os.path.join(\n output_folder, 'min_params.pkl'), 'wb'))\n\n features = np.array(features)\n labels = np.array(labels)\n\n # Normalize.\n features_median = np.median(features, axis=0)\n features_std = np.std(features, axis=0)\n\n # original.\n features_norm = (features - features_median) / features_std\n\n # new.\n # features_min = np.min(features, axis=0)\n # features_max = np.max(features, axis=0)\n # features_norm = features - features_min\n # features_norm /= features_max\n\n # Save the number of features at the last layers.\n if out_features is None:\n self.n_final = np.unique(labels).size\n else:\n self.n_final = out_features\n\n # Save.\n pickle.dump(self.n_final, open(os.path.join(\n output_folder, 'n_final.pkl'), 'wb'))\n\n # Save the values for later usage (e.g. prediction).\n # original.\n self.norm_params = [features_median, features_std]\n # new.\n # self.norm_params = [features_min, features_max]\n pickle.dump(self.norm_params, open(os.path.join(\n output_folder, 'norm_params.pkl'), 'wb'))\n\n # Fit a label encoder.\n le = LabelEncoder()\n le.fit(labels)\n labels_encoded = le.transform(labels)\n\n # Save the label encoder.\n self.label_encoder = le\n pickle.dump(self.label_encoder, open(os.path.join(\n output_folder, 'label_encoder.pkl'), 'wb'))\n\n # Derive class weight by its frequency.\n if weight_class:\n unique, counts = np.unique(labels_encoded, return_counts=True)\n counts = np.array(counts)\n rev_counts = 1. 
/ counts\n # weights = rev_counts / np.sum(rev_counts)\n weights = np.sum(counts) / counts\n class_weights = torch.FloatTensor(weights).to(self.device)\n\n # Training information.\n training_info = {'learning_rate': [],\n 'training_loss': [], 'validation_loss': [],\n 'test_f1': [], 'training_f1': [],\n 'test_mc': [], 'training_mc': []}\n\n # Train a model for the number of iteration.\n best_f1 = 0.\n best_mc = 0.\n f1_average = 'macro'\n for i in range(n_iter):\n # Train and test set split. So each iteration,\n # using a set separated differently.\n x_train, x_test, y_train, y_test = \\\n train_test_split(features_norm, labels_encoded,\n train_size=train_size, stratify=labels_encoded)\n\n # Build datasets.\n trainset = LightCurveDataset(x_train, y_train)\n testset = LightCurveDataset(x_test, y_test)\n\n # Up-sampling imbalanced dataset.\n if balanced_sampling:\n train_weights = self._get_balanced_sample_weights(y_train)\n test_weights = self._get_balanced_sample_weights(y_test)\n\n train_sampler = torch.utils.data.sampler.WeightedRandomSampler(\n train_weights, len(train_weights), replacement=True)\n test_sampler = torch.utils.data.sampler.WeightedRandomSampler(\n test_weights, len(test_weights), replacement=True)\n shuffle = False\n else:\n train_sampler = None\n test_sampler = None\n shuffle = True\n\n # Build data loaders.\n # batch_size = 1024\n batch_size = 10240\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=batch_size, shuffle=shuffle,\n sampler=train_sampler, num_workers=2)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=batch_size, shuffle=shuffle,\n sampler=test_sampler, num_workers=2)\n\n # Initialize a network before entering the iteration.\n net = Net()\n net.to(self.device)\n if base_net is not None:\n # For transfer learning.\n net.load_state_dict(base_net.state_dict())\n\n # Set the number of neurons at the final layers, which is\n # actually the number of target classes.\n net.fc4 = nn.Linear(net.bn4.num_features, self.n_final)\n net.bn5 = nn.BatchNorm1d(self.n_final)\n net.to(self.device)\n\n # Initial learning rate.\n learning_rate = 0.1\n\n # Set training instances.\n if base_net is not None:\n # Transfer only the last layer.\n if train_last:\n optimizer = optim.SGD(net.fc4.parameters(), lr=learning_rate,\n momentum=0.9)\n else:\n optimizer = optim.SGD(net.parameters(), lr=learning_rate,\n momentum=0.9)\n else:\n optimizer = optim.SGD(net.parameters(), lr=learning_rate,\n momentum=0.9)\n\n scheduler = ReduceLROnPlateau(optimizer, 'min', patience=3,\n eps=1e-15)\n if weight_class:\n criterion = nn.CrossEntropyLoss(weight=class_weights)\n else:\n criterion = nn.CrossEntropyLoss()\n\n # Iterate.\n for epoch in range(n_epoch):\n running_loss = 0.0\n\n # Iterate learning rate.\n if optimizer.param_groups[0]['lr'] <= 1e-10:\n optimizer.param_groups[0]['lr'] = learning_rate\n\n # For each batch.\n predicted_label = []\n true_label = []\n net.train()\n for l, data in enumerate(trainloader, 0):\n # Get the inputs.\n inputs, labels = data\n inputs, labels = inputs.to(self.device), \\\n labels.to(self.device)\n\n # Zero the parameter gradients.\n optimizer.zero_grad()\n\n # Forward + backward + optimize.\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n\n # Get true and predicted labels.\n outputs_numpy = torch.max(outputs, 1)[1].cpu().numpy()\n test_numpy = labels.cpu().numpy()\n predicted_label += outputs_numpy.tolist()\n true_label += test_numpy.tolist()\n\n loss.backward()\n optimizer.step()\n\n # Running loss.\n running_loss 
+= loss.item()\n\n # Calculate training f1.\n training_f1 = f1_score(true_label, predicted_label,\n average=f1_average)\n training_mc = matthews_corrcoef(true_label, predicted_label)\n training_mc = (training_mc + 1) / 2.\n\n # Get test-set performance\n val_loss = 0.\n predicted_label = []\n true_label = []\n net.eval()\n for m, test_data in enumerate(testloader, 0):\n test_inputs, test_labels = test_data\n test_inputs, test_labels = test_inputs.to(self.device), \\\n test_labels.to(self.device)\n\n outputs = net(test_inputs)\n val_loss += criterion(outputs, test_labels).item()\n\n # Get true and predicted labels.\n outputs_numpy = torch.max(outputs, 1)[1].cpu().numpy()\n test_numpy = test_labels.cpu().numpy()\n predicted_label += outputs_numpy.tolist()\n true_label += test_numpy.tolist()\n\n test_f1 = f1_score(true_label, predicted_label,\n average=f1_average)\n test_mc = matthews_corrcoef(true_label, predicted_label)\n test_mc = (test_mc + 1) / 2.\n\n curr_f1 = test_f1\n curr_mc = test_mc\n\n if verbose:\n self.logger.info(('[{0}, {1}] '\n 'train Mc: {2:.6f}, test Mc: {3:.6f}, '\n 'learning rate {4:.1e}').format(\n i + 1, epoch + 1, training_mc, curr_mc,\n optimizer.param_groups[0]['lr'])\n )\n\n # Save training information for later usage.\n training_info['learning_rate'].append(\n optimizer.param_groups[0]['lr'])\n training_info['training_loss'].append(running_loss)\n training_info['validation_loss'].append(val_loss)\n training_info['training_f1'].append(training_f1)\n training_info['test_f1'].append(curr_f1)\n training_info['training_mc'].append(training_mc)\n training_info['test_mc'].append(curr_mc)\n\n # We save at the end of each epoch,\n # just in case the training stops unexpectedly.\n pickle.dump(training_info, open(os.path.join(\n output_folder, 'training_info.pkl'), 'wb'))\n\n # Update the best f1 score.\n if curr_f1 > best_f1:\n best_f1 = curr_f1\n self.f1_best = best_f1\n\n # Only if the new model is better.\n if curr_mc > best_mc:\n best_mc = curr_mc\n self.mc_best = best_mc\n\n # Save the model.\n torch.save(net.state_dict(), os.path.join(\n output_folder, 'state_dict.pt'))\n self.net = net\n # self.logger.info('Better model saved.')\n\n # Save true and predicted labels for later usages.\n pickle.dump([true_label, predicted_label],\n open(os.path.join(output_folder,\n 'true_predicted.pkl'), 'wb'))\n\n # Save the best mc as a plain text for temporary saving.\n fp = open(os.path.join(output_folder, 'info.txt'), 'w')\n fp.writelines('# Mc: {0:.6f}, F1: {1:.6f}\\n'.\n format(best_mc, best_f1))\n fp.close()\n\n # Scheduler based on validation loss (i.e. test-set loss).\n scheduler.step(val_loss)\n\n # Epoch ends.\n if verbose:\n self.logger.info('The overall best Mc and F1 using the '\n 'validation set: {0:.6f} and {1:.6f}'.\n format(self.mc_best, self.f1_best))\n\n ################################\n # The whole training finishes. 
#\n ################################\n\n # Get the best test F1 for each iteration.\n test_f1 = np.max(\n np.array(training_info['test_f1']).reshape(-1, n_epoch), axis=1)\n # Calculate statistics of test_f1.\n self.f1_mean = np.mean(test_f1)\n self.f1_median = np.median(test_f1)\n self.f1_std = np.std(test_f1)\n\n # Get the best test Mc for each iteration.\n test_mc = np.max(\n np.array(training_info['test_mc']).reshape(-1, n_epoch), axis=1)\n # Calculate statistics of test_mc.\n self.mc_mean = np.mean(test_mc)\n self.mc_median = np.median(test_mc)\n self.mc_std = np.std(test_mc)\n\n # Save F1 information.\n fp = open(os.path.join(output_folder, 'info.txt'), 'w')\n fp.writelines('# Best_Mc Median_Mc Mean_Mc Std_Mc '\n 'Best_F1 Median_F1 Mean_F1 Std_F1\\n')\n fp.writelines('{0:.10f} {1:.10f} {2:.10f} {3:.10f} '\n '{4:.10f} {5:.10f} {6:.10f} {7:.10f}\\n'.format(\n self.mc_best, self.mc_median, self.mc_mean, self.mc_std,\n self.f1_best, self.f1_median, self.f1_mean, self.f1_std))\n fp.close()\n\n # Refit the model using the entire dataset.\n if refit:\n self.logger.info('Refit the trained model.')\n self._refit(features_norm, labels_encoded, output_folder,\n weight_class, balanced_sampling,\n refit_n_epoch, verbose)",
"def compute_features_one_round(\n train_base_df,\n train_delta_df,\n test_df,\n df_config,\n feature_config_list,\n feature_map,\n filter_by_month,\n compute_load_ratio=False,\n):\n\n train_round_df = pd.concat([train_base_df, train_delta_df])\n max_train_timestamp = train_round_df[df_config[\"time_col_name\"]].max()\n max_test_timestamp = test_df[df_config[\"time_col_name\"]].max()\n train_test_diff = max_test_timestamp - max_train_timestamp\n max_horizon = ceil(train_test_diff.days * 24 + train_test_diff.seconds / 3600)\n train_features, feature_pipeline = compute_training_features(\n train_round_df, df_config, feature_config_list, feature_map, max_horizon,\n )\n\n test_features = compute_testing_features(test_df, feature_pipeline, feature_config_list, train_round_df)\n\n if compute_load_ratio:\n rolling_window_args = LOAD_RATIO_CONFIG[\"same_day_of_week_rolling_args\"]\n previous_years_lag_args = LOAD_RATIO_CONFIG[\"same_week_of_year_lag_args\"]\n same_week_day_hour_rolling_featurizer = SameDayOfWeekRollingWindowFeaturizer(\n df_config, input_col_names=df_config[\"target_col_name\"], max_horizon=max_horizon, **rolling_window_args\n )\n train_df_with_recent_load = same_week_day_hour_rolling_featurizer.transform(train_round_df)\n same_week_day_hour_rolling_featurizer.train_df = train_round_df\n test_df_with_recent_load = same_week_day_hour_rolling_featurizer.transform(test_df)\n\n time_col_name = df_config[\"time_col_name\"]\n ts_id_col_names = df_config[\"ts_id_col_names\"]\n keep_col_names = [time_col_name]\n if ts_id_col_names is not None:\n if isinstance(ts_id_col_names, list):\n keep_col_names = keep_col_names + ts_id_col_names\n else:\n keep_col_names.append(ts_id_col_names)\n lag_df_list = []\n start_week = rolling_window_args[\"start_week\"]\n end_week = start_week + rolling_window_args[\"agg_count\"]\n for i in range(start_week, end_week):\n col_old = df_config[\"target_col_name\"] + \"_\" + rolling_window_args[\"output_col_suffix\"] + \"_\" + str(i)\n col_new = col_old + \"_\" + previous_years_lag_args[\"output_col_suffix\"]\n col_ratio = \"recent_load_ratio_\" + str(i)\n\n same_week_day_hour_lag_featurizer = SameWeekOfYearLagFeaturizer(\n df_config,\n input_col_names=col_old,\n train_df=train_df_with_recent_load,\n max_horizon=max_horizon,\n **previous_years_lag_args\n )\n\n lag_df = same_week_day_hour_lag_featurizer.transform(test_df_with_recent_load)\n lag_df[col_ratio] = lag_df[col_old] / lag_df[col_new]\n lag_df_list.append(lag_df[keep_col_names + [col_ratio]].copy())\n\n test_features = reduce(\n lambda left, right: pd.merge(left, right, on=keep_col_names), [test_features] + lag_df_list,\n )\n\n if filter_by_month:\n test_month = test_features[\"month_of_year\"].values[0]\n train_features = train_features.loc[train_features[\"month_of_year\"] == test_month,].copy()\n\n train_features.dropna(inplace=True)\n\n return train_features, test_features",
"def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')",
"def set_training_data(self):\n # Optional training data period\n # TODO: add training data period feature to training data query\n if not self.training_period == None:\n training_period_date = (datetime.datetime.utcnow() - timedelta(minutes=self.training_period)).strftime(\"%Y-%m-%d\")\n print(f\"Training data start date: {training_period_date}\")\n # Extract queried data from Athena\n #athena = athena_connect.Athena()\n #features_df = athena.pandas_read_athena(self.training_data_sql)\n with open('feature_sql.txt', 'w') as f:\n print(self.training_data_sql, file=f) \n features_df = pd.read_sql(self.training_data_sql, self.logic_db_engine())\n features_df.fillna(0, inplace=True)\n print(features_df.shape)\n features_df = features_df[max(self.feature_minutes_list):]\n print(features_df.shape)\n # Remove infinity string\n features_df.replace({'Infinity': 0}, inplace=True)\n # Convert all object fields to numeric except date fields\n object_col_list = features_df.columns[features_df.dtypes.eq('object')]\n object_col_list = [col for col in object_col_list if 'trade_date' not in col]\n features_df[object_col_list] = features_df[object_col_list].apply(pd.to_numeric, errors='coerce')\n self.training_df = features_df",
"def train(self):\n acc_time = []\n data_test = self.val_data[0][0][0]\n labels_test = self.val_data[0][0][1]\n for i, train_batch in enumerate(self.dataset):\n \n writerDIM = SummaryWriter('runs/experiment_DIM'+str(i))\n data,labels, t = train_batch\n\n index_tr,index_cv,coreset = data_split(data.shape[0],777)\n\n # adding eventual replay patterns to the current batch\n if i == 0:\n ext_mem = [data[coreset], labels[coreset]]\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n else:\n dataP = ext_mem[0]\n labP = ext_mem[1]\n\n ext_mem = [\n np.concatenate((data[coreset], ext_mem[0])),\n np.concatenate((labels[coreset], ext_mem[1]))]\n if self.replay:\n dataC = np.concatenate((data[index_tr], data[index_cv],dataP),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv],labP),axis=0)\n else:\n dataC = np.concatenate((data[index_tr], data[index_cv]),axis=0)\n labC = np.concatenate((labels[index_tr],labels[index_cv]),axis=0)\n\n\n\n print(\"----------- batch {0} -------------\".format(i))\n print(\"Task Label: \", t)\n trC,cvC = data_split_Tr_CV(dataC.shape[0],777)\n\n train_set = LoadDataset(dataC,labC,transform=self.tr,indices=trC)\n val_set = LoadDataset(dataC,labC,transform=self.tr,indices=cvC)\n print('Training set: {0} \\nValidation Set {1}'.format(train_set.__len__(),val_set.__len__()))\n batch_size=32\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaders = {'train':train_loader,'val':valid_loader}\n \n ####### Set hyperparameters for the training\n if i ==0: \n prior = False\n ep=40\n dim_model = DIM_model(batch_s=32,num_classes =128,feature=True) \n dim_model.to(self.device)\n classifierM = _classifier(n_input=128,n_class=50,n_neurons=[256,256,128])\n classifierM = classifierM.to(self.device)\n writer = SummaryWriter('runs/experiment_C'+str(i))\n lr_new = 0.00001\n lrC=0.0001\n \n else:\n prior = True\n ep=6\n \n lr_new =0.000005\n lrC = 0.00005\n\n optimizer = torch.optim.Adam(dim_model.parameters(),lr=lr_new)\n scheduler = lr_scheduler.StepLR(optimizer,step_size=40,gamma=0.1) #there is also MultiStepLR\n\n tr_dict_enc = {'ep':ep,'writer':writerDIM,'best_loss':1e10,'t_board':True,\n 'gamma':.5,'beta':.5,'Prior_Flag':prior,'discriminator':classifierM} \n tr_dict_cl = {'ep':40,'writer':writer,'best_loss':1e10,'t_board':True,'gamma':1}#40\n\n if i==0 and self.load:\n print('Load DIM model weights first step')\n dim_model.load_state_dict(torch.load(self.path + 'weights/weightsDIM_T0.pt'))\n else:\n ############################## Train Encoder########################################\n dim_model,self.stats = trainEnc_MI(self.stats,dim_model, optimizer, scheduler,dataloaders,self.device,tr_dict_enc)\n ####################################################################################\n if i==0:\n torch.save(dim_model.state_dict(), self.path + 'weights/weightsDIM_T'+str(i)+'.pt')\n\n ####\n #Conversion of image into latent space representation for classifier training\n ####\n dim_model.requires_grad_(False)\n for phase in ['train','val']:\n dataF = None\n labF = None\n for inputs, labels in dataloaders[phase]:\n torch.cuda.empty_cache()\n if len(inputs.shape)==5:\n\n inputs = inputs[:,:,:,:,0].to(self.device)\n else:\n inputs = inputs.to(self.device)\n\n _,_,pred = dim_model(inputs)\n pred_l = pred.data.cpu().numpy()\n if dataF is None:\n dataF = pred_l\n labF = 
labels.data.cpu().numpy()\n else:\n dataF = np.concatenate((dataF,pred_l),axis=0)\n labF = np.concatenate((labF,labels.data.cpu().numpy()),axis=0)\n\n if phase == 'train':\n dataTr_f = dataF\n labTr_f = labF\n else:\n dataCv_f = dataF\n labCv_f = labF\n \n dim_model.requires_grad_(True)\n train_set = LoadFeat(dataTr_f,labTr_f)\n val_set = LoadFeat(dataCv_f,labCv_f)\n batch_size=32\n\n train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)\n dataloaderC = {'train':train_loader,'val':valid_loader}\n\n optimizerC = torch.optim.Adam(classifierM.parameters(),lr=lrC)\n schedulerC = lr_scheduler.StepLR(optimizerC,step_size=40,gamma=0.1)\n classifierM.requires_grad_(True)\n\n ############################## Train Classifier ########################################\n classifierM,self.stats = train_classifier(self.stats,classifierM, optimizerC, schedulerC,dataloaderC,self.device,tr_dict_cl) \n #################################### #################################### ##############\n\n torch.save(classifierM.state_dict(), self.path + 'weights/weightsC_T'+str(i)+'.pt')\n dim_model.eval()\n classifierM.eval()\n #### Cross_val Testing\n \n test_set = LoadDataset(data_test,labels_test,transform=self.trT)\n batch_size=32\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)\n score= []\n\n for inputs, labels in test_loader:\n torch.cuda.empty_cache()\n inputs = inputs.to(self.device)\n labels = labels.to(self.device) \n _,_,ww =dim_model(inputs)\n pred = classifierM(ww)\n pred_l = pred.data.cpu().numpy()\n score.append(np.sum(np.argmax(pred_l,axis=1)==labels.data.cpu().numpy())/pred_l.shape[0])\n print('TEST PERFORMANCES:', np.asarray(score).mean())\n acc_time.append(np.asarray(score).mean())\n del test_set,test_loader\n \n self.dim_model = dim_model\n self.classifierM = classifierM\n acc_time = np.asarray(acc_time)\n return self.stats,acc_time",
"def test_intent_classifier_update_training_samples(self):\n pass",
"def _train_step(self):\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n (self._rng, self.optimizer_state, self.online_params,\n loss, quantile_loss, coherence_loss, orthogonality_loss) = train(\n self.network_def,\n self.online_params,\n self.target_network_params,\n self.optimizer,\n self.optimizer_state,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n self.num_tau_samples,\n self.num_tau_prime_samples,\n self.num_quantile_samples,\n self.cumulative_gamma,\n self.double_dqn,\n self.kappa,\n self._rng,\n self._coherence_weight,\n self._option,\n self._use_ortho_loss,\n self._use_cohe_loss,\n self._tau,\n self._alpha,\n self._clip_value_min)\n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n if self._use_ortho_loss and self._use_cohe_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Incoherence', simple_value=coherence_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Orthogonality',\n simple_value=orthogonality_loss),\n ])\n elif self._use_ortho_loss and not self._use_cohe_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Orthogonality', simple_value=orthogonality_loss),\n ])\n elif self._use_cohe_loss and not self._use_ortho_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Incoherence', simple_value=coherence_loss),\n ])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1",
"def train_loop(train_per_list, cut_off_list, C_list,\n factors, non_factors, data_path, executable_path, \n trial_factors_list=None): \n if trial_factors_list is None:\n trial_factors_list=[factors]\n sql_table = 'aggregated_ctr' #Data table\n # remove cross terms\n sql_features = list(set(sum([fs.split('*') for fs in factors], [])))\n# factors+=['campaign_id','ad_account_id','pub_account_id', \n# 'campaign_id*site', 'ad*pub_account_id']\n con_dict_dse={'host':'db.lqm.io','db':'dse',\n 'user':'dse','passwd':'dSe@lQm'}\n con_dict_mad={'host':'db.lqm.io','db':'madvertise_production',\n 'user':'readonly','passwd':'z0q909TVZj'}\n \n rtb_flag=[0,1]\n model_type=0\n has_intercept = True # bias term in LR\n tol = 0.00000001\n # NB these filenames are HARDCODED in write_sparse routines\n weights_file = 'train_ais.txt'\n train_file = 'train_svm.txt'\n test_file = 'test_svm.txt'\n probability_file = 'preds_SummModel_py.txt'\n results = []\n for train_per in train_per_list:\n test_per = ( add_hour(train_per[1], 1), add_hour(train_per[1], 3))\n # DATA RANGE IS INCLUSIVE => 00:00-02:00 = 3 HOURS\n train_df=mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, train_per, sql_features, rtb_flag)\n train_df=mysql_lqm.add_features( train_df)\n test_df= mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, test_per, sql_features, rtb_flag)\n test_df = mysql_lqm.add_features(test_df)\n \n sc, click_no_click_df, weights, targets \\\n = libLinear_functions.create_sparse_cat(train_df, factors, non_factors)\n\n \n for cut_off in cut_off_list:\n sparse_train_all = libLinear_functions.create_sparse(sc, cut_off, click_no_click_df)\n sparse_test_all = sc.transform(test_df)\n for trial_factors in trial_factors_list:\n trial_factors=trial_factors[:] # copy\n trial_factors.sort(key=lambda x: sc.factors.index(x))\n # libsvm expects the indices in ascending order\n print (trial_factors) \n sparse_train=sc.select_factors(sparse_train_all, trial_factors)\n sparse_test=sc.select_factors(sparse_test_all, trial_factors)\n libLinear_functions.write_sparse(sc, sparse_train, weights, targets, data_path, len(trial_factors))\n libLinear_functions.write_sparse_test(sc, sparse_test, data_path, n_columns_used= len(trial_factors))\n\n\n for C in C_list:\n model_file = \\\n '{start}_{stop}_cut_{cut_off}_C_{C:0.3}.model'.format(\n start=date_name(train_per[0]),\n stop=date_name(train_per[1]),\n cut_off=cut_off, C=C)\n fit(executable_path, data_path, train_file,\n model_file, weights_file, model_type, reg_param=C, tol=tol,\n has_intercept=has_intercept)\n \n \n pCTR = libLinear_functions.predict(executable_path, data_path, test_file,\n model_file, probability_file)\n if type(pCTR) is pd.Series:\n amounts = pd.DataFrame({\n 'no_clicks':test_df['instances' ]-test_df['clicks'],\n 'clicks':test_df['clicks']})\n mean_log_loss, weighted_log_loss = log_loss_weighted(pCTR, amounts)\n results.append([train_per[:],trial_factors[:],\n cut_off,C,amounts.clicks.sum(),amounts.no_clicks.sum(), mean_log_loss])\n results_df=pd.DataFrame(results,columns=['date','features','cutoff','C','clicks','no_clicks','lloss'])\n results_df.to_csv(data_path+'resultsX.txt',index=False, sep='|')\n # what to do if ERROR?\n return results_df, weighted_log_loss",
"def train_promotion_prediction_model(input_data, input_features, output_features, cat_columns, model,\r\n learning_rate, max_depth, num_leaves, n_iter, n_estimators,\r\n train_size, test_months_exclusion, cat_var_exclusion,\r\n remove_outliers, impact_stores_outlier=None, promo_duration_outlier=None,\r\n discount_depths_outlier=None, min_samples=3):\r\n\r\n # convert input data format\r\n # for cols in input data that is not in the cat cols list, convert to numeric\r\n # for col in list(input_data.columns):\r\n # if col not in list(cat_columns):\r\n input_data['perc_inc_qty'] = pd.to_numeric(input_data['perc_inc_qty'])\r\n input_data['in_leaflet_flag'] = pd.to_numeric(input_data['in_leaflet_flag'])\r\n input_data['in_gondola_flag'] = pd.to_numeric(input_data['in_gondola_flag'])\r\n input_data['duration_wks'] = pd.to_numeric(input_data['duration_wks'])\r\n\r\n # subtract 2 in duration_wks\r\n input_data[\"duration_wks\"] = input_data[\"duration_wks\"].apply(lambda x: x - 2)\r\n\r\n # Check no. of input sample data rows\r\n logger.info(\"Input sample data includes {b} samples...\".format(b=input_data.shape[0]))\r\n\r\n # Lets remove data within the test exclusion months list\r\n if 'campaign_start_month' in list(input_data.columns) and test_months_exclusion is not None:\r\n outliers = input_data[input_data.campaign_start_month.isin(test_months_exclusion)]\r\n logger.info(\r\n \"Removing sample data where campaign start months is in {a}, {b} sample data points removed...\".format(\r\n a=test_months_exclusion,\r\n b=outliers.shape[0]))\r\n input_data = input_data[~input_data['campaign_start_month'].isin(test_months_exclusion)]\r\n\r\n # Lets remove data where store count is below a certain value\r\n if 'no_impacted_stores' in list(input_data.columns) and impact_stores_outlier is not None:\r\n outliers = input_data[input_data['no_impacted_stores'] < impact_stores_outlier]\r\n logger.info(\"Removing sample data where impacted stores < {a}, {b} sample data points removed...\".format(\r\n a=impact_stores_outlier,\r\n b=outliers.shape[0]))\r\n input_data = input_data[input_data['no_impacted_stores'] >= impact_stores_outlier]\r\n\r\n # Lets remove data where duration is above a certain value\r\n if 'duration_days' in list(input_data.columns) and promo_duration_outlier is not None:\r\n outliers = input_data[input_data['duration_days'] > promo_duration_outlier]\r\n logger.info(\"Removing sample data where promotion duration > {a}, {b} sample data points removed...\".format(\r\n a=promo_duration_outlier,\r\n b=outliers.shape[0]))\r\n input_data = input_data[input_data['duration_days'] <= promo_duration_outlier]\r\n\r\n # Lets remove data where discount depth is not in specified list\r\n if 'discount_depth' in list(input_data.columns) and discount_depths_outlier is not None:\r\n outliers = input_data[~input_data.discount_depth.isin(discount_depths_outlier)]\r\n logger.info(\"Removing sample data where discount depth is not in {a}, {b} sample data points removed...\".format(\r\n a=discount_depths_outlier,\r\n b=outliers.shape[0]))\r\n input_data = input_data[input_data.discount_depth.isin(discount_depths_outlier)]\r\n\r\n if remove_outliers:\r\n logger.info(\"Removing outliers from sample data...\")\r\n\r\n # outlier removal based on negative values and where the baseline is smaller than 100\r\n outliers = input_data[(input_data[output_features[0]] <= 0) & (input_data['total_baseline_qty'] < 100)]\r\n logger.info(\r\n \"Removing all negative values from {a} and where the baseline is smaller than 100, 
\"\r\n \"{b} sample data points removed...\".format(a=output_features[0], b=outliers.shape[0]))\r\n\r\n input_data = input_data[(input_data[output_features[0]] > 0) |\r\n (input_data[output_features[0]] <= 0) & (input_data['total_baseline_qty'] > 100)]\r\n\r\n # zero values which are negative\r\n outliers = input_data[input_data[output_features[0]] < 0]\r\n logger.info(\r\n \"Zeroing all negative values from {a}, \"\r\n \"{b} sample data points removed...\".format(a=output_features[0], b=outliers.shape[0]))\r\n\r\n input_data.loc[input_data[output_features[0]] < 0, output_features[0]] = 0\r\n\r\n # outlier removal based on too high % uplift - set value to 1000%\r\n if 'perc_inc_qty' in list(input_data.columns):\r\n outliers = input_data[input_data['perc_inc_qty'] >= 10]\r\n logger.info(\r\n \"Removing sample data where % qty uplift is greater than 1000%, {b} sample data points removed...\".format(\r\n b=outliers.shape[0]))\r\n input_data = input_data[input_data['perc_inc_qty'] < 10]\r\n\r\n # outlier removal based on quantile in target variable\r\n q = input_data[output_features[0]].quantile(0.95)\r\n\r\n outliers = input_data[input_data[output_features[0]] >= q]\r\n logger.info(\"Based on 95% quantiles, {} sample data points removed...\".format(outliers.shape[0]))\r\n\r\n input_data = input_data[input_data[output_features[0]] < q]\r\n\r\n # Filter on only the input features\r\n total_features = input_features + output_features\r\n input_data = input_data[total_features]\r\n\r\n # Check absent values\r\n null_value_stats_x = input_data[input_features].isnull().sum(axis=0)\r\n logger.info(\"Null values for input features include:\\n{}\".format(null_value_stats_x[null_value_stats_x != 0]))\r\n\r\n null_value_stats_y = input_data[output_features].isnull().sum(axis=0)\r\n logger.info(\"Null values for target variable include:\\n{}\".format(null_value_stats_y[null_value_stats_y != 0]))\r\n\r\n # Throw error if any values are null in y\r\n if input_data[output_features].isnull().values.any():\r\n logger.error(\"Null values found in target data...\")\r\n raise ValueError('Null values found in target data!')\r\n\r\n # Fill remaining absent values in X with -999\r\n input_data.fillna(-999, inplace=True)\r\n\r\n # Describe the dataset\r\n logger.info(\"Summary statistics for numeric features in input data are...\")\r\n logger.info(\"{}\".format(input_data.describe()))\r\n\r\n # Check data types\r\n X = input_data[input_features]\r\n\r\n # If the output feature is uplift %, multiply by 100\r\n if 'p_cal_perc_inc_sale_qty' in output_features:\r\n logger.info(\"Scaling target variable {a} by 100...\".format(a=output_features[0]))\r\n y = input_data[output_features]*100\r\n\r\n else:\r\n y = input_data[output_features]\r\n\r\n logger.info(\"Input dataset data types include:\\n{}\".format(X.dtypes))\r\n logger.info(\"Target variable data types include:\\n{}\".format(y.dtypes))\r\n\r\n # Lets split the data into training and validation sets\r\n X_train, X_validation, y_train, y_validation = train_test_split(X, y, train_size=train_size, random_state=42)\r\n logger.info(\"Training dataset includes {} samples...\".format(X_train.shape[0]))\r\n logger.info(\"Test dataset includes {} samples...\".format(X_validation.shape[0]))\r\n\r\n # create a mapping dictionary (to be used for models which require int categorical cols)\r\n map_dict = {}\r\n\r\n if model == 'CatBoost':\r\n\r\n # Obtain categorical feature index\r\n cat_features_index = [X.columns.get_loc(c) for c in cat_columns if c in X]\r\n\r\n # 
initialise CatBoost regressor\r\n train_model = CatBoostRegressor(iterations=700,\r\n learning_rate=learning_rate,\r\n depth=max_depth,\r\n eval_metric='RMSE',\r\n random_seed=42,\r\n bagging_temperature=0.2,\r\n od_type='Iter',\r\n metric_period=75,\r\n od_wait=100)\r\n\r\n # Fit the model - catboost does not require us to specify integers for cat features\r\n train_model.fit(X_train, y_train,\r\n eval_set=(X_validation, y_validation),\r\n cat_features=cat_features_index,\r\n use_best_model=True)\r\n\r\n pred = train_model.predict(X_validation)\r\n\r\n elif model == 'lightgbm':\r\n\r\n # For lightgbm, we need to convert our categorical features to int\r\n # Loop through categorical cols\r\n for col in cat_columns:\r\n if col in list(X.columns):\r\n # get unique values\r\n unique_vals = X[col].unique()\r\n unique_vals_dict = dict([(val, num) for num, val in enumerate(unique_vals)])\r\n\r\n # map them for the train and test data sets\r\n X_train = X_train.copy()\r\n X_train[col] = X_train[col].map(unique_vals_dict)\r\n X_validation = X_validation.copy()\r\n X_validation[col] = X_validation[col].map(unique_vals_dict)\r\n\r\n # store the mapping for later use\r\n map_dict[col] = unique_vals_dict\r\n\r\n # LightGBM dataset formatting (with categorical variables)\r\n if cat_var_exclusion:\r\n lgtrain = lgb.Dataset(X_train, y_train,\r\n feature_name=input_features)\r\n lgvalid = lgb.Dataset(X_validation, y_validation,\r\n feature_name=input_features)\r\n else:\r\n cat_col = [col for col in cat_columns if col in list(X.columns)]\r\n\r\n lgtrain = lgb.Dataset(X_train, y_train,\r\n feature_name=input_features,\r\n categorical_feature=cat_col)\r\n lgvalid = lgb.Dataset(X_validation, y_validation,\r\n feature_name=input_features,\r\n categorical_feature=cat_col)\r\n\r\n params = {\r\n 'objective': 'regression',\r\n 'metric': 'rmse',\r\n 'num_leaves': num_leaves,\r\n 'max_depth': max_depth,\r\n 'learning_rate': learning_rate,\r\n 'feature_fraction': 0.8,\r\n 'bagging_fraction': 0.8,\r\n 'bagging_freq': 1,\r\n 'boosting_type': 'gbdt',\r\n 'verbosity': -1\r\n }\r\n\r\n train_model = lgb.train(\r\n params,\r\n lgtrain,\r\n num_boost_round=n_iter,\r\n valid_sets=[lgtrain, lgvalid],\r\n valid_names=[\"train\", \"valid\"],\r\n early_stopping_rounds=1000,\r\n verbose_eval=500\r\n )\r\n\r\n pred = train_model.predict(X_validation)\r\n\r\n elif model == 'xgboost':\r\n\r\n # For xgboost, we need to convert our categorical features to int\r\n # There are 3 approaches - one-hot encode, label encode and binary encode\r\n\r\n # Here, for simplicity, we are using label encoders\r\n # Loop through categorical cols\r\n for col in cat_columns:\r\n if col in list(X.columns):\r\n # get unique values\r\n unique_vals = X[col].unique()\r\n unique_vals_dict = dict([(val, num) for num, val in enumerate(unique_vals)])\r\n\r\n # map them for the train and test data sets\r\n X_train = X_train.copy()\r\n X_train[col] = X_train[col].map(unique_vals_dict)\r\n X_validation = X_validation.copy()\r\n X_validation[col] = X_validation[col].map(unique_vals_dict)\r\n\r\n # store the mapping for later use\r\n map_dict[col] = unique_vals_dict\r\n\r\n train_model = xgb.XGBRegressor(objective='reg:linear',\r\n colsample_bytree=0.3,\r\n learning_rate=learning_rate,\r\n max_depth=max_depth,\r\n alpha=10,\r\n n_estimators=n_estimators,\r\n verbosity=2)\r\n\r\n train_model.fit(X_train, y_train)\r\n\r\n pred = train_model.predict(X_validation)\r\n\r\n elif model == 'regression':\r\n\r\n # Use linear regression only if subcategory and brand name 
and segment is included in the list\r\n # Compute linear regression coefficients for the following combinations\r\n # 1) Subcategory and brand\r\n # 2) Segment\r\n # 3) Subcategory\r\n\r\n # Primary regressor variable includes discount_depth_rank\r\n\r\n # Compute coefficients for the remaining fields in the input data set\r\n # Output the R^2, p_value, and stdev for each combination\r\n # Follow a hierarchy when applying the model to each sku\r\n # If the subcategory and brand the sku sits in has an R^2, p_value and stdev smaller/ larger than a given threshold,\r\n # use, the segment model and likewise, when the segment model has an R^2, p_value and stdev smaller/ larger than a given\r\n # threshold, use the subcategory model\r\n\r\n # We will only thus be able to predict values for segments/ categories and subcategories where there has been a\r\n # promotion in the past\r\n if ('category' not in list(input_data[input_features].columns)) or \\\r\n ('subcategory' not in list(input_data[input_features].columns)) or \\\r\n ('segment' not in list(input_data[input_features].columns)) or \\\r\n ('sku_root_id' not in list(input_data[input_features].columns)) or \\\r\n ('promo_mechanic_dd' not in list(input_data[input_features].columns)):\r\n logger.error(\r\n \"Currently performing a linear regression per subcategory and/ or brand and/ or segment with discount depth rank \"\r\n \"as the primary regressor. However subcategory and brand name and segment and discount depth rank is not defined as \"\r\n \"an input variable!\")\r\n raise ValueError('Subcategory and/ or brand name and/ or segment and discount depth rank is not defined as an input variable')\r\n\r\n # For simplicity, use all data to train the model and compute the R2, stdev, intercept and coefficient\r\n logger.info(\"For regression, both train and test datasets will be used to train the model...\")\r\n logger.info(\"Combined sample dataset includes {} samples...\".format(input_data.shape[0]))\r\n\r\n # Loop through each combination and compute the regression\r\n combination = {('sku_root_id', 'promo_mechanic_dd'): 1, ('segment', 'promo_mechanic_dd'): 2,\r\n ('subcategory', 'promo_mechanic_dd'): 3, ('category', 'promo_mechanic_dd'): 4}\r\n all_combination = ('sku_root_id', 'segment', 'subcategory', 'category', 'promo_mechanic_dd')\r\n\r\n # Convert all categorical variables into numeric for regression\r\n input_data_train = input_data.copy()\r\n for col in cat_columns:\r\n if col in list(input_data.columns) and col not in all_combination:\r\n # get unique values\r\n unique_vals = input_data[col].unique()\r\n unique_vals_dict = dict([(val, num) for num, val in enumerate(unique_vals)])\r\n\r\n # map the input dataset\r\n input_data_train[col] = input_data_train[col].map(unique_vals_dict)\r\n\r\n # store the mapping for later use\r\n map_dict[col] = unique_vals_dict\r\n\r\n # Create a dataframe to store the results\r\n train_model_all = pd.DataFrame(columns=['rank', 'agg_fields', 'key', 'model', 'no_data_points', 'outlier_data_points',\r\n 'r2', 'rmse', 'mean_error', 'mae', 'mape', 'f_prob', 'coeff_c0', 'coeff_duration',\r\n 'coeff_in_gondola', 'coeff_in_leaflet', 'p_coeff_c0', 'p_coeff_duration',\r\n 'p_coeff_in_gondola', 'p_coeff_in_leaflet', 'VIF_coeff_c0', 'VIF_coeff_duration',\r\n 'VIF_coeff_in_gondola', 'VIF_coeff_in_leaflet'])\r\n\r\n # Create a dataframe to store the validation set to compute overall metrics\r\n valid_model = pd.DataFrame()\r\n filtered_model = pd.DataFrame()\r\n\r\n for agg_list in combination.keys():\r\n\r\n # 
Training model for combination\r\n logger.info(\"Training linear regression model for {a}...\".format(a=agg_list))\r\n\r\n # get unique values\r\n unique_df = input_data_train.drop_duplicates(list(agg_list))[list(agg_list)].reset_index(drop=True)\r\n logger.info(\"There are {a} unique {b} in the data...\".format(a=unique_df.shape[0], b=agg_list))\r\n\r\n # group by agg_list\r\n input_data_model = input_data_train.groupby(list(agg_list))\r\n\r\n # Select the list of input attributes not in agg_list\r\n training_list = list(set(list(input_data_train[input_features].columns)) - set(all_combination))\r\n logger.debug(\"Training features include {}\".format(training_list))\r\n\r\n for key, group in input_data_model:\r\n\r\n # Convert key to tuple if not\r\n if not isinstance(key, tuple):\r\n key_t = (key,)\r\n else:\r\n key_t = key\r\n\r\n # Train the model for each group\r\n logger.info(\"Training linear regression model for {a}...\".format(a=key_t))\r\n logger.info(\"There are {a} data samples...\".format(a=group.shape[0]))\r\n\r\n n_data_points = group.shape[0]\r\n\r\n # Lets remove all outlier data points with high z scores\r\n # q_group = group[output_features[0]].quantile(0.95)\r\n #\r\n # outliers = group[group[output_features[0]] >= q_group]\r\n # logger.info(\"Removing outlier data points...\")\r\n # logger.info(\"Based on 95% quantiles, {} sample data points removed...\".format(outliers.shape[0]))\r\n #\r\n # outlier_data_points = outliers.shape[0]\r\n #\r\n # group = group[group[output_features[0]] < q_group]\r\n outlier_data_points = 0\r\n\r\n # If there is less than 3 sample data points, then skip\r\n if group.shape[0] < 2:\r\n logger.info(\"Too few data sample needed for training...\")\r\n logger.info(\"Skipping to next group....\")\r\n\r\n train_model_dict = {'rank': combination[agg_list],\r\n 'agg_fields': agg_list,\r\n 'key': key_t,\r\n 'model': None,\r\n 'no_data_points': group.shape[0],\r\n 'outlier_data_points': outlier_data_points,\r\n 'r2': None,\r\n 'rmse': None,\r\n 'mean_error': None,\r\n 'mae': None,\r\n 'mape': None,\r\n 'f_prob': None,\r\n 'coeff_c0': None,\r\n 'coeff_duration': None,\r\n 'coeff_in_gondola': None,\r\n 'coeff_in_leaflet': None,\r\n 'p_coeff_c0': None,\r\n 'p_coeff_duration': None,\r\n 'p_coeff_in_gondola':None,\r\n 'p_coeff_in_leaflet':None,\r\n 'VIF_coeff_c0':None,\r\n 'VIF_coeff_duration':None,\r\n 'VIF_coeff_in_gondola':None,\r\n 'VIF_coeff_in_leaflet':None\r\n }\r\n\r\n # add train model to dataframe\r\n train_model_all = train_model_all.append(train_model_dict, ignore_index=True)\r\n continue\r\n\r\n # Append group to validation dataset\r\n v_group = group[list(all_combination)].copy()\r\n valid_model = valid_model.append(v_group.drop_duplicates())\r\n filtered_model = filtered_model.append(group)\r\n n_data_points_used = group.shape[0]\r\n\r\n t_list = training_list+output_features\r\n group = group[t_list]\r\n\r\n # plot the relationship\r\n # plotScatter(group, \"in_gondola_flag\", output_features[0], \"duration_wks\")\r\n\r\n X_reg = group[training_list]\r\n\r\n # force to add constant if a constant values is already supplied\r\n # this will ensure consistency in output format\r\n X_reg = sm.add_constant(X_reg, has_constant='add')\r\n y_reg = group[output_features]\r\n\r\n # Train robust linear regression model\r\n\r\n reg_model = sm.OLS(y_reg, X_reg).fit()\r\n logger.info(\"Completed model training...\")\r\n\r\n # Log the model results\r\n logger.debug(\"Model results...\")\r\n logger.debug(\"\\n{}\".format(reg_model.summary()))\r\n\r\n mape 
= np.divide(reg_model.resid.values,\r\n y_reg[output_features[0]].values)\r\n mape[mape == np.inf] = 0\r\n mape[mape == -np.inf] = 0\r\n mape = np.median(np.abs(mape))\r\n\r\n # Compute variance inflation factors\r\n VIF = [variance_inflation_factor(X_reg.values, i) for i in range(X_reg.shape[1])]\r\n\r\n train_model_dict = {'rank': combination[agg_list],\r\n 'agg_fields': agg_list,\r\n 'key': key_t,\r\n 'model': reg_model,\r\n 'no_data_points': n_data_points_used,\r\n 'outlier_data_points': outlier_data_points,\r\n 'r2': reg_model.rsquared,\r\n 'rmse': np.sqrt(np.mean(np.square(reg_model.resid.values))),\r\n 'mean_error': np.mean(reg_model.resid.values),\r\n 'mae': np.median(np.abs(reg_model.resid.values)),\r\n 'mape': mape,\r\n 'f_prob': reg_model.f_pvalue,\r\n 'coeff_c0': reg_model.params['const'],\r\n 'coeff_duration': reg_model.params['duration_wks'],\r\n 'coeff_in_gondola': reg_model.params['in_gondola_flag'],\r\n 'coeff_in_leaflet': reg_model.params['in_leaflet_flag'],\r\n 'p_coeff_c0': reg_model.pvalues['const'],\r\n 'p_coeff_duration': reg_model.pvalues['duration_wks'],\r\n 'p_coeff_in_gondola':reg_model.pvalues['in_gondola_flag'],\r\n 'p_coeff_in_leaflet':reg_model.pvalues['in_leaflet_flag'],\r\n 'VIF_coeff_c0':VIF[0],\r\n 'VIF_coeff_duration':VIF[3],\r\n 'VIF_coeff_in_gondola':VIF[2],\r\n 'VIF_coeff_in_leaflet':VIF[1]}\r\n\r\n # add train model to dataframe\r\n train_model_all = train_model_all.append(train_model_dict, ignore_index=True)\r\n\r\n\r\n if model in ('catboost', 'lightgbm', 'xgboost'):\r\n\r\n # Evaluate the model\r\n rmse = np.sqrt(mean_squared_error(y_validation, pred))\r\n logger.info(\"RMSE: {}\".format(rmse))\r\n\r\n mean_error = np.mean(y_validation[output_features[0]].values - pred)\r\n logger.info(\"Mean Error: {}\".format(mean_error))\r\n\r\n mae = np.median(np.absolute(y_validation[output_features[0]].values - pred))\r\n logger.info(\"MAE: {}\".format(mae))\r\n\r\n mape = np.divide(y_validation[output_features[0]].values - pred, y_validation[output_features[0]].values)\r\n mape[mape == np.inf] = 0\r\n mape[mape == -np.inf] = 0\r\n mape = np.median(np.abs(mape))\r\n logger.info(\"MAPE: {}%\".format(mape * 100))\r\n\r\n val_std = np.std(y_validation[output_features[0]].values)\r\n logger.info(\"Benchmark STD: {}\".format(val_std))\r\n\r\n val_mean = np.mean(y_validation[output_features[0]].values)\r\n logger.info(\"Benchmark Mean Error: {}\".format(val_mean))\r\n\r\n val_mae = np.mean(np.absolute(y_validation[output_features[0]].values))\r\n logger.info(\"Benchmark MAE: {}\".format(val_mae))\r\n\r\n logger.info(\"Benchmark MAPE: -100%\")\r\n\r\n # plot the results\r\n plothist(y_validation, pred)\r\n\r\n # plot the feature importance\r\n plotImp(model, train_model, X, num=20)\r\n\r\n join_fields = None\r\n filtered_model = None\r\n\r\n\r\n elif model in ('regression'):\r\n\r\n # save the full model\r\n train_model_all.to_csv(\"train_model_all.csv\", encoding='utf-8', index=False)\r\n\r\n # get the unique values in the validation model\r\n valid_model = valid_model.drop_duplicates(list(all_combination))[list(all_combination)].reset_index(drop=True)\r\n valid_model['p_id'] = valid_model[list(all_combination)].apply(tuple, axis=1)\r\n\r\n # For each line in valid_model, find the corresponding rows in train model where key is in p_id\r\n train_model = pd.DataFrame()\r\n logger.info(\"Computing best model for each {a}\".format(a=all_combination))\r\n for index, row in valid_model.iterrows():\r\n\r\n # logging progress\r\n logger.info(\"{a} out of {b} permutations 
complete...\".format(a=index, b=valid_model.shape[0]))\r\n\r\n # find all the rows in train model that satisfy the criterion\r\n train_model_all['valid'] = train_model_all.apply(lambda x: set(x.key).issubset(row.p_id), axis=1)\r\n\r\n # filter on the rows that are true\r\n valid_rows = train_model_all[train_model_all['valid'] == True]\r\n\r\n # # Using the rank condition, lets filter on only the model which has the most favorable property\r\n # # R2 threshold > 0.2 and\r\n # if len(valid_rows[(valid_rows['r2'] >= 0.2)]) >= 1:\r\n # valid_rows = valid_rows[valid_rows['r2'] >= 0.2]\r\n\r\n # valid_rows = valid_rows.sort_values(by='mape', ascending=True).head(1)\r\n\r\n # Using the rank condition, lets filter where the # data samples is > min_data samples, then sort by rank\r\n # and take the bottom row\r\n if len(valid_rows[(valid_rows['no_data_points'] >= min_samples)]) >= 1:\r\n valid_rows = valid_rows[valid_rows['no_data_points'] >= min_samples]\r\n valid_rows['valid_dp'] = True\r\n else:\r\n valid_rows['valid_dp'] = False\r\n\r\n valid_rows = valid_rows.sort_values(by='rank', ascending=True).head(1)\r\n\r\n # Include the valid model all combinations cols\r\n valid_rows.reset_index(drop=True, inplace=True)\r\n row_df = row.to_frame().T\r\n row_df.reset_index(drop=True, inplace=True)\r\n\r\n valid_rows = pd.concat([valid_rows, row_df], axis=1)\r\n\r\n train_model = train_model.append(valid_rows)\r\n\r\n # Compute aggregate statistics\r\n rmse = train_model['rmse'].mean()\r\n logger.info(\"RMSE: {}\".format(rmse))\r\n\r\n mean_error = train_model['mean_error'].mean()\r\n logger.info(\"Mean Error: {}\".format(mean_error))\r\n\r\n mae = train_model['mae'].median()\r\n logger.info(\"MAE: {}\".format(mae))\r\n\r\n mape = train_model['mape'].median()\r\n logger.info(\"MAPE: {}%\".format(mape * 100))\r\n\r\n mae_std = train_model['mae'].std()\r\n logger.info(\"MAE_std: {}\".format(mae_std))\r\n\r\n mape_std = train_model['mape'].std()\r\n logger.info(\"MAPE_std: {}%\".format(mape_std * 100))\r\n\r\n R2_avg = train_model['r2'].mean()\r\n logger.info(\"R^2_avg: {}\".format(R2_avg))\r\n\r\n join_fields = list(all_combination)\r\n\r\n return train_model, map_dict, mae, mape, join_fields, filtered_model",
"def fitting_lr_and_rf(file, test_yr, fit_lr, fit_rf):\n df = pd.read_csv(file)\n\n mask_test = (df.year == test_yr)\n mask_train = (df.year >= test_yr-6) & (df.year <= test_yr-1)\n\n target = 'wkts'\n\n features_full = ['year1_mtchs_pld', 'year2_mtchs_pld', 'year3_mtchs_pld', 'year4_mtchs_pld', 'year5_mtchs_pld',\n 'year1_wkts_pm', 'year2_wkts_pm', 'year3_wkts_pm','year4_wkts_pm', 'year5_wkts_pm',\n 'bowler_agnst_oppo', 'oppo_agnst_bowl_typ', 'bowl_home_adv', 'ground_bowl_typ']\n features_small = ['year1_wkts_pm', 'year2_wkts_pm', 'year3_wkts_pm', 'year4_wkts_pm', 'year5_wkts_pm',\n 'bowler_agnst_oppo', 'oppo_agnst_bowl_typ', 'bowl_home_adv', 'ground_bowl_typ']\n features_smaller = ['year1_wkts_pm', 'year2_wkts_pm', 'year3_wkts_pm', 'year4_wkts_pm', 'year5_wkts_pm',\n 'bowl_home_adv', 'ground_bowl_typ']\n features_smallest = ['year1_wkts_pm', 'year2_wkts_pm', 'year3_wkts_pm', 'year4_wkts_pm', 'year5_wkts_pm']\n\n print(\"*********************************************\")\n print(\"**** RUNNING MODELS FOR FULL FEATURE SET ****\")\n print(\"*********************************************\")\n\n features = features_full.copy()\n\n X_train = df[mask_train][features]\n y_train = df[mask_train][target]\n X_test = df[mask_test][features]\n y_test = df[mask_test][target]\n\n if fit_lr:\n fit_lr_model(df, X_train, y_train, X_test, y_test, mask_test)\n\n if fit_rf:\n fit_rf_model(df, X_train, y_train, X_test, y_test, mask_test)\n\n\n print(\"**********************************************\")\n print(\"**** RUNNING MODELS FOR SMALL FEATURE SET ****\")\n print(\"**********************************************\")\n\n features = features_small.copy()\n\n X_train = df[mask_train][features]\n y_train = df[mask_train][target]\n X_test = df[mask_test][features]\n y_test = df[mask_test][target]\n\n if fit_lr:\n fit_lr_model(df, X_train, y_train, X_test, y_test, mask_test)\n\n if fit_rf:\n fit_rf_model(df, X_train, y_train, X_test, y_test, mask_test)\n\n\n print(\"************************************************\")\n print(\"**** RUNNING MODELS FOR SMALLER FEATURE SET ****\")\n print(\"************************************************\")\n\n features = features_smaller.copy()\n\n X_train = df[mask_train][features]\n y_train = df[mask_train][target]\n X_test = df[mask_test][features]\n y_test = df[mask_test][target]\n\n if fit_lr:\n fit_lr_model(df, X_train, y_train, X_test, y_test, mask_test)\n\n if fit_rf:\n fit_rf_model(df, X_train, y_train, X_test, y_test, mask_test)\n\n\n print(\"*************************************************\")\n print(\"**** RUNNING MODELS FOR SMALLEST FEATURE SET ****\")\n print(\"*************************************************\")\n\n features = features_smallest.copy()\n\n X_train = df[mask_train][features]\n y_train = df[mask_train][target]\n X_test = df[mask_test][features]\n y_test = df[mask_test][target]\n\n if fit_lr:\n fit_lr_model(df, X_train, y_train, X_test, y_test, mask_test)\n\n if fit_rf:\n fit_rf_model(df, X_train, y_train, X_test, y_test, mask_test)"
]
| [
"0.6257319",
"0.6156836",
"0.6149533",
"0.61292195",
"0.6066113",
"0.60474396",
"0.5980389",
"0.59172255",
"0.58837765",
"0.58806056",
"0.5880044",
"0.5877682",
"0.58746827",
"0.587393",
"0.58707666",
"0.5865389",
"0.5858618",
"0.5855791",
"0.58530575",
"0.58465743",
"0.58410573",
"0.58358264",
"0.5833366",
"0.58234215",
"0.5821901",
"0.58194166",
"0.58164704",
"0.58152455",
"0.5812092",
"0.57967055"
]
| 0.7580361 | 0 |
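The record ending above aggregates per-model errors — means for RMSE, mean error and R², medians for MAE and MAPE (reported as a percentage). A minimal runnable sketch of that aggregation step, assuming a pandas DataFrame with the same column names and made-up values (an illustration, not the record's actual train_model frame):

import pandas as pd

# Hypothetical stand-in for the per-model error table assembled in the record.
train_model = pd.DataFrame({
    "rmse": [1.2, 0.9, 1.5],
    "mean_error": [0.10, -0.20, 0.05],
    "mae": [0.8, 0.7, 1.1],
    "mape": [0.12, 0.09, 0.20],
    "r2": [0.30, 0.45, 0.25],
})

# Same aggregation pattern as the record: means for rmse/mean_error/r2,
# medians for mae/mape, with MAPE scaled to a percentage for reporting.
print("RMSE: {}".format(train_model["rmse"].mean()))
print("Mean Error: {}".format(train_model["mean_error"].mean()))
print("MAE: {}".format(train_model["mae"].median()))
print("MAPE: {}%".format(train_model["mape"].median() * 100))
print("R^2_avg: {}".format(train_model["r2"].mean()))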
Checks integrity of feature model distributions. CDF has to be bounded by one. | def check_cdfIntegrity(self, step):
# Selecting bins automatically:
x_max = self.onpower_train.max().values[0]
x_min = 0
step = 1
x_onpower = np.arange(x_min, x_max, step).reshape(-1, 1)
x_max = 0
x_min = self.offpower_train.min().values[0]
step = 1
x_offpower = np.arange(x_min, x_max, step).reshape(-1, 1)
x_max = self.duration_train.max().values[0]
x_min = 0
step = 1
x_duration = np.arange(x_min, x_max, step).reshape(-1, 1)
# Evaluating score for:
# Onpower
y_onpower = self.__pdf2(self.onpower, x_onpower)
print("Onpower cdf: " + str(y_onpower.sum()))
# Offpower
y_offpower = self.__pdf2(self.offpower, x_offpower)
print("Offpower cdf: " + str(y_offpower.sum()))
# duration
y_duration = self.__pdf2(self.duration, x_duration)
print("Duration cdf: " + str(y_duration.sum()))
# Plots:
# fig1 = plt.figure()
# ax1 = fig1.add_subplot(311)
# ax2 = fig1.add_subplot(312)
# ax3 = fig1.add_subplot(313)
# ax1.plot(x_onpower, y_onpower)
# ax1.set_title("PDF CDF: Onpower")
# ax1.set_ylabel("density")
# ax1.set_xlabel("Watts")
# ax2.plot(x_offpower, y_offpower)
# ax2.set_title(" PDF CDF: Offpower")
# ax2.set_ylabel("denisty")
# ax2.set_xlabel("Watts")
# ax3.plot(x_duration, y_duration)
# ax3.set_title("PDF CDF: Duration")
# ax3.set_ylabel("density")
# ax3.set_xlabel("Seconds") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def part1c_0():\n xs = exampleInput\n T = submission.computeEdgeMarginals(simpleCRF, xs)\n for t in T:\n grader.requireIsEqual( 1.0, sum(t.values()) )",
"def initialize_cdesf(self) -> None:\n self.nyquist = self.check_point_cases * 2\n # initialize PMG\n self.process_model_graph = initialize_graph(nx.DiGraph(), self.cases)\n # compute case metrics for initial cases\n self.initialize_case_metrics()\n\n # initialise denstream\n self.denstream.dbscan(self.cases)\n\n groups = self.denstream.generate_clusters()\n for group in groups[0]:\n for cluster in group:\n self.active_core_clusters.add(cluster.id)\n\n # plot\n if self.gen_plot:\n normals, outliers = [], []\n for case in self.cases:\n if not np.isnan(np.sum(case.point)) and self.denstream.is_normal(case.point):\n normals.append(case.point)\n else:\n outliers.append(case.point)\n feature_space(self.name,\n self.event_index,\n self.cp_count,\n normals,\n outliers,\n self.denstream.generate_clusters(),\n self.denstream.generate_outlier_clusters(),\n self.denstream.epsilon,\n self.feature_space_plot_path)\n\n # metrics\n if self.gen_metrics:\n for case in self.cases:\n self.metrics.compute_case_metrics(self.event_index, self.check_point, self.cp_count,\n case, self.denstream.is_normal(case.point))\n self.metrics.save_case_metrics_on_check_point()\n self.metrics.compute_cluster_metrics(self.event_index, self.check_point, self.cp_count,\n self.denstream.generate_clusters(),\n self.denstream.generate_outlier_clusters())\n self.metrics.save_cluster_metrics_on_check_point()\n\n if len(self.process_model_graph.edges) > 0:\n self.metrics.save_pmg_on_check_point(self.process_model_graph, self.cp_count)\n\n self.initialized = True",
"def consistency_checker(model,universals,existentials):\n universal_set=set(universals)\n existential_set=set(existentials)\n #Additionally to the universal and existential variables the model may\n #contain additional auxiliary variables -- e.g. for setting default values.\n #We consider these variables such as the existential variables.\n auxiliary_variables_in_model={abs(l) for clause in model for l in clause \n if (not abs(l) in universal_set) and (not abs(l) in existential_set)}\n existential_set = existential_set.union(auxiliary_variables_in_model)\n result, certificate = checkModelQBF(model, universal_set, existential_set)\n return result",
"def full_modeling(target, pre_clust_df, model_path, id_column):\n targets = [x for x in pre_clust_df.columns if x[:8] == 'default_']\n # folders for result saving\n folder_auc = model_path + '/pictures/roc_auc'\n folder_column_pics = model_path + '/pictures'\n folder_model_output = model_path + '/model_output'\n create_folder(folder_auc)\n create_folder(folder_model_output)\n \n #take only matured loans\n pre_clust_df = pre_clust_df[pre_clust_df[target]>-.5] \n pre_clust_df = pre_clust_df.set_index(id_column)\n\n #drop all target columns except current tarhet column\n drop_targets = [col for col in targets if col != target] \n drop_targets = list(set(drop_targets) & set(pre_clust_df))\n pre_clust_df = pre_clust_df.drop(drop_targets, 1)\n\n #transform continous variables to bucket columns\n dfPreWoe, clustVarsInfo = sf.continuousVariables(pre_clust_df, columnLimit=10) \n #trassform to woe columns\n dfPostWoe, woeVarsInfo = sf.woeVariables(dfPreWoe,target)\n\n #look at information value of variables\n gg = sf.giniGrowth(dfPostWoe,woeVarsInfo,target)\n #chose best columns\n goodColumns, badColumns = sf.chooseColumnsFromIT(gg, badFlag=target, min_limit=0.01)\n\n #create log regression model\n model = sf.logReg(preLR=dfPostWoe[goodColumns], badFlag=target)\n #save roc_auc picture \n model.print_roc_curve(to_file=True, folder=folder_auc)\n\n #generate doc information about model and variables\n intercept, woeOut = sf.modelOutput(folder_model_output, woeVarsInfo, goodColumns, model, gg, rewrite=True)\n\n #generate and save pictures of feature distribution\n bad_columns = woe.save_pictures(woeVarsInfo, folder = folder_column_pics, badRateLimit=100)",
"def feature_worth(model, train):\n error = cv(data=train, folds=5, model=model)\n print error\n model.fit(train)\n for var in model.variables:\n print var\n print model.fitted_models[var].feature_importances_",
"def clus_func(df_all, n_components, feat_subset):\n\n df = df_all[featureSet_dic[feat_subset]].copy()\n\n X = df.values\n\n # # Fit a Gaussian mixture with EM\n # gmm_model = mixture.GaussianMixture(n_components=n_components,\n # covariance_type=cv_type,\n # random_state=1,\n # n_init=10)\n # gmm_model = gmm_model.fit(X)\n\n model_path = os.path.join(CURR_PATH, 'clustering_model') # create directiry for the current time\n model_name = os.path.join(model_path, 'gmm.joblib')\n gmm_model = joblib.load(model_name)\n\n # predic labels & probabilities\n labels = gmm_model.predict(X)\n labels_prob = gmm_model.predict_proba(X)\n\n # adding all droped features (for plotting purposes) of the standardized dataframe\n added_feat = [feat for feat in data_columns if feat not in df.columns]\n df[added_feat] = df_all_stand[added_feat].copy()\n df = df[data_columns]\n\n # adding the labels to the dataframe\n df.insert(0, 'Clus_label', labels)\n\n for n in range(n_components):\n df['Prob_L'+str(n)] = labels_prob[:, n]\n\n return gmm_model, df # export all gmm models and a dictionary of all labeled datasets",
"def graphConsistency(self, feature):\n # get a list of all constraints in which feature appears in the head\n headConstraints = self.getHeadConstraints(feature.name)\n # make a copy of the constraints list - we will treat this like a stack\n constraintList = headConstraints[:]\n # loop through all the constraints\n while len(constraintList) > 0:\n if (len(constraintList) % 100 == 0):\n print \"\\tconsistency checking constraints = \" + str(len(constraintList))\n # grab a constraint off the stack\n constraint = constraintList.pop()\n # check the constraint for arc consistency\n consistent = self.arcConsistency(constraint)\n # if we removed all the values from the domain of the tail then we need to backtrack\n if (len(constraint.tail.domain) == 0):\n return False\n # if the arc wasn't consistent then we need to add back all the constraints\n # with a head equal to the tail of the changed constraint to the queue\n constraintsAdded = 0\n if (not consistent):\n # get a list of constraints where the tail feature we just changed appears as\n # the head\n reCheckConstraints = self.getHeadConstraints(constraint.tail.name)\n # go through the list, add back all constraints that are not already in the stack\n for c in reCheckConstraints:\n # if the constraint is not already in the stack\n if not c in constraintList:\n # put it at the bottom of the stack\n constraintList.insert(0, c)\n constraintsAdded += 1\n print \"\\t\\tNumber of constraints added: \" + str(constraintsAdded)\n return True",
"def dfd(df, accuracy, index=None):\n partitions = {}\n masks = Masks(df.columns)\n non_uniq = set(df.columns)\n unique_attrs = set()\n dependencies = DfdDependencies(df.columns)\n for i in non_uniq.copy():\n if df[i].is_unique or i == index:\n unique_attrs.add(i)\n non_uniq.remove(i)\n dependencies.add_unique_lhs(i)\n for i in tqdm(non_uniq):\n lhss = find_LHSs(i, non_uniq, df, partitions, accuracy, masks)\n dependencies.add_LHSs(i, lhss)\n return dependencies",
"def fairness_discrepancy(props, n_classes, norm=0):\n # unique, freq = np.unique(data, return_counts=True)\n # props = freq / len(data) #Proportion of data that belongs to that data\n \n # #------------------Modification to correct the zero support problem------------------------------------------------\n # temp=np.zeros(n_classes)\n # temp[unique]=props\n # props=temp\n # #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n #(Remove Normalisation)\n l2_fair_d = np.sqrt(((props - truth)**2).sum())\n l1_fair_d = abs(props - truth).sum()\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes) \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"L2\"), l1_fair_d/metric_max(n_classes,\"L1\"),info_spec/metric_max(n_classes,\"Is\"),specificity,wd/metric_max(n_classes,\"Wd\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity",
"def __init__(self,db):\n self._numFeatures = len(db.keys)\n self._numEntries = len(db.entries)\n numMissing = 0\n if isinstance(db.entries[0],dict):\n #already sparse database given as input\n self.featureMatrix = None\n self.featureDicts = [{} for i in range(self._numFeatures)]\n self.discreteFeature = [True]*self._numFeatures\n for i in xrange(self._numFeatures):\n for j in xrange(self._numEntries):\n if i in db.entries[j]:\n v = db.entries[j][i]\n if v != int(v):\n self.discreteFeature[i] = False\n break\n self.entryLists = [[] for i in range(self._numFeatures)]\n self.featureSets = []\n for i in xrange(self._numEntries):\n flist = []\n for j in xrange(self._numFeatures):\n if j in db.entries[i]:\n flist.append(j)\n self.entryLists[j].append(i)\n self.featureDicts[j][i] = db.entries[i][j]\n else:\n numMissing += 1\n self.featureSets.append(set(flist))\n else:\n featureMatrix = np.array(db.entries,dtype=np.float_)\n self.featureMatrix = np.asfortranarray(featureMatrix).T\n self.featureDicts = [{} for i in range(self._numFeatures)]\n self.discreteFeature = []\n for i in xrange(self.featureMatrix.shape[0]):\n self.discreteFeature.append(not any(v != int(v) for v in self.featureMatrix[i,:] if not np.isnan(v)))\n self.entryLists = [[] for i in range(self._numFeatures)]\n self.featureSets = []\n for i in xrange(self._numEntries):\n flist = []\n for j in xrange(self._numFeatures):\n if not np.isnan(featureMatrix[i,j]):\n flist.append(j)\n self.entryLists[j].append(i)\n self.featureDicts[j][i] = featureMatrix[i,j]\n else:\n numMissing += 1\n self.featureSets.append(set(flist))\n if numMissing == 0:\n self.featureSets = None\n self.featureDicts = None\n else:\n self.featureMatrix = None\n self.sparsity = float(numMissing) / (self._numFeatures*self._numEntries)",
"def controller(df, func):\n # Initialization: Generate computational graph for each attribute which will be on RHS\n schema = df.columns\n computational_graph = dict()\n FDs = []\n for RHS in schema:\n computational_graph[RHS] = generate_computational_graph(RHS, schema)\n\n for level in range(3):\n # Get current level candidates\n current_level_candidates = dict()\n for RHS in computational_graph.keys():\n current_level_candidates[RHS] = get_candidates(level, computational_graph[RHS])\n\n # print('candidates:',current_level_candidates)\n # Use current_level candidates as an input to FD-functions for each level, func will return discovered (soft/delta)functional dependencies\n tFDs = func(level, df, current_level_candidates)\n # print('FDs:',tFDs)\n # print(tFDs)\n FDs.extend(tFDs)\n # Transform res into a dictionary where key: RHS value: a list of LHS where candidates are in the form of sets\n current_level_result = transform_res(tFDs)\n # print(current_level_result)\n\n # Prune graphs according to feedback of FD-functions\n # print(f\"level:{level}, computatioanl_graph_key:{computational_graph.keys()},current_level_result_key:{current_level_result.keys()}\")\n for RHS in computational_graph.keys():\n if RHS in current_level_result.keys():\n computational_graph[RHS] = prune_graph(level, current_level_result[RHS], computational_graph[RHS])\n\n return FDs",
"def dataClust(resAttrDF, infCol = 'Dollars', resName = None):\n \n if resName is None:\n raise Exception('**** RESTAURANT NAME WAS NOT PROVIDED ****')\n \n ## COPY AND PREPROCESS RESTAURANT ATTRIBUTE DATA\n print(f'\\n**** PREPROCESSING AND CLUSTERING DATA ACCORDING TO...{infCol.upper()} COLUMN ****')\n\n k_clust = resAttrDF.copy()\n k_clust = k_clust.reset_index(drop = True)\n \n labelEncoder = LabelEncoder()\n k_clust['Name'] = labelEncoder.fit_transform(k_clust['Name'])\n for col in k_clust.columns:\n if k_clust[col].dtypes == 'object':\n k_clust[col] = pd.to_numeric(k_clust[col])\n\n kprot_data = k_clust.copy()\n for c in k_clust.select_dtypes(exclude='object').columns:\n pt = PowerTransformer()\n kprot_data[c] = pt.fit_transform(np.array(kprot_data[c]).reshape(-1, 1))\n\n categorical_columns = [0] ## MAKE SURE TO SPECIFY CURRECT INDICES\n\n ## ACTUAL CLUSTERING\n if infCol != 'Dollars':\n kproto = KPrototypes(n_clusters= len(k_clust[infCol].unique()), init='Cao', n_jobs = 4)\n clusters = kproto.fit_predict(kprot_data, categorical=categorical_columns)\n else:\n kproto = KPrototypes(n_clusters= len(k_clust['Dollars'].unique()), init='Cao', n_jobs = 4)\n clusters = kproto.fit_predict(kprot_data, categorical=categorical_columns) \n\n ## PRINT COUNT OF EACH CLUSTER GROUP\n print('The count for each cluster group is printed below')\n pd.Series(clusters).value_counts()\n \n ## EVALUATE CLUSTER ACCURACY WITH LGBMCLASSIFIER\n clf_kp = lgb.LGBMClassifier(colsample_by_tree=0.8, random_state=1)\n cv_scores_kp = cross_val_score(clf_kp, k_clust, clusters, scoring='f1_weighted')\n print(f'CV F1 score for K-Prototypes clusters is {np.mean(cv_scores_kp)}')\n\n ## PLOT INFLUENTIAL COLOUMNS\n clf_kp.fit(k_clust, clusters)\n explainer_kp = shap.TreeExplainer(clf_kp)\n shap_values_kp = explainer_kp.shap_values(k_clust)\n shap.summary_plot(shap_values_kp, k_clust, plot_type=\"bar\", plot_size=(15, 10))\n\n ## ADD CLUSTERS TO ORIGINAL DATAFRAME AND INVERSE LABEL ENCODE RESTAURANT NAMES\n k_clust['Cluster'] = clusters\n k_clust['Name'] = labelEncoder.inverse_transform(k_clust['Name'])\n\n ## FILTER RESTAURNAT CLUSTER OF CHOICE\n clusterVal = clusters[list(k_clust['Name']).index(resName)]\n k_clust = k_clust[k_clust['Cluster'] == clusterVal]\n k_clust = k_clust.reset_index(drop = True)\n k_clust = k_clust[['Name', 'ZipCode', 'Dollars', 'Photos']]\n\n print('**** CLUSTERING COMPLETED AND SAVING CLUSTER DATAFRAME LOCALLY ****\\n')\n resFileName = resName.replace(' ', '_')\n fileName = f'{resFileName.upper()}_CLUSTER_DATA.csv'\n k_clust.to_csv(fileName)\n\n return k_clust",
"def cross_validate(featureFile, nFolds, verbosity = False, percentTData = 1., extype='attribution'):\n oData,aData = importC5(featureFile)\n nAuthors = len(set(aData))\n if extype == 'attribution' and np.mean(Counter(aData).values()) != Counter(aData).values()[0]:\n print('Number of docs per author should be equal in attribution experiment')\n docsPerFold = len(oData) / nFolds\n cMatrix = np.zeros( (nAuthors, nAuthors) )\n\n for N in range(0,nFolds):\n testAuthors = list()\n trainAuthors= list()\n testData = list()\n trainData = list()\n for idv in range(0,len(oData)):\n if (N+idv) % nFolds == 0:\n testData.append(oData[idv])\n testAuthors.append(aData[idv])\n else:\n trainData.append(oData[idv])\n trainAuthors.append(aData[idv]) \n teFile = '%s.cvtest' % (os.path.splitext(featureFile)[0])\n trFile = '%s.cvtrain' % (os.path.splitext(featureFile)[0])\n tAmount = int(round(len(trainAuthors) * percentTData)) # limit training data\n exportFoldFile(testData, testAuthors, teFile)\n exportFoldFile(trainData[0:tAmount], trainAuthors[0:tAmount], trFile)\n predict = classify(trFile, teFile, len(oData[0]))\n if extype != 'attribution':\n cMatrix += confusionMatrix(testAuthors, predict, extype)\n os.remove(teFile)\n os.remove(trFile)\n if percentTData != 1.0: print('Ran CV only with %.f %% (%d docs) of training data.' % (percentTData * 100, tAmount))\n return cMatrix",
"def distribution(self, env):\n pass",
"def part1c_1():\n xs = exampleInput\n T = [ Counter({('-BEGIN-', '-FEAT-'): 0.561, ('-BEGIN-', '-SIZE-'): 0.439}),\n Counter({('-FEAT-', '-SIZE-'): 0.463, ('-SIZE-', '-SIZE-'): 0.343, \n ('-SIZE-', '-FEAT-'): 0.096, ('-FEAT-', '-FEAT-'): 0.096}),\n Counter({('-SIZE-', '-SIZE-'): 0.590, ('-SIZE-', '-FEAT-'): 0.217,\n ('-FEAT-', '-SIZE-'): 0.151, ('-FEAT-', '-FEAT-'): 0.041})\n ]\n T_ = submission.computeEdgeMarginals(simpleCRF, xs)\n for t, t_ in zip(T, T_):\n grader.requireIsTrue( Counters.approximateEquals(t, t_) )",
"def __init__(self):\n super().__init__()\n self.upperBoundUsed = False # True if the distribution is right truncated\n self.lowerBoundUsed = False # True if the distribution is left truncated\n self.hasInfiniteBound = False # True if the untruncated distribution has bounds of +- system max\n self.upperBound = None # Right bound\n self.lowerBound = None # Left bound\n self.__adjustmentType = '' # this describe how the re-normalization to preserve the probability should be done for truncated distributions\n self.dimensionality = None # Dimensionality of the distribution (1D or ND)\n self.distType = None # Distribution type (continuous or discrete)\n self.memory = False # This variable flags if the distribution has history dependence in the sampling process (True) or not (False)\n self.printTag = 'DISTRIBUTIONS'\n self.preferredPolynomials = None # best polynomial for probability-weighted norm of error\n self.preferredQuadrature = None # best quadrature for probability-weighted norm of error\n self.compatibleQuadrature = [] #list of compatible quadratures\n self.convertToDistrDict = {} #dict of methods keyed on quadrature types to convert points from quadrature measure and domain to distribution measure and domain\n self.convertToQuadDict = {} #dict of methods keyed on quadrature types to convert points from distribution measure and domain to quadrature measure and domain\n self.measureNormDict = {} #dict of methods keyed on quadrature types to provide scalar adjustment for measure transformation (from quad to distr)\n self.convertToDistrDict['CDFLegendre'] = self.CDFconvertToDistr\n self.convertToQuadDict ['CDFLegendre'] = self.CDFconvertToQuad\n self.measureNormDict ['CDFLegendre'] = self.CDFMeasureNorm\n self.convertToDistrDict['CDFClenshawCurtis'] = self.CDFconvertToDistr\n self.convertToQuadDict ['CDFClenshawCurtis'] = self.CDFconvertToQuad\n self.measureNormDict ['CDFClenshawCurtis'] = self.CDFMeasureNorm",
"def test_fit_distribution_arg(self):\n # Setup\n distribution = 'copulas.univariate.gaussian_kde.GaussianKDE'\n copula = GaussianMultivariate(distribution=distribution)\n\n # Run\n copula.fit(self.data)\n\n # Check\n assert copula.distribution == 'copulas.univariate.gaussian_kde.GaussianKDE'\n\n for i, key in enumerate(self.data.columns):\n assert copula.columns[i] == key\n assert get_qualified_name(copula.univariates[i].__class__) == copula.distribution\n\n expected_covariance = copula._get_covariance(self.data)\n assert (copula.covariance == expected_covariance).all().all()",
"def fairness_discrepancy(data, n_classes, norm=0):\n unique, freq = np.unique(data, return_counts=True)\n props = freq / len(data) #Proportion of data that belongs to that data\n \n #------------------Modification to correct the zero support problem------------------------------------------------\n temp=np.zeros(n_classes)\n temp[unique]=props\n props=temp\n #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n l2_fair_d = np.sqrt(((props - truth)**2).sum())/n_classes\n l1_fair_d = abs(props - truth).sum()/n_classes\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes)/n_classes \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n #Create array to populate proportions\n # props2=np.zeros(n_classes)\n # props2[unique]=props\n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n for i in props:\n f.write(\"%f \"%(i))\n f.write(\"\\n\")\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity,wd/metric_max(n_classes,\"wd\"),wds/metric_max(n_classes,\"wds\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity",
"def eg4(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg4_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n noise_feature = np.random.randn(n, p_noise)\n stable_feature_dependent = np.zeros([n, p_stable])\n stable_feature_independent = np.random.randn(n, p_stable)\n for i in range(p_stable):\n stable_feature_dependent[:, i] = noise_feature[:, i % p_noise] + noise_feature[:,\n (i + 1) % p_noise] + 2 * np.random.randn(\n n) # still need noise\n stable_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n stable_depend_label = np.concatenate([stable_depend_label] * p_stable, axis=1)\n stable_feature = np.where(stable_depend_label < depend_ratio, stable_feature_dependent,\n stable_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n Y = np.matmul(stable_feature, b) + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg4'\n return data\n\n data_train = eg4_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg4_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test",
"def cv_training(\n db: audformat.Database,\n partitioning: str,\n features: pd.DataFrame,\n normalization: str,\n root: str\n):\n\n df = db['covid'].df\n df = df.loc[~df['covid'].isna()]\n df['covid'] = df['covid'].apply(lambda x: 'positive' if x else 'negative')\n df['speaker'] = db['files'].get(index=df.index)['speaker']\n folds = sorted(list(set([x.split('.')[-2] for x in db.tables if f'folds.{partitioning}' in x])))\n\n metrics = {\n 'F1': audmetric.unweighted_average_fscore,\n 'UAR': audmetric.unweighted_average_recall,\n 'ACC': audmetric.accuracy\n }\n\n if not os.path.exists(os.path.join(root, 'results.csv')):\n for fold in folds:\n\n def get_fold(db, fold_name):\n df = db[f'folds.{partitioning}.{fold}.{fold_name}'].df\n df['speaker'] = db['files'].get(index=df.index)['speaker']\n df = df.loc[~df['covid'].isna()]\n df['covid'] = df['covid'].apply(lambda x: 'positive' if x else 'negative')\n return df\n df_train = get_fold(db, 'train')\n df_dev = get_fold(db, 'dev')\n df_test = get_fold(db, 'test')\n\n features = features.fillna(0)\n\n c_params = [\n .0001, \n .0005, \n .001, \n .005, \n .01, \n .05, \n .1, \n .5, \n 1\n ]\n\n steps = []\n if normalization == 'standard':\n # normalization performed on the fly for each fold\n steps.append(('scale', StandardScaler()))\n steps.append(('classify', SVC(kernel='rbf', probability=True)))\n\n max_f1 = 0\n best_c = None\n for c_param in audeer.progress_bar(\n c_params,\n total=len(c_params),\n desc='LOSO',\n disable=True\n ):\n \n clf = Pipeline(steps)\n clf.set_params(**{'classify__C': c_param})\n clf.fit(\n features.loc[df_train.index],\n df_train['covid'],\n )\n pred = clf.predict(features.loc[df_dev.index])\n f1_score = audmetric.unweighted_average_fscore(df_dev['covid'], pred)\n if f1_score > max_f1:\n max_f1 = f1_score\n best_c = c_param\n \n clf.set_params(**{'classify__C': best_c})\n clf.fit(\n features.loc[pd.concat((df_train, df_dev)).index],\n pd.concat((df_train, df_dev))['covid'],\n )\n joblib.dump(\n clf,\n os.path.join(root, f'clf.{fold}.pkl')\n )\n df.loc[df_test.index, 'predictions'] = clf.predict(features.loc[df_test.index])\n df.loc[df_test.index, 'probabilities'] = clf.predict_proba(features.loc[df_test.index])[:, 0]\n \n df.reset_index(inplace=True)\n df.to_csv(os.path.join(root, 'results.csv'), index=False)\n else:\n df = pd.read_csv(os.path.join(root, 'results.csv'))\n\n results = {\n key: metrics[key](df['covid'], df['predictions'])\n for key in metrics\n }\n with open(os.path.join(root, 'results.yaml'), 'w') as fp:\n yaml.dump(results, fp)\n\n file_df = df.groupby('file').apply(\n lambda x: pd.Series({\n 'covid': x['covid'].mode()[0],\n 'predictions': x['predictions'].mode()[0]\n })\n )\n\n results = {\n key: metrics[key](file_df['covid'], file_df['predictions'])\n for key in metrics\n }\n with open(os.path.join(root, 'speaker_results.yaml'), 'w') as fp:\n yaml.dump(results, fp)",
"def sync_fit(df_train, df_test, estimator, n_folds=2, stratify=True, random_state=1):\n # We will compute the indices of the CV in each thread\n de = DriftEstimator(estimator, n_folds, stratify, random_state)\n de.fit(df_train, df_test)\n\n return de.score()",
"def test_build_feature_base(self):\n data = pd.DataFrame(pd.read_csv(\"tests/in_data/pro1_sub.csv\"))\n\n X = data.ix[:,1]\n Y = data.ix[:,0]\n model_sample = Model([],\"presence\")\n\n feature_base = model_sample.build_feature_base(X,Y)\n feature_evaluation =\n assert_equal(len(feature_base) > 10, True)",
"def getBeliefDistribution(self):\n pass",
"def update_entropy(self, save=True):\n\n #min_consensus = self.mturk_assignment.hit.hit_type \\\n #.experiment_settings.min_output_consensus\n min_consensus = 3\n\n # update substance label and entropy\n self.substance = None\n substances = self.substances.filter(invalid=False) \\\n .values_list('substance_id', flat=True)\n if substances:\n self.substance_entropy = compute_entropy(substances)\n hist = Counter(substances).most_common(2)\n substance_id, count = hist[0]\n # must be at least the consensus, and larger than the 2nd choice\n if count >= min_consensus and (len(hist) == 1 or hist[1][1] < count):\n self.substance_id = substance_id\n self.quality_method = 'M'\n\n # update name label and entropy\n self.name = None\n names = self.names.filter(invalid=False) \\\n .values_list('name_id', flat=True)\n if names.exists():\n self.name_entropy = compute_entropy(names)\n hist = Counter(names).most_common(2)\n name_id, count = hist[0]\n # must be at least the consensus, and larger than the 2nd choice\n if count >= min_consensus and (len(hist) == 1 or hist[1][1] < count):\n self.name_id = name_id\n self.quality_method = 'M'\n\n # update rectified normal\n self.rectified_normal = None\n if self.planar:\n for n in self.rectified_normals.all():\n if n.better_than(self.rectified_normal):\n self.rectified_normal = n\n if self.rectified_normal and not self.rectified_normal.correct:\n self.rectified_normal = None\n\n # update bsdf\n self.bsdf_wd = None\n for b in self.bsdfs_wd.all():\n if b.gloss_correct and b.color_correct and b.better_than(self.bsdf_wd):\n self.bsdf_wd = b\n\n if save:\n self.save()",
"def test_forest_dml(self):\n\n Y, T, X, _ = ihdp_surface_B()\n est = AutomatedForestDML(model_y=automl_model_reg(),\n model_t=GradientBoostingClassifier(),\n discrete_treatment=True,\n n_estimators=1000,\n subsample_fr=.8,\n min_samples_leaf=10,\n min_impurity_decrease=0.001,\n verbose=0, min_weight_fraction_leaf=.01)\n est.fit(Y, T, X=X)\n _ = est.effect(X)",
"def fn(conn, libraries, params, predecessors):\n pd = libraries[\"pandas\"]\n\n preferred_distributions = params.get('preferred_distributions', False)\n\n # iterate through component list\n for pred in predecessors:\n if 'dist' in pred:\n weibull_table_name = pred\n elif 'domain' in pred:\n dc_table_name = pred\n else:\n rc_table_name = pred\n\n # load data from distributions and checks\n df_dist = pd.read_sql(sql=\"\"\"SELECT dd.*, rc.Range_Check, dc.Domain_Check FROM {} dd JOIN {} rc ON rc.distribution_id = dd.distribution_id\n JOIN {} dc ON dc.distribution_id = dd.distribution_id\"\"\".format(weibull_table_name, rc_table_name, dc_table_name), \n con=conn)\n\n # add preferred column, initially all false\n df_dist['Preferred'] = 0\n \n # save given preferred distributions if supplied\n if preferred_distributions: # empty list is false as well\n\n df_dist.loc[df_dist.distribution_id.isin(preferred_distributions), 'Preferred'] = 1\n\n return df_dist\n\n ## otherwise perform automated classification\n # save original df for saving at the end\n df_dist_to_return = df_dist.copy()\n\n ## first replace weibulls with exponentials if beta test doesn't pass\n # can do this for all wucs at once\n removed_distributions_weibull_test = 0\n for ips in list(df_dist.interval_parameter_set_id.unique()):\n df_dist_one_ips = df_dist.loc[(df_dist.interval_parameter_set_id == ips), :].copy()\n if df_dist_one_ips.loc[(df_dist_one_ips.dist_name == 'weibull'), 'beta_eq_one_pval'].iloc[0] > 0.05:\n removed_distributions_weibull_test += 1\n df_dist.drop(df_dist_one_ips.loc[(df_dist_one_ips.dist_name == 'weibull'), :].index[0], axis=0, inplace=True)\n else:\n df_dist.drop(df_dist_one_ips.loc[(df_dist_one_ips.dist_name == 'exponential'), :].index[0], axis=0, inplace=True)\n print('removed {} weibull distributions for failing the beta=1 check (will use exponential)'.format(removed_distributions_weibull_test))\n\n # add columns to df to help SE comparisons\n # using 2 as approximation for 2-sided 95% confidence intervals\n # (assuming normality of estimate, which is iffy)\n df_dist['eta_se_upper_ci'] = df_dist.apply(lambda row: row.eta + 2*row.eta_se, axis=1)\n df_dist['eta_se_lower_ci'] = df_dist.apply(lambda row: row.eta - 2*row.eta_se, axis=1)\n\n def exclude_based_on_time_frame(df):\n # returns indices to exclude based on ruled out by time frame / time period\n\n # use 5 years if 5 years is different than both 10 years and all years\n # use 10 years if above check doesn't pass and if 10 years is different than all years\n\n def compare_10_and_s04(df_s04, df_10yr):\n\n if df_s04.eta == df_10yr.eta:\n diff_10_s04 = False\n elif df_s04.eta > df_10yr.eta: \n if df_s04.eta_se_lower_ci < df_10yr.eta_se_upper_ci:\n diff_10_s04 = False\n else:\n diff_10_s04 = True\n elif df_s04.eta < df_10yr.eta:\n if df_s04.eta_se_upper_ci > df_10yr.eta_se_lower_ci:\n diff_10_s04 = False\n else:\n diff_10_s04 = True\n \n if diff_10_s04:\n use = 'Removed_Last_10_Years'\n else: \n use = 'Since 04'\n return use\n\n # if a WUC doesn't have any removals in the last 5 or 10 years there won't be a Weibull\n # we have to catch these instances and handle them separately\n\n # retrieve one-row dfs as series\n df_s04 = df.loc[df.Time_Frame == 'Since 04', :].iloc[0]\n if len(df.loc[df.Time_Frame == 'Removed_Last_10_Years', :]) > 0:\n df_10yr = df.loc[df.Time_Frame == 'Removed_Last_10_Years', :].iloc[0]\n else:\n use = 'Since 04'\n print('WUC {} using {} because no other time frames'.format(df.Work_Unit_Code.iloc[0], use))\n assert df[df.Time_Frame 
!= use].empty\n return df[df.Time_Frame != use].index\n if len(df.loc[df.Time_Frame == 'Removed_Last_5_Years', :]) > 0:\n df_5yr = df.loc[df.Time_Frame == 'Removed_Last_5_Years', :].iloc[0]\n else:\n use = compare_10_and_s04(df_s04, df_10yr)\n print('WUC {} using {}'.format(df.Work_Unit_Code.iloc[0], use))\n # return indices to exclude\n return df[df.Time_Frame != use].index\n\n\n if df_10yr.eta > df_5yr.eta: # e.g 2000 and 1500\n if df_10yr.eta_se_lower_ci < df_5yr.eta_se_upper_ci: # e.g. 1800 & 1700\n diff_5_10 = False\n else:\n diff_5_10 = True\n elif df_10yr.eta < df_5yr.eta:\n if df_10yr.eta_se_upper_ci > df_5yr.eta_se_lower_ci:\n diff_5_10 = False\n else:\n diff_5_10 = True\n else:\n diff_5_10 = False\n if diff_5_10:\n # compare 5 and s04\n if df_s04.eta > df_5yr.eta: \n if df_s04.eta_se_lower_ci < df_5yr.eta_se_upper_ci:\n diff_5_s04 = False\n else:\n diff_5_s04 = True\n elif df_s04.eta < df_5yr.eta:\n if df_s04.eta_se_upper_ci > df_5yr.eta_se_lower_ci:\n diff_5_s04 = False\n else:\n diff_5_s04 = True\n else:\n diff_5_s04 = False\n\n if diff_5_10 and diff_5_s04:\n use = 'Removed_Last_5_Years'\n else:\n # disqualify 5\n # compare 10 and s04\n use = compare_10_and_s04(df_s04, df_10yr)\n \n \n print('WUC {} using {}'.format(df.Work_Unit_Code.iloc[0], use))\n \n # return indices to exclude\n return df[df.Time_Frame != use].index\n\n def exclude_based_on_mds(df):\n # if J and H are different, use both. \n # otherwise, use MDS \n # (assume checks were made already to make sure there are both MDS)\n \n # returns indices to exclude based on ruled out by MDS\n\n # retrieve one-row dfs as series\n df_mds = df.loc[df.MDS != 'Any MDS']\n df_a = df_mds.iloc[0, :]\n df_b = df_mds.iloc[1, :]\n\n if df_b.eta > df_a.eta: # e.g 2000 and 1500\n if df_b.eta_se_lower_ci < df_a.eta_se_upper_ci: # e.g. 1800 & 1700\n diff_a_b = False\n else:\n diff_a_b = True\n elif df_b.eta < df_a.eta:\n if df_b.eta_se_upper_ci > df_a.eta_se_lower_ci:\n diff_a_b = False\n else:\n diff_a_b = True\n else:\n diff_a_b = False\n \n if diff_a_b:\n print('WUC {} splitting by MDS'.format(df.Work_Unit_Code.iloc[0]))\n # return indices to exclude\n return df[df.MDS == 'Any MDS'].index\n else: \n print('WUC {} not splitting by MDS'.format(df.Work_Unit_Code.iloc[0]))\n # return indices to exclude\n return df[df.MDS != 'Any MDS'].index\n\n # now loop through distributions and check Time Range and MDS\n for w in list(df_dist.Work_Unit_Code.unique()):\n \n df_single_wuc = df_dist.loc[df_dist.Work_Unit_Code == w, :].copy()\n\n # remove the unused rows from All MDS and any specific MDS\n indices_to_exclude = exclude_based_on_time_frame(df_single_wuc)\n df_single_wuc.drop(indices_to_exclude, axis=0, inplace=True)\n\n # now compare MDS, but only if there are distributions to compare \n # (more than 2 distributions, which would be one-speicific WUC + Any MDS)\n if df_single_wuc[df_single_wuc.MDS != 'Any MDS'].shape[0] > 1:\n # compare MDS\n indices_to_exclude = exclude_based_on_mds(df_single_wuc)\n df_single_wuc.drop(indices_to_exclude, axis=0, inplace=True)\n else:\n # drop the specific-MDS distribution\n print('WUC {} has single MDS - use Any MDS'.format(df_single_wuc.iloc[0].Work_Unit_Code))\n df_single_wuc.drop(df_single_wuc[df_single_wuc.MDS != 'Any MDS'].index, axis=0, inplace=True)\n\n # distributions that remain are preferred\n df_single_wuc.Preferred = 1\n df_dist_to_return.update(df_single_wuc)\n\n return df_dist_to_return",
"def randomForestClassifier(self, train_cols, test_cols, targets, feature_selction_var, min_abundance_threshold, shuffle=False):\n from sklearn.ensemble import RandomForestClassifier\n #from sklearn.ensemble import RandomForestRegressor\n \n #train = self.abundance_df.loc[:,train_cols] #train.as_matrix(cols)\n train = self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols] #train.as_matrix(cols)\n #test = self.abundance_df.loc[:,test_cols] #.as_matrix(test_cols)\n test = self.abundance_df[self.abundance_df['masked']==False].loc[:,test_cols] #.as_matrix(test_cols)\n #names = list(self.abundance_df.loc[:, 'species'])\n names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'])\n \n #most_common_species_set = set()\n #for col in train_cols:\n # sorted_series = self.abundance_df.loc[:, col].sort_values(ascending=False)[:100]\n # most_common_species_set |= set(list(sorted_series.index))\n #most_common_species_list = []\n #for id0 in most_common_species_set:\n # #print(max(self.abundance_df.loc[id0,train_cols]))\n # if max(self.abundance_df.loc[id0,train_cols]) >= min_abundance_threshold:\n # most_common_species_list.append(id0)\n ##print(len(most_common_species_list))\n #most_common_species_set = set(most_common_species_list)\n #train = train.loc[list(most_common_species_set),:]\n #test = test.loc[list(most_common_species_set),:]\n #names = list(self.abundance_df.loc[list(most_common_species_set),'species'])\n \n #feature selection by variance\n from sklearn.feature_selection import VarianceThreshold\n sel = VarianceThreshold(threshold=(0.999 * (1 - 0.999))) \n if feature_selction_var:\n #ds1 = np.transpose(ds10.as_matrix())\n #ds1 = sel.fit_transform(np.transpose(ds10.as_matrix()))\n #ds2 = np.transpose(ds20.as_matrix())\n #train = sel.fit_transform(np.transpose(train.as_matrix()))\n train = sel.fit_transform(np.transpose(train.values))\n \n #names = list(self.abundance_df.loc[:, 'species'].as_matrix()[sel.get_support()])\n #names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'].as_matrix()[sel.get_support()])\n names = list(self.abundance_df[self.abundance_df['masked']==False].loc[:, 'species'].values[sel.get_support()])\n #test = sel.fit_transform(np.transpose(test.as_matrix()))\n test = sel.fit_transform(np.transpose(test.values))\n ds10 = np.asmatrix(train)[[i for i, j in enumerate(targets) if j == 0],:]\n ds1 = np.transpose(sel.fit_transform(np.transpose(ds10)))\n else:\n\n #train = np.transpose(train.as_matrix())\n train = np.transpose(train.values)\n #test = np.transpose(test.as_matrix())\n test = np.transpose(test.values)\n ds10 = train.iloc[:,[i for i, j in enumerate(targets) if j == 0]]\n #ds1 = np.transpose(ds10.as_matrix())\n ds1 = np.transpose(ds10.values)\n\n if shuffle == 'index':\n from random import shuffle\n shuffle(names)\n\n #rf = RandomForestClassifier(n_estimators=10)\n target = targets \n #group1 = list(self.abundance_df.loc[:,train_cols].columns[:target.count(0)])\n group1 = list(self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols].columns[:target.count(0)])\n #group2 = list(self.abundance_df.loc[:,train_cols].columns[target.count(0):])\n group2 = list(self.abundance_df[self.abundance_df['masked']==False].loc[:,train_cols].columns[target.count(0):])\n\n #rf = RandomForestRegressor(n_estimators=1000)#, class_weight=\"balanced\")\n rf = RandomForestClassifier(n_estimators=1000) # bootstrap=False\n #, max_features=100)#, min_sample_leaf=50)\n #rf = 
RandomForestRegressor(n_estimators=20, max_features=2)\n #class_weight=\"balanced\" #{class_label: weight}\n #n_estimators=1000,\n rf.fit(train, target)\n \n #from sklearn.metrics import roc_auc_score\n #for l in leaf:\n #model = RandomForestRegressor(min_samples_split=2, max_depth=None, bootstrap=False, min_samples_leaf=2)\n # #n_estimator=200, oob_score=True, min_samples_leaf=10,max_features=f, \n #model.fit(train,target)\n # #print(\"AUC - ROC : \")\n # #print(roc_auc_score(target,model.oob_prediction_))\n # #print(model.feature_importances_)\n \n #from sklearn.ensemble import ExtraTreesClassifier\n #model = ExtraTreesClassifier()\n #model.fit(train, target)\n \n from treeinterpreter import treeinterpreter as ti\n prediction, bias, contributions = ti.predict(rf, np.array(train))\n \n #for i in range(len(train)):\n # j = 0\n # # print(i)\n # #print(\"\\tBias (trainset mean)\")\n # #print(bias[i])\n # # print(contributions[0][0])\n # #for c, feature in sorted(zip(contributions[i], \n # # names), \n # # #self.abundance_df.index), \n # # key=lambda x: -abs(x[0])):\n # for c, feature in zip(contributions[i], list(self.abundance_df.index)):\n # if c[0] != 0:\n # #print feature, ':\\t', \"{:.2e}\".format(c), '\\t', self.abundance_df.loc[feature, 'species']\n # if j <10:\n # # print()'\\t' + self.abundance_df.loc[feature, 'species'], '\\t', \"{:.2e}\".format(c[0]))\n # j += 1\n totalc = np.mean(contributions, axis=0) \n \n #from sklearn import model_selection\n #from sklearn.model_selection import cross_val_score\n #clf = RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0)\n #scores = cross_val_score(clf, X, y)\n \n ##compare 2 groups of samples\n prediction1, bias1, contributions1 = ti.predict(rf, np.array(ds1))\n\n mean_contri = [0 for i in xrange(len(names))]\n for s in xrange(len(ds1)):\n for i in xrange(len(names)):\n mean_contri[i] += contributions1[s][i][0]\n mean_contri = [x/len(ds1)for x in mean_contri]\n \n names_list = []\n #for c, org in sorted(zip(mean_contri, list(self.abundance_df.loc[:,'species'])), reverse=True):\n for c, org in sorted(zip(mean_contri, names), reverse=True):\n if c != 0:\n #print(self.abundance_df.loc[i,group1])\n #idx = self.abundance_df[self.abundance_df['species'] == org].index.tolist()[0]\n idx = self.abundance_df[self.abundance_df['masked']==False][self.abundance_df['species'] == org].index.tolist()[0]\n if shuffle:\n #print(names.index(org))\n #idx = list(self.abundance_df.index)[names.index(org)]\n idx = list(self.abundance_df[self.abundance_df['masked']==False].index)[names.index(org)]\n #maximum = max(self.abundance_df.loc[idx,group1 + group2])\n maximum = max(self.abundance_df[self.abundance_df['masked']==False].loc[idx,group1 + group2])\n #print(str(round(c, 3)) + '\\t' + org + '\\t' + str(round(maximum,3)))\n names_list.append([round(c, 3), org, round(maximum,3)])\n \n return names_list",
"def constraint_for(dist=None, param=None):\n\n constraints = {\n 'atol':\n tfb.Softplus(),\n 'rtol':\n tfb.Softplus(),\n 'concentration':\n tfb.Softplus(),\n 'GeneralizedPareto.concentration': # Permits +ve and -ve concentrations.\n lambda x: tf.math.tanh(x) * 0.24,\n 'concentration0':\n tfb.Softplus(),\n 'concentration1':\n tfb.Softplus(),\n 'df':\n tfb.Softplus(),\n 'InverseGaussian.loc':\n tfb.Softplus(),\n 'JohnsonSU.tailweight':\n tfb.Softplus(),\n 'PowerSpherical.mean_direction':\n lambda x: tf.math.l2_normalize(tf.math.sigmoid(x) + 1e-6, -1),\n 'ContinuousBernoulli.probs':\n tfb.Sigmoid(),\n 'Geometric.logits': # TODO(b/128410109): re-enable down to -50\n # Capping at 15. so that probability is less than 1, and entropy is\n # defined. b/147394924\n lambda x: tf.minimum(tf.maximum(x, -16.), 15.\n ), # works around the bug\n 'Geometric.probs':\n constrain_between_eps_and_one_minus_eps(),\n 'Binomial.probs':\n tfb.Sigmoid(),\n 'NegativeBinomial.probs':\n tfb.Sigmoid(),\n 'Bernoulli.probs':\n tfb.Sigmoid(),\n 'PlackettLuce.scores':\n tfb.Softplus(),\n 'ProbitBernoulli.probs':\n tfb.Sigmoid(),\n 'RelaxedBernoulli.probs':\n tfb.Sigmoid(),\n 'cutpoints': # Permit values that aren't too large\n lambda x: tfb.Ascending().forward(10. * tf.math.tanh(x)),\n 'log_rate':\n lambda x: tf.maximum(x, -16.),\n 'mixing_concentration':\n tfb.Softplus(),\n 'mixing_rate':\n tfb.Softplus(),\n 'rate':\n tfb.Softplus(),\n 'scale':\n tfb.Softplus(),\n 'scale_diag':\n tfb.Softplus(),\n 'scale_identity_multiplier':\n tfb.Softplus(),\n 'tailweight':\n tfb.Softplus(),\n 'temperature':\n tfb.Softplus(),\n 'total_count':\n lambda x: tf.floor(tfb.Sigmoid()(x / 100.) * 100.) + 1.,\n 'Bernoulli':\n lambda d: dict(d, dtype=tf.float32),\n 'CholeskyLKJ':\n fix_lkj,\n 'LKJ':\n fix_lkj,\n 'Zipf':\n lambda d: dict(d, dtype=tf.float32),\n 'GeneralizedNormal.power':\n tfb.Softplus(),\n }\n\n if param is not None:\n return constraints.get('{}.{}'.format(dist, param),\n constraints.get(param, tfb.Identity()))\n return constraints.get(dist, tfb.Identity())",
"def run_rfc():\n num_folds = 5\n with pd.HDFStore('./OT_clr_train_LGG_grade.h5') as store:\n X = store['expression'].values\n Y = store['labels'].values\n\n # standardize expression\n mu = np.mean(X,axis=0)\n std = np.std(X, axis=0)\n X = (X-mu)/std\n\n # define Predictor object to manage nested CV\n rf_predictor = Predictor(\n CVmodel(RandomForestClassifier_skl,[4,8,16,32,64,128], 'max_depth',\n n_estimators=100, n_jobs=-1),\n scorers.accuracy_scorer)\n # cross validate\n rf_cross_validation_scores = \\\n rf_predictor.cross_validate(X, Y,\n outer_folds=num_folds, inner_folds=num_folds)\n logger.info('Random Forest cross-validation = {0:.3f}'.format(\n np.mean(rf_cross_validation_scores)))",
"def _ei_halluc_acq(x):\n mu, sigma = gp.eval_with_hallucinated_observations(x, halluc_pts, uncert_form='std')\n Z = (mu - curr_best) / sigma\n return (mu - curr_best)*normal_distro.cdf(Z) + sigma*normal_distro.pdf(Z)"
]
| [
"0.55498654",
"0.52209383",
"0.51860523",
"0.5153627",
"0.5109204",
"0.51061743",
"0.504352",
"0.5028017",
"0.49969104",
"0.4995817",
"0.49739793",
"0.49615988",
"0.49461237",
"0.49342352",
"0.48906568",
"0.4873443",
"0.48696098",
"0.48662293",
"0.48627234",
"0.48428598",
"0.4820065",
"0.48192063",
"0.4815433",
"0.4814938",
"0.48066399",
"0.48053238",
"0.48002356",
"0.4791702",
"0.47910047",
"0.47700527"
]
| 0.5273454 | 1 |
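The check_cdfIntegrity record above verifies that each fitted feature distribution (on-power, off-power, duration) is a proper density by evaluating its PDF on a unit-spaced grid and checking that the summed values — i.e. the CDF at the right edge — stay close to one. A minimal sketch of the same check using scikit-learn's KernelDensity; the sample data, grid range and bandwidth are assumptions for illustration, not the record's private __pdf2 model:

import numpy as np
from sklearn.neighbors import KernelDensity

# Hypothetical 1-D feature samples (e.g. on-power values in watts).
rng = np.random.default_rng(0)
samples = rng.normal(loc=100.0, scale=15.0, size=500).reshape(-1, 1)

kde = KernelDensity(kernel="gaussian", bandwidth=5.0).fit(samples)

# Evaluate the density on a grid wide enough to cover the mass, then
# integrate numerically; a proper density should give a total close to 1,
# which is the "CDF bounded by one" condition the record checks per feature.
step = 1.0
grid = np.arange(0.0, 200.0, step).reshape(-1, 1)
density = np.exp(kde.score_samples(grid))  # score_samples returns log-density
print("approximate total probability:", (density * step).sum())

With step equal to one the multiplication is a no-op, which is why the record's code can simply sum the evaluated PDF values.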
Request File Path from user input and import data to employees.csv | def emp_import():
while True:
try:
file_path = input("Enter the path of your file or enter 'quit' to go back to menu.\n File Path: ")
except FileNotFoundError:
print("File Not Found Error.")
continue
if file_path == "quit":
return
elif not os.path.exists(file_path) and not os.path.isfile(file_path):
print("Invalid Path.")
continue
elif file_path.lower().endswith(('.csv')) == False:
print("Please Choose a CSV File!")
continue
else:
print("File Found!")
break
new_lines = list()
lines = list()
with open(file_path, 'r') as f:
reader = csv.reader(f)
for row in reader:
new_lines.append(row)
with open("employees.csv", 'r') as readFile:
reader = csv.reader(readFile)
next(reader, None)
for row in reader:
lines.append(row)
new_list = new_lines + lines
to_add = set(tuple(row) for row in new_list)
with open('employees.csv', 'w', newline='') as writeFile:
writer = csv.writer(writeFile)
writer.writerows(to_add)
print("Employees Added.")
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_input_file():\n\n filename = input('Input the file name to save data to: ') + '.csv'\n return filename",
"def file_from_user():\n try:\n file = get_input(\"Please enter name of the file to get data from. For exit press 0: \")\n if file == \"0\":\n sys.exit()\n LocationList.add_location(read_from_csv(file))\n except FileNotFoundError:\n print(\"\\nThis file wasn't found. Try again or press 0 to exit.\\n\")\n file_from_user()",
"def csv_path(name):\n return \"./data/%s\" % name",
"def set_input_csv(self):\n if len(self[\"input_csv\"]) > 1:\n raise Exception(\"You must only specify *one* unified CSV file!\")\n self.csv_path = self[\"input_csv\"][0]\n print(\"Using input file\", self.csv_path)",
"def data_abex_input_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_abex_input_dir(experiment_name) / iteration_csv",
"def path_delete_emp():\n\twhile True:\n\t\ttry:\n\t\t\tfile_path = input(\"Enter the path of your file or enter 'quit' to go back to menu.\\n File Path: \")\n\t\texcept FileNotFoundError:\n\t\t\tprint(\"File Not Found Error.\")\n\t\t\tcontinue\n\t\tif file_path == \"quit\":\n\t\t\treturn\n\t\telif not os.path.exists(file_path) and not os.path.isfile(file_path):\n\t\t\tprint(\"Invalid Path.\")\n\t\t\tcontinue\n\t\telif file_path.lower().endswith(('.csv')) == False:\n\t\t\tprint(\"Please Choose a CSV File!\")\n\t\t\tcontinue\n\t\telse:\n\t\t\tprint(\"File Found!\")\n\t\t\tbreak\t\t\n\tnew_lines = list()\n\tlines = list()\n\twith open(file_path, 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\tnew_lines.append(row)\n\twith open(\"employees.csv\", 'r') as readFile:\n\t\treader = csv.reader(readFile)\n\t\tnext(reader, None)\n\t\tfor row in reader:\n\t\t\tlines.append(row)\n\tnew_list = [x for x in lines if x not in new_lines]\n\twith open('employees.csv', 'w', newline='') as writeFile:\n\t\twriter = csv.writer(writeFile)\n\t\twriter.writerows(new_list)\n\t\tprint(\"Employees Deleted.\")\n\t\treturn",
"def csv_file(input_file):\n\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n csv_out = directory_name + input_file\n return csv_out",
"def pathfinder(Input):\n while True:\n if Input[-4::] == '.csv':\n return Input\n else:\n Input = input('Please enter a valid csv file: ')",
"def process_file_import(self):\r\n directory_csv = [file for file in os.listdir() if file.endswith(\".csv\")]\r\n self.print_options(directory_csv,2)\r\n\r\n \"\"\"\r\n Asks for user input. Then imports csv file based on user's input.\r\n \"\"\"\r\n n = (input(\"Which csv would you like to import? Please input the corresponding integer:\"))\r\n\r\n try:\r\n n = int(n)\r\n except:\r\n pass\r\n\r\n if isinstance(n, int) is True and n <= len(directory_csv):\r\n self.population.import_csv(directory_csv[int(n)-1])\r\n print(self.population)\r\n self.file_import()\r\n elif n == 'q':\r\n quit()\r\n elif n == 'b':\r\n self.menu_page()\r\n else:\r\n raise InputError(\"\\nPlease input a valid digit, 'q' or 'b'\")",
"def get_fileName(path):\n fileName = input('Select data file from ' + ','.join(os.listdir(path)) + ' ')\n return fileName",
"def askopenfilename():\r\n file_opt = options = {}\r\n options['defaultextension'] = '.csv'\r\n options['filetypes'] = [('all files', '.*'), ('csv files', '.csv')]\r\n options['initialdir'] = os.getcwd()\r\n options['initialfile'] = 'profile.csv'\r\n options['title'] = 'choose file'\r\n\r\n # get filename\r\n filename = tkFileDialog.askopenfilename(**file_opt)\r\n\r\n # open file on your own\r\n return filename",
"def openData(self):\n\n\n path = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getcwd(), 'CSV, XLSX(*.csv *.xlsx)')\n\n # If a file was specified, load it up. If not, tell the user to pick a valid file\n if path[0] != '':\n\n if os.path.exists(path[0]) and os.path.getsize(path[0]):\n\n filepath, filename = os.path.split(path[0])\n pandaData = procedures.load(filename, filepath)\n\n self.createTab(pandaData, name=filename)\n\n else:\n self.notifyUser(\"Please pick a valid file.\")",
"def file_path():\n file_name = input(\"Enter the file name:\")\n return file_name",
"def browseforcsv(self, entry):\r\n filename = filedialog.askopenfilename(title='Select CSV')\r\n if filename != '': # Doesn't change if no file name entered\r\n entry.delete(0, tk.END)\r\n entry.insert(tk.END, filename)",
"def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)",
"def path_to_data():\n jab = os.expanduser('~/jab')\n return os.path.join(jab, 'local/login_sums.csv')",
"def dwn_rel_sup_csv(request):\n i = int(request.GET.get('i'))\n \n return FileResponse(open('temp/relation_support_datasets/relation_support_dataset_{}_{}.csv'.format(i, request.user.username),'rb'))",
"def loadCSV(input_file):",
"def get_input_name():\n xlsTypes = [(\"Файлы Excel или csv\", \".xls .xlsx\")]\n return askopenfilenames(initialdir=os.path.abspath(os.getcwd()), filetypes=xlsTypes, title=\"Выберите файлы Excel или CSV\")",
"def get_data_file():\n base_folder = os.path.dirname(__file__)\n # print(base_folder)\n return os.path.join(base_folder, 'data', 'Sacramentorealestatetransactions.csv')\n # print(filename)",
"def open_csv_data_file(gui_specs,data_filename,overwrite_ok=None):\n \n #if the filename has .csv delete it\n if data_filename[-4:] == '.csv':\n data_filename = data_filename[:-4]\n \n #add the path to the file name\n data_filename = gui_specs['save_dir'] + data_filename + '.csv'\n \n #open the csv file\n data_file = open(data_filename, 'a')\n \n #write all the header information\n for key, value in gui_specs.iteritems():\n data_file.write('\"')\n data_file.write(key)\n data_file.write(',')\n data_file.write(str(value))\n data_file.write('\"')\n data_file.write('\\n')\n data_file.write('\\n')\n \n return data_file",
"def handle_file_name(self):\r\n self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]\r\n result_name = self.tmp_name + '_result_'\r\n log_name = self.tmp_name + '_log.csv'\r\n \r\n self.result_file_name = os.path.join(self.save_path , result_name) \r\n self.log_file_name = os.path.join(self.log_path , log_name)",
"def etl_csv_file(input_file_location):\n\n all_employee_dict = {}\n supervisor_employee_dict = {}\n header_row = 'employee_id,first_name,last_name,hire_date,supervisor_id'\n\n with open(input_file_location, mode='r') as employee_csv_file:\n\n # verify the header exists. If the header is not correct error out and return\n first_row = next(employee_csv_file, None)\n if first_row.rstrip() != header_row:\n return False, \"The header row in the %s CSV file must be %s\" % (input_file_location, header_row)\n\n employee_csv_reader = csv.reader(employee_csv_file)\n for count, row in enumerate(employee_csv_reader):\n\n # validate each date in the input file can be casted to datetime object\n try:\n hire_date = datetime.strptime(row[3], '%Y-%m-%d')\n except ValueError as e:\n print (e)\n message = \"There has been an error parsing a date in the input file. Please correct '{0}' at \" \\\n \"line '{1}' so that it follows follows the '2011-03-24' date format.\".format(row[3], count)\n return False, message\n\n employee_id = row[0]\n employee = {\n 'employee_id': employee_id,\n 'first_name': row[1],\n 'last_name': row[2],\n 'hire_date': hire_date,\n }\n\n supervisor_id = row[4]\n\n # This is used later to print out ALL employees according to requirements\n all_employee_dict[employee_id] = 'Sorry, this person is not a supervisor'\n\n # Append to list if key already exists\n group = supervisor_employee_dict.setdefault(supervisor_id, [])\n group.append(employee)\n\n return all_employee_dict, supervisor_employee_dict",
"def _get_csv_path(name):\n return os.path.join(cwd, 'output/app_info', name)",
"def csvPathname(self, scenario, baseline=None, outputDir='.', type=RESULT_TYPE_SCENARIO):\n # Output files are stored in the output dir with same name as query file but with 'csv' extension.\n basename = os.path.basename(self.queryFile)\n mainPart, extension = os.path.splitext(basename)\n middle = scenario if type == RESULT_TYPE_SCENARIO else (\"%s-%s\" % (scenario, baseline))\n csvFile = \"%s-%s.csv\" % (mainPart, middle)\n csvPath = os.path.abspath(os.path.join(outputDir, csvFile))\n return csvPath",
"def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)",
"def import_csv(self):\r\n path = tk.filedialog.askopenfile(initialdir=\"/\", title=\"Select File\",\r\n filetypes=((\"Comma-separated values (.csv)\", \"*.csv\"), (\"Text Document (.txt)\", \"*.txt\"),\r\n (\"All Files\", \"*.*\")))\r\n\r\n items = []\r\n if path is not None:\r\n for ticker in path:\r\n items.append(ticker)\r\n else:\r\n return\r\n\r\n tickers = items[0].split(',')\r\n for ticker in tickers:\r\n self.root.main.get_quote(ticker)",
"def file_name_request(self):\n self.file_name = input(\"What is the name of the input file?\\n>>>\")",
"def loadData(self):\n\n\n path = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getcwd(), 'CSV, XLSX(*.csv *.xlsx)')\n\n # If a file was specified, load it up. If not, tell the user to pick a valid file\n if path[0] != '':\n\n if os.path.exists(path[0]) and os.path.getsize(path[0]):\n\n filepath, filename = os.path.split(path[0])\n pandaData = procedures.load(filename, filepath)\n\n while self.tabWidget.count() != 0:\n self.closeTab()\n self.createTab(pandaData)\n\n else:\n self.notifyUser(\"Please pick a valid file.\")",
"def filepath(day, ind):\n if ind!=\"TradeReport\" and ind!=\"OrderDetail\" and ind!=\"OrderHistory\":\n raise NameError(' ind must be either TradeReport or OrderDetail')\n \n elif day<1 or day>31 or type(day)!=int:\n raise TypeError('day must be an integer between 1 and 31')\n \n if day<10:\n day=\"0\"+str(day)\n else:\n day=str(day)\n \n path=\"/data/LSE_DATA/raw/T_\" + ind + \"_\"+ day +\"012008.csv/\" + \"t_\" + ind +\".csv\"\n\n return path"
]
| [
"0.671666",
"0.6316371",
"0.62783736",
"0.6256375",
"0.6097976",
"0.6014507",
"0.6010938",
"0.5962907",
"0.5962493",
"0.5878151",
"0.58474374",
"0.58217865",
"0.581936",
"0.5810715",
"0.57662374",
"0.5760681",
"0.5747308",
"0.5692834",
"0.5593718",
"0.55768937",
"0.5554444",
"0.5552476",
"0.55484",
"0.54974765",
"0.5485931",
"0.5483863",
"0.54798055",
"0.5459263",
"0.5414647",
"0.5406021"
]
| 0.6647591 | 1 |
Request File Path from user input and Removes listed data from employees.csv | def path_delete_emp():
while True:
try:
file_path = input("Enter the path of your file or enter 'quit' to go back to menu.\n File Path: ")
except FileNotFoundError:
print("File Not Found Error.")
continue
if file_path == "quit":
return
elif not os.path.exists(file_path) and not os.path.isfile(file_path):
print("Invalid Path.")
continue
elif file_path.lower().endswith(('.csv')) == False:
print("Please Choose a CSV File!")
continue
else:
print("File Found!")
break
new_lines = list()
lines = list()
with open(file_path, 'r') as f:
reader = csv.reader(f)
for row in reader:
new_lines.append(row)
with open("employees.csv", 'r') as readFile:
reader = csv.reader(readFile)
next(reader, None)
for row in reader:
lines.append(row)
new_list = [x for x in lines if x not in new_lines]
with open('employees.csv', 'w', newline='') as writeFile:
writer = csv.writer(writeFile)
writer.writerows(new_list)
print("Employees Deleted.")
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def emp_import():\n\twhile True:\n\t\ttry:\n\t\t\tfile_path = input(\"Enter the path of your file or enter 'quit' to go back to menu.\\n File Path: \")\n\t\texcept FileNotFoundError:\n\t\t\tprint(\"File Not Found Error.\")\n\t\t\tcontinue\n\t\tif file_path == \"quit\":\n\t\t\treturn\n\t\telif not os.path.exists(file_path) and not os.path.isfile(file_path):\n\t\t\tprint(\"Invalid Path.\")\n\t\t\tcontinue\n\t\telif file_path.lower().endswith(('.csv')) == False:\n\t\t\tprint(\"Please Choose a CSV File!\")\n\t\t\tcontinue\n\t\telse:\n\t\t\tprint(\"File Found!\")\n\t\t\tbreak\n\tnew_lines = list()\n\tlines = list()\n\twith open(file_path, 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\tnew_lines.append(row)\n\twith open(\"employees.csv\", 'r') as readFile:\n\t\treader = csv.reader(readFile)\n\t\tnext(reader, None)\n\t\tfor row in reader:\n\t\t\tlines.append(row)\n\tnew_list = new_lines + lines\n\tto_add = set(tuple(row) for row in new_list)\n\twith open('employees.csv', 'w', newline='') as writeFile:\n\t\twriter = csv.writer(writeFile)\n\t\twriter.writerows(to_add)\n\t\tprint(\"Employees Added.\")\n\t\treturn",
"def file_from_user():\n try:\n file = get_input(\"Please enter name of the file to get data from. For exit press 0: \")\n if file == \"0\":\n sys.exit()\n LocationList.add_location(read_from_csv(file))\n except FileNotFoundError:\n print(\"\\nThis file wasn't found. Try again or press 0 to exit.\\n\")\n file_from_user()",
"def remove_edit_serial_names(self, serial_name):\n # Open the edit .csv file\n # Open a temporary .csv file\n # Copy the edit .csv file to the temporary file excluding the row \n # with the serial name\n input_filename = \"HobowareEditSerialNames.csv\"\n temp_filename = \"temp.csv\"\n name = str(serial_name)\n with open(temp_filename, \"wb\") as temp_file: \n with open(input_filename, \"r\") as input_file:\n for input_row in csv.reader(input_file):\n if name not in input_row:\n csv.writer(temp_file).writerow(input_row)\n \n # Remove and replace the edit .csv file with the temporary .csv file \n os.remove(input_filename)\n os.rename(temp_filename, input_filename)",
"def pathfinder(Input):\n while True:\n if Input[-4::] == '.csv':\n return Input\n else:\n Input = input('Please enter a valid csv file: ')",
"def borrar_CSV(self):\n archivo = input(\"Indique el nombre del archivo a eliminar: \")\n os.remove(f\"{archivo}.txt\")\n print(\n f\"\"\"\n ==========================================\n == SE HAN ELIMINADO LOS DATOS ==\n ==========================================\n \"\"\"\n )",
"def load_employees(self):\n empcsv = open('employees.csv','r')\n emp_temp = []\n empcsv = empcsv.readlines()[1:]\n for line in empcsv:\n for i in line.split(','):\n if line == 0:\n pass\n else:\n emp_temp.append(i)\n employee = emp_temp[0::13]\n data_1 = []\n data = []\n for i in emp_temp:\n if i in employee:\n pass\n else:\n data_1.append(i)\n for i in range(26):\n data_temp = data_1[(i * 12):((i + 1) * 12)]\n data.append(data_temp)\n for i in range(len(employee)):\n self.emp_dict[employee[i]] = data[i]\n #print(self.emp_dict)\n for i in self.emp_dict:\n self.emp_dict[i] = [x.replace('\\n', '') for x in self.emp_dict[i]]\n return self.emp_dict",
"def set_input_csv(self):\n if len(self[\"input_csv\"]) > 1:\n raise Exception(\"You must only specify *one* unified CSV file!\")\n self.csv_path = self[\"input_csv\"][0]\n print(\"Using input file\", self.csv_path)",
"def clean_file(csv_file):\n my_list = []\n with open(csv_file, newline='') as csvfile:\n file_reader = csv.reader(csvfile, delimiter=',', quotechar=\" \")\n for row in file_reader:\n my_list.append(row)\n\n \"\"\"\n > Part Two\n Input: Nested list csv_table and a string file_name\n Action: Write fields in csv_table into a comma-separated CSV file with the name file_name\n Mutates output: Yes\n \"\"\"\n with open(csv_file, 'w', newline='') as csvfile:\n my_csv_writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)\n for row in my_list:\n row2 = []\n for item in row:\n a = item.lstrip('\"')\n b = a.rstrip('\"')\n row2.append(b)\n my_csv_writer.writerow(row2)",
"def managecsv(data):\n\n checkfolderdata()\n if not datafileexist(data[7]):\n createcsv(data[7])\n managecsv(data)\n else:\n addcsv(data, data[7])",
"def check_for_csv(inp_dict):\n if inp_dict[\".csv\"]:\n for path in inp_dict[\".csv\"]:\n csv_path = convert_csv(path)\n inp_dict[\".xls\"].append(csv_path)\n inp_dict[\"del\"].append(csv_path)\n inp_dict[\"out\"].append(csv_path)\n inp_dict[\".csv\"] = []\n return inp_dict",
"def remove_files(file, filename_excel):\n os.remove(file)\n print(\"Remove the data input sent by the user\")\n os.remove(velocity_input + \"/\" + filename_excel + \"_v_input.txt\")\n print(\"Remove the input file for velocity calculations\")\n os.remove(velocity_output + \"/\" + filename_excel + \"_vx.output\")\n print(\"Remove the vx output file\")\n os.remove(velocity_output + \"/\" + filename_excel + \"_vy.output\")\n print(\"Remove the vy output file\")\n os.remove(velocity_output + \"/\" + filename_excel + \"_vz.output\")\n print(\"Remove the vy output file\")\n os.remove(data_output + \"/\" + filename_excel + \"_results.xlsx\")\n print(\"Remove the data output sent to the user's email.\")",
"def etl_csv_file(input_file_location):\n\n all_employee_dict = {}\n supervisor_employee_dict = {}\n header_row = 'employee_id,first_name,last_name,hire_date,supervisor_id'\n\n with open(input_file_location, mode='r') as employee_csv_file:\n\n # verify the header exists. If the header is not correct error out and return\n first_row = next(employee_csv_file, None)\n if first_row.rstrip() != header_row:\n return False, \"The header row in the %s CSV file must be %s\" % (input_file_location, header_row)\n\n employee_csv_reader = csv.reader(employee_csv_file)\n for count, row in enumerate(employee_csv_reader):\n\n # validate each date in the input file can be casted to datetime object\n try:\n hire_date = datetime.strptime(row[3], '%Y-%m-%d')\n except ValueError as e:\n print (e)\n message = \"There has been an error parsing a date in the input file. Please correct '{0}' at \" \\\n \"line '{1}' so that it follows follows the '2011-03-24' date format.\".format(row[3], count)\n return False, message\n\n employee_id = row[0]\n employee = {\n 'employee_id': employee_id,\n 'first_name': row[1],\n 'last_name': row[2],\n 'hire_date': hire_date,\n }\n\n supervisor_id = row[4]\n\n # This is used later to print out ALL employees according to requirements\n all_employee_dict[employee_id] = 'Sorry, this person is not a supervisor'\n\n # Append to list if key already exists\n group = supervisor_employee_dict.setdefault(supervisor_id, [])\n group.append(employee)\n\n return all_employee_dict, supervisor_employee_dict",
"def del_rel_sup_csv(request):\n if request.method == \"GET\":\n _get_sup_relations(request.user)\n dataset_index = int(request.GET.get(\"i\"))\n os.remove(\"temp/relation_support_datasets/relation_support_dataset_{}_{}.csv\".format(dataset_index, request.user.username))\n if len(nk_stat) > dataset_index:\n for i in range(dataset_index+1, len(nk_stat)+1):\n os.rename(\"temp/relation_support_datasets/relation_support_dataset_{}_{}.csv\".format(i, request.user.username), \"temp/relation_support_datasets/relation_support_dataset_{}_{}.csv\".format(i-1, request.user.username))\n _get_sup_relations(request.user)\n return redirect('home')",
"def browseforcsv(self, entry):\r\n filename = filedialog.askopenfilename(title='Select CSV')\r\n if filename != '': # Doesn't change if no file name entered\r\n entry.delete(0, tk.END)\r\n entry.insert(tk.END, filename)",
"def process_file_import(self):\r\n directory_csv = [file for file in os.listdir() if file.endswith(\".csv\")]\r\n self.print_options(directory_csv,2)\r\n\r\n \"\"\"\r\n Asks for user input. Then imports csv file based on user's input.\r\n \"\"\"\r\n n = (input(\"Which csv would you like to import? Please input the corresponding integer:\"))\r\n\r\n try:\r\n n = int(n)\r\n except:\r\n pass\r\n\r\n if isinstance(n, int) is True and n <= len(directory_csv):\r\n self.population.import_csv(directory_csv[int(n)-1])\r\n print(self.population)\r\n self.file_import()\r\n elif n == 'q':\r\n quit()\r\n elif n == 'b':\r\n self.menu_page()\r\n else:\r\n raise InputError(\"\\nPlease input a valid digit, 'q' or 'b'\")",
"def remove_delete_serial_names(self, serial_name):\n # Open the delete .csv file\n # Open a temporary file\n # In the input file, \n # If the serial name exists, \n # Delete the row with the serial name\n input_filename = \"DeleteSerial.csv\"\n temp_filename = \"temp.csv\"\n name = str(serial_name)\n with open(temp_filename, \"wb\") as temp_file: \n with open(input_filename, \"r\") as input_file:\n for input_row in csv.reader(input_file):\n if name not in input_row:\n csv.writer(temp_file).writerow(input_row)\n \n # Remove and replace the delete .csv file with the temporary file\n os.remove(input_filename)\n os.rename(temp_filename, input_filename)",
"def loadCSV(input_file):",
"def update_csv():\n return os.listdir('./data')",
"def get_input_file():\n\n filename = input('Input the file name to save data to: ') + '.csv'\n return filename",
"def rent_book(main_page):\n\n print(\"Which book do you wish to rent? Enter its code\")\n book_code = input('> ')\n\n check_code_and_rent(main_page, book_code)\n\n os.remove('rented.csv')\n os.rename('rented_temp.csv','rented.csv')",
"def delete_xls(inp_dict):\n if inp_dict[\"del\"]:\n for del_f in inp_dict[\"del\"]:\n os.remove(os.path.abspath(del_f))",
"def onLoadCSVList(self, evt):\n dlg = wx.FileDialog(self.view, \"Choose a file:\", wildcard = \"*.txt; *.csv\" ,\n style=wx.FD_DEFAULT_STYLE | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n print \"You chose %s\" % dlg.GetPath()\n self.config.CSVFilePath = dlg.GetPath()",
"def delete(self, namespace, fname, start=None, end=None):\n fname = f\"{self.user}_{namespace}.csv\"\n fname = str(Path(self.path, fname))\n\n search = _make_search(start, end, fname)\n\n result = []\n\n with open(fname, 'r+', newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n\n for row in csvreader:\n if row not in search:\n result.append(row)\n\n csvwriter = csv.writer(csvfile, delimiter=',')\n\n for row in result:\n csvwriter.writerow(row)\n\n csvfile.close()",
"def handleRemoveFile(self):\n for w in self.filesList.selectedItems():\n self.filesList.removeFile(w.text(2))\n self.metadataList.clear()\n self.metadataList.setRowCount(0)\n self.metadataList.setHorizontalHeaderLabels([\"Metadata Header\", \"Value\"])\n self.personalDataList.clear()",
"def collect(filename=None): \n if not filename:\n filename = \"regimes/\" + str(raw_input(\"What is the name of the file for the exercise regime you would like to run? \")) + \".csv\"\n read_only_file = open(filename, \"r\")\n file_contents = read_only_file.readlines()\n return file_contents",
"def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)",
"def change_new_data_writer_from_file(login):\n\n with open('data.csv', 'r') as data_base_r:\n data_reader = csv.reader(data_base_r)\n # searching for a line with inputed login\n data_line = []\n\n with open(\"data.csv\", 'r') as login_search:\n login_reader = csv.reader(login_search)\n next(login_reader)\n\n for lines in login_reader:\n if lines[2] == login:\n data_line = lines\n return data_line",
"def get_fileName(path):\n fileName = input('Select data file from ' + ','.join(os.listdir(path)) + ' ')\n return fileName",
"def add_delete_serial_names(self, serial_name):\n # Open the delete .csv file\n # Create the reader\n # Check if the serial name exists\n # Remove it the serial name exists\n input_filename = \"HobowareDeleteSerial.csv\"\n delete_serial_name = False \n name = str(serial_name)\n with open(input_filename, \"r\") as input_file:\n for row in csv.reader(input_file):\n if name in row:\n delete_serial_name = True\n \n if delete_serial_name == True:\n self.remove_delete_serial_names(name)\n\n # Open the delete .csv file\n # Append the new delete name to the file\n with open(input_filename, \"a\") as input_file:\n row = (str(name) + \"\\n\")\n input_file.write(row)",
"def main(in_path, keep_path, out_path):\n\t# First open the input csv\n\tcsv_hndl = lambda x: np.array([np.array(r) for r in x])\n\tdata, headers = read_csv(in_path, csv_hndl, use_headers=True, delimiter=',')\n\n\t# Read headers to keep\n\tkeeps = []\n\n\t# Regex for ignoring comments\n\tcmnt_re = re.compile(\"^#\")\n\n\t# Open and read the file\n\twith open(keep_path) as f_obj:\n\t\tfor line in f_obj:\n\t\t\tline = line.strip()\n\t\t\t# If line is commented out, ignore\n\t\t\tif cmnt_re.match(line):\n\t\t\t\tcontinue\n\t\t\t# Otherwise add to list of keeps\n\t\t\tkeeps.append(line)\n\n\t# Prune the csv\n\tnew_data, new_headers = prune_csv(data,headers,keeps)\n\n\t# Write to output csv file\n\twrite_csv(\n\t\tout_path, \n\t\tnew_data, \n\t\tnew_headers, \n\t\tdelimiter=',', \n\t\tquotechar='\"',\n\t\tquoting=csv.QUOTE_MINIMAL\n\t)"
]
| [
"0.66964155",
"0.597888",
"0.5884179",
"0.58648145",
"0.57086045",
"0.57040673",
"0.55802935",
"0.5552325",
"0.55478776",
"0.54975176",
"0.5481775",
"0.54157984",
"0.5387137",
"0.5362531",
"0.5275203",
"0.52694577",
"0.5268417",
"0.5261648",
"0.52415246",
"0.5194671",
"0.51680094",
"0.5165775",
"0.5160103",
"0.51411337",
"0.50949985",
"0.5082259",
"0.5061466",
"0.50247586",
"0.5022066",
"0.5017113"
]
| 0.77248025 | 0 |
Separate out positive and negative attributes in the dataframe with attributes. Outputs two separated dataframes | def separate_pos_neg(attribution):
attribution_pos_val = attribution*(attribution >= 0)
attribution_neg_val = attribution*~(attribution >= 0)
return attribution_pos_val, attribution_neg_val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_negative_data(self):\n negative_df = pd.DataFrame(columns=HeatStrokeDataFiller.important_features, index=np.arange(self.num_negative))\n for field in negative_df.columns:\n parameter_distribution = HeatStrokeDataFiller.negative_default[field]\n negative_df[field].loc[:] = parameter_distribution(self.num_negative)\n return negative_df",
"def convert_attribute(df):\n\n df['Popularity'] = ['Oversold' if x == '(1.0, 2.0]' else 'Unpopular' if x == '(-1.0, 0.0]' or x == '(0.0, 0.5]' else 'Popular' for x in df['percent_of_cap_binRange1']]\n\n return df",
"def make_and_append_negative_data(self):\n negative_df = self.get_negative_data()\n self.df = pd.concat((self.df, negative_df))",
"def speaker_negative_normalization(features: pd.DataFrame, df: pd.DataFrame):\n data = features.values.copy()\n for speaker in df['speaker'].unique():\n indices = df['speaker'] == speaker\n negative_indices = (df['speaker'] == speaker) & (df['covid'] == False)\n print(negative_indices)\n scaler = StandardScaler()\n scaler.fit(data[negative_indices, :])\n data[indices, :] = scaler.transform(data[indices, :])\n return pd.DataFrame(\n data=data,\n index=features.index,\n columns=features.columns\n )",
"def get_negatives(self):\n negative_values = (self.df[self.col_name]<0).sum()\n return negative_values",
"def get_X_y(df):\n X = df.loc[:, df.columns != 'Conditions']\n X = X.loc[:, X.columns != \"Unnamed: 0\"]\n y = df.loc[:, df.columns == 'Conditions']\n return X, y",
"def inverse_transform(self, df):\n return df",
"def split_on_sensitive_attribute(X, y, S):\n\n\tidx_pos = np.where(X[S] == 1)[0]\n\tX_pos, y_pos = X.iloc[idx_pos], y[idx_pos]\n\tX_neg, y_neg = X.drop(X.index[idx_pos]), np.delete(y, idx_pos)\n\n\treturn(X_pos, y_pos, X_neg, y_neg)",
"def prepare_data(df):\n X = df.drop(\"y\",axis=1)\n\n y=df[\"y\"]\n\n return X, y",
"def only_positive_values(df):\n\n\n only_positive_cols_bool = (df <= 0).any()\n only_positive_cols = only_positive_cols_bool[~only_positive_cols_bool].index\n positive_df = df[only_positive_cols]\n\n return positive_df",
"def df_semi_minus(df1, df2, left, right=None):\n if right is None:\n right = left\n\n df2 = df2[right].copy()\n df2['_flag_'] = 1\n joined = pd.merge(df1, df2, left_on=left, right_on=right, how='left', suffixes=('', '_y'))\n joined = joined[joined['_flag_'].isna()]\n return joined.drop([col for col in joined.columns if col.endswith('_y')] + ['_flag_'], axis=1)",
"def ohe_inverse(df_shap_values):\n\n # Auxiliary list to recreate original shap_values dataframe\n list_shap_original = []\n\n # Regular expression to pick attributes names.\n # Since in our case attributes names are the genomic positions (i.e. an integer number), we use the regex below\n import re\n pattern = \"^\\d+\"\n\n # Auxiliary dictionary to create one pd.DataFrame for each sample, summing the shap values for each attribute.\n # Later, these dataframes will be appended together, resulting in the final df.\n dic={}\n\n # for each sample.\n for i, sample in df_shap_values.iterrows():\n # initialize an empty dictionary, that will contain \"attribute : summed shap values\" for\n # all attributes in this sample.\n dic = {}\n # The code below sums the importances for each category in each attribute in this sample.\n for pos in sample.index:\n attr = re.match(pattern, pos).group()\n if attr not in dic.keys():\n dic[attr] = sample[pos]\n else:\n dic[attr] += sample[pos]\n # Create a df containing only the current sample\n df_sample = pd.DataFrame(dic, index=[i])\n # Append it to a list that will become the full dataframe later\n list_shap_original.append(df_sample)\n\n # Create a DataFrame containing the shap values for the \"original\" attributes.\n shap_original = pd.concat(list_shap_original, axis=0)\n return shap_original",
"def scale_and_separate(df, labels=True):\n df.columns = df.columns.str.lstrip()\n y = None\n if labels:\n y = df['ATT_FLAG'] # separate the target values\n df.drop(['ATT_FLAG'], axis=1, inplace=True)\n\n # apply scaling\n scaled_df = pd.DataFrame(StandardScaler().fit_transform(df.values), index=df.index, columns=df.columns)\n return scaled_df, y",
"def create_regressor_attributes(df, attribute, list_of_prev_t_instants) :\n \n list_of_prev_t_instants.sort()\n start = list_of_prev_t_instants[-1] \n end = len(df)\n df['datetime'] = df.index\n df.reset_index(drop=True)\n\n df_copy = df[start:end]\n df_copy.reset_index(inplace=True, drop=True)\n\n for attribute in attribute :\n foobar = pd.DataFrame()\n\n for prev_t in list_of_prev_t_instants :\n new_col = pd.DataFrame(df[attribute].iloc[(start - prev_t) : (end - prev_t)])\n new_col.reset_index(drop=True, inplace=True)\n new_col.rename(columns={attribute : '{}_(t-{})'.format(attribute, prev_t)}, inplace=True)\n foobar = pd.concat([foobar, new_col], sort=False, axis=1)\n\n df_copy = pd.concat([df_copy, foobar], sort=False, axis=1)\n \n df_copy.set_index(['datetime'], drop=True, inplace=True)\n return df_copy",
"def data_column_conversion(data:pandas.core.frame.DataFrame) -> pandas.core.frame.DataFrame:\n data = data.assign(W = (data.label == 'W') + 0,D = (data.label == 'D') + 0,L = (data.label == 'L') + 0)\n data = data.drop(\"label\",axis=1)\n return data",
"def transform(self, data: pd.DataFrame) -> pd.DataFrame:\n data = data[['premise', 'hypothesis', 'label']] # type: ignore\n return data",
"def modify_df(dataframe: pd.DataFrame) -> list[pd.DataFrame]:\n\n # Extract the elements and number of them from formula\n def grab_elements_and_number_of(formula):\n element_tup = tuple(np.sort(re.findall(r\"([A-Z][a-z]*)\", formula)))\n return element_tup, len(element_tup)\n\n dataframe[\"element_tup\"], dataframe[\"number_elements\"] = zip(\n *dataframe.formula.apply(grab_elements_and_number_of)\n )\n\n # Filter the df to only include unary, binary materials\n dataframe = dataframe[\n (dataframe[\"number_elements\"] == 1) | (dataframe[\"number_elements\"] == 2)\n ]\n\n # Create a df of minimum values\n min_E_df = dataframe.groupby(by=[\"element_tup\"]).agg({\"energy\": \"min\"})\n min_E_df = min_E_df.reset_index()\n\n return [dataframe, min_E_df]",
"def preprocess_feature(df):",
"def pre_get_data(df):\n\n df_len = len(df.iloc[0, :]) - 1\n\n select_cols = []\n\n for i in range(df_len): #Get Columns that contain number values\n\n if type(df.iloc[0, i + 1]) is np.float64:\n if math.isnan(df.iloc[0, i + 1]) == False:\n select_cols.append(i + 1)\n elif type(df.iloc[0, i + 1]) is np.float:\n if math.isnan(df.iloc[0, i + 1]) == False:\n select_cols.append(i + 1)\n\n\n res_df = df.iloc[:, select_cols]\n\n list_pop = list(res_df)\n list_res = ['B_F1_Bool_Result', 'Event_Date', 'B_WClass']\n list_pop.pop()\n\n for item in list_pop:\n if \"F1\" in item:\n aa = item\n bb = aa.replace(\"F1\", \"F2\")\n if bb in list_pop:\n cc = aa.replace(\"F1\", \"F12\")\n df[cc] = df[aa] - df[bb]\n list_res.append(cc)\n\n elif \"F2\" not in item:\n list_res.append(item)\n\n\n\n bw = df['B_WClass']\n i = -1\n j = df.columns.get_loc('B_WClass')\n\n for item in bw:\n i = i + 1\n if item != item:\n df.iloc[i, j] = np.nan\n else:\n df.iloc[i, j] = get_weight[item]\n\n df['B_WClass'] = df['B_WClass'].astype(float)\n res_df = df[list_res]\n\n return res_df",
"def fetchAndCleanDataframe(self):\n\n df = pd.read_csv('/Users/apple4u/Desktop/goksel tez/results_with_scenarios.csv')\n df.insider_label.fillna(0, inplace=True) # replaces null fields with 0\n df = df.drop(columns=['employee_name', 'scenario', 'role'])\n df = df.rename(columns={'insider_label':'label'})\n #df['label'] = df['insider_label'].astype('int64')\n #df.drop(columns='insider_label', inplace=True)\n df.set_index('user_id', inplace=True)\n X = df.iloc[:, :5].values #fetch all records first 5 columns\n y = df.label.values\n print(df.head())\n return X, y",
"def remove_negative(self, filter_table):\n new_table = []\n for record in filter_table:\n if record[1] < 0.0:\n record[1] = 0.0\n new_table.append(record)\n return new_table",
"def drop_attributes(df, cutoff=25, extra_add=[]):\n\n df_copy = df.copy()\n\n attributs_drop = []\n for var in sorted(df.columns):\n series = df[var]\n perc_missing = 100 - series.count() / len(series) * 100\n\n if perc_missing > cutoff:\n attributs_drop.append(var)\n else:\n continue\n\n if len(extra_add) == 0:\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n else:\n attributs_drop = attributs_drop + extra_add\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n return df_copy",
"def add_climatology_cols(df):\n return df",
"def de_normalize_data(self, df):\n if len(df) == 0:\n return df\n result = df.copy()\n for feature_name in self.continuous_feature_names:\n max_value = self.permitted_range[feature_name][1]\n min_value = self.permitted_range[feature_name][0]\n result[feature_name] = (\n df[feature_name]*(max_value - min_value)) + min_value\n return result",
"def proprocessing(data):\n \n data = data.drop(['ATTR01', 'ATTR02', 'ATTR04', 'ATTR07'], axis = 1)\n X = data.copy()\n X = X.sort_values(by=['ID'])\n X = X.reset_index(drop=True)\n\n df1 = Imputer(X, 'ATTR03')\n df1 = df1.drop(['ID', 'EXECUTIONSTART', 'SCNAME'], axis = 1)\n\n df2 = Imputer(X, 'ATTR05')\n df2 = df2.ATTR05\n\n df = pd.concat([df1, df2], axis = 1)\n df['SCNAME'] = X.SCNAME\n\n for column in df.columns:\n if (df[column] != 0).sum() == 0:\n df = df.drop([column], axis=1)\n \n return df",
"def _unpack_sentiment_data(self):\n get_neg = lambda x: x.get('probability').get('neg')\n get_pos = lambda x: x.get('probability').get('pos')\n get_neutral = lambda x: x.get('probability').get('neutral')\n get_label = lambda x: x.get('label')\n self.dataframe['negative_sentiment'] = self.dataframe['sentiment'].map(get_neg)\n self.dataframe['positive_sentiment'] = self.dataframe['sentiment'].map(get_pos)\n self.dataframe['neutral_sentiment'] = self.dataframe['sentiment'].map(get_neutral)\n self.dataframe['sentiment_label'] = self.dataframe['sentiment'].map(get_label)",
"def preprocess(df):\n df[\"distance\"] = compute_distance(df)\n X_train = df[[\"distance\"]]\n y_train = df[\"fare_amount\"]\n return X_train, y_train",
"def prepareSentimentDataset(df,sentcol='sentiment',listcol='token',labelthreshold_pos = 1,labelthreshold_neg = -1,\\\n keep_neutral = False,train_ratio = 1):\n if not keep_neutral:\n data = df[np.logical_or(df.loc[:,sentcol]>=labelthreshold_pos,df.loc[:,sentcol]<=labelthreshold_neg)]\n else:\n data = df.copy()\n trainindex = len(data.index)*train_ratio\n if train_ratio<1:\n return (data.loc[data.index[:trainindex],listcol],data.loc[data.index[:trainindex],sentcol],\\\n data.loc[data.index[trainindex:],listcol],data.loc[data.index[trainindex:],sentcol])\n else:\n return (data.loc[:,listcol],data.loc[:,sentcol])",
"def construct_train_set(tweet_pos, tweet_neg):\n tweet_pos['pred'] = 1\n tweet_neg['pred'] = 0\n tweet_pos.columns = ['tweet', 'pred']\n tweet_neg.columns = ['tweet', 'pred']\n all_tweets = tweet_neg.append(tweet_pos)\n tweet_TR = all_tweets.reset_index().drop(['index'], axis = 1)\n return tweet_TR",
"def add_pos_features(df: pd.DataFrame, drop_scores=False) -> pd.DataFrame:\n # Distance between left and right points in pairs of limbs\n # relative to image size (Euclidean, horizontal and vertical)\n for point_type in ('elbow', 'wrist', 'knee', 'ankle'):\n d = np.apply_along_axis(\n distance, 1, df[[\n f'left_{point_type}_x', f'left_{point_type}_y',\n f'right_{point_type}_x', f'right_{point_type}_y'\n ]].values)\n df[f'{point_type}s_dist'], df[f'{point_type}s_hor_dist'], \\\n df[f'{point_type}s_vert_dist'] = d.transpose()\n\n # Distance between specific keypoint pairs\n for point_1, point_2 in [('wrist', 'ankle'), ('wrist', 'knee'),\n ('wrist', 'hip'), ('wrist', 'elbow'),\n ('wrist', 'shoulder'), ('wrist', 'ear'),\n ('ankle', 'hip'), ('ankle', 'ear'),\n ('elbow', 'knee'), ('knee', 'hip')]:\n for side_1 in ('left', 'right'):\n for side_2 in ('left', 'right'):\n d = np.apply_along_axis(\n distance, 1, df[[\n f'{side_1}_{point_1}_x', f'{side_1}_{point_1}_y',\n f'{side_2}_{point_2}_x', f'{side_2}_{point_2}_y'\n ]].values)\n df[f'{side_1}_{point_1}_{side_2}_{point_2}_dist'], \\\n df[f'{side_1}_{point_1}_{side_2}_{point_2}_hor_dist'], \\\n df[f'{side_1}_{point_1}_{side_2}_{point_2}_vert_dist'] = d.transpose()\n\n # Relative upper / lower positions of specific keypoints (binary values: 0/1)\n for point_1, point_2 in combinations(['ear', 'hip', 'knee', 'ankle', 'wrist', 'elbow'], 2):\n for side_1 in ('left', 'right'):\n for side_2 in ('left', 'right'):\n df[f'{side_1}_{point_1}_{side_2}_{point_2}'] = np.apply_along_axis(\n is_higher, 1, df[[\n f'{side_1}_{point_1}_y', f'{side_2}_{point_2}_y'\n ]].values)\n\n if drop_scores:\n columns = filter(lambda x: x.find('score') == -1, df.columns)\n df = df[columns]\n\n # print('Positional features added. DataFrame shape:', df.shape)\n\n return df"
]
| [
"0.6133841",
"0.5794669",
"0.57934856",
"0.567667",
"0.5615256",
"0.55364317",
"0.5506884",
"0.54435396",
"0.5381989",
"0.5325307",
"0.53219056",
"0.5312544",
"0.5293739",
"0.5288366",
"0.52574766",
"0.5235385",
"0.51943",
"0.5193476",
"0.51709604",
"0.51699626",
"0.51690656",
"0.514984",
"0.51433945",
"0.5122132",
"0.5121069",
"0.50939584",
"0.50937194",
"0.5076248",
"0.506783",
"0.50589883"
]
| 0.58671474 | 1 |
Make a url for download. This will call safe_url_string and then strip the fragment, if one exists. The path will be normalised. If the path is outside the document root, it will be changed to be within the document root. | def safe_download_url(url, encoding='utf8', path_encoding='utf8'):
safe_url = safe_url_string(url, encoding, path_encoding)
scheme, netloc, path, query, _ = urlsplit(safe_url)
if path:
path = _parent_dirs.sub('', posixpath.normpath(path))
if safe_url.endswith('/') and not path.endswith('/'):
path += '/'
else:
path = '/'
return urlunsplit((scheme, netloc, path, query, '')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _convert_file_to_url(filename, no_file_check = False):\n if no_file_check: # We already a priori know that the path is\n # correct and in its final form.\n return filename\n relpath = os.path.relpath(filename, settings.SENDFILE_ROOT)\n\n url = [settings.SENDFILE_URL]\n\n while relpath:\n relpath, head = os.path.split(relpath)\n url.insert(1, head)\n\n return u'/'.join(url) # Note: xlates from os.path.sep to '/'",
"def make_safe_url(self, url):\n\n # Split the URL into scheme, netloc, path, query and fragment\n parts = list(urlsplit(url))\n\n # Clear scheme and netloc and rebuild URL\n parts[0] = '' # Empty scheme\n parts[1] = '' # Empty netloc (hostname:port)\n safe_url = urlunsplit(parts)\n return safe_url",
"def _get_file_url(path):\n return urlparse.urljoin(BASE_URL, path)",
"def fix_url_path(url: str) -> str:\n return url if url.endswith(\"/\") else url + \"/\"",
"def _fullpath(self, path):\n splitpath = path.split(self._baseurl, 2)\n if len(splitpath) == 1:\n result = os.path.join(self._baseurl, path)\n else:\n result = path # path contains baseurl already\n return result",
"def _make_url(self, path):\n if not self.base_location:\n raise ValueError(\"No base_location set. Cannot construct url.\")\n\n if path:\n path = self._normalise_last_slashes(path)\n path = self._normalise_head_slashes(path)\n\n return \"\".join((self.base_location, self.endpoint, path))",
"def _absurl(fragment):\r\n root = settings.MEDIA_URL\r\n root += root[-1:] != '/' and '/' or ''\r\n return urlparse.urljoin(root, fragment)",
"def full_url(self, path):\n if path[0] == '/':\n path = path[1:]\n return urljoin(self.absolute_root, path)",
"def build_absolute_url(self, path_or_url):\n return urllib.parse.urljoin(self.parsed_url.geturl(), path_or_url)",
"def path_to_url(path):\r\n if os.sep == '/':\r\n return path\r\n else:\r\n return '/'.join(split_all(path))",
"def make_url_safe(url):\n if not urlparse(url).scheme:\n url = \"http://\" + url\n\n parsed_url = urlparse(url)\n\n safe_path = urls.url_fix(parsed_url.path)\n\n return parsed_url.scheme + '://' + parsed_url.netloc + safe_path",
"def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url",
"def _local_path_from_url(url):\n filename = '{}.epub'.format(hashlib.sha224(url).hexdigest())\n return os.path.join(books_settings.LOCAL_BOOKS_PATH, filename)",
"def build_url(base_url, path):\n if absolute_http_url_regexp.match(path):\n return path\n elif base_url:\n return \"{}/{}\".format(base_url.rstrip(\"/\"), path.lstrip(\"/\"))\n else:\n raise exceptions.ParamsError(\"base url missed!\")",
"def normalize(seed_url, link):\n link, _ = urldefrag(link) # remove hash to avoid duplicates\n return urljoin(seed_url, link)",
"def _process_resource(self, url):\n url_parts = urlparse.urlsplit(url)\n rel_path = url_parts.path[1:]\n fs_path = os.path.join(self.fileserver_path, rel_path)\n self.logger.info('Downloading {0} to {1}'.format(url, fs_path))\n self._execute_command('curl --create-dirs -Lo {0} {1}'\n .format(fs_path, url), retries=2)\n url = url.replace(url_parts.netloc, self.fs_base_url)\n url = url.replace(url_parts.scheme, 'http')\n return url",
"def normalize_filename(url):\n fname = url.replace('file://', '')\n if os.sep != '/' and not os.path.exists(fname):\n fname = fname.lstrip('/')\n return fname",
"def url_to_filename(base_url: str):\n\n # Cuts fluff like 'http://' ~and 'www'~\n base_url = prune_url(base_url, cut_chars=[\"https://\", \"http://\", \"www.\"])\n\n # Covers both Windows and Unix\n forbidden_ascii = [\"/\", \"\\\\\", \"\\|\", \":\", \"?\", \"'\", '\"', \"?\", \"*\", \">\", \"<\"]\n\n for ascii_char in forbidden_ascii:\n base_url = base_url.replace(ascii_char, \"-\")\n\n # Replaces '.' with '_' just in case there are file extension issues\n filename_url = base_url.replace(\".\", \"_\")\n\n return filename_url",
"def get_url_straight_filename(url, strip=[], allowdir=False):\n path = urlunquote(urlsplit(url).path)\n path_parts = path.split('/')\n\n if allowdir:\n # strip empty ones\n while len(path_parts) > 1 and not path_parts[-1]:\n path_parts = path_parts[:-1]\n\n if strip:\n while path_parts and path_parts[-1] in strip:\n path_parts = path_parts[:-1]\n\n if path_parts:\n return path_parts[-1]\n else:\n return None",
"def create_absolute_url(path: str) -> str:\n domain = settings.ALLOWED_HOSTS[0]\n return \"https://{domain}{path}\".format(domain=domain, path=path)",
"def _transform_gdrive_url(self):\n fileid = self.parsed.path.replace('/file/d/', '').split('/')[0]\n self.url = self.GDRIVE_LINK_TEMPLATE.format(fileid=fileid)",
"def sanitize_url(self, url):\r\n if not self.markdown.safeMode:\r\n # Return immediately bipassing parsing.\r\n return url\r\n \r\n try:\r\n scheme, netloc, path, params, query, fragment = url = urlparse(url)\r\n except ValueError:\r\n # Bad url - so bad it couldn't be parsed.\r\n return ''\r\n \r\n locless_schemes = ['', 'mailto', 'news']\r\n if netloc == '' and scheme not in locless_schemes:\r\n # This fails regardless of anything else. \r\n # Return immediately to save additional proccessing\r\n return ''\r\n\r\n for part in url[2:]:\r\n if \":\" in part:\r\n # Not a safe url\r\n return ''\r\n\r\n # Url passes all tests. Return url as-is.\r\n return urlunparse(url)",
"def getCompleteUrl(urlPath: str) -> str:\n return os.path.join(BASE_URL, urlPath) if urlPath else BASE_URL",
"def cleanUri(uri):\n if not uri.startswith(\"/\") and not uri.startswith('http'):\n uri = \"/\" + uri\n\n if 'http://' in uri or 'https://' in uri:\n uri = uri.split('://')[0] + '://' + \\\n uri.split('://')[1].replace(\"//\", \"/\")\n else:\n uri = uri.replace(\"//\", \"/\")\n\n if uri.endswith(\"/\"):\n uri = uri[:-1]\n\n return uri",
"def get_file_url(path, config):\n file_url_regex = re.compile(config['file_url_regex'])\n new_path = re.sub(file_url_regex, config['file_url_base'], path)\n return new_path",
"def download_relative_filename(url, output_base, verbose=False):\n \n p = urlparse(url)\n # remove the leading '/'\n assert p.path.startswith('/'); relative_filename = p.path[1:]\n destination_filename = os.path.join(output_base,relative_filename)\n download_url(url, destination_filename, verbose=verbose)",
"def normalize_cdmi_url(self, path):\n # Turn URL path into OS path for manipulation\n mypath = url2pathname(path)\n if not os.path.isabs(mypath):\n mypath = os.path.join(url2pathname(self.pwd()), mypath)\n # normalize path\n mypath = os.path.normpath(mypath)\n if path.endswith(\"/\") and not mypath.endswith(\"/\"):\n mypath += \"/\"\n url = self.cdmi_url + pathname2url(mypath)\n return url",
"def clean_url(url: str) -> str:\n r = urlparse(url)\n parts = list(r)\n # Add a / to the end of the path if it isn't there\n if not parts[2].endswith(\"/\"):\n parts[2] += \"/\"\n return urlunparse(parts)",
"def _get_file_url (url, path) :\n path = path + \"/\" + url.replace (\"/\", \"!\").replace (\":\",\"\").replace (\".\",\"-\")\n spl = path.split (\"-\")\n if len (spl) >= 2 :\n ext = spl [len (spl)-1].lower ()\n if 2 <= len (ext) <= 3 and ext in [\"png\", \"jpg\", \"zip\", \"txt\", \"gif\", \"py\", \"cpp\", \"gz\", \"pdf\", \"tif\", \"py\", \"html\", \"h\"] :\n spl = path.split (\"-\")\n spl = spl [:len(spl)-1]\n path = \"-\".join (spl) + \".\" + ext\n return path",
"def book_rel_url_to_book_abs_url(relative_url):\n return \"https://books.toscrape.com/catalogue/\" + relative_url.removeprefix('../../../')"
]
| [
"0.6344203",
"0.6195235",
"0.6166916",
"0.60148406",
"0.60115445",
"0.59931517",
"0.5974618",
"0.596129",
"0.5951785",
"0.594464",
"0.5894027",
"0.587222",
"0.5842856",
"0.57147574",
"0.55796796",
"0.5578619",
"0.55382127",
"0.55297405",
"0.5521063",
"0.5509687",
"0.5499138",
"0.54859674",
"0.5482203",
"0.5470199",
"0.54548734",
"0.5435615",
"0.54347104",
"0.5429765",
"0.5406848",
"0.54054767"
]
| 0.65303683 | 0 |
Clean URL arguments leaving only those passed in the parameterlist keeping order >>> import w3lib.url >>> w3lib.url.url_query_cleaner("product.html?id=200&foo=bar&name=wired", ('id',)) 'product.html?id=200' >>> w3lib.url.url_query_cleaner("product.html?id=200&foo=bar&name=wired", ['id', 'name']) 'product.html?id=200&name=wired' >>> If `unique` is ``False``, do not remove duplicated keys >>> w3lib.url.url_query_cleaner("product.html?d=1&e=b&d=2&d=3&other=other", ['d'], unique=False) 'product.html?d=1&d=2&d=3' >>> If `remove` is ``True``, leave only those not in parameterlist. >>> w3lib.url.url_query_cleaner("product.html?id=200&foo=bar&name=wired", ['id'], remove=True) 'product.html?foo=bar&name=wired' >>> w3lib.url.url_query_cleaner("product.html?id=2&foo=bar&name=wired", ['id', 'foo'], remove=True) 'product.html?name=wired' >>> By default, URL fragments are removed. If you need to preserve fragments, pass the ``keep_fragments`` argument as ``True``. | def url_query_cleaner(url, parameterlist=(), sep='&', kvsep='=', remove=False, unique=True, keep_fragments=False):
if isinstance(parameterlist, (six.text_type, bytes)):
parameterlist = [parameterlist]
url, fragment = urldefrag(url)
base, _, query = url.partition('?')
seen = set()
querylist = []
for ksv in query.split(sep):
if not ksv:
continue
k, _, _ = ksv.partition(kvsep)
if unique and k in seen:
continue
elif remove and k in parameterlist:
continue
elif not remove and k not in parameterlist:
continue
else:
querylist.append(ksv)
seen.add(k)
url = '?'.join([base, sep.join(querylist)]) if querylist else base
if keep_fragments:
url += '#' + fragment
return url | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_remove_with_multiple_removes(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" \"bar\" \"baz=1\" %}',\n query_str='foo=foo&bar=bar&foo=&baz=1&qux=qux')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('qux=qux'))",
"def clean_url(url: str, keys: List[str]) -> str:\n u = urlparse(url)\n query = parse_qs(u.query, keep_blank_values=True)\n\n for key in keys:\n query.pop(key, None)\n\n u = u._replace(query=urlencode(query, True))\n \n return urlunparse(u)",
"def _clean_url(self, url):\n return \"\".join(url.split(\"?\")[:1])",
"def normalize_url(url, unsplit=True, sort_query=True, strip_authentication=True,\n strip_trailing_slash=True, strip_index=True, strip_protocol=True,\n strip_irrelevant_subdomains=True, strip_lang_subdomains=False, strip_lang_query_items=False,\n strip_fragment='except-routing', normalize_amp=True, fix_common_mistakes=True,\n infer_redirection=True, quoted=True):\n original_url_arg = url\n\n if infer_redirection:\n url = resolve(url)\n\n if isinstance(url, SplitResult):\n has_protocol = bool(splitted.scheme)\n splitted = url\n else:\n has_protocol = PROTOCOL_RE.match(url)\n\n # Ensuring scheme so parsing works correctly\n if not has_protocol:\n url = 'http://' + url\n\n # Parsing\n try:\n splitted = urlsplit(url)\n except ValueError:\n return original_url_arg\n\n scheme, netloc, path, query, fragment = splitted\n\n # Fixing common mistakes\n if fix_common_mistakes:\n if query:\n query = re.sub(MISTAKES_RE, '&', query)\n\n # Handling punycode\n netloc = decode_punycode(netloc)\n\n # Dropping :80 & :443\n if netloc.endswith(':80'):\n netloc = netloc[:-3]\n elif netloc.endswith(':443'):\n netloc = netloc[:-4]\n\n # Normalizing the path\n if path:\n trailing_slash = False\n if path.endswith('/') and len(path) > 1:\n trailing_slash = True\n path = normpath(path)\n if trailing_slash and not strip_trailing_slash:\n path = path + '/'\n\n # Handling Google AMP suffixes\n if normalize_amp:\n path = AMP_SUFFIXES_RE.sub('', path)\n\n # Dropping index:\n if strip_index:\n segments = path.rsplit('/', 1)\n\n if len(segments) != 0:\n last_segment = segments[-1]\n filename, ext = splitext(last_segment)\n\n if filename == 'index':\n segments.pop()\n path = '/'.join(segments)\n\n # Dropping irrelevant query items\n if query:\n domain_filter = None\n\n if splitted.hostname:\n domain_filter = next(\n (f for d, f in PER_DOMAIN_QUERY_FILTERS if splitted.hostname.endswith(d)),\n None\n )\n\n qsl = parse_qsl(query, keep_blank_values=True)\n qsl = [\n stringify_qs(item)\n for item in qsl\n if not should_strip_query_item(\n item,\n normalize_amp=normalize_amp,\n strip_lang_query_items=strip_lang_query_items,\n domain_filter=domain_filter\n )\n ]\n\n if sort_query:\n qsl = sorted(qsl)\n\n query = '&'.join(qsl)\n\n # Dropping fragment if it's not routing\n if fragment and strip_fragment:\n if strip_fragment is True or not should_strip_fragment(fragment):\n fragment = ''\n\n # Always dropping trailing slash with empty query & fragment\n if path == '/' and not fragment and not query:\n path = ''\n\n # Dropping irrelevant subdomains\n if strip_irrelevant_subdomains:\n netloc = re.sub(\n IRRELEVANT_SUBDOMAIN_AMP_RE if normalize_amp else IRRELEVANT_SUBDOMAIN_RE,\n '',\n netloc\n )\n\n # Dropping language as subdomains\n if strip_lang_subdomains:\n netloc = strip_lang_subdomains_from_netloc(netloc)\n\n # Dropping scheme\n if strip_protocol or not has_protocol:\n scheme = ''\n\n # Dropping authentication\n if strip_authentication:\n netloc = netloc.split('@', 1)[-1]\n\n # Normalizing AMP subdomains\n if normalize_amp and netloc.startswith('amp-'):\n netloc = netloc[4:]\n\n # Dropping trailing slash\n if strip_trailing_slash and path.endswith('/'):\n path = path.rstrip('/')\n\n # Quoting or not\n if quoted:\n path = quote(path)\n query = quote(query, RESERVED_CHARACTERS)\n fragment = quote(fragment, SAFE_CHARACTERS)\n else:\n path = unquote(path)\n query = unquote(query)\n fragment = unquote(fragment)\n\n # Result\n result = SplitResult(\n scheme,\n netloc.lower(),\n path,\n query,\n fragment\n )\n\n if not unsplit:\n return 
result\n\n # TODO: check if works with `unsplit=False`\n if strip_protocol or not has_protocol:\n result = urlunsplit(result)[2:]\n else:\n result = urlunsplit(result)\n\n return result",
"def normalize_params(url, params):\n # parse the url\n parse = urlparse(url)\n\n # Get the query list\n qs_list = parse_qsl(parse.query, keep_blank_values=True)\n must_encode = False if parse.query == urllib.parse.unquote(parse.query) else True\n if params is None:\n combined_list = qs_list\n else:\n # Needs to be encoded before sorting\n combined_list = [encode_pair(must_encode, key, value) for (key, value) in list(qs_list)]\n combined_list += params.items()\n\n encoded_list = [\"%s=%s\" % (key, value) for (key, value) in combined_list]\n sorted_list = sorted(encoded_list, key=lambda x: x)\n\n return \"&\".join(sorted_list)",
"def clean_params(self, url):\n if isinstance(url, unicode):\n url = url.encode(\"utf-8\")\n parts = list(urlparse.urlsplit(url))\n if not parts[3]:\n return url\n query = urlparse.parse_qsl(parts[3])\n query = [q for q in query if self._is_param_allowed(*q)]\n if query:\n parts[3] = urllib.urlencode(query)\n else:\n parts[3] = ''\n return urlparse.urlunsplit(parts).decode(\"utf-8\")",
"def get_query_string(p, new_params=None, remove=None):\n if new_params is None:\n new_params = {}\n if remove is None:\n remove = []\n\n for r in remove:\n for k in p.keys():\n if k.startswith(r):\n del p[k]\n for k, v in new_params.items():\n if k in p and v is None:\n del p[k]\n elif v is not None:\n p[k] = v\n return mark_safe(\n '?' + '&'.join(\n [u'%s=%s' % (k, v) for k, v in p.items()]\n ).replace(' ', '%20')\n )",
"def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()",
"def test_remove_with_multiple_specific_values(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=1\" \"foo=2\" %}',\n query_str='foo=1&foo=2&foo=3')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('foo=3'))",
"def test_additional_query_args():\n assert (normalize_url(\"http://example.com?c=d\", [(\"a\", \"b\")]) ==\n \"http://example.com/?a=b&c=d\")\n assert (normalize_url(\"http://example.com\", [(\"a\", \"b\")]) ==\n \"http://example.com/?a=b\")\n assert (normalize_url(\"http://example.com\", [(\"résumé\", \"résumé\")]) ==\n \"http://example.com/?r%C3%A9sum%C3%A9=r%C3%A9sum%C3%A9\")",
"def test_remove_for_specific_key_value_pairs(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"a=4\" %}',\n query_str='a=1&a=2&a=3&a=4')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('a=1&a=2&a=3&'))",
"def test_drop_fragments():\n assert (normalize_url(\"http://example.com/a?b=1#frag\")\n == \"http://example.com/a?b=1\")\n assert (normalize_url(\"http://example.com/a?b=1#frag\", drop_fragments=False)\n == \"http://example.com/a?b=1#frag\")",
"def test_query_sorting():\n assert (normalize_url('http://example.com/a?b=1&c=2') ==\n 'http://example.com/a?b=1&c=2')\n assert (normalize_url('http://example.com/a?c=2&b=1') ==\n 'http://example.com/a?b=1&c=2')",
"def test_remove_with_key_appearing_multiple_times(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&foo=bar&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))",
"def _filter_duplicate_urls(urls: list) -> set:\n clean_urls = set()\n for url in urls:\n cleaned_url = url.split(\"&sa=U\")[0]\n clean_urls.add(cleaned_url)\n return clean_urls",
"def test_remove_with_basic_usage(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('bar=bar'))",
"def good_url(a, start_url):\n for i in range(len(a)):\n par=a[i].find('?')\n if par!=-1:\n a[i]=a[i][:par]\n anc=a[i].find('#')\n if anc!=-1:\n a[i]=a[i][:anc]\n if a[i]!='' and a[i][0]=='/':\n a[i]=str(start_url)+a[i][1:i]\n #print(a[i]) \n return list(set(a))",
"def test_query_string():\n assert (normalize_url(\"http://example.com/?a=1\") ==\n \"http://example.com/?a=1\")\n assert (normalize_url(\"http://example.com?a=1\") ==\n \"http://example.com/?a=1\")\n assert (normalize_url(\"http://example.com/a?b=1\") ==\n \"http://example.com/a?b=1\")\n assert (normalize_url(\"http://example.com/a/?b=1\") ==\n \"http://example.com/a?b=1\")",
"def url_parse_query(query, encoding=None):\n if isinstance(query, unicode):\n if encoding is None:\n encoding = url_encoding\n query = query.encode(encoding, 'ignore')\n query = query.replace('?', '')\n\n l = set()\n for k, v, sep in parse_qsl(query, True):\n k = url_quote_part(k, '/-:,;')\n if not k:\n continue\n if v:\n v = url_quote_part(v, '/-:,;')\n l.add(\"%s=%s\" % (k, v))\n elif v is None:\n l.add(\"%s\" % k)\n else:\n # some sites do not work when the equal sign is missing\n l.add(\"%s=\" % k)\n query = '&'.join(sorted(l))\n return query",
"def clean_url(url):\n return url[:url.find('?')]",
"def __clean_url(links_titles):\n clean_urls = []\n for url, title, flag in links_titles:\n duplicates_words = []\n unique_words = []\n for word in str(url).rstrip('/').split('/'):\n if word not in unique_words:\n unique_words.append(word)\n else:\n if word not in duplicates_words:\n duplicates_words.append(word)\n url = str(url).replace(word+'/', '', 1)\n clean_urls.append((url, title, flag))\n return clean_urls",
"def query_params_sanitize(query_params):\n def allow_func(n, v):\n # This gets rid of any params beginning with \"oauth_\"\n if not n.startswith(\"oauth_\"):\n return True\n else:\n logging.warning(\"Protocol parameter ignored from URL query parameters: `%r`\", n)\n return False\n return query_filter(query_params, allow_func=allow_func)",
"def test_remove_with_no_key(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"=foo\" %}',\n query_str='foo=foo&foo=bar&baz=baz&=foo')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&foo=bar&baz=baz'))",
"def test_remove_with_key_not_in_querystring(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"baz\" %}',\n query_str='foo=foo&bar=bar')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]),\n QueryDict('foo=foo&bar=bar'))",
"def test_remove_with_no_value(self):\n rendered_result = self._render_tag(\n tag='{% querystring \"remove\" \"foo=\" %}',\n query_str='foo=foo&foo=bar&foo=&baz=baz')\n\n self.assertTrue(rendered_result.startswith('?'))\n self.assertEqual(QueryDict(rendered_result[1:]), QueryDict('baz=baz'))",
"def _clean_kwargs(keep_name=False, **kwargs):\n if \"name\" in kwargs and not keep_name:\n kwargs[\"name_or_id\"] = kwargs.pop(\"name\")\n\n return __utils__[\"args.clean_kwargs\"](**kwargs)",
"def _clean_kwargs(keep_name=False, **kwargs):\n if \"name\" in kwargs and not keep_name:\n kwargs[\"name_or_id\"] = kwargs.pop(\"name\")\n\n return __utils__[\"args.clean_kwargs\"](**kwargs)",
"def _clean_kwargs(self, kwargs, fn):\n # Do not do the cleaning if server config\n # doesnt ask to ignore\n if not self.server.IGNORE_UNEXPECTED_KWARGS:\n return kwargs\n\n expected_kwargs = set(inspect.getargspec(fn).args)\n got_kwargs = set(kwargs.keys())\n unexpected_kwargs = got_kwargs - expected_kwargs\n for k in unexpected_kwargs:\n del kwargs[k]\n\n return kwargs",
"def test_clean_query_params(self):\n original_query_params = {\n \"name\": \"FAKE-NAME\",\n \"hostname\": \"FAKE-HOSTNAME\",\n \"version\": None,\n }\n\n cleaned_query_params = {\"name\": \"FAKE-NAME\", \"hostname\": \"FAKE-HOSTNAME\"}\n\n client = Client()\n original_query_params = client._clean_query_params(original_query_params)\n assert original_query_params == cleaned_query_params",
"def _split_url_string(query_string):\r\n parameters = parse_qs(to_utf8(query_string), keep_blank_values=True)\r\n for k, v in parameters.iteritems():\r\n parameters[k] = urllib.unquote(v[0])\r\n return parameters"
]
| [
"0.60887784",
"0.59742236",
"0.586768",
"0.5849657",
"0.5837156",
"0.57429856",
"0.5674179",
"0.56159896",
"0.55954355",
"0.5503327",
"0.54211736",
"0.5406335",
"0.5371822",
"0.53556895",
"0.5342783",
"0.5247226",
"0.52447885",
"0.5231707",
"0.5206317",
"0.5175111",
"0.5144682",
"0.51444477",
"0.51276356",
"0.51198864",
"0.51037234",
"0.50983965",
"0.50983965",
"0.50916094",
"0.5091588",
"0.50886476"
]
| 0.80487144 | 0 |
Add or remove a parameter to a given url >>> import w3lib.url | def add_or_replace_parameter(url, name, new_value):
return _add_or_replace_parameters(url, {name: new_value}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_query_parameter(url, param_name, param_value):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n\n deleted = False\n for _, value in query_params.items():\n if param_value in value:\n deleted = True\n\n if deleted:\n query_params.pop(param_name, None)\n clear = True\n if not deleted:\n query_params[param_name] = param_value\n clear = False\n\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n\n return url, clear",
"def delete_query_parameter(url, param_name):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params.pop(param_name, None)\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n\n return url",
"def add_or_replace_parameters(url, new_parameters):\n return _add_or_replace_parameters(url, new_parameters)",
"def _extend_url(self, url, params):\n # filter out None parameters\n params = {k:v for k,v in params.items() if v is not None}\n for key in params:\n url = url + \"&{}={}\".format(key, params[key])\n return url",
"def set_url_query_param(url: str, param_name: str, param_value: str):\n parsed_url: ParseResult = urlparse(url)\n\n query_params: dict = dict(parse_qsl(parsed_url.query))\n query_params[param_name] = param_value\n new_query_string = urlencode(query_params)\n\n return urlunparse((\n parsed_url.scheme,\n parsed_url.netloc,\n parsed_url.path,\n parsed_url.params,\n new_query_string,\n parsed_url.fragment,\n ))",
"def set_params(url, params):\n components = urlparse(url)\n\n query = parse_qs(components.query)\n query.update(params)\n\n components = components._replace(query=urlencode(query, doseq=True))\n return urlunparse(components)",
"def url_update(url):\n url_lst = url.split('&')\n start_str = url_lst[1]\n max_results_str = url_lst[2]\n idx1, idx2 = start_str.find('='), max_results_str.find('=')\n num1, num2 = int(start_str[idx1+1:]), int(max_results_str[idx2+1:])\n url_lst[1] = 'start=' + str(num1+num2)\n return '&'.join(url_lst)",
"def url_set(self, url):\n self.request('/v1.1/url', 'POST', body={'url': url})",
"def merge_url(url, params):\n req = PreparedRequest()\n req.prepare_url(url, params)\n return req.url",
"def parameterised_url(url, params):\n url_parts = list(urlparse.urlparse(url))\n query = dict(urlparse.parse_qsl(url_parts[4]))\n query.update(params)\n url_parts[4] = urlparse.urlencode(query)\n url = urlparse.urlunparse(url_parts)\n return url",
"def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()",
"def append_to_query_string(url, key, value) -> str:\n url = list(urlparse(url))\n query = dict(parse_qsl(url[4]))\n query[key] = value\n url[4] = '&'.join(f'{p}={v}' for p, v in query.items())\n\n return urlunparse(url)",
"def append_query_param(url: str, key: str, value: str) -> str:\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)",
"def replace_query_params(cls, url: str, **params: Mapping[str, str]) -> str:\n url, _ = cls.separate_query_params(url, params.keys())\n return cls.add_query_params(url, **params)",
"def set_url_param(parser, token):\r\n bits = token.contents.split()\r\n qschanges = {}\r\n for i in bits[1:]:\r\n try:\r\n key, value = i.split('=', 1)\r\n key = key.strip()\r\n value = value.strip()\r\n key_line_iter = six.StringIO(key).readline\r\n keys = list(tokenize.generate_tokens(key_line_iter))\r\n if keys[0][0] == tokenize.NAME:\r\n # workaround bug #5270\r\n value = Variable(value) if value == '\"\"' else parser.compile_filter(value)\r\n qschanges[str(key)] = value\r\n else:\r\n raise ValueError\r\n except ValueError:\r\n raise TemplateSyntaxError(\"Argument syntax wrong: should be\"\r\n \"key=value\")\r\n return SetUrlParamNode(qschanges)",
"def set_query_parameters(url, params):\n url_parts = list(urlparse(url))\n\n query = dict(parse_qsl(url_parts[4]))\n query.update(params)\n url_parts[4] = urlencode(query)\n\n return urlunparse(url_parts)",
"def add_new_url(self, url):\n if url is None:\n return \n if url not in self.new_urls and url not in self.old_urls:\n self.new_urls.add(url)",
"def url_query_cleaner(url, parameterlist=(), sep='&', kvsep='=', remove=False, unique=True, keep_fragments=False):\n\n if isinstance(parameterlist, (six.text_type, bytes)):\n parameterlist = [parameterlist]\n url, fragment = urldefrag(url)\n base, _, query = url.partition('?')\n seen = set()\n querylist = []\n for ksv in query.split(sep):\n if not ksv:\n continue\n k, _, _ = ksv.partition(kvsep)\n if unique and k in seen:\n continue\n elif remove and k in parameterlist:\n continue\n elif not remove and k not in parameterlist:\n continue\n else:\n querylist.append(ksv)\n seen.add(k)\n url = '?'.join([base, sep.join(querylist)]) if querylist else base\n if keep_fragments:\n url += '#' + fragment\n return url",
"def add_query_params(url: str, additional_params: dict) -> str:\n url_components = urlparse(url)\n original_params = parse_qs(url_components.query)\n # Before Python 3.5 you could update original_params with\n # additional_params, but here all the variables are immutable.\n merged_params = {**original_params, **additional_params}\n updated_query = urlencode(merged_params, doseq=True)\n # _replace() is how you can create a new NamedTuple with a changed field\n return url_components._replace(query=updated_query).geturl()",
"def add_url(p_id, url):\n for product in all_products:\n if product['id'] == p_id:\n product['url'] = url\n product['product_id'] = p_id\n product.move_to_end('product_id', last=False)",
"def add_query_param(request, key, val):\n iri = request.get_full_path()\n uri = iri_to_uri(iri)\n return escape(replace_query_param(uri, key, val))",
"def add_parameters_to_url(path, **kwargs):\n return path + \"?\" + urllib.urlencode(kwargs)",
"def add_arguments(url, args):\n chunks = list(urlparse(url))\n qs = parse_qsl(chunks[4])\n qs += args\n chunks[4] = urlencode(qs)\n return urlunparse(chunks)",
"def add_fragment(url, args):\n chunks = list(urlparse(url))\n chunks[5] = urlencode(args)\n return urlunparse(chunks)",
"def fix_url(cls, url: str):\r\n ...",
"def remove_id(url):\n u = urlparse(url)\n query = parse_qs(u.query, keep_blank_values=True)\n query.pop(\"eo_id\", None)\n u = u._replace(query=urlencode(query, True))\n return urlunparse(u)",
"def set_url(self, url):\n self.url = url",
"def url_replace(request, field, value):\n _dict = request.GET.copy()\n _dict[field] = value\n return _dict.urlencode()",
"def fixup_parameters(url, backend):\n result = url\n if backend == \"django\":\n result = url.replace(\"{\", \"(?P<\").replace(\"}\", \">.+)\")\n\n return result",
"def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url"
]
| [
"0.6787713",
"0.6758425",
"0.67240405",
"0.6605619",
"0.64564407",
"0.639294",
"0.63426834",
"0.6334928",
"0.63221145",
"0.62897855",
"0.6280858",
"0.6114451",
"0.6068753",
"0.60241115",
"0.6005309",
"0.5964446",
"0.5886868",
"0.588532",
"0.5874374",
"0.5850072",
"0.5789708",
"0.5776217",
"0.5753656",
"0.5737776",
"0.5665912",
"0.56601685",
"0.5643052",
"0.5633101",
"0.5599003",
"0.55916613"
]
| 0.73892564 | 0 |
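A minimal usage sketch for the `add_or_replace_parameter` row above, assuming the `w3lib.url` module hinted at by the truncated doctest in the query; the exact ordering/encoding of the returned query string is an assumption and may differ between w3lib versions.

>>> import w3lib.url
>>> w3lib.url.add_or_replace_parameter('http://example.com/?id=1', 'arg', 'v')
'http://example.com/?id=1&arg=v'
>>> w3lib.url.add_or_replace_parameter('http://example.com/?id=1', 'id', '2')
'http://example.com/?id=2'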
Add or replace parameters in a given url >>> import w3lib.url | def add_or_replace_parameters(url, new_parameters):
return _add_or_replace_parameters(url, new_parameters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _extend_url(self, url, params):\n # filter out None parameters\n params = {k:v for k,v in params.items() if v is not None}\n for key in params:\n url = url + \"&{}={}\".format(key, params[key])\n return url",
"def add_or_replace_parameter(url, name, new_value):\n return _add_or_replace_parameters(url, {name: new_value})",
"def set_params(url, params):\n components = urlparse(url)\n\n query = parse_qs(components.query)\n query.update(params)\n\n components = components._replace(query=urlencode(query, doseq=True))\n return urlunparse(components)",
"def merge_url(url, params):\n req = PreparedRequest()\n req.prepare_url(url, params)\n return req.url",
"def parameterised_url(url, params):\n url_parts = list(urlparse.urlparse(url))\n query = dict(urlparse.parse_qsl(url_parts[4]))\n query.update(params)\n url_parts[4] = urlparse.urlencode(query)\n url = urlparse.urlunparse(url_parts)\n return url",
"def set_query_parameter(url, param_name, param_value):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n\n deleted = False\n for _, value in query_params.items():\n if param_value in value:\n deleted = True\n\n if deleted:\n query_params.pop(param_name, None)\n clear = True\n if not deleted:\n query_params[param_name] = param_value\n clear = False\n\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n\n return url, clear",
"def url_update(url):\n url_lst = url.split('&')\n start_str = url_lst[1]\n max_results_str = url_lst[2]\n idx1, idx2 = start_str.find('='), max_results_str.find('=')\n num1, num2 = int(start_str[idx1+1:]), int(max_results_str[idx2+1:])\n url_lst[1] = 'start=' + str(num1+num2)\n return '&'.join(url_lst)",
"def set_query_parameters(url, params):\n url_parts = list(urlparse(url))\n\n query = dict(parse_qsl(url_parts[4]))\n query.update(params)\n url_parts[4] = urlencode(query)\n\n return urlunparse(url_parts)",
"def replace_query_params(cls, url: str, **params: Mapping[str, str]) -> str:\n url, _ = cls.separate_query_params(url, params.keys())\n return cls.add_query_params(url, **params)",
"def delete_query_parameter(url, param_name):\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params.pop(param_name, None)\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n\n return url",
"def add_query_params(url: str, additional_params: dict) -> str:\n url_components = urlparse(url)\n original_params = parse_qs(url_components.query)\n # Before Python 3.5 you could update original_params with\n # additional_params, but here all the variables are immutable.\n merged_params = {**original_params, **additional_params}\n updated_query = urlencode(merged_params, doseq=True)\n # _replace() is how you can create a new NamedTuple with a changed field\n return url_components._replace(query=updated_query).geturl()",
"def url_query_cleaner(url, parameterlist=(), sep='&', kvsep='=', remove=False, unique=True, keep_fragments=False):\n\n if isinstance(parameterlist, (six.text_type, bytes)):\n parameterlist = [parameterlist]\n url, fragment = urldefrag(url)\n base, _, query = url.partition('?')\n seen = set()\n querylist = []\n for ksv in query.split(sep):\n if not ksv:\n continue\n k, _, _ = ksv.partition(kvsep)\n if unique and k in seen:\n continue\n elif remove and k in parameterlist:\n continue\n elif not remove and k not in parameterlist:\n continue\n else:\n querylist.append(ksv)\n seen.add(k)\n url = '?'.join([base, sep.join(querylist)]) if querylist else base\n if keep_fragments:\n url += '#' + fragment\n return url",
"def add_arguments(url, args):\n chunks = list(urlparse(url))\n qs = parse_qsl(chunks[4])\n qs += args\n chunks[4] = urlencode(qs)\n return urlunparse(chunks)",
"def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url",
"def append_query_params(original_url, **kwargs):\n scheme, netloc, path, query_string, fragment = urlsplit(original_url)\n query_params = parse_qs(query_string)\n if kwargs is not None:\n for key, value in kwargs.items():\n query_params[key] = [value]\n\n new_query_string = urlencode(query_params, doseq=True)\n new_url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return new_url",
"def set_url_query_param(url: str, param_name: str, param_value: str):\n parsed_url: ParseResult = urlparse(url)\n\n query_params: dict = dict(parse_qsl(parsed_url.query))\n query_params[param_name] = param_value\n new_query_string = urlencode(query_params)\n\n return urlunparse((\n parsed_url.scheme,\n parsed_url.netloc,\n parsed_url.path,\n parsed_url.params,\n new_query_string,\n parsed_url.fragment,\n ))",
"def url_set(self, url):\n self.request('/v1.1/url', 'POST', body={'url': url})",
"def append_to_query_string(url, key, value) -> str:\n url = list(urlparse(url))\n query = dict(parse_qsl(url[4]))\n query[key] = value\n url[4] = '&'.join(f'{p}={v}' for p, v in query.items())\n\n return urlunparse(url)",
"def add_query_params(\n url: str, **params: Mapping[str, Union[str, List[str]]]\n ) -> str:\n o = urlparse(url)\n qp = parse_qs(o.query, keep_blank_values=True)\n\n for k, v in params.items():\n if isinstance(v, str):\n v = [v]\n try:\n qp[k].extend(v)\n except KeyError:\n qp[k] = v\n\n qs = urlencode(qp, doseq=True, quote_via=quote)\n return urlunparse(o._replace(query=qs))",
"def add_parameters_to_url(path, **kwargs):\n return path + \"?\" + urllib.urlencode(kwargs)",
"def clean_params(self, url):\n if isinstance(url, unicode):\n url = url.encode(\"utf-8\")\n parts = list(urlparse.urlsplit(url))\n if not parts[3]:\n return url\n query = urlparse.parse_qsl(parts[3])\n query = [q for q in query if self._is_param_allowed(*q)]\n if query:\n parts[3] = urllib.urlencode(query)\n else:\n parts[3] = ''\n return urlparse.urlunsplit(parts).decode(\"utf-8\")",
"def param_remove(params, arg):\n d = params.copy()\n if arg in d:\n del d[arg]\n return d.urlencode()",
"def url_add_query(url, extra_query_params, allow_func=None):\n scheme, netloc, path, params, query, fragment = urlparse_normalized(url)\n\n d = query_add(query, extra_query_params)\n qs = urlencode_s(d, allow_func=allow_func)\n return urlunparse((scheme, netloc, path, params, qs, fragment))",
"def append_query_param(url: str, key: str, value: str) -> str:\n template = '?' in url and '{}&{}={}' or '{}?{}={}'\n return template.format(url, key, value)",
"def url_append_query(url, query_params):\n if not query_params:\n return url\n scheme, netloc, path, params, query, fragment = urlparse_normalized(url)\n query = (query + \"&\") if query else query\n query_string = query + urlencode_s(query_unflatten(query_params))\n return urlunparse((scheme, netloc, path, params, query_string, fragment))",
"def add_fragment(url, args):\n chunks = list(urlparse(url))\n chunks[5] = urlencode(args)\n return urlunparse(chunks)",
"def url_with_querystring(url, **kwargs):\n return url + '?' + urlencode(kwargs)",
"def set_url_param(parser, token):\r\n bits = token.contents.split()\r\n qschanges = {}\r\n for i in bits[1:]:\r\n try:\r\n key, value = i.split('=', 1)\r\n key = key.strip()\r\n value = value.strip()\r\n key_line_iter = six.StringIO(key).readline\r\n keys = list(tokenize.generate_tokens(key_line_iter))\r\n if keys[0][0] == tokenize.NAME:\r\n # workaround bug #5270\r\n value = Variable(value) if value == '\"\"' else parser.compile_filter(value)\r\n qschanges[str(key)] = value\r\n else:\r\n raise ValueError\r\n except ValueError:\r\n raise TemplateSyntaxError(\"Argument syntax wrong: should be\"\r\n \"key=value\")\r\n return SetUrlParamNode(qschanges)",
"def set_params(self, params):\n\n self.url_params.update(params)\n return self",
"def add_url(p_id, url):\n for product in all_products:\n if product['id'] == p_id:\n product['url'] = url\n product['product_id'] = p_id\n product.move_to_end('product_id', last=False)"
]
| [
"0.7262166",
"0.6983453",
"0.6931309",
"0.6851787",
"0.670423",
"0.6686313",
"0.6619075",
"0.66121083",
"0.6594955",
"0.655649",
"0.6538528",
"0.6431628",
"0.64223516",
"0.6336409",
"0.6336409",
"0.63182306",
"0.63001937",
"0.62852514",
"0.62325287",
"0.62116474",
"0.6195118",
"0.61408216",
"0.60630393",
"0.60551465",
"0.5988425",
"0.5981159",
"0.59059656",
"0.5858716",
"0.58353573",
"0.58115166"
]
| 0.70467913 | 1 |
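A hedged sketch of the plural variant documented in the row above, again assuming the `w3lib.url` location implied by the query; the parameter ordering shown in the result is an assumption.

>>> import w3lib.url
>>> w3lib.url.add_or_replace_parameters('http://example.com/?id=1',
...                                     {'id': '3', 'name': 'bot'})
'http://example.com/?id=3&name=bot'   # existing keys replaced, new ones appended (ordering assumed)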
If given a path name, return its file URI; otherwise return it unmodified | def any_to_uri(uri_or_path):
if os.path.splitdrive(uri_or_path)[0]:
return path_to_file_uri(uri_or_path)
u = urlparse(uri_or_path)
return uri_or_path if u.scheme else path_to_file_uri(uri_or_path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_file_uri(path):\n p = urlparse.urlparse(path)\n\n if p.scheme in [\"https\", \"http\"]:\n return True, path\n elif p.scheme == \"file\":\n # url to path name, i.e: convert %20 to space\n path = urllib.url2pathname(p.path)\n return False, os.path.abspath(os.path.join(p.netloc, path))\n else:\n # treat as a local file\n return False, urllib.unquote(path)",
"def file2path (x,name):\n if isinstance(x,file):\n x.close()\n return x.name\n if isinstance(x,str):\n return x\n raise ValueError(name,x)",
"def qualify(path: str) -> str:\n if path.startswith(\"/\"):\n return f\"file://{path}\"\n else:\n return path",
"def get_file_from_path(file_path):\n return Utils.get_real_file_path(file_path)",
"def normalize_scheme(path, ext):\n path = addextension(path, ext)\n\n parsed = urlparse(path)\n if parsed.scheme:\n # this appears to already be a fully-qualified URI\n return path\n else:\n # this looks like a local path spec\n import os\n dirname, filename = os.path.split(path)\n if not os.path.isabs(dirname):\n # need to make relative local paths absolute\n dirname = os.path.abspath(dirname)\n path = os.path.join(dirname, filename)\n return \"file://\" + path",
"def get_file_path(filename):\n if 'http' in filename:\n parsed_uri = urlparse(filename)\n f = '/' + parsed_uri.path[1:]\n f = '/'.join(f.split('/')[3:]) # split the xxx dir, remove the leading /\n else:\n filename = ('/' + filename) if filename[0] != '/' else filename # make sure starts with /\n # split local img path from path\n f = filename.replace(settings.FILE_PATH, '/')\n f = f.replace(settings.IMAGE_PATH, '/')\n f = f.replace(settings.DERIVED_PATH, '/')\n f = '/'.join(f.split('/')[2:]) # split the xxx dir, remove the leading /\n\n return f",
"def get_file_url(path, config):\n file_url_regex = re.compile(config['file_url_regex'])\n new_path = re.sub(file_url_regex, config['file_url_base'], path)\n return new_path",
"def pathToURI(path):\n ret = libxml2mod.xmlPathToURI(path)\n return ret",
"def pathToFileName(self, path):\n\t\t# Find the path, and strip the leading slash.\n\t\tpath =urlparse.urlparse(self.path)[2].lstrip(\"/\")\n\t\t# Process url escape codes, and normalize the path.\n\t\tpath = os.path.normpath(urllib2.unquote(path))\n\t\t# normpath strips the last slash\n\t\tif os.path.isdir(path):\n\t\t\treturn path + '/'\n\t\telse:\n\t\t\treturn path",
"def get_urifilename(uri):\n up=urlparse.urlparse(uri)\n return split(up[2],\"/\")[-1]",
"def file_path(file_name, path):\n return path.rstrip('\\/') + \"/{0}\".format(file_name) if path else os.getcwd() + \"/{0}\".format(file_name)",
"def stringyfy(path):\n try:\n # Pathlib support\n path = path.__fspath__()\n except AttributeError:\n pass\n if hasattr(path, 'name'): # passed in a file\n path = path.name\n if isinstance(path, str):\n return path\n raise ValueError(f'Cannot convert {path} to a path')",
"def qualify_full_filepath(filename, path=None):\n filepath = os.path.join(path or \"\", filename)\n if not os.path.isfile(filepath):\n raise OSError(f\"No available file found at: {filename}.\")\n return filepath",
"def get_filename(link):\r\n return link[link.rfind(\"/\") + 1:]",
"def try_as_file(inp):\n file = pathlib.Path(inp)\n\n if not file.is_absolute():\n file = pathlib.Path.cwd() / file\n\n if not file.exists():\n return None\n\n try:\n # this will throw if it is a symlink that has a loop in it so that it\n # never points to a base file.\n if file.is_file():\n return file\n except OSError as ex:\n raise Except.FunctionError(\"resolving file '{}' failed: {}\".format(\n file, ex.strerror.lower() ) )\n return None",
"def normalizeURIPath(path):\n ret = libxml2mod.xmlNormalizeURIPath(path)\n return ret",
"def _get_file_url(path):\n return urlparse.urljoin(BASE_URL, path)",
"def getfilename(path):\r\n return path.split('\\\\').pop().split('/').pop().rsplit('.', 1)[0]",
"def getOriginalFile(url):\n # does url exist?\n if url is None or url is \"\":\n return",
"def normalize_filename(url):\n fname = url.replace('file://', '')\n if os.sep != '/' and not os.path.exists(fname):\n fname = fname.lstrip('/')\n return fname",
"def url(self, name):\n if self.base_url is None:\n raise ValueError(\"This file is not accessible via a URL.\")\n url = filepath_to_uri(name)\n if url is not None:\n url = url.lstrip('/')\n return urljoin(self.base_url, url)",
"def realpath(path: str) -> str:\n pass",
"def _findfile(self, path):\n\n # Build list of possible local file paths\n if not self._isurl(path):\n # Valid local paths\n filelist = self._possible_names(path)\n # Paths in self._destpath\n filelist += self._possible_names(self.abspath(path))\n else:\n # Cached URLs in self._destpath\n filelist = self._possible_names(self.abspath(path))\n # Remote URLs\n filelist = filelist + self._possible_names(path)\n\n for name in filelist:\n if self.exists(name):\n if self._isurl(name):\n name = self._cache(name)\n return name\n return None",
"def path_filename_representation(path):\n # Strip leading / and replace / with .\n return re.sub(r\"^/(.*)$\", r\"\\1\", path).replace(\"/\", \".\")",
"def abspath(path: str) -> str:\n pass",
"def get_file(_file):\n _file = pathlib.Path(_file)\n if not _file.is_file():\n _file = None\n return _file",
"def resolvePath_(cls, path):\r\n try:\r\n fsref, isFolder, wasAliased = FSResolveAliasFile(os.path.realpath(path), 1)\r\n return os.path.abspath(fsref.as_pathname().decode(u\"utf-8\"))\r\n except MacOS.Error as e:\r\n return None",
"def _ref_name_from_path(self, path: str) -> str:\n prefix = \"%s/\" % self._path\n assert path.startswith(prefix)\n return path[len(prefix) :]",
"def convertToURL( cPathname ):\n if len( cPathname ) > 1:\n if cPathname[1:2] == \":\":\n cPathname = \"/\" + cPathname[0] + \"|\" + cPathname[2:]\n cPathname = cPathname.replace( \"\\\\\", \"/\" )\n cPathname = \"file://\" + cPathname\n return cPathname",
"def get_path(self, path):\n return abspath(join(self.origin, *path))"
]
| [
"0.67616785",
"0.643403",
"0.64174753",
"0.62495047",
"0.6189978",
"0.6108651",
"0.6086867",
"0.60460186",
"0.60352784",
"0.59979",
"0.5986743",
"0.5967465",
"0.59603",
"0.59325385",
"0.5932342",
"0.59256554",
"0.5908476",
"0.58884764",
"0.58875656",
"0.58660144",
"0.58607817",
"0.58396095",
"0.5834125",
"0.5832556",
"0.58275986",
"0.57938474",
"0.5785277",
"0.57795906",
"0.5771163",
"0.5770898"
]
| 0.7083271 | 0 |
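A short usage sketch for `any_to_uri` as defined in the row above. The file-URI output shown assumes a POSIX path; on Windows, a drive-letter path takes the `path_to_file_uri` branch instead, so the result differs.

>>> any_to_uri('/tmp/data.csv')          # bare path -> file URI
'file:///tmp/data.csv'
>>> any_to_uri('http://example.com/x')   # already has a scheme -> returned unmodified
'http://example.com/x'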
Checks that value is a positive integer; returns the integer if so, else raises an error. | def check_positive(value):
try:
ivalue = int(value)
if ivalue <= 0:
# is int but non-positive
raise argparse.ArgumentTypeError(
'{} is an invalid positive integer value'.format(value))
return ivalue
except ValueError:
# not int
raise argparse.ArgumentTypeError('{} is not an integer'.format(value)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_positive_int(item):\n if not isinstance(item, int):\n return False\n return item > 0",
"def _is_non_negative_int(item):\n if not isinstance(item, int):\n return False\n return item >= 0",
"def check_positive(value):\n ivalue = int(value)\n if ivalue <= 0:\n raise argparse.ArgumentTypeError(f'must be positive value, but got {value}')\n return ivalue",
"def check_pos_int(v):\n status = True\n try:\n val = int(v)\n if val <= 0:\n status = False\n except ValueError:\n status = False\n return status",
"def must_be_positive_or_zero(cls, value):\n if value < 0:\n raise ValueError('must be positive or zero')\n return value",
"def positive_int(value):\n try:\n fvalue = int(value)\n except (ValueError, TypeError) as e:\n raise BirdVoxClassifyError(\n 'Expected a positive int, error message: {}'.format(e))\n if fvalue <= 0:\n raise BirdVoxClassifyError('Expected a positive integer')\n return fvalue",
"def validate(self, value):\n super(PositiveInteger, self).validate(value)\n if value is not None and value < 0:\n raise ValidationError('must be positive integer')\n\n return True",
"def validate_positive_integer(\n value: Any, none_allowed: bool, display_name: str\n) -> None:\n if none_allowed and value is None:\n return\n\n if not isinstance(value, int):\n raise TypeError(f\"{display_name} must be a positive integer\")\n if value <= 0:\n raise ValueError(f\"{display_name} must be a positive integer\")",
"def check_not_negative(value):\n ivalue = int(value)\n if ivalue < 0:\n raise argparse.ArgumentTypeError(\"%s can't be less than 0\" % value)\n return ivalue",
"def is_int(value):\n try:\n int(value)\n except ValueError:\n return False\n else:\n return True",
"def is_int(value):\n try:\n int(value)\n return True\n except ValueError:\n return False",
"def is_int(value):\n try:\n int(value)\n return True\n except ValueError:\n return False",
"def _is_pos_int(number: int) -> bool:\n return type(number) == int and number >= 0",
"def is_valid_positive_integer(input_string):\n\n assert input_string is not None\n try:\n input_string = int(input_string)\n if int(input_string) <= 0:\n raise ValueError\n return True\n except ValueError:\n return False",
"def nonnegative(value):\n my_error = argparse.ArgumentTypeError(\n \"%s is not a non-negative integer value\" % value)\n try:\n my_value = int(value)\n except ValueError:\n raise my_error\n if my_value < 0:\n raise my_error\n return my_value",
"def negint_p(value):\n # check if the value has the expected type\n if type(value) is not int:\n raise Invalid(\"invalid value type {value}\".format(value=value))\n if value >= 0:\n raise Invalid(\"invalid value {value}, negative integer expected\".format(value=value))",
"def unsigned_int_check(val):\n\n is_valid_number = True\n try:\n val = int(val)\n if val < 0:\n raise ValueError(\"Not an unsigned int\")\n except ValueError as e:\n is_valid_number = False\n \n return is_valid_number",
"def _non_negative_int(value):\n try:\n value_int = int(value)\n if value_int < 0:\n raise ValueError\n except (ValueError, TypeError):\n raise argparse.ArgumentTypeError(\n f\"invalid non-negative int value: {value!r}\")\n return value_int",
"def check_for_integer(number):\r\n \r\n try:\r\n int(number) \r\n return True\r\n except ValueError:\r\n return False",
"def validate_positive(value: float):\n if value < 0:\n err = f\"{value} n`est pas positif\"\n raise ValidationError(err)",
"def negint_zero_p(value):\n # check if the value has the expected type\n if type(value) is not int:\n raise Invalid(\"invalid value type {value}\".format(value=value))\n if value > 0:\n raise Invalid(\"invalid value {value}, negative value or zero expected\".format(value=value))",
"def is_int_value(int_value):\n try:\n int(int_value)\n except ValueError:\n return False\n return True",
"def validate(self, value):\n if super().validate(value):\n return (value is None) or (isinstance(value, int) and self._validate_value(value))\n else:\n return False",
"def _is_int(test_val):\n try:\n int(test_val)\n return True\n except ValueError:\n return False",
"def is_positive_integer(string:str) -> bool:\n try:\n value = int(string)\n return value >= 0\n except ValueError:\n return False",
"def is_number(value):\n try:\n int(value)\n return True\n except (ValueError, TypeError):\n return False",
"def pos_int_validator(arg):\n num = int(arg)\n if num > 0:\n return num\n else:\n raise argparse.ArgumentTypeError(\"{} - must be a positive number\".format(arg))",
"def isNumber(num):\n try:\n abs(num)\n return True\n except:\n return False",
"def could_be_int(val):\n if val == None:\n return False\n\n if isinstance(val, int):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n i = int(val)\n if not isinstance(i, int):\n raise ValueError\n else:\n return True\n except:\n return False\n\n # otherwise\n return False",
"def is_int(self, val):\n try:\n int(val)\n return True\n except ValueError:\n return False"
]
| [
"0.80366474",
"0.79576945",
"0.7938288",
"0.79097056",
"0.7845891",
"0.7826583",
"0.76731074",
"0.76619655",
"0.75931734",
"0.758719",
"0.74770504",
"0.74770504",
"0.7437898",
"0.74136573",
"0.74024177",
"0.73540705",
"0.73346096",
"0.73321503",
"0.72591674",
"0.7258475",
"0.7227209",
"0.7225686",
"0.72057533",
"0.71129704",
"0.7108009",
"0.7096282",
"0.70896894",
"0.7087653",
"0.7070778",
"0.70701504"
]
| 0.7988756 | 1 |
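`check_positive` in the row above is written as an argparse type callback, so a natural usage sketch (parser and flag names are hypothetical) looks like this:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--count', type=check_positive)   # validator from the row above

parser.parse_args(['--count', '5'])    # -> Namespace(count=5)
parser.parse_args(['--count', '-2'])   # argparse reports "-2 is an invalid positive integer value" and exits
parser.parse_args(['--count', 'abc'])  # argparse reports "abc is not an integer" and exits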
Counts how many kmers exist in a given file | def count_kmers(file_name, k, verbose=False):
if verbose:
start = time.time()
print('Counting kmers in {}'.format(file_name))
total_kmers = 0
with open(file_name, 'r') as f:
line_num = 0
for line in f:
if line_num % 4 == 1: # dna sequence
total_kmers += len(line) - k # eliminate new-line
line_num += 1
if verbose:
end = time.time()
print('{} kmers are counted in {:.2f} seconds'.format(
total_kmers, end - start))
return total_kmers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_counts(filename, alphabet, kmin, kmax):\n # get the list of kmers to count with length between kmin and kmax\n kmers_list = get_all_possible_kmers(alphabet, kmin, kmax)\n # initialyze the counter with all possible kmer with length\n # between kmin and kmax with zero counts\n counter = Counter(dict([(km, 0) for km in kmers_list]))\n # open and read in the kmers/string in the file\n with gzip.open(filename, 'rt') as fh:\n # iterates through the strings\n for line in fh:\n # make the adjustments int the strings\n kmer = line.replace('\\n', '')\n # check if kmer/string is in the counter\n if kmer in counter:\n # if kmer is in add 1 other wise keep the zero count\n counter[kmer] += 1\n return counter",
"def count_meme_entries(motif_path):\n with open(motif_path, \"r\") as f:\n counter = 0\n for line in f:\n if line[:6] == \"letter\":\n counter += 1\n return counter",
"def count_kmers_observed(read, k):\n counts = {}\n num_kmers = len(read) - k + 1\n for i in range (num_kmers):\n kmer= read[i:i+k]\n if kmer not in counts:\n counts[kmer] = 0\n counts[kmer] +=1\n return len(counts)",
"def count_kmers_possible(read, k):\n num_kmers = {}\n num_kmers1 = len(read) - k + 1\n num_kmers2 = 4**k\n#num_kmers.append(min(num_kmers1,num_kmers2))\n num_kmers = min(num_kmers1,num_kmers2)\n num_kmers3 = max(num_kmers,0)\n return(num_kmers3)",
"def fileCounter(directory):",
"def get_kmers(file, size):\n\tkmers = defaultdict(int)\n\tregex = re.compile('[' + string.punctuation + ']')\n\tfor line in open(file):\n\t\tfor word in [regex.sub('', w) for w in line.lower().split()]:\n\t\t\tnkmers = len(word) - size + 1\n\t\t\tfor kmer in [word[i:i+size] for i in range(nkmers)]:\n\t\t\t\tkmers[kmer] += 1\n\treturn kmers",
"def count_kmers(dna, k):\n kmer_count = Counter()\n for i in range(len(dna)):\n kmer = dna[i:(i+k)]\n if len(kmer) == k:\n kmer_count[kmer] += 1\n return kmer_count",
"def total_number():\r\n total_number = 0\r\n file_read = read_file()\r\n for key in file_read:\r\n total_number = total_number + len(file_read[key])\r\n return total_number",
"def map_count(filename):\n f = open(filename, \"r+\")\n buf = mmap.mmap(f.fileno(), 0)\n lines = 0\n readline = buf.readline\n while readline():\n lines += 1\n return lines",
"def count_n_grams_fasta(fasta_dict, name, kmin, kmax):\n # get the number of files in the names directory\n num_fastas = len(fasta_dict[name])\n # initialyze the counter\n counter = Counter()\n # iterates through the list of paths\n for filename in fasta_dict[name]:\n # reads the file and parse the content\n print(f'Reading and parsing the filename {filename}')\n for name, sequence in parse_fasta(filename):\n # counting the kmers\n cnt = count_kmers(sequence, kmin, kmax, counter=None)\n # add the count of the current file to the counter\n counter.update(cnt)\n # to get the mean of the kmer count for all the files\n final_counter = {k: (c // num_fastas) for k, c in counter.items()}\n return final_counter",
"def get_counts_from_list(string_list, alphabet, kmin, kmax):\n # get the list of kmers to count with length between kmin and kmax\n kmers_list = get_all_possible_kmers(alphabet, kmin, kmax)\n # initialyze the counter with all possible kmer with length\n # between kmin and kmax with zero counts\n counter = Counter(dict([(km, 0) for km in kmers_list]))\n # open and read in the kmers/string in the file\n for string in string_list:\n # check if kmer/string is in the counter\n if string in counter:\n # if kmer is in add 1 other wise keep the zero count\n counter[string] += 1\n return counter",
"def get_file_counts(filename):\n new_file = open(filename, \"r\")\n d = dict()\n for line in new_file: \n split_line = line.split()\n for word in split_line:\n if word in d:\n d[word] += 1\n else:\n d[word] = 1\n new_file.close()\n return d",
"def count_words(filename):",
"def num_instances_mgf(infile_name):\n\tinfile = open(infile_name)\n\tnum_instances = 0\n\tfor line in infile:\n\t\tif line.startswith(\"BEGIN IONS\"):\n\t\t\tnum_instances += 1\n\treturn(num_instances)",
"def count_segments(markers) -> int:\n cnt = Counter()\n for row in markers:\n cnt.update(row)\n n_cnt = dict(takewhile(lambda x: x[1] >= 10, cnt.most_common()))\n del n_cnt[1]\n del n_cnt[-1]\n return len(n_cnt.keys())",
"def doCountTask(filename):\n f = open(filename)\n dataDict = json.load(f)\n weridCount = 0\n unweridCount = 0\n for key in dataDict:\n if dataDict[key][\"weird\"]:\n weridCount += 1\n else:\n unweridCount += 1\n return [unweridCount, weridCount]",
"def find_dimesion(filename):\n file = open(filename,\"r\")\n\n line = file.readline()\n file.close()\n return len(line.split())",
"def part2(fname: dict) -> int:\n return sum(len(set.intersection(*[set(pax) for pax in group])) for group in get_data(fname))",
"def countgenes():\n directory = openfile('db_directory.txt')\n no_genes_file = directory+'GENES_IN_HPO.txt'\n GENES_IN_HPO = openfile(no_genes_file)\n #GENES_IN_HPO = openfile(numbergenes_file)\n return int(GENES_IN_HPO)",
"def count():",
"def count(train_dir):\r\n path = train_dir\r\n count = 0\r\n for fn in os.listdir(path): #fn 表示的是文件名\r\n count = count + 1\r\n return count",
"def get_counts_from_kmer_list(filenames_lst, alphabet, kmin, kmax):\n # initialize the array container\n dic_list = []\n # iterates through the file paths\n for filename in filenames_lst:\n # get the sequences and ids\n for n, seq in parse_fasta(filename):\n # append the counts to the array\n dic_list.append(count_kmers(seq, alphabet, kmin, kmax))\n return dic_list",
"def count_words_in_file(file_name):\n\n\treturn len(get_words_in_file(file_name))",
"def num_iters(docs_file):\n\n with open(docs_file, 'r') as docs:\n num_docs = len(docs.readlines())\n num_iters = num_docs/1000 + 1\n\n return num_iters",
"def count_entries(path, dedupe=True):\n if dedupe:\n return len(get_identities(path))\n else:\n if not os.path.exists(path):\n return 0\n with open(path, 'rb') as f:\n return sum(1 for line in f)",
"def count_kmers(seq, k=3):\n # Start with an empty dictionary\n counts = {}\n # Calculate how many kmers of length k there are\n num_kmers = len(str(seq)) - k + 1\n # Loop over the kmer start positions\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = str(seq)[i:i+k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n # Return the final counts\n return counts",
"def countsubcatchments(inputfilename=FileSettings.settingsdict['inputfilename']):\r\n global count\r\n with open(inputfilename, 'r') as swmmput:\r\n contents = swmmput.readlines()\r\n count = len(contents)\r\n return(count)",
"def countSamples(filename):\n with open(filename, \"r\") as f:\n line = f.readline().split(\"\\t\")\n return len(line) - 2",
"def num_instances_msp(infile_name):\n\tinfile = open(infile_name)\n\tnum_instances = 0\n\tfor line in infile:\n\t\tif line.startswith(\"Name: \"):\n\t\t\tnum_instances += 1\n\treturn(num_instances)",
"def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes"
]
| [
"0.73698807",
"0.69172156",
"0.6849723",
"0.6753836",
"0.6691487",
"0.66859126",
"0.6676495",
"0.6538376",
"0.6530189",
"0.6526704",
"0.646393",
"0.6354559",
"0.62536365",
"0.6241987",
"0.620828",
"0.61729515",
"0.6157252",
"0.6145035",
"0.6124907",
"0.61203504",
"0.6100982",
"0.60964227",
"0.60703796",
"0.60595006",
"0.6005677",
"0.6000554",
"0.5998103",
"0.599208",
"0.598097",
"0.59709525"
]
| 0.78231025 | 0 |
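A small worked check of the k-mer arithmetic used by `count_kmers` in the row above: a read of length L has L - k + 1 k-mers, and because each FASTQ sequence line still carries its trailing newline, `len(line) - k` yields exactly that number.

read_line = "ACGTACGTAC\n"   # sequence of length L = 10, plus the newline
k = 4
assert len(read_line) - k == 10 - 4 + 1   # 7 k-mers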
Implementation of DSK, a kmer-counting algorithm with very low memory usage. Hashes each kmer and writes it to one of several files according to its hash value. Using the target disk and memory budgets, determines how many distinct files should be used and how many iterations the program needs to perform. | def dsk(file_name, k, n, capacity, error_rate, iters, parts, verbose=False):
if verbose:
start = time.time()
# Assign functions to local variables for performance improvement
hash_function = mmh3.hash
heap_pushpop = heapq.heappushpop
CHUNK_LIMIT = math.floor(capacity / 10) # write approximately in 10 calls
heap = []
for i in range(n):
heap.append((0, ''))
for it in range(iters): # iteration
if verbose:
start_iter = time.time()
print('Iteration#{} started.'.format(it + 1))
files = [open('{}'.format(j), 'w') for j in range(parts)] # open files
# Write to files in chunks to have less file.write calls
chunks = [[] for j in range(parts)]
# Assign functions to local variables for performance improvement
writers = [files[j].write for j in range(parts)]
chunk_appender = [chunks[j].append for j in range(parts)]
chunk_cleaner = [chunks[j].clear for j in range(parts)]
chunk_joiner = ''.join
with open(file_name, 'r') as f:
line_num = 0
for line in f:
if line_num % 4 == 1: # dna sequence
kmer_count = len(line) - k
for i in range(kmer_count):
kmer = line[i:i + k]
h = hash_function(kmer)
if h % iters == it: # belongs to this iteration
j = (h / iters) % parts
_j = int(j)
chunk_appender[_j](kmer + '\n')
if len(chunks[_j]) == CHUNK_LIMIT:
# write to file
writers[_j](chunk_joiner(chunks[_j]))
chunk_cleaner[_j]()
line_num += 1
# Write remaining kmers
for j in range(parts):
writers[j](chunk_joiner(chunks[j]))
for f in files:
f.close() # close files
del chunks
if verbose:
end_disk_write = time.time()
print('Disk write is completed in {:.2f} seconds.'.format(
end_disk_write - start_iter
))
for j in range(parts):
bf = BloomFilter(capacity, error_rate, 'kmer_bf')
kmer_counter = defaultdict(lambda: 1)
# Assign functions to local variables for performance improvement
add_to_bf = bf.add
if verbose:
start_partition = time.time()
print('Reading partition#{} started.'.format(j + 1))
with open(str(j), 'r') as f:
for kmer in f:
if kmer not in bf: # not in Bloom Filter
add_to_bf(kmer)
else: # in Bloom Filter
kmer_counter[kmer] += 1
if verbose:
end_partition = time.time()
print('Reading partition#{} is completed '.format(j + 1) +
'in {:.2f} seconds.'.format(
end_partition - start_partition))
start_populate = time.time()
print('Populating the heap...')
for kmer, count in kmer_counter.items():
# insert to the heap if count is bigger than minimum
if count > heap[0][0]:
heap_pushpop(heap, (count, kmer.rstrip()))
if verbose:
end_populate = time.time()
print('Heap is populated in {:.2f} seconds.'.format(
end_populate - start_populate
))
os.remove(str(j))
os.remove('kmer_bf')
if verbose:
end = time.time()
print('DSK Duration: {:.2f} seconds.'.format(end - start))
return heap | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bf_counter(file_name, k, n, capacity, error_rate, verbose=False):\n if verbose:\n start = time.time()\n print('BFCounter started.')\n\n heap = []\n for i in range(n):\n heap.append((0, ''))\n\n bf = BloomFilter(capacity, error_rate, 'kmer_bf')\n\n kmer_counter = defaultdict(lambda: 1)\n\n # Assign functions to local variables for performance improvement\n add_to_bf = bf.add\n heap_pushpop = heapq.heappushpop\n\n with open(file_name, 'r') as f:\n line_num = 0\n for line in f:\n if line_num % 4 == 1: # dna sequence\n kmer_count = len(line) - k\n for i in range(kmer_count):\n kmer = line[i:i + k]\n if kmer not in bf: # not in Bloom Filter\n add_to_bf(kmer)\n else: # in Bloom Filter\n kmer_counter[kmer] += 1\n line_num += 1\n if verbose:\n end_hash = time.time()\n hash_table_size = sys.getsizeof(kmer_counter) / (1024 ** 2)\n print('Hash table is created in {:.2f} seconds.'.format(\n end_hash - start))\n print('Hash table size: {:.2f} MB.'.format(hash_table_size))\n start_populate = time.time()\n print('Populating the heap...')\n\n for count, kmer in kmer_counter.items():\n # insert to the heap if count is bigger than minimum\n if count > heap[0][0]:\n heap_pushpop(heap, (count, kmer))\n\n if verbose:\n end_populate = time.time()\n print('Heap is populated in {:.2f} seconds.'.format(\n end_populate - start_populate\n ))\n\n os.remove('kmer_bf')\n if verbose:\n end = time.time()\n print('BFCounter is completed in {:.2f} seconds.'.format(end - start))\n\n return heap",
"def ksegment(options_dict):\n\n print(datetime.now())\n\n random.seed(options_dict[\"rnd_seed\"])\n np.random.seed(options_dict[\"rnd_seed\"])\n\n # Set output pickle filename\n hasher = hashlib.md5(repr(sorted(options_dict.items())).encode(\"ascii\"))\n hash_str = hasher.hexdigest()[:10]\n model_dir = path.join(options_dict[\"model_dir\"], hash_str)\n if not os.path.isdir(model_dir):\n os.makedirs(model_dir)\n\n options_dict_fn = path.join(model_dir, \"options_dict.pkl\")\n print(\"Writing: \" + options_dict_fn)\n with open(options_dict_fn, \"wb\") as f:\n pickle.dump(options_dict, f, -1)\n print(\"Options: \" + str(options_dict))\n\n print(\"Reading from directory: \" + options_dict[\"data_dir\"])\n landmarks_dict_fn = path.join(options_dict[\"data_dir\"], \"landmarks.pkl\")\n dense_embeddings_fn = path.join(options_dict[\"data_dir\"], \"dense_embeddings.npz\")\n vec_ids_dict_fn = path.join(options_dict[\"data_dir\"], \"vec_ids.pkl\")\n durations_dict_fn = path.join(options_dict[\"data_dir\"], \"durations.pkl\")\n # phone_gt_dict_fn = path.join(data_dir, \"phone_gt.pkl\")\n with open(landmarks_dict_fn, \"rb\") as f:\n landmarks_dict = pickle.load(f)\n dense_embeddings = dict(np.load(dense_embeddings_fn))\n with open(vec_ids_dict_fn, \"rb\") as f:\n vec_ids_dict = pickle.load(f)\n with open(durations_dict_fn, \"rb\") as f:\n durations_dict = pickle.load(f)\n print(\"No. of utterances: \" + str(len(landmarks_dict)))\n\n n_landmarks = sum([len(i) for i in landmarks_dict.values()])\n print(\"No. of landmarks: \" + str(n_landmarks))\n if \"landmarks\" in str(options_dict[\"K_max\"]):\n # The number of components are set as a proportion of landmarks\n proportion = float(options_dict[\"K_max\"].replace(\"landmarks\", \"\"))\n K_max = int(np.floor(proportion * n_landmarks))\n else:\n K_max = int(options_dict[\"K_max\"])\n print(\"K_max: \" + str(K_max))\n\n D = dense_embeddings[dense_embeddings.keys()[0]].shape[1]\n print(\"Embedding dimensionality: \" + str(D))\n\n print(datetime.now())\n print(\"Normalizing embeddings\")\n n_embeds = 0\n for utt in dense_embeddings:\n for i in range(dense_embeddings[utt].shape[0]):\n n_embeds += 1\n cur_embed = dense_embeddings[utt][i, :]\n norm = np.linalg.norm(cur_embed)\n assert norm != 0.\n dense_embeddings[utt][i, :] = cur_embed / np.linalg.norm(cur_embed)\n print(\"No. 
of embeddings: \" + str(n_embeds))\n\n # Setup model\n print(\"Setting up model\")\n ksegmenter = eskmeans_wordseg.ESKmeans(\n K_max=K_max,\n embedding_mats=dense_embeddings, vec_ids_dict=vec_ids_dict,\n durations_dict=durations_dict, landmarks_dict=landmarks_dict,\n p_boundary_init=options_dict[\"p_boundary_init\"],\n n_slices_min=options_dict[\"n_slices_min\"],\n n_slices_max=options_dict[\"n_slices_max\"],\n min_duration=options_dict[\"min_duration\"],\n init_assignments=options_dict[\"init_am_assignments\"],\n wip=options_dict[\"wip\"]\n )\n\n # Initialize acoustic model by training for a few iterations\n if options_dict[\"init_am_n_iter\"] > 0:\n print(\"Performing initial K-means iterations\")\n am_init_record = ksegmenter.acoustic_model.fit(\n options_dict[\"init_am_n_iter\"], consider_unassigned=False\n )\n fn = path.join(model_dir, \"am_init_record_dict.pkl\")\n print(\"Writing: \" + fn)\n with open(fn, \"wb\") as f:\n pickle.dump(am_init_record, f, -1)\n\n # Perform segmentation\n if options_dict[\"segment_n_iter\"] > 0:\n if options_dict[\"n_cpus\"] > 1:\n segmenter_record = ksegmenter.segment_parallel(\n options_dict[\"segment_n_iter\"],\n options_dict[\"n_iter_inbetween_kmeans\"],\n n_cpus=options_dict[\"n_cpus\"],\n n_batches=options_dict[\"n_batches\"]\n )\n else:\n segmenter_record = ksegmenter.segment(\n options_dict[\"segment_n_iter\"],\n options_dict[\"n_iter_inbetween_kmeans\"]\n )\n\n fn = path.join(model_dir, \"segmenter_record_dict.pkl\")\n print(\"Writing: \" + fn)\n with open(fn, \"wb\") as f:\n pickle.dump(segmenter_record, f, -1)\n\n # Obtain clusters and landmarks (frame indices)\n unsup_transcript = {}\n unsup_landmarks = {}\n unsup_landmark_indices = {}\n for i_utt in xrange(ksegmenter.utterances.D):\n utt = ksegmenter.ids_to_utterance_labels[i_utt]\n unsup_transcript[utt] = ksegmenter.get_unsup_transcript_i(i_utt)\n if -1 in unsup_transcript[utt]:\n print(\n \"Warning: Unassigned cuts in: \" + utt + \" (transcript: \" + str(unsup_transcript[utt]) + \")\"\n )\n unsup_landmarks[utt] = ksegmenter.utterances.get_segmented_landmarks(i_utt)\n unsup_landmark_indices[utt] = ksegmenter.utterances.get_segmented_landmark_indices(i_utt)\n fn = path.join(model_dir, \"clusters_landmarks.pkl\")\n print(\"Writing: \" + fn)\n with open(fn, \"wb\") as f:\n pickle.dump(unsup_transcript, f, -1)\n pickle.dump(unsup_landmarks, f, -1)\n pickle.dump(unsup_landmark_indices, f, -1)\n\n # Write model\n fn = path.join(model_dir, \"eskmeans.pkl\")\n print(\"Writing: \" + fn)\n with open(fn, \"wb\") as f:\n ksegmenter.save(f)\n\n print(\"Model directory: \" + model_dir)\n\n print(datetime.now())",
"def FSC2(input_dir, num_reps=50, min_sims=100000, max_ecm=20, calc_CI=False, numcores=1, scratch_mb='200', time_scratch=\"01:50:00\", mem=\"200\", print1=False, overwrite=\"None\", fsc2_path=\"/storage/plzen1/home/holcovam/programs/fsc26_linux64/fsc26\"):\n Data_Files = []\n tpl_files = []\n est_files = []\n CI_Data_Files = []\n shlist = []\n\n if input_dir.endswith(\"/\") is False:\n input_dir += \"/\"\n\n for path in os.listdir(input_dir):\n if os.path.isdir(input_dir + path) and path.startswith(\"FSC2input\"):\n samp_name = path.split(\"_\")[1]\n #folder_name = samp_name\n if samp_name + \"_DSFS.obs\" in os.listdir(input_dir + path):\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + samp_name + \"_DSFS.obs\") as data_file:\n for line in data_file:\n new_file.write(line)\n new_file.close()\n Data_Files.append(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\")\n else:\n print(\"Did not find input data file for: \", samp_name)\n if calc_CI == \"True\":\n num_files = 0\n for file in os.listdir(input_dir + path):\n if file.endswith(\"_DSFS.obs\") and file.split(\"_\")[-2].split(\".\")[-1][0:3] == \"rep\" and file != samp_name + \"_DSFS.obs\":\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + file) as data_file:\n for line in data_file:\n new_file.write(line)\n new_file.close()\n CI_Data_Files.append(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\")\n num_files += 1\n if len(CI_Data_Files) < 1:\n print(\"Did not find bootstrap replicates for: \", samp_name)\n else:\n print(\"Found \", num_files, \" replicate dsfs files for CI calculation for \", samp_name)\n if path.endswith(\".tpl\"):\n tpl_files.append(path)\n est_files.append(path.split(\".\")[0])\n if len(tpl_files) == 0:\n print(\"Did not find any tpl files!! 
Aborting!!\")\n else:\n if calc_CI == \"True\":\n Data_Files = CI_Data_Files\n for file in Data_Files:\n name = file.split(\"_DSFS\")[0]\n samp_name = name.split(\"/\")[-1]\n folder_name = samp_name [0:11]\n for tpl in tpl_files:\n tpl_name = tpl.split(\".tpl\")[0]\n if os.path.isdir(name + \"_\" + tpl_name) is False or overwrite == \"hard\":\n new_tpl = open(name + \"_\" + tpl_name + \".tpl\", 'w')\n new_data = open(name + \"_\" + tpl_name + \"_DSFS.obs\", 'w')\n\n with open(file, 'r') as data:\n for i, line in enumerate(data):\n if i == 1:\n pop_info = line.strip(\"\\n\").strip(\"\\t\").split(\"\\t\")\n pop_num = int(pop_info[0])\n samp_nums = pop_info[-pop_num:]\n new_data.write(line)\n with open(input_dir + tpl, 'r') as template:\n samp_num_lines = pop_num + 4\n for i, line in enumerate(template):\n if i < samp_num_lines:\n new_tpl.write(line)\n elif i == samp_num_lines:\n for num in samp_nums:\n new_tpl.write(num + \"\\n\")\n elif i >= samp_num_lines + len(samp_nums):\n new_tpl.write(line)\n new_est = open(name + \"_\" + tpl_name + \".est\", 'w')\n try:\n with open(input_dir + tpl_name + \".est\") as est:\n for line in est:\n new_est.write(line)\n except FileNotFoundError:\n print(\"Did not find est file for: \", tpl)\n #folder_name = samp_name ''.join(i for i in s if not i.isdigit())\n shname = name + \"_\" + tpl_name + \".sh\"\n shfile5 = open(shname, 'w')\n shfile5.write('#!/bin/bash -e\\n' +\n '#PBS -N '+samp_name+'\\n' +\n '#PBS -l walltime='+str(time_scratch)+'\\n' +\n '#PBS -l select=1:ncpus='+str(numcores)+':mem='+str(mem)+'mb:scratch_local='+str(scratch_mb)+'mb\\n' +\n '#PBS -m abe\\n' +\n '#PBS -j oe\\n\\n' +\n 'module add python-3.4.1-gcc\\n'+\n 'module add python34-modules-gcc\\n'+\n 'trap \\'clean_scratch\\' TERM EXIT\\n'+\n 'if [ ! -d \"$SCRATCHDIR\" ] ; then echo \"Scratch not created!\" 1>&2; exit 1; fi \\n' +\n 'DATADIR=\"/storage/plzen1/home/holcovam/ScanTools\"\\n' +\n 'cp $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+ \"/\" + samp_name + \"_\" + tpl_name + '* $SCRATCHDIR || exit 1\\n'+\n 'cp '+fsc2_path+' $SCRATCHDIR || exit 1\\n'+\n 'cd $SCRATCHDIR || exit 2\\n' +\n 'echo data loaded at `date`\\n\\n' +\n 'chmod +x fsc26 \\n' +\n #'ls -l \\n' +\n './fsc26 -t ' + samp_name + \"_\" + tpl_name + '.tpl -e ' + samp_name + \"_\" + tpl_name + '.est -n ' + str(min_sims) + ' -u -d -q -L ' + str(max_ecm) + ' -M \\n' + \n 'rm seed.txt \\n'+\n 'rm fsc26\\n'+\n 'rm *DSFS.obs\\n'+\n 'rm *.sh\\n'+\n 'rm *.tpl \\n'+\n 'rm *.est \\n'+\n #'ls -l \\n' +\n 'cp $SCRATCHDIR/*.par $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+' || exit 1\\n'+\n 'rm *.par \\n'+\n 'cp -r $SCRATCHDIR/* $DATADIR/'+input_dir+' || export CLEAN_SCRATCH=false\\n'+\n 'printf \"\\\\nFinished\\\\n\\\\n\"\\n')\n shfile5.close()\n shlist.append(shname)\n\n############IF PROBLEM WITH EXCESS OF NONCONVERGED CHAINS, COPY /home/majda/alpine/fastsimcoal2/afterWPSG/scripts/notConverged.py here ###################\n\n else:\n print(\"Output for \" + samp_name + \"_\" + tpl_name + \" already exists. Use hard_overwrite = True to overwrite.\")\n return shlist",
"def chunk(wb_run,sample_run,ei_guess,rebin,mapingfile,nchunk,**kwargs):\n global reducer,rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n \n reducer.energy_bins = rebin\n \n mon_list1=reducer.ei_mon_spectra\n mon_list2=reducer.mon1_norm_spec\n mon_list1.append(mon_list2)\n #mon_list1.sort()\n print 'Monitors for this chunk are: ',mon_list1\n # monitors for merlin[69634,69638]\n \n if inst_name == 'MER':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=69632\n spectrum_start=1\n if inst_name == 'MAP':\n #number of spectrums per instrument and where the detectors start (i.e. 5 for mari but 1 for merlin)\n numspec=41472\n spectrum_start=1\n \n if kwargs.has_key('det_cal_file'):\n cal_file = kwargs.get('det_cal_file') \n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n \n reducer.det_cal_file =None\n reducer.relocate_dets = False\n nums=range(spectrum_start,numspec,nchunk)\n output_wkspName=wksp_out\n for i in nums:\n print '=========================================================================='\n print 'start spectra for this chunk',i\n chunk=range(i,i+nchunk)\n endIndex=nchunk-1\n if i+nchunk > numspec:\n chunk=range(i,numspec+1)\n endIndex=len(chunk)-1\n print 'end spectra for this chunk ', i+endIndex\n \n speclist=mon_list1+chunk\n #print speclist\n LoadRaw(Filename=wb_run,OutputWorkspace=\"wb_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n LoadRaw(Filename=sample_run,OutputWorkspace=\"run_wksp\",LoadLogFiles=\"0\",SpectrumList=speclist)\n \n tmp=arb_units(\"wb_wksp\",\"run_wksp\",ei_guess,rebin,'none_for_this_run_type',one2one=True,bleed=False,**kwargs)\n \n \n DeleteWorkspace(Workspace=\"wb_wksp\")\n DeleteWorkspace(Workspace=\"run_wksp\")\n #DeleteWorkspace(\"_wksp.spe\")\n #DeleteWorkspace(\"_wksp.spe-white\")\n \n if i == spectrum_start:\n #crop the workspace to remove the monitors, the workpsace seems sorted on specnumber so this is ok for instruments where the monitors are at the end of the \n # spectrum list\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=wksp_out,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n else:\n CropWorkspace(InputWorkspace=tmp,OutputWorkspace=tmp,StartWorkSpaceIndex=0,EndWorkSpaceIndex=endIndex)\n ConjoinWorkspaces(InputWorkspace1=wksp_out,InputWorkspace2=tmp,CheckOverlapping='0')\n print int(((float(i+endIndex))/float(numspec))*100),'% complete'\n print '===============================================================================' \n \n GroupDetectors(InputWorkspace=output_wkspName,OutputWorkspace=output_wkspName,MapFile=mapingfile)\n\n \n \n print 'Elapsed time =',time.time()-start_time, 's'\n return mtd[wksp_out]",
"def estimate_map_output_materialized_bytes(num_words, num_reducers, key_num_bytes, value_num_bytes):\n SPILL_FILE_PARTITION_INDICATOR_NUM_BYTES = 6\n\n return (num_words * (zero_compress.size_of_zero_compressed_int64(key_num_bytes) +\n key_num_bytes +\n zero_compress.size_of_zero_compressed_int64(value_num_bytes) +\n value_num_bytes) +\n (SPILL_FILE_PARTITION_INDICATOR_NUM_BYTES * num_reducers))",
"def findTopKViaSharding(filename, k, shards):\n \n # First, we calculate the number of lines for each shard\n filelength = findFLength(filename)\n shardsize = int(np.ceil(filelength / shards))\n \n # Now we split the input file into shards\n splitFileIntoShards(filename, shardsize)\n sleep(1) #might change this to sleep(shards/100) if 1 second is too little\n \n # Some Initial Set-Up...\n filenames = [\"x{:04d}_shard\".format(i) for i in range(shards)]\n totaldict = Counter()\n #shardtimes = []\n \n # Iterate through the Shard Files\n # In each Shard, run countWords(shardfilename) to get a wordcount Counter\n # Update the overall Counter in totaldict with more wordcounts from the Shard\n # Delete the Shard File when we're done with it.\n for f in filenames:\n #print(\"Running Shard {}...\".format(f), end=' ')\n #startf = time()\n \n try:\n wordcounts = countWords(f)\n totaldict.update(wordcounts)\n deleteSingleFile(f)\n #print(\"Shard {} Complete.\".format(f))\n except OSError as e:\n print(\"Something unexpected happened in the OS. Error: {}. Moving on...\".format(e))\n \n #endf = time()\n #shardtimes.append(endf - startf)\n \n #print(\"\\nDone Sharding\")\n #deleteShards()\n #print(\"Average Shard Processing Time for K={} and Shards={} is: {}\".format(k, shards, np.mean(shardtimes)))\n #print(\"Average Shard Processing Time for K={} and Shards={} is: {}\".format(k, shards, np.std(shardtimes)))\n \n # All Shard Processing is complete. Return the Top K words from the overall Counter\n return totaldict.most_common(k)",
"def count_kmers_possible(read, k):\n num_kmers = {}\n num_kmers1 = len(read) - k + 1\n num_kmers2 = 4**k\n#num_kmers.append(min(num_kmers1,num_kmers2))\n num_kmers = min(num_kmers1,num_kmers2)\n num_kmers3 = max(num_kmers,0)\n return(num_kmers3)",
"def eval_kd_cluster(kmers):\n shuffle(kmers)\n search_set, kmers = kmers[:1000], kmers[1000:]\n for H in [0.15, 0.125, 0.1, 0.075, 0.05]:\n kdrft_cover = KDRFTCover(H)\n\n start = clock()\n for kmer in kmers:\n kdrft_cover.add(kmer)\n kdrft_cover.greedy_clusters()\n build_time = clock() - start\n\n stats = kdrft_cover.stats()\n stats['build_time'] = build_time\n stats['H'] = H\n\n start = clock()\n for kmer in search_set:\n kdrft_cover.search(kmer, 2)\n stats['search_time'] = (clock() - start)\n yield stats",
"def count_kmers(file_name, k, verbose=False):\n if verbose:\n start = time.time()\n print('Counting kmers in {}'.format(file_name))\n total_kmers = 0\n with open(file_name, 'r') as f:\n line_num = 0\n for line in f:\n if line_num % 4 == 1: # dna sequence\n total_kmers += len(line) - k # eliminate new-line\n line_num += 1\n if verbose:\n end = time.time()\n print('{} kmers are counted in {:.2f} seconds'.format(\n total_kmers, end - start))\n return total_kmers",
"def final_kmer_counts(seq_dict, num_seqs, alphabet, min_k, max_k):\n counted = Counter()\n len_seqs = 0\n for name, sequence in seq_dict.items():\n seq = seq_cleaner(sequence, alphabet)\n len_seqs += len(seq)\n counted.update(count_kmers_cython(seq, min_k, max_k))\n final_count = {k: (v // num_seqs) for k, v in counted.items()}\n # total_len = (len_seqs // num_seqs)\n return final_count, len_seqs",
"def quant(input_folder,fastq_dict,species_kmers,output_folder,transcriptome_folder,bsub_out=\"bsub_out\"):\n print(\"Starting new quantification run for batch of %d samples from %s\"%(len(fastq_dict),input_folder))\n mkdir_p(bsub_out)\n #no subfolders needed for SRA data\n print(\"bsub logs stored in %s folder\"%bsub_out)\n mkdir_p(output_folder)\n print(\"kallisto output in %s\"%output_folder)\n for i in fastq_dict:\n print(\"===processing fastq files from sample ID: %s===\"%i)\n outdir = path.join(output_folder,i) #separate folder for each fastq, within the output folder\n mkdir_p(outdir)\n cmd = kw.CMD_BASE.format(fastq_id=i,bsub_out=bsub_out)\n cmd = shlex.split(cmd) #convert to list of arguments\n species = fastq_dict[i][\"species\"]\n t_index = path.join(transcriptome_folder,kw.species2transcriptomeindex(species,kmer_size=species_kmers[species]))\n f1 = [path.join(input_folder,r+\"_1.fastq.gz\") for r in fastq_dict[i][\"fastq_list\"]]\n if fastq_dict[i][\"is_paired_end\"]:\n f2 = [path.join(input_folder,r+\"_2.fastq.gz\") for r in fastq_dict[i][\"fastq_list\"]]\n flist = \" \".join(imap(lambda x,y: x+\" \"+y,f1,f2))\n cmd.append(\"kallisto quant -i {ti} -o {out} {flist}\".format(ti=t_index,out=outdir,flist = flist))\n else: #case of single end reads\n flen = median(array(fastq_dict[i][\"avgLengths\"]))\n flist = \" \".join(f1)\n cmd.append(\"kallisto quant --single -i {ti} -o {out} -l {flen} -s {fsd} {flist}\".format(ti=t_index,out=outdir,flen=flen,fsd=flen/5.0,flist = flist))\n #note, fsd is the standard deviation of the fragment length distribution. flen/5 is just a placeholder. We should actually estimate this in the future!\n #print(cmd)\n subprocess.call(cmd)",
"def create_input_chunks_distributed(cs, partition, data_dir, file_format):\n if not file_format == \"HDF5\":\n print(\"File format not supported yet. Aborting...\")\n sys.exit(1)\n\n for i in range(6):\n for filename in os.listdir('/disk' + str(i) + '/gtimothee'):\n if filename.endswith(\".json\") or filename.endswith(\".hdf5\"):\n os.remove(os.path.join('/disk' + str(i) + '/gtimothee', filename))\n print(f\"Creating input chunks...\")\n\n disk_index = 0\n repartition_dict = dict()\n\n for i in range(partition[0]):\n for j in range(partition[1]):\n for k in range(partition[2]):\n print(f\"Creating random array... shape: {cs}\")\n arr = da.random.uniform(size=cs)\n print(f\"Done, converting to float16...\")\n arr = arr.astype(np.float16)\n out_filename = f'{i}_{j}_{k}.hdf5'\n print(f\"Building {out_filename} with shape {cs}\")\n data_dirpath = os.path.join('/disk' + str(disk_index), 'gtimothee')\n outfilepath = os.path.join(data_dirpath, out_filename)\n print(f\"Storing on {data_dirpath}...\")\n da.to_hdf5(outfilepath, '/data', arr, chunks=None, compression=None)\n\n repartition_dict[str((i,j,k))] = outfilepath\n\n disk_index += 1\n if disk_index == 6:\n disk_index = 0\n\n print(f\"Writing repartition file...\")\n json_file = os.path.join('/disk0', 'gtimothee', 'repartition_dict.json')\n if os.path.isfile(json_file):\n os.remove(json_file)\n\n with open(json_file, 'w+') as outfile:\n json.dump(repartition_dict, outfile)",
"def go():\n ##########\n #\n # MB19284\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n target = 'mb19284'\n sci_files = ['i200822_a011{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)]\n sky_files = ['i200822_a018{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [917.75, 1033.5] # This is the target\n # Alternative star to try (bright star to bottom of target): [1015, 581.9]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=0, weight='strehl', submaps=3, instrument=osiris)\n\n ##########\n #\n # KB200101\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n # -- If you have more than one position angle, make sure to\n # clean them seperatly.\n # -- Strehl and Ref src should be the pixel coordinates of a bright\n # (but non saturated) source in the first exposure of sci_files.\n # -- If you use the OSIRIS image, you must include the full filename in the list. \n target = 'kb200101'\n sci_files = ['i200822_a014{0:03d}_flip'.format(ii) for ii in range(2, 28+1)]\n sci_files += ['i200822_a015{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a016{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sky_files = ['i200822_a017{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [975, 1006] # This is the target\n # Alternative star to try (bright star to right of target): [1158, 994]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=1, weight='strehl', submaps=3, instrument=osiris)",
"def training_by_counting(K, D, X, Z):\n init_probs = [ 0 for i in range(K) ]\n trans_probs = [ [ 0 for i in range(K) ] for j in range(K) ]\n emi_probs = [ [ 0 for i in range(D) ] for j in range(K) ] \n occurrences_z = [0 for i in range(K)]\n occurrences_x = [0 for i in range(D)]\n \n for x,z in zip(X,Z):\n z_indices = translate_path_to_indices(z)\n x_indices = translate_observations_to_indices(x)\n\n num_sequences = len(z_indices)/3\n\n for i in range(0,len(z_indices),3):\n init_probs[z_indices[i]] += 1\n\n for j in range(K):\n init_probs[j] = init_probs[j] / num_sequences\n\n trans, emi = count_transitions_and_emissions(K,D,x_indices,z_indices)\n \n for i in range(K):\n for j in range(K):\n trans_probs[i][j] += trans[i][j]\n \n for i in range(K):\n for j in range(D):\n emi_probs[i][j] += emi[i][j]\n \n for i in range(len(z_indices)):\n occurrences_z[z_indices[i]] +=1\n\n for i in range(len(x_indices)):\n occurrences_x[x_indices[i]] += 1\n \n for i in range(K):\n for j in range(K):\n trans_probs[i][j] /= occurrences_z[j]\n\n for i in range(K):\n for j in range(D):\n emi_probs[i][j] /= occurrences_x[j]\n\n my_hmm = hmm(init_probs,trans_probs,emi_probs)\n print(init_probs)\n print(\"\\n\",trans_probs)\n print(\"\\n\",emi_probs)\n return my_hmm",
"def _get_sho_chunk_sizes(self, max_mem_mb):\n # Step 1: Find number of FORC cycles and repeats (if any), DC steps, and number of loops\n # dc_offset_index = np.argwhere(self._sho_spec_inds.attrs['labels'] == 'DC_Offset').squeeze()\n num_dc_steps = np.unique(self._sho_spec_inds[self._fit_spec_index, :]).size\n all_spec_dims = list(range(self._sho_spec_inds.shape[0]))\n all_spec_dims.remove(self._fit_spec_index)\n\n # Remove FORC_cycles\n sho_spec_labels = self.h5_main.spec_dim_labels\n has_forcs = 'FORC' in sho_spec_labels or 'FORC_Cycle' in sho_spec_labels\n if has_forcs:\n forc_name = 'FORC' if 'FORC' in sho_spec_labels else 'FORC_Cycle'\n try:\n forc_pos = sho_spec_labels.index(forc_name)\n except Exception:\n raise\n # forc_pos = np.argwhere(sho_spec_labels == forc_name)[0][0]\n self._num_forcs = np.unique(self._sho_spec_inds[forc_pos]).size\n all_spec_dims.remove(forc_pos)\n\n # Remove FORC_repeats\n has_forc_repeats = 'FORC_repeat' in sho_spec_labels\n if has_forc_repeats:\n try:\n forc_repeat_pos = sho_spec_labels.index('FORC_repeat')\n except Exception:\n raise\n # forc_repeat_pos = np.argwhere(sho_spec_labels == 'FORC_repeat')[0][0]\n self._num_forc_repeats = np.unique(self._sho_spec_inds[forc_repeat_pos]).size\n all_spec_dims.remove(forc_repeat_pos)\n\n # calculate number of loops:\n if len(all_spec_dims) == 0:\n loop_dims = 1\n else:\n loop_dims = get_dimensionality(self._sho_spec_inds, all_spec_dims)\n loops_per_forc = np.product(loop_dims)\n\n # Step 2: Calculate the largest number of FORCS and positions that can be read given memory limits:\n size_per_forc = num_dc_steps * loops_per_forc * len(self.h5_main.dtype) * self.h5_main.dtype[0].itemsize\n \"\"\"\n How we arrive at the number for the overhead (how many times the size of the data-chunk we will use in memory)\n 1 for the original data, 1 for data copied to all children processes, 1 for results, 0.5 for fit, guess, misc\n \"\"\"\n mem_overhead = 3.5\n max_pos = int(max_mem_mb * 1024 ** 2 / (size_per_forc * mem_overhead))\n if self._verbose:\n print('Can read {} of {} pixels given a {} MB memory limit'.format(max_pos,\n self._sho_pos_inds.shape[0],\n max_mem_mb))\n self.max_pos = int(min(self._sho_pos_inds.shape[0], max_pos))\n self.sho_spec_inds_per_forc = int(self._sho_spec_inds.shape[1] / self._num_forcs / self._num_forc_repeats)\n self.metrics_spec_inds_per_forc = int(self._met_spec_inds.shape[1] / self._num_forcs / self._num_forc_repeats)\n\n # Step 3: Read allowed chunk\n self._sho_all_but_forc_inds = list(range(self._sho_spec_inds.shape[0]))\n self._met_all_but_forc_inds = list(range(self._met_spec_inds.shape[0]))\n if self._num_forcs > 1:\n self._sho_all_but_forc_inds.remove(forc_pos)\n met_forc_pos = np.argwhere(get_attr(self._met_spec_inds, 'labels') == forc_name)[0][0]\n self._met_all_but_forc_inds.remove(met_forc_pos)\n\n if self._num_forc_repeats > 1:\n self._sho_all_but_forc_inds.remove(forc_repeat_pos)\n met_forc_repeat_pos = np.argwhere(get_attr(self._met_spec_inds, 'labels') == 'FORC_repeat')[0][0]\n self._met_all_but_forc_inds.remove(met_forc_repeat_pos)\n\n return",
"def main():\n # checking the directory\n cwd = os.getcwd()\n print(f'The working directory: {cwd}')\n # counting time \n start_time = time.process_time()\n # passing args\n arg = parse_arguments()\n sub_dir = arg.sub_dir\n dir_out = arg.dir_out\n file_amb = 'csv_to_clean'\n names_ambigous = defaultdict(str)\n with open(file_amb, 'r') as fh:\n for line in fh:\n name = line.strip().split('/')[2]\n names_ambigous[name] = names_ambigous.get(name, '')\n names_ambigous[name] += line.strip()\n print(f'number files: {len(names_ambigous)}')\n # checking if the output directory exist\n # if not make it\n f_pwd = os.path.join('Results', 'kmer_counts')\n # get the genus names\n cnt = 0\n for name, filename in names_ambigous.items():\n cleaned = get_csv_clean(filename)\n full_path = os.path.join(f_pwd, name)\n if os.path.exists(full_path):\n print(f'The path {full_path} exist')\n pass\n else:\n os.makedirs(full_path)\n csv_name = f'{full_path}/{name}_k2_8_chr.csv'\n print(f'Checking the full path {csv_name}')\n with open(csv_name, 'w') as fout:\n for km, cn in cleaned.items():\n fout.write(f'{km},{cn}\\n')\n cnt += 1\n # get final time of the script\n end = time.process_time()\n total_time = end - start_time\n print(f'The script takes {total_time} to finish!')\n print(f'Where read and manipulated {cnt} files')\n print('Done!')",
"def part1():\n program = read_input()\n root = build_filesystem(program)\n all_sizes = root.make_size_list()\n return sum(size for size in all_sizes if size <= 100000)",
"def kmer_frequencies(kmertable_all, kmertable_filtered, kmertable_nonDT_hi, kmertable_nonDT_lo, data_mm, codon_seqs):\n\n def codon_bgfreq(codon_seqs, data_mm):\n \"\"\"\n get codon background frequencies from mRNA seqs\n seqs: dictionary of yeast mRNA sequences\n data_mc: dictionary of multi-mapping boolean\n \"\"\"\n codon_counts = np.zeros(( len(codons_nonstop) ))\n list_orfs = list( data_mm.keys() )\n\n for ix, orf in enumerate(list_orfs):\n current_seq = codon_seqs[orf]\n current_mm = data_mm[orf]\n\n for pos in range( len(current_mm) ):\n if current_mm[pos] and current_seq[pos] in codons_nonstop:\n current_index = codons_nonstop.index(current_seq[pos])\n codon_counts[current_index] += 1\n codon_counts = np.around( codon_counts / np.sum(codon_counts), 5)\n\n return codon_counts\n\n\n def codonfreqs_kmerdf(kmertable):\n \"\"\"\n get codon frequencies from kmertable\n \"\"\" \n codon_counts_kmer = np.zeros(( len(codons_nonstop) ))\n for kmer in kmertable['kmer']:\n current_kmer_codons = [ kmer[(i*3):((i*3)+3)] for i in range(3) ] # ! hard coded for length L=3\n for codon in current_kmer_codons:\n current_index = codons_nonstop.index(codon)\n codon_counts_kmer[current_index] += 1 \n codon_counts_kmer /= np.sum(codon_counts_kmer)\n\n return np.around(codon_counts_kmer, 5)\n\n #kmertable_threshold = kmertable_all[kmertable_all['threshold']==1]\n kmertable_all2 = kmertable_all[kmertable_all['threshold']==0]\n\n\n cc_bg = codon_bgfreq(codon_seqs, data_mm)\n cc_all = codonfreqs_kmerdf(kmertable_all2)\t\t\t# without hits\n cc_theta = codonfreqs_kmerdf(kmertable_filtered)\n cc_nDT_hi = codonfreqs_kmerdf(kmertable_nonDT_hi) # min 16 max 4 at 1090\n cc_nDT_lo = codonfreqs_kmerdf(kmertable_nonDT_lo) # min 16 max 4 at 1090\n\n output = pd.DataFrame({'codon': list(codons_nonstop), \n 'kmer_theta': list(cc_theta), \n 'redundant': list(cc_all), \n 'background': list(cc_bg),\n 'nDThi': list(cc_nDT_hi),\n 'nDTlo': list(cc_nDT_lo) } ) \n output.to_csv(\"../data/figures/figure3/kmer_frequencies.txt\", header=True, index=False, sep='\\t')\n\n return output",
"def _cluster_k_medoids_minibatch(self, num_variants, tolerance, batch_size, cache, max_cycles):\n avail_medoid_indices = [self.index[name] for name in self.tree.get_ordered_names() if name in self.available]\n chsn_indices = [self.index[n] for n in self.chosen]\n num_chsn = len(chsn_indices)\n dists = self._transform_distances(tolerance)\n # This spaces the initial centroids randomly around the tree\n seq_chunk = len(avail_medoid_indices) // (num_variants - num_chsn)\n rand_inds = []\n for i in range(num_variants - num_chsn):\n rand_inds.append(avail_medoid_indices[random.randint(i*seq_chunk, (i+1)*seq_chunk-1)])\n best_med_inds = np.array(chsn_indices + rand_inds)\n # Initial random sets\n best_clusters = self._partition_nearest(best_med_inds, dists)\n best_scores = self._sum_dist_scores(best_med_inds, best_clusters, dists)\n best_score = sum(best_scores)\n # Using a simple greedy algorithm, typically converges after 2-5 iterations.\n num_cycles = 0\n improvement = True\n while improvement == True:\n improvement = False\n med_inds = best_med_inds.copy()\n if len(avail_medoid_indices) > batch_size:\n avail_minibatch_inds = random.sample(avail_medoid_indices, batch_size)\n else:\n avail_minibatch_inds = avail_medoid_indices\n for i in range(num_chsn, num_variants):\n for ind in avail_minibatch_inds:\n if ind in med_inds: continue\n med_inds[i] = ind\n score = self._score_pattern(med_inds, dists)\n if score < best_score:\n best_score = score\n best_med_inds[i] = ind\n improvement = True\n else:\n med_inds[i] = best_med_inds[i]\n num_cycles += 1\n cache['cycles_used'] += 1\n if cache['quit_now'] or max_cycles != None and num_cycles >= max_cycles:\n break\n if cache['quit_now'] or max_cycles != None and num_cycles >= max_cycles:\n improvement = False\n break\n best_clusters = self._partition_nearest(best_med_inds, dists)\n best_scores = self._sum_dist_scores(best_med_inds, best_clusters, dists)\n return best_med_inds, best_scores",
"def pass1(self, verbose):\n \n for root, dirs, files in os.walk(self.dir_to_check, topdown=False):\n t_size = 0\n for f in files:\n new_f = os.path.join(root,f) #complete path in case of homonyms\n size = os.path.getsize(new_f)\n t_size += size\n self.cache[new_f] = HumanReadableSize(size)\n t_size += sum ([self.cache[os.path.join(root,d)].val for d in dirs])\n self.cache[root] = HumanReadableSize(t_size)\n if verbose:\n print ('.................... Computing size of {}!'.format(root))\n \n #print (self.cache) #debugging",
"def MCS(n,k):\n\tglobal dict_all\n\tdict_val=copy.deepcopy(dict_all)\n\t#start_time = time.time()\n\tfinal = {}\t\t\t\t\t # Store all result with the count as key. For example final[1]=[[1,0,0],[0,1,1]]\n\tseq = []\t\t\t\t\t\t# Store the count with no duplication\n\tfor i in range(n):\n\t\tleaf={}\t\t\t\t\t\t# leaf is the dictionary to store the random value of each leaf\n\t\t#count=0\n\t\tfor i in leaves:\n\t\t\tleaf[i] = choice([0,1])\n\t\t\tdict_val[i]=leaf[i]\n\t\t\t#count += leaf[i]\n\t\tresult = Cal_FT(dict_val)\t\n\t\t'''\n\t\tif result:\n\t\t\tcutset = []\n\t\t\tfor i in leaves:\n\t\t\t\tcutset.append(str(leaf[i]))\n\t\t\tcutset=\"\".join(cutset)\n\t\t\tif cutset not in final:\n\t\t\t\tfinal[cutset]=count\n\tfinal_sorted=sorted(zip(final.values(),final.keys())) \t\t\t\t#Order the cutset by its count\n\tfor i in range(k):\t\t\t\t\t\t\t\t\t\t\t\t\t#Print the first k result\n\t\tcutset=list(final_sorted[i][1])\n\t\tresult=[]\n\t\tfor index in range(len(cutset)):\n\t\t\tif cutset[index] is \"1\":\n\t\t\t\tresult.append(leaves[index])\n\t\tprint result\n\t#end_time=time.time()\n\t#print \"Running time is\", end_time-start_time\n\t'''",
"def main():\n\n \"\"\"\n nodes, hd3 = erdos_rennie_like(100,8333,5)\n export('d3',hd3)\n\n nodes, hd5 = erdos_rennie_like(100,8333,6)\n export('d5',hd5)\n\n nodes, hd6 = erdos_rennie_like(100,8333,7)\n export('d6',hd6)\n \"\"\"\n\n \"\"\"\n nodes, sparse1 = erdos_rennie_like(600, 1200, 3)\n export('sparse_diag1', sparse1)\n\n nodes, sparse2 = erdos_rennie_like(600, 2400, 3)\n export('sparse_diag2',sparse2)\n\n nodes, sparse3 = erdos_rennie_like(600, 5800, 3)\n export('sparse_diag3',sparse3)\n\n nodes, sparse4 = erdos_rennie_like(600,11600, 3)\n export('sparse_diag4',sparse4)\n\n nodes, sparse5 = erdos_rennie_like(600,23200, 3)\n export('sparse_diag5',sparse5)\n \"\"\"\n\n nodes, size1 = erdos_rennie_like(100, 500, 3)\n nodes, size2 = erdos_rennie_like(200,1000,3)\n nodes,size3 = erdos_rennie_like(300,1500,3)\n nodes,size4 = erdos_rennie_like(400,2000,3)\n nodes,size5 = erdos_rennie_like(500,2500,3)\n\n export('size_diag1',size1)\n export('size_diag2',size2)\n export('size_diag3',size3)\n export('size_diag4',size4)\n export('size_diag5',size5)",
"def get_counts(filename, alphabet, kmin, kmax):\n # get the list of kmers to count with length between kmin and kmax\n kmers_list = get_all_possible_kmers(alphabet, kmin, kmax)\n # initialyze the counter with all possible kmer with length\n # between kmin and kmax with zero counts\n counter = Counter(dict([(km, 0) for km in kmers_list]))\n # open and read in the kmers/string in the file\n with gzip.open(filename, 'rt') as fh:\n # iterates through the strings\n for line in fh:\n # make the adjustments int the strings\n kmer = line.replace('\\n', '')\n # check if kmer/string is in the counter\n if kmer in counter:\n # if kmer is in add 1 other wise keep the zero count\n counter[kmer] += 1\n return counter",
"def generate_set(input_path, output_path, size=200, layover=0.1, input_size=1000, thread_count=8):\n\n # Assuming that the files are located in the folders 'labels' and 'examples'\n label_paths = utils.get_file_paths(\"{}/labels\".format(input_path))\n example_paths = utils.get_file_paths(\"{}/examples\".format(input_path))\n\n # Defines the output path based on the size\n output_path = \"{0}/{1}x{1}\".format(output_path, size)\n\n export_path_example = \"{}/examples/\".format(output_path)\n export_path_label = \"{}/labels/\".format(output_path)\n\n # Make the path if it does not exist\n utils.make_path(export_path_example)\n utils.make_path(export_path_label)\n\n path_length = len(label_paths)\n\n q = Queue()\n for i in range(path_length):\n q.put(i)\n\n # Starts n threads\n for i in range(thread_count):\n # Create a new database connection for each thread.\n t = threading.Thread(\n target=work,\n args=(\n q,\n example_paths,\n label_paths,\n path_length,\n export_path_example,\n export_path_label,\n size,\n layover,\n input_size\n )\n )\n\n # Sticks the thread in a list so that it remains accessible\n t.daemon = True\n t.start()\n\n q.join()\n \n # Empty the console after progress print\n print(\"\")",
"def processFiles(fileName):\n print fileName\n count_t1 = 0\n inFile=open(fileName,'r')\n all_angleList = Counter()\n rep_angleList = Counter()\n all_lengthsList = Counter()\n maxDist_List = Counter()\n global xCord, yCord, zCord\n aminoAcidName={}\n xCord={}\n yCord={}\n zCord={}\n seq_number={}\n counter=0\n for i in inFile:\n if (i[0:6].rstrip()==\"NUMMDL\"):\n numOfModels=i[10:14].rstrip()\n if ((i[0:6].rstrip()==\"ENDMDL\")or (i[0:6].rstrip()=='TER')):\n break\n if (i[0:6].rstrip()==\"MODEL\" and int(i[10:14].rstrip())>1):\n break\n \n if(i[0:4].rstrip())==\"ATOM\" and(i[13:15].rstrip())==\"CA\" and(i[16]=='A'or i[16]==' ')and i[17:20]!= \"UNK\" :\n aminoAcidName[counter]=int(aminoAcidLabel[i[17:20]])\n xCord[counter]=(float(i[30:38]))\n yCord[counter]=(float(i[38:46]))\n zCord[counter]=(float(i[46:54]))\n seq_number[counter]=str(i[22:27])\n counter+=1\n\n protLen=len(yCord)\n initialLabel=[]\n sortedLabel=[]\n sortedIndex=[]\n outDist={}\n for m in range(0,3):\n initialLabel.append(0)\n sortedLabel.append(0)\n sortedIndex.append(0)\n\n for i in range(0,protLen-2):\n for j in range(i+1,protLen-1):\n for k in range(j+1, protLen):\n global i1,j1,k1\n i1=i\n j1=j\n k1=k\n keepLabelIndex={}\n keepLabelIndex[aminoAcidName[i]]=i\n keepLabelIndex[aminoAcidName[j]]=j\n keepLabelIndex[aminoAcidName[k]]=k\n initialLabel[0]=aminoAcidName[i]\n initialLabel[1]=aminoAcidName[j]\n initialLabel[2]=aminoAcidName[k]\n sortedLabel=list(initialLabel)\n sortedLabel.sort(reverse=True)\n\n #Perform Rule- based labelling\n\n if (sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):\n dist1_2Temp=calcDist(i,j)\n dist1_3Temp=calcDist(i,k)\n dist2_3Temp=calcDist(j,k)\n if dist1_2Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):\n indexOf0=i\n indexOf1=j\n indexOf2=k\n elif dist1_3Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):\n indexOf0=i\n indexOf1=k\n indexOf2=j\n else:\n indexOf0=j\n indexOf1=k\n indexOf2=i\n elif(aminoAcidName[i]!=aminoAcidName[j])and(aminoAcidName[i]!=aminoAcidName[k]) and(aminoAcidName[j]!=aminoAcidName[k]): \n for index_ in range(0,3):\n sortedIndex[index_]=keepLabelIndex[sortedLabel[index_]]\n indexOf0=sortedIndex[0]\n indexOf1=sortedIndex[1]\n indexOf2=sortedIndex[2]\n elif(sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]!=sortedLabel[2]):\n indexOf2=keepLabelIndex[sortedLabel[2]]\n indices=indexFind(indexOf2,i,j,k)\n a=indexOf2\n b=indices[0]\n c=indices[1]\n dist1_3Temp=calcDist(b,a)\n dist2_3Temp=calcDist(c,a)\n if dist1_3Temp>=dist2_3Temp:\n indexOf0=indices[0]\n indexOf1=indices[1] \n else:\n indexOf0=indices[1]\n indexOf1=indices[0]\n elif(sortedLabel[0]!=sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):\n indexOf0=keepLabelIndex[sortedLabel[0]]\n indices=indexFind(indexOf0,i,j,k)\n if calcDist(indexOf0,indices[0])>= calcDist(indexOf0,indices[1]):\n indexOf1=indices[0]\n indexOf2=indices[1] \n else:\n indexOf2=indices[0]\n indexOf1=indices[1]\n dist01=calcDist(indexOf0,indexOf1)\n s2=dist01/2\n dist02=calcDist(indexOf0,indexOf2)\n s1=dist02\n dist12=dist01\n dist03=calcDist(indexOf1,indexOf2)\n\n # All lengths calculation \n all_lengthsList[round(dist01,round_off_to)] += 1\n all_lengthsList[round(dist02,round_off_to)] += 1\n all_lengthsList[round(dist03,round_off_to)] += 1\n\n maxDist_List[round(max(dist01,dist02,dist03),round_off_to)] +=1\n\n s3=(((xCord[indexOf0]+xCord[indexOf1])/2-xCord[indexOf2])**2\n +((yCord[indexOf0]+yCord[indexOf1])/2-yCord[indexOf2])**2\n +((zCord[indexOf0]+zCord[indexOf1])/2-zCord[indexOf2])**2)**0.5\n \n \n 
Theta1=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14\n if Theta1<=90:\n all_angleList[round(Theta1,round_off_to)] +=1\n rep_angleList[round(Theta1,round_off_to)] +=1\n else:\n all_angleList[round(abs(180-Theta1),round_off_to)] +=1\n rep_angleList[round(abs(180-Theta1),round_off_to)] +=1\n \n #if Theta1>90: \n # Theta1=abs(180-Theta1)\n #print 'Second Theta1, ',Theta1\n #Theta 2\n dist02=calcDist(indexOf1,indexOf0)\n s1=dist02\n dist01=calcDist(indexOf1,indexOf2)\n s2=dist01/2\n s3=(((xCord[indexOf1]+xCord[indexOf2])/2-xCord[indexOf0])**2\n +((yCord[indexOf1]+yCord[indexOf2])/2-yCord[indexOf0])**2\n +((zCord[indexOf1]+zCord[indexOf2])/2-zCord[indexOf0])**2)**0.5\n \n Theta2=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 \n #if Theta2 > 90:\n # Theta2 = abs(180-Theta2)\n if Theta2<=90:\n all_angleList[round(Theta2,round_off_to)] +=1\n else:\n all_angleList[round(abs(180-Theta2),round_off_to)] +=1\n\n #Theta 3\n dist02=calcDist(indexOf2,indexOf1)\n s1=dist02\n dist01=calcDist(indexOf2,indexOf0)\n s2=dist01/2\n s3=(((xCord[indexOf2]+xCord[indexOf0])/2-xCord[indexOf1])**2+\n ((yCord[indexOf2]+yCord[indexOf0])/2-yCord[indexOf1])**2+\n ((zCord[indexOf2]+zCord[indexOf0])/2-zCord[indexOf1])**2)**0.5\n \n Theta3=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 \n #if Theta3 > 90:\n # Theta3 = abs(180-Theta3)\n if Theta3<=90:\n all_angleList[round(Theta3,round_off_to)] +=1\n else:\n all_angleList[round(abs(180-Theta3),round_off_to)] +=1\n # Either writting output to a file or using dictionary or \n # counter will save you from memory exceptions in this case.\n #all_angleList[round(Theta1,round_off_to)] +=1\n #all_angleList[round(Theta2,round_off_to)] +=1\n #all_angleList[round(Theta3,round_off_to)] +=1\n\n #rep_angleList[round(Theta1,round_off_to)] +=1\n\n count_t1 = count_t1+1\n\n print 'count_t1:',count_t1\n\n return [all_angleList,rep_angleList,all_lengthsList,maxDist_List]",
"def main(seed, numpoints, dimensions, num_centres, fragments, mode, iterations,\n epsilon, arity, use_storage):\n start_time = time.time()\n\n # Generate the data\n fragment_list = []\n # Prevent infinite loops in case of not-so-smart users\n points_per_fragment = max(1, numpoints // fragments)\n\n for l in range(0, numpoints, points_per_fragment):\n # Note that the seed is different for each fragment.\n # This is done to avoid having repeated data.\n r = min(numpoints, l + points_per_fragment)\n\n fragment_list.append(\n generate_fragment(r - l, dimensions, mode, seed + l, use_storage)\n )\n\n compss_barrier()\n print(\"Generation/Load done\")\n initialization_time = time.time()\n print(\"Starting kmeans\")\n\n # Run kmeans\n centres = kmeans_frag(fragments=fragment_list,\n dimensions=dimensions,\n num_centres=num_centres,\n iterations=iterations,\n seed=seed,\n epsilon=epsilon,\n arity=arity)\n compss_barrier()\n print(\"Ending kmeans\")\n kmeans_time = time.time()\n\n # Run again kmeans (system cache will be filled)\n print(\"Second kmeans\")\n centres = kmeans_frag(fragments=fragment_list,\n dimensions=dimensions,\n num_centres=num_centres,\n iterations=iterations,\n seed=seed,\n epsilon=epsilon,\n arity=arity)\n compss_barrier()\n print(\"Ending second kmeans\")\n kmeans_2nd = time.time()\n\n print(\"-----------------------------------------\")\n print(\"-------------- RESULTS ------------------\")\n print(\"-----------------------------------------\")\n print(\"Initialization time: %f\" % (initialization_time - start_time))\n print(\"Kmeans time: %f\" % (kmeans_time - initialization_time))\n print(\"Kmeans 2nd round time: %f\" % (kmeans_2nd - kmeans_time))\n print(\"Total time: %f\" % (kmeans_2nd - start_time))\n print(\"-----------------------------------------\")\n centres = compss_wait_on(centres)\n print(\"CENTRES:\")\n print(centres)\n print(\"-----------------------------------------\")",
"def count_kmer(gene_list, codon_seqs, R, kmer_size=3):\n\n kmer = kmer_size\n MM = 'yes'\n\n list_seqfile = list( codon_seqs.keys() )\n kmer_dict = {}\n\n for orf in gene_list:\n if orf in list_seqfile:\n current_seq = np.array(codon_seqs[orf])\n\n for pos in range(len(current_seq) - (kmer + 1) ):\n if MM == 'yes' and orf in list( mm_consensus.keys() ):\n current_mm = mm_consensus[orf]\n if np.all(current_mm[pos:(pos+kmer)]): # check that no kmer position is MM\n current_kmer = \"\".join( current_seq[pos:pos+kmer])\n if current_kmer in kmer_dict.keys():\n kmer_dict[current_kmer] += 1\n else:\n kmer_dict[current_kmer] = 1\n\n elif MM == 'no':\n current_kmer = \"\".join( current_seq[pos:pos+kmer])\n if current_kmer in kmer_dict.keys():\n kmer_dict[current_kmer] += 1\n else:\n kmer_dict[current_kmer] = 1\n\n new_dict = {}\n list_redundant = []\n for k in kmer_dict.keys():\n if kmer_dict[k] > R:\n if k not in list_redundant:\n \t list_redundant.append(k)\n \n return list_redundant",
"def estimate_num_spill_files(num_words, key_num_bytes, value_num_bytes, mapreduce_task_io_sort_mb, mapreduce_map_sort_spill_percent):\n # extra bytes added when each (k,v) pair is added to output buffer\n KEY_VALUE_META_DATA_NUM_BYTES = 16\n\n key_len_num_bytes = zero_compress.size_of_zero_compressed_int64(key_num_bytes)\n value_len_num_bytes = zero_compress.size_of_zero_compressed_int64(value_num_bytes)\n\n return math.ceil((num_words * (KEY_VALUE_META_DATA_NUM_BYTES + key_len_num_bytes + key_num_bytes + value_len_num_bytes + value_num_bytes)) /\n (util.MiB_to_bytes(mapreduce_task_io_sort_mb) * mapreduce_map_sort_spill_percent))",
"def get_counts_from_kmer_list(filenames_lst, alphabet, kmin, kmax):\n # initialize the array container\n dic_list = []\n # iterates through the file paths\n for filename in filenames_lst:\n # get the sequences and ids\n for n, seq in parse_fasta(filename):\n # append the counts to the array\n dic_list.append(count_kmers(seq, alphabet, kmin, kmax))\n return dic_list",
"def scan(paths, recursive, size, min_size, max_size, hash_function):\n if min_size is None:\n min_size = size // 4\n if max_size is None:\n max_size = size * 8\n\n bytes_total = 0\n bytes_dupe = 0\n fingerprints = set()\n supported = supported_hashes()\n if hash_function not in supported:\n msg = \"'{}' is not a supported hash.\\nTry one of these:\\n{}\".format(\n hash_function, \", \".join(supported)\n )\n raise click.BadOptionUsage(\"hf\", msg)\n\n hf = getattr(hashlib, hash_function)\n files = []\n for path in paths:\n files += list(iter_files(path, recursive))\n t = Timer(\"scan\", logger=None)\n t.start()\n with click.progressbar(files) as pgbar:\n for entry in pgbar:\n try:\n chunker = fastcdc.fastcdc(entry.path, min_size, size, max_size, hf=hf)\n except Exception as e:\n click.echo(\"\\n for {}\".format(entry.path))\n click.echo(repr(e))\n continue\n for chunk in chunker:\n bytes_total += chunk.length\n if chunk.hash in fingerprints:\n bytes_dupe += chunk.length\n fingerprints.add(chunk.hash)\n t.stop()\n if bytes_total:\n data_per_s = bytes_total / Timer.timers.mean(\"scan\")\n dd_ratio = bytes_dupe / bytes_total * 100\n click.echo(\"Files: {}\".format(intcomma(len(files))))\n click.echo(\n \"Chunk Sizes: min {} - avg {} - max {}\".format(min_size, size, max_size)\n )\n click.echo(\"Unique Chunks: {}\".format(intcomma(len(fingerprints))))\n click.echo(\"Total Data: {}\".format(naturalsize(bytes_total)))\n click.echo(\"Dupe Data: {}\".format(naturalsize(bytes_dupe)))\n click.echo(\"DeDupe Ratio: {:.2f} %\".format(dd_ratio))\n click.echo(\"Throughput: {}/s\".format(naturalsize(data_per_s)))\n else:\n click.echo(\"No data.\")"
]
| [
"0.59754854",
"0.5919573",
"0.57606095",
"0.5759139",
"0.5740336",
"0.56900924",
"0.5643245",
"0.562379",
"0.5617651",
"0.5582634",
"0.55727494",
"0.5527192",
"0.5526655",
"0.55222845",
"0.54942554",
"0.54903567",
"0.54834044",
"0.5477402",
"0.54609895",
"0.54141855",
"0.5397813",
"0.5390324",
"0.5387536",
"0.53807145",
"0.53787047",
"0.53541887",
"0.5352655",
"0.535151",
"0.53421587",
"0.5333927"
]
| 0.7018501 | 0 |
Implementation of the Bloom Filter kmer counting algorithm. Creates a Bloom Filter and checks whether a kmer has been previously encountered or not. Only previously encountered kmers are added to the Hash Table, which drastically reduces the size of the Hash Table. | def bf_counter(file_name, k, n, capacity, error_rate, verbose=False):
if verbose:
start = time.time()
print('BFCounter started.')
heap = []
for i in range(n):
heap.append((0, ''))
bf = BloomFilter(capacity, error_rate, 'kmer_bf')
kmer_counter = defaultdict(lambda: 1)
# Assign functions to local variables for performance improvement
add_to_bf = bf.add
heap_pushpop = heapq.heappushpop
with open(file_name, 'r') as f:
line_num = 0
for line in f:
if line_num % 4 == 1: # dna sequence
kmer_count = len(line) - k
for i in range(kmer_count):
kmer = line[i:i + k]
if kmer not in bf: # not in Bloom Filter
add_to_bf(kmer)
else: # in Bloom Filter
kmer_counter[kmer] += 1
line_num += 1
if verbose:
end_hash = time.time()
hash_table_size = sys.getsizeof(kmer_counter) / (1024 ** 2)
print('Hash table is created in {:.2f} seconds.'.format(
end_hash - start))
print('Hash table size: {:.2f} MB.'.format(hash_table_size))
start_populate = time.time()
print('Populating the heap...')
for count, kmer in kmer_counter.items():
# insert to the heap if count is bigger than minimum
if count > heap[0][0]:
heap_pushpop(heap, (count, kmer))
if verbose:
end_populate = time.time()
print('Heap is populated in {:.2f} seconds.'.format(
end_populate - start_populate
))
os.remove('kmer_bf')
if verbose:
end = time.time()
print('BFCounter is completed in {:.2f} seconds.'.format(end - start))
return heap | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, n, m, k=2):\n # expecting to hold n elements\n self.n = n\n if m%4: m += (4-m%4)\n self.m = m*8\n print \"bit map size set to %d (%d bytes)after round up to 32bits\"%(self.m, self.m/8)\n self.bm = BitVector(size=self.m, intVal=0)\n if k in BloomFilter.KRange:\n self.k = k\n else:\n self.k = BloomFilter.KRange[-1]\n # round k to closest allowed value\n for i in range(len(BloomFilter.KRange)-1):\n if k < BloomFilter.KRange[i]:\n self.k = BloomFilter.KRange[i]\n break\n elif k < BloomFilter.KRange[1+i]:\n if (BloomFilter.KRange[+i]-k) >= k-BloomFilter.KRange[1+i]:\n self.k = BloomFilter.KRange[i]\n else:\n self.k = BloomFilter.KRange[i+1]\n break\n print \"k set to %d after validation\"%(self.k)\n p=BloomFilter.calPFP(self.n, self.m, self.k)\n print \"false positive probability will be %f when filtering %d elements\"%(p, self.n)\n #slice bitmap into k slices\n self.ms = self.m/self.k\n self.hashf = MurmurHash3_x86_32",
"def add_clump_forming_kmers(counts, clumpFormingKmers):\n for kmer in counts:\n if counts[kmer] >= t:\n clumpFormingKmers.add(kmer)\n\n return clumpFormingKmers",
"def train_bloom_filter(self, train_data):\n for val in train_data:\n if self.debug:\n print('val: ', val)\n for i in range(0, self.hash_size):\n k = self.hashes[i](val[0])\n if self.debug:\n print('k: ', k)\n self.bitarray[k] = 1\n if self.debug:\n print('___end training____')",
"def count_kmers(dna, k):\n kmer_count = Counter()\n for i in range(len(dna)):\n kmer = dna[i:(i+k)]\n if len(kmer) == k:\n kmer_count[kmer] += 1\n return kmer_count",
"def count_kmers_observed(read, k):\n counts = {}\n num_kmers = len(read) - k + 1\n for i in range (num_kmers):\n kmer= read[i:i+k]\n if kmer not in counts:\n counts[kmer] = 0\n counts[kmer] +=1\n return len(counts)",
"def count_kmers_possible(read, k):\n num_kmers = {}\n num_kmers1 = len(read) - k + 1\n num_kmers2 = 4**k\n#num_kmers.append(min(num_kmers1,num_kmers2))\n num_kmers = min(num_kmers1,num_kmers2)\n num_kmers3 = max(num_kmers,0)\n return(num_kmers3)",
"def kmer_dict(s, k):\n kmer = {}\n #calculating the length as n.\n n = len(s)\n for x in range(0, n - k + 1):\n #checking if the entry alread in the dictionary kmer\n if s[x:x+k] in kmer:\n #if the entry is available then increament 1\n kmer[s[x:x + k]] += 1\n else:\n #else initialize the kmer value as 1\n kmer[s[x:x+k]] = 1\n return kmer",
"def _generate_table(self):\n for i in xrange(32):\n self._table.append(\n BloomFilter(\n capacity=self.__capacity,\n error_rate=self.__error_rate\n )\n )",
"def get_kmer_counts(kmer_list, kmer_counts):\n counts = defaultdict(int)\n for kmer in kmer_list:\n counts[kmer] = counts.get(kmer, 0) + kmer_counts[kmer]\n return counts",
"def kmerHashMap(reads, k):\n kmers_dict = {}\n # loop through all reads\n for i in range(len(reads)):\n # loop read's bases, except for the last k, to obtain its kmers\n for j in range(1+len(reads[i])-k):\n kmer = reads[i][j:k+j]\n if kmers_dict.has_key(kmer):\n kmers_dict[kmer].add(i)\n else:\n kmers_dict[kmer] = set([i])\n \n return kmers_dict",
"def introduceData(self, kmerCounts):\n for block in self.blocks:\n if len(block.kmers) > 0:\n count = sum(kmerCounts[k] * 2.0 for k in block.getKmers())\n #count = sum(kmerCounts[k] * self.G.weights[k] for k in block.getKmers())\n adjustedCount = (1.0 * count) / (len(block.kmers) * self.normalizing)\n block.adjustedCount = adjustedCount\n self.constrain_approximately_equal(adjustedCount, sum(block.getVariables() + [block.getTrash()]), \n penalty=self.dataPenalty)",
"def find_new_kbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n\n #---> j loop\n for j in range(Ly):\n self.kbl[j] = N #initialize search at top\n\n # in fortran k=N-1,1,-1\n for k in range(N-1,0,-1):\n #INDEX MAP\n k_w = k\n k_r = k-1\n \n for j in range(Ly):\n if z_u_w[j,k_w] > z_u_w[j,N] - self.hbls[j]:\n self.kbl[j] = k_w",
"def is_in_bloom_filter(self, value):\n for i in range(0, self.hash_size):\n k = self.hashes[i](value)\n if self.debug:\n print(k)\n if self.bitarray[k] == 0:\n unique, counts = np.unique(self.bitarray, return_counts=True)\n if self.debug: # print how many 0 and 1 are in bitarray\n print(dict(zip(unique, counts)))\n return False\n if self.debug: # print how many 0 and 1 are in bitarray\n unique, counts = np.unique(self.bitarray, return_counts=True)\n print(dict(zip(unique, counts)))\n return True",
"def kmer_frequencies(kmertable_all, kmertable_filtered, kmertable_nonDT_hi, kmertable_nonDT_lo, data_mm, codon_seqs):\n\n def codon_bgfreq(codon_seqs, data_mm):\n \"\"\"\n get codon background frequencies from mRNA seqs\n seqs: dictionary of yeast mRNA sequences\n data_mc: dictionary of multi-mapping boolean\n \"\"\"\n codon_counts = np.zeros(( len(codons_nonstop) ))\n list_orfs = list( data_mm.keys() )\n\n for ix, orf in enumerate(list_orfs):\n current_seq = codon_seqs[orf]\n current_mm = data_mm[orf]\n\n for pos in range( len(current_mm) ):\n if current_mm[pos] and current_seq[pos] in codons_nonstop:\n current_index = codons_nonstop.index(current_seq[pos])\n codon_counts[current_index] += 1\n codon_counts = np.around( codon_counts / np.sum(codon_counts), 5)\n\n return codon_counts\n\n\n def codonfreqs_kmerdf(kmertable):\n \"\"\"\n get codon frequencies from kmertable\n \"\"\" \n codon_counts_kmer = np.zeros(( len(codons_nonstop) ))\n for kmer in kmertable['kmer']:\n current_kmer_codons = [ kmer[(i*3):((i*3)+3)] for i in range(3) ] # ! hard coded for length L=3\n for codon in current_kmer_codons:\n current_index = codons_nonstop.index(codon)\n codon_counts_kmer[current_index] += 1 \n codon_counts_kmer /= np.sum(codon_counts_kmer)\n\n return np.around(codon_counts_kmer, 5)\n\n #kmertable_threshold = kmertable_all[kmertable_all['threshold']==1]\n kmertable_all2 = kmertable_all[kmertable_all['threshold']==0]\n\n\n cc_bg = codon_bgfreq(codon_seqs, data_mm)\n cc_all = codonfreqs_kmerdf(kmertable_all2)\t\t\t# without hits\n cc_theta = codonfreqs_kmerdf(kmertable_filtered)\n cc_nDT_hi = codonfreqs_kmerdf(kmertable_nonDT_hi) # min 16 max 4 at 1090\n cc_nDT_lo = codonfreqs_kmerdf(kmertable_nonDT_lo) # min 16 max 4 at 1090\n\n output = pd.DataFrame({'codon': list(codons_nonstop), \n 'kmer_theta': list(cc_theta), \n 'redundant': list(cc_all), \n 'background': list(cc_bg),\n 'nDThi': list(cc_nDT_hi),\n 'nDTlo': list(cc_nDT_lo) } ) \n output.to_csv(\"../data/figures/figure3/kmer_frequencies.txt\", header=True, index=False, sep='\\t')\n\n return output",
"def codonfreqs_kmerdf(kmertable): \n codon_counts_kmer = np.zeros(( len(codons_nonstop) ))\n for kmer in kmertable['kmer']:\n current_kmer_codons = [ kmer[(i*3):((i*3)+3)] for i in range(3) ] # ! hard coded for length L=3\n for codon in current_kmer_codons:\n current_index = codons_nonstop.index(codon)\n codon_counts_kmer[current_index] += 1 \n codon_counts_kmer /= np.sum(codon_counts_kmer)\n\n return np.around(codon_counts_kmer, 5)",
"def find_clumps(genome, k, L, t):\n assert (is_dna(genome))\n counts = collections.defaultdict(int)\n\n # compute counts of kmers in first L-length part of genome\n for k_start in range(L - k + 1):\n counts[genome[k_start:k_start + k]] += 1\n kmers = _get_keys(counts, t)\n\n # slide L-length window and update counts\n # remove previous leftmost kmer and add new kmer being rightmost in current window\n for L_start in range(1, len(genome) - L + 1):\n counts[genome[L_start - 1:L_start + k - 1]] -= 1\n new_kmer = genome[L_start + L - k:L_start + L]\n counts[new_kmer] += 1\n if counts[new_kmer] >= t:\n kmers.add(new_kmer)\n return kmers",
"def kmerNeighbors(text,k):\r\n L=set()\r\n for i in range(0,len(text)-k+1):\r\n for d in range(0,k+1):\r\n L.update(Neighbors(kmer(text,i,k),d))\r\n D=dict()\r\n for l in L:\r\n D[l]=minHamm(text,l)\r\n return D",
"def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i] += 1\n\t\tself.n += 1",
"def final_kmer_counts(seq_dict, num_seqs, alphabet, min_k, max_k):\n counted = Counter()\n len_seqs = 0\n for name, sequence in seq_dict.items():\n seq = seq_cleaner(sequence, alphabet)\n len_seqs += len(seq)\n counted.update(count_kmers_cython(seq, min_k, max_k))\n final_count = {k: (v // num_seqs) for k, v in counted.items()}\n # total_len = (len_seqs // num_seqs)\n return final_count, len_seqs",
"def find_clumps(DNA, k, L, t):\n assert len(DNA) >= L\n clumps = set()\n\n # Construct the frequency dict for the first region of size L in the DNA\n freq_dict = dictionaries.FrequencyDict(DNA[:L], k)\n\n # For each kmer in the first window, check if frequency >= t and correspondingly\n # add the kmer to the clumps set\n kmers = set()\n for i in range(L - k + 1):\n kmer = DNA[i: i + k]\n if not kmer in kmers:\n kmers.add(kmer)\n _t = freq_dict[kmer]\n if _t >= t:\n clumps.add(kmer)\n\n # Decrease the frequency of the first kmer for the next iteration, as our\n # sliding window will escape it\n first_kmer = DNA[0:k]\n freq_dict[first_kmer] -= 1\n\n # Cool beans -- the initial freqs are set up and the window is in place.\n # Now, we're ready to go through all other regions of length L in the DNA\n for i in range(1, len(DNA) - L + 1):\n\n # If not the first iteration, increase the frequency of the recently added\n # last kmer. If that frequency >= t, add the kmer to the set of clumps\n last_kmer = DNA[i+L-k : i+L]\n freq_dict[last_kmer] += 1\n if freq_dict[last_kmer] >= t:\n clumps.add(last_kmer)\n\n # Decrease the frequency of the first kmer in the region, as\n # the sliding window will escape it\n first_kmer = DNA[i:i+k]\n freq_dict[first_kmer] -= 1\n\n return clumps # Victory",
"def count_kmer(gene_list, codon_seqs, R, kmer_size=3):\n\n kmer = kmer_size\n MM = 'yes'\n\n list_seqfile = list( codon_seqs.keys() )\n kmer_dict = {}\n\n for orf in gene_list:\n if orf in list_seqfile:\n current_seq = np.array(codon_seqs[orf])\n\n for pos in range(len(current_seq) - (kmer + 1) ):\n if MM == 'yes' and orf in list( mm_consensus.keys() ):\n current_mm = mm_consensus[orf]\n if np.all(current_mm[pos:(pos+kmer)]): # check that no kmer position is MM\n current_kmer = \"\".join( current_seq[pos:pos+kmer])\n if current_kmer in kmer_dict.keys():\n kmer_dict[current_kmer] += 1\n else:\n kmer_dict[current_kmer] = 1\n\n elif MM == 'no':\n current_kmer = \"\".join( current_seq[pos:pos+kmer])\n if current_kmer in kmer_dict.keys():\n kmer_dict[current_kmer] += 1\n else:\n kmer_dict[current_kmer] = 1\n\n new_dict = {}\n list_redundant = []\n for k in kmer_dict.keys():\n if kmer_dict[k] > R:\n if k not in list_redundant:\n \t list_redundant.append(k)\n \n return list_redundant",
"def count_kmers(seq, k=3):\n # Start with an empty dictionary\n counts = {}\n # Calculate how many kmers of length k there are\n num_kmers = len(str(seq)) - k + 1\n # Loop over the kmer start positions\n for i in range(num_kmers):\n # Slice the string to get the kmer\n kmer = str(seq)[i:i+k]\n # Add the kmer to the dictionary if it's not there\n if kmer not in counts:\n counts[kmer] = 0\n # Increment the count for this kmer\n counts[kmer] += 1\n # Return the final counts\n return counts",
"def count_kmers(dna: str, k: int, alphabet: str = \"ACGT\"):\n c = Counter(dna[i:i + k] for i in range(len(dna) - k + 1))\n result = []\n for k_mer in enumerate_kmers(alphabet, k):\n result.append(c[k_mer])\n return result",
"def getKmers(seq, k):\n \n kmd = {}\n \n for i in range(len(seq)+1-k):\n kmer = seq[i:i+k]\n kmd[kmer] = kmd.get(kmer,0) + 1\n return kmd",
"def __init__(self, k, num_buckets, fp_size, bucket_size, max_iter):\n self.children: List[Node] = []\n self.parent: Optional[Node] = None\n self.filter = CuckooFilterBit(num_buckets, fp_size, bucket_size, max_iter)\n\n self.dataset_id: Optional[str] = None\n self.k = k",
"def MCS(n,k):\n\tglobal dict_all\n\tdict_val=copy.deepcopy(dict_all)\n\t#start_time = time.time()\n\tfinal = {}\t\t\t\t\t # Store all result with the count as key. For example final[1]=[[1,0,0],[0,1,1]]\n\tseq = []\t\t\t\t\t\t# Store the count with no duplication\n\tfor i in range(n):\n\t\tleaf={}\t\t\t\t\t\t# leaf is the dictionary to store the random value of each leaf\n\t\t#count=0\n\t\tfor i in leaves:\n\t\t\tleaf[i] = choice([0,1])\n\t\t\tdict_val[i]=leaf[i]\n\t\t\t#count += leaf[i]\n\t\tresult = Cal_FT(dict_val)\t\n\t\t'''\n\t\tif result:\n\t\t\tcutset = []\n\t\t\tfor i in leaves:\n\t\t\t\tcutset.append(str(leaf[i]))\n\t\t\tcutset=\"\".join(cutset)\n\t\t\tif cutset not in final:\n\t\t\t\tfinal[cutset]=count\n\tfinal_sorted=sorted(zip(final.values(),final.keys())) \t\t\t\t#Order the cutset by its count\n\tfor i in range(k):\t\t\t\t\t\t\t\t\t\t\t\t\t#Print the first k result\n\t\tcutset=list(final_sorted[i][1])\n\t\tresult=[]\n\t\tfor index in range(len(cutset)):\n\t\t\tif cutset[index] is \"1\":\n\t\t\t\tresult.append(leaves[index])\n\t\tprint result\n\t#end_time=time.time()\n\t#print \"Running time is\", end_time-start_time\n\t'''",
"def __init__(self, bbox, init_time, point_of_interest=\"centroid\"):\n # define constant velocity model\n self.kf = KalmanFilter(dim_x=7, dim_z=4)\n self.kf.F = np.array(\n [[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 1], [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1]])\n self.kf.H = np.array(\n [[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0]])\n self.kf.R[2:, 2:] *= 10.\n self.kf.P[4:, 4:] *= 1000. # give high uncertainty to the unobservable initial velocities\n self.kf.P *= 10.\n self.kf.Q[-1, -1] *= 0.01\n self.kf.Q[4:, 4:] *= 0.01\n self.kf.x[:4] = convert_bbox_to_z(bbox[:4]) # convert_bbox_to_z(bbox)\n\n\n self.time_since_update = 0\n self.history = []\n self.hits = 0\n self.hit_streak = 0\n self.age = 0\n self.objConfidence = bbox[4]\n self.objclass = bbox[5]\n self.id = KalmanBoxTracker.count\n KalmanBoxTracker.count += 1\n\n self.point_of_interest = point_of_interest\n self.init_time = init_time\n self.end_time = None",
"def count_mers(sequence, alphabet, kmin, kmax):\n alphabet = set(alphabet)\n counts = defaultdict(int)\n for kmer in get_kmers_from_sequence(sequence, kmin, kmax):\n if set(kmer).issubset(alphabet):\n counts[kmer] = counts.get(kmer, 0) + 1\n return counts",
"def getKmers(self):\n return self.kmers",
"def divide_and_count(L_windows, k, t):\n\n results = set()\n\n for L_mer in L_windows:\n k_windows = divide_genome(L_mer, k) # We extract in a list all the possible k-mers\n\n # Generate a set of unique elements to avoid multicounts...\n k_windows_set = set(k_windows)\n\n for k_window in k_windows_set:\n if k_windows.count(k_window) == t:\n results.add(k_window)\n\n\n print(\"\\t\".join(results))"
]
| [
"0.61989313",
"0.5929431",
"0.561827",
"0.56155145",
"0.5550853",
"0.5537526",
"0.55260515",
"0.5457855",
"0.5456327",
"0.54321456",
"0.54122543",
"0.53974813",
"0.5378987",
"0.53754586",
"0.5350419",
"0.53478485",
"0.53310055",
"0.5256477",
"0.5185556",
"0.51369804",
"0.51291156",
"0.50852805",
"0.5047113",
"0.5022481",
"0.49968788",
"0.49582714",
"0.48889747",
"0.48835793",
"0.48613435",
"0.48594764"
]
| 0.6309868 | 0 |
Test the DefaultDialogues class. | def test_default_dialogues(self):
_, dialogue = self.default_dialogues.create(
counterparty=COUNTERPARTY_AGENT_ADDRESS,
performative=DefaultMessage.Performative.BYTES,
content=b"some_content",
)
assert dialogue.role == DefaultDialogue.Role.AGENT
assert dialogue.self_address == self.skill.skill_context.agent_address | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_default(self, dm):\n request = create_request(\"other\", \"other\")\n result = await dm.apply_handler(request, create_responder(request))\n assert result.dialogue_state == \"default\"",
"def test_default(self):\r\n self.assertEqual(self.option.default, 'testing')",
"def test_default(self):\r\n self.assertEqual(self.option.default, 'hello')",
"def test_Defaults(self):\n self._run(self._test_scenarios, \"Defaults\")",
"def test_default(self):\r\n self.assertEqual(self.option.default, 1234)",
"def test_prompt_msg_noask_default_fails(self):\n self.expected['failed'] = True\n self.expected['msg'] = \"Unexpected 'default' in non-question prompt.\"\n\n self.assertEquals(\n self.prompt._prompt(self.response, {\n \"say\": \"Hello World\",\n \"default\": \"foobar\"\n }),\n self.expected\n )",
"def test_empty_ui(self):",
"def test_default(self):\r\n self.assertEqual(self.option.default, False)",
"def test_prompt_msg_confirm_defaults_fails(self):\n self.expected['failed'] = True\n self.expected['msg'] = \"Unexpected 'default' provided with confirmation question.\"\n\n self.assertEquals(\n self.prompt._prompt(self.response, {\n \"say\": \"Continue\",\n \"ask\": \"result\",\n \"confirm\": True,\n \"default\": \"some other thing\"\n }),\n self.expected\n )",
"def test_init_default(self):\n self._test_init_default()",
"def test_defaults(self) -> None:\n\n scratch_view: sublime.View = sublime.active_window().new_file()\n tabs: List[Tab] = [Tab(scratch_view)]\n\n data_set: Tuple[Tuple[TabSetting, bool, str], ...] = (\n (\n ShowCaptionsTabSetting,\n DEFAULT_SETINGS[\"show_captions\"],\n \"show_captions\"\n ),\n (\n IncludePathTabSetting,\n DEFAULT_SETINGS[\"include_path\"],\n \"include_path\"\n ),\n (\n ShowGroupCaptionTabSetting,\n DEFAULT_SETINGS[\"show_group_caption\"],\n \"show_group_caption\"\n )\n )\n\n for (cls, enabled, caption) in data_set:\n with self.subTest(cls=cls, enabled=enabled, caption=caption):\n inst = cls(\n self.settings,\n sublime.active_window()\n ) # type: ignore\n self.assertEqual(enabled, inst.is_enabled())\n self.assertListEqual(tabs, inst.apply(tabs))",
"def test_default(self):\n control_channel = ControlChannel(123)\n\n self.assertEqual(control_channel.index, 123)\n self.assertEqual(control_channel.name, \"u123\")",
"def test_openDialog_pass(self):\n self.run_script(\"\"\"\n foo.openDialog(\"foo\")\n foo.openDialog(\"chrome://foo/bar\")\n \"\"\")\n self.assert_silent()",
"def test_default(self):\r\n self.assertEqual(self.option.default, '/tmp')",
"def setUp(self):\n self.dialog = QtDialog(None, uuid4().hex, QtLocalPipe(uuid4))\n self.dialog.create()",
"def test_prompt_msg_shows_default(self):\n with mock.patch('__builtin__.raw_input', return_value=\"Andrew\") as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"First Name\",\n \"ask\": \"first_name\",\n \"default\": \"foobar\"\n })\n\n args, kwargs = mockinput.call_args\n\n self.assertEquals(\"First Name [foobar]? \", args[0])\n self.assertEquals(result['ansible_facts']['first_name'], 'Andrew')",
"def tearDown(self):\n # self.dialog = None",
"def test_prompt_msg_defaults(self):\n with mock.patch('__builtin__.raw_input', return_value=\"\") as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"First Name\",\n \"ask\": \"first_name\",\n \"default\": \"foobar\"\n })\n\n args, kwargs = mockinput.call_args\n\n self.assertEquals(\"First Name [foobar]? \", args[0])\n self.assertEquals(result['ansible_facts']['first_name'], 'foobar')",
"def test_show_correctness_default(self):\n assert ShowCorrectness.correctness_available()",
"def test_ui_menu(test):\n assert hl.test_help_ui_menu(test) == test",
"def test_showanswer_default(self):\r\n # default, no due date, showanswer 'closed', so problem is open, and show_answer\r\n # not visible.\r\n problem = CapaFactory.create()\r\n self.assertFalse(problem.answer_available())",
"def tearDown(self):\n self.dialog = None",
"def test_default(self):\n drive_channel = DriveChannel(123)\n\n self.assertEqual(drive_channel.index, 123)\n self.assertEqual(drive_channel.name, \"d123\")",
"def dlg_initialize(self):\n pass # override",
"def test_init_game_2(self):\n utils.init_game()\n self.assertEqual(pg.display.get_caption()[0], utils.NAME)",
"def test_prompt_msg_confirm_blank_default_no(self):\n with mock.patch('__builtin__.raw_input', return_value=\"\") as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"Continue\",\n \"ask\": \"result\",\n \"confirm\": False\n })\n\n args, kwargs = mockinput.call_args\n\n self.assertEquals(\"Continue [yN]? \", args[0])\n self.assertEquals(result['ansible_facts']['result'], False)",
"def ask_dialog(self, title=\"\", vars=[], help=\"\"):\n\t\tpass",
"def test_openDialog_flag_var(self):\n self.run_script(\"\"\"\n foo.openDialog(bar)\n \"\"\")\n self.assert_notices()",
"def test_default(self):\n acquire_channel = AcquireChannel(123)\n\n self.assertEqual(acquire_channel.index, 123)\n self.assertEqual(acquire_channel.name, \"a123\")",
"def test_default(self):\n for n in range(1, 5):\n for prefix in ['', 'git-', 'gbp-']:\n parser = GbpOptionParser('%scmd%d' % (prefix, n))\n self.assertEqual(parser.config['default_option'], 'default_default1')"
]
| [
"0.6822309",
"0.6626286",
"0.660622",
"0.6486287",
"0.6461427",
"0.64539313",
"0.6384414",
"0.6358669",
"0.627209",
"0.62540084",
"0.6202093",
"0.619923",
"0.6177934",
"0.6042947",
"0.60372436",
"0.6017006",
"0.5988623",
"0.5987767",
"0.596759",
"0.5953054",
"0.5951422",
"0.5925969",
"0.5908512",
"0.582663",
"0.5751428",
"0.57327634",
"0.5713864",
"0.57110786",
"0.56781816",
"0.56648177"
]
| 0.8006611 | 0 |
Test the OefSearchDialogues class. | def test_oef_search_dialogues(self):
_, dialogue = self.oef_search_dialogues.create(
counterparty=COUNTERPARTY_AGENT_ADDRESS,
performative=OefSearchMessage.Performative.SEARCH_SERVICES,
query="some_query",
)
assert dialogue.role == OefSearchDialogue.Role.AGENT
assert dialogue.self_address == str(self.skill.skill_context.skill_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_search(self):\n pass",
"def test_search(self):\n pass",
"def test_search(self):\n pass",
"def test_search_4(self):\n\n # search for \"cheese\"\n form = FrontSearchForm()\n form.search_box.set_value('cheese')\n form.submit.click()\n\n # check that results are shown\n AppBar() \\\n .result_stats.should(be.visible)",
"def test_search_show(self):\n self.assertEquals(\n len(self.t['life on mars'].search('the', key='episodename')),\n 10\n )",
"def test_search_3(self):\n\n # search for \"cheese\"\n FrontSearchForm() \\\n .populate_form({'search_box' : 'cheese'}) \\\n .submit_form()\n\n # check that results are shown\n AppBar() \\\n .result_stats.should(be.visible)",
"def test_search_1(self):\n\n # import pdb; pdb.set_trace()\n\n # type \"cheese\" into the search field\n s(by.css('[name=\"q\"]')) \\\n .set_value('cheese')\n\n # click the \"Google Search\" button\n s(by.css('[name=\"btnK\"]')) \\\n .click()\n\n # check that results are shown\n s(by.css('#resultStats')) \\\n .should(be.visible)",
"def test_get_foods_search(self):\n pass",
"def test_search_2(self):\n\n # type \"cheese\" into the search field\n s('[name=\"q\"]') \\\n .set_value('cheese')\n\n # click the \"Google Search\" button\n s('[name=\"btnK\"]') \\\n .click()\n\n # check that results are shown\n s('#resultStats') \\\n .should(be.visible)",
"def test_autocomplete_recipe_search(self):\n pass",
"def test_act_is_searching(self):\n # setup\n self.strategy._is_searching = True\n\n # operation\n self.search_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(1)\n has_attributes, error_str = self.message_has_attributes(\n actual_message=self.get_message_from_outbox(),\n message_type=OefSearchMessage,\n performative=OefSearchMessage.Performative.SEARCH_SERVICES,\n to=self.skill.skill_context.search_service_address,\n sender=str(self.skill.public_id),\n query=self.skill.skill_context.strategy.get_location_and_service_query(),\n )\n assert has_attributes, error_str",
"def test_search_form(self):\n set_up_one_user(self, 1, 1)\n login = self.client.login(username='test', password='2HJ1vRV0Z&3iD')\n response = self.client.post(reverse('index'), {'terms_en': 'Test Search', 'websites': [self.website.pk]})\n s = Search.objects.filter(terms_en=\"Test Search\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(s), 1)",
"def test_post_foods_search(self):\n pass",
"def searchInspector(search):\n \n try:\n from PyQt4.QtGui import QApplication, QDialog\n from dialog import Ui_Dialog\n except Exception:\n print \"Missing a required library - please install pyQt4.\"\n return\n \n app = QApplication(sys.argv)\n window = QDialog()\n ui = Ui_Dialog()\n ui.setupUi(window)\n ui.updateList(search)\n window.show()\n app.exec_()",
"def test_search(self):\n d = self._search()\n self._response([2, 5, 10])\n self.assertEqual(self.successResultOf(d), [2, 5, 10])",
"def test_search(self):\n project = factories.ProjectFactory.create(title=\"Test\")\n self.client.force_login(project.owned_by)\n\n response = self.client.get(\"/search/\")\n self.assertContains(response, \"Search query missing.\")\n\n response = self.client.get(\"/search/?q=Test\")\n self.assertContains(response, project.get_absolute_url())\n\n self.assertContains(response, \"projects\")\n self.assertContains(response, \"organizations\")\n self.assertContains(response, \"people\")\n self.assertContains(response, \"invoices\")\n self.assertContains(response, \"recurring-invoices\")\n self.assertContains(response, \"offers\")\n self.assertContains(response, \"deals\")\n\n with override_settings(FEATURES={\"controlling\": False}):\n response = self.client.get(\"/search/?q=Test\")\n self.assertContains(response, project.get_absolute_url())\n\n self.assertContains(response, \"projects\")\n self.assertContains(response, \"organizations\")\n self.assertContains(response, \"people\")\n self.assertNotContains(response, \"invoices\")\n self.assertNotContains(response, \"recurring-invoices\")\n self.assertNotContains(response, \"offers\")\n self.assertNotContains(response, \"deals\")",
"def test_search(self):\n from importCsv.models import City, Hotel\n path = reverse(\"search\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n city = mixer.blend(City, abbrev=\"tes\", name=\"test\")\n mixer.blend(Hotel, city=city, data=\"testData\", name=\"test hotel\")\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"tes\": \"on\"})\n assert r.status_code == 200\n assert r.content.find(b'test hotel')",
"def test_locationSearch(self):\n sel = self.selenium\n \n # Login\n self.login()\n\n # L2inL0\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L2inL0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L2inL0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"L2inL0\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_L4\"))\n self.failIf(sel.is_visible(\"gis_location_label_L4\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n \n # @ToDo: Verify that the result is stored correctly\n # How do we get name from number without submitting? 
SHould we just submit every time?\n \n\n # L2inL1withNoParent\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L2inL1withNoParent\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L2inL1withNoParent\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"L1withNoParent\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"L2inL1withNoParent\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_L4\"))\n self.failIf(sel.is_visible(\"gis_location_label_L4\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L3inL0\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L3inL0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L3inL0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child 
a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"L3inL0\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n \n # L3inL1withL0\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L3inL1withL0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L3inL1withL0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n 
self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Ouest\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"L3inL1withL0\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L3inL1withNoParent\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L3inL1withNoParent\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L3inL1withNoParent\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"L1withNoParent\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"L3inL1withNoParent\", sel.get_selected_label(\"gis_location_L3\"))\n 
self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L4inL0\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL0\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check 
that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L4inL1withL0\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL1withL0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL1withL0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Ouest\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL1withL0\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n 
self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L4inL1withNoParent\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL1withNoParent\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL1withNoParent\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"L1withNoParent\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL1withNoParent\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # L4inL2withL1L0 \n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that 
button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL2withL1L0\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL2withL1L0\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Ouest\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"Port-Au-Prince\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL2withL1L0\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n \n # L4inL2withL1only\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL2withL1only\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL2withL1only\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # 
Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"L1withNoParent\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"L2inL1withNoParent\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL2withL1only\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n \n # L4inL2withL0only\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL2withL0only\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL2withL0only\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Haiti\", sel.get_selected_label(\"gis_location_L0\"))\n 
self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"L2inL0\", sel.get_selected_label(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL2withL0only\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n \n # L4inL2withNoParent\n # Create a new Shelter\n self.create_header()\n # Open the Search box\n sel.click(\"gis_location_search-btn\")\n # Verify it opens\n self.failUnless(sel.is_visible(\"gis_location_autocomplete_div\"))\n # & that button disappears\n self.failIf(sel.is_visible(\"gis_location_search-btn\"))\n # Enter the search String\n sel.type(\"gis_location_autocomplete\", \"L4inL2withNoParent\")\n # Trigger the event to get the AJAX to send\n sel.fire_event(\"gis_location_autocomplete\", \"keydown\")\n # Wait for the popup menu\n for i in range(60):\n try:\n if \"L4inL2withNoParent\" == sel.get_text(\"css=ul.ui-autocomplete li:first-child a\"):\n break\n except:\n pass\n time.sleep(1)\n else:\n self.fail(\"time out\")\n # Select the Result\n sel.fire_event(\"css=ul.ui-autocomplete li:first-child a\", \"mouseover\")\n sel.click(\"css=ul.ui-autocomplete li:first-child a\")\n time.sleep(4)\n # Verify that the dropdowns are set/opened\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.assertEqual(\"Select a location...\", sel.get_selected_label(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.assertEqual(\"L2withNoParent\", sel.get_selected_label(\"gis_location_L2\"))\n 
self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.assertEqual(\"L4inL2withNoParent\", sel.get_selected_label(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_search-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.assertEqual(\"No locations registered at this level\", sel.get_selected_label(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_autocomplete_div\"))\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))",
"def testSelectSiteSearchFunctionality(self):\n auth.checkIfUserIsLoggedIn(self.driver, 0, 'CRUDO')\n auth.login(self.driver, config.users['CRUDO']['username'], config.users['CRUDO']['password'])\n\n try:\n WebDriverWait(self.driver, 50).until(\n EC.presence_of_element_located((By.ID, \"com.view.viewglass:id/search_image_view\")))\n except TimeoutException:\n raiseExceptions(\"Search field is missing\")\n search = self.driver.find_element_by_id(\"com.view.viewglass:id/search_image_view\")\n search.click()\n search_text = self.driver.find_element_by_id(\"com.view.viewglass:id/search_site_edit_text\")\n # search for the site and press ENTER\n search_text.send_keys(config.sites['Default'])\n # self.self.driver.press_keycode(66)\n size = self.driver.find_element_by_id(\"com.view.viewglass:id/viewLogoLL\").size\n location = self.driver.find_element_by_id(\"com.view.viewglass:id/viewLogoLL\").location\n x = size['width'] / 2\n y = location['y'] + size['height'] * 2\n self.driver.tap([(x, y)])\n if len(self.driver.find_elements(By.ID, \"com.view.viewglass:id/viewLogoLL\")) > 0:\n y = location['y'] + size['height'] * 2.5\n self.driver.tap([(x, y)])\n else:\n raiseExceptions(\"Search function did not return any results.\")",
"def test_determine_search_method(): # ***Incomplete test\n ##########################\n # Arrange.\n query_exten = \"query_exten\"\n db_exten = \"db_exten\"\n\n ##########################\n # Act.\n #x = determine_search_method(query_exten,\n #\t\tdb_exten)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.",
"def test_search_720(self):\n self.driver.get(self.domain)\n self.assertTrue(u'XXXX' in\n self.driver.page_source, 'Title text not found')\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n wait = ui.WebDriverWait(self.driver, 5)\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n search.click()\n search_field = self.driver.find_element_by_css_selector(\"#XXXX\")\n search_field.send_keys(\"XXXX\")\n search_field.submit()\n try:\n wait.until(lambda driver: u\"XXXX\" in\n self.driver.find_element_by_css_selector(\"xxxx > a\").text,\n 'Not found!')\n except:\n current_url = self.driver.current_url\n resp = requests.get(current_url)\n if resp.status_code != 200:\n raise Exception(\"Search failed! => [%s] %s\" % (resp.status_code,\n current_url))",
"def search(self, *args, **kwargs):",
"def test_search_test_search_returns_correct_menu(self):\n # create some db records\n dataset = self.create_mixed_test_data()\n test_search_string = 'bravo'\n\n with patch('builtins.input', side_effect=test_search_string):\n result = self.menu.search_text_search()\n\n expected_result = self.menu.present_next_result\n\n self.assertEqual(expected_result, result)",
"def __search(self):\n self.resultList.clear()\n self.infoLabel.clear()\n \n self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)\n self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)\n self.searchButton.setEnabled(False)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n self.__canceled = False\n \n self.__query = [term for term in self.searchEdit.text().strip().split()\n if term not in PipSearchDialog.Stopwords]\n self.__client.call(\n \"search\",\n ({\"name\": self.__query, \"summary\": self.__query}, \"or\"),\n self.__processSearchResult,\n self.__searchError\n )",
"def search():\n pass",
"def test_search_page(self):\n result = self.client.get(\"/search\")\n self.assertIn(b\"Search\", result.data)",
"def testMclPclSearch(self):\n driver = self.driver\n #finds the parent cell line field and enters a parent cell line, tabs out of the field then clicks the Search button\n driver.find_element(By.ID, \"parentCellLine\").send_keys('RENKA')\n time.sleep(2)\n actions = ActionChains(driver) \n actions.send_keys(Keys.TAB)\n actions.perform()\n time.sleep(2)\n driver.find_element(By.ID, 'searchButton').click()\n time.sleep(2)\n #find the search results table\n results_table = self.driver.find_element(By.ID, \"resultsTable\")\n table = Table(results_table)\n #Iterate and print the search results headers\n cell1 = table.get_row_cells(0)\n cell2 = table.get_row_cells(1)\n cell3 = table.get_row_cells(2)\n symbol1 = iterate.getTextAsList(cell1)\n symbol2 = iterate.getTextAsList(cell2)\n symbol3 = iterate.getTextAsList(cell3)\n print(symbol1)\n #Assert the correct antigen is returned\n self.assertEqual(symbol1, ['Not Specified'])\n self.assertEqual(symbol2, ['Not Specified'])\n self.assertEqual(symbol3, ['Not Specified'])",
"def load_search_gui(self):\n pass",
"def other_search(self):\n test = self.ask_zoekarg.text()\n if test:\n self.parent().search_arg = test\n self.parent().do_select()",
"def test_search_project(self):\n title = Project.search_project(\"dee\")\n self.assertTrue(len(title) > 0)"
]
| [
"0.75819606",
"0.75819606",
"0.75819606",
"0.70504874",
"0.69273454",
"0.6920223",
"0.68934745",
"0.67331684",
"0.6732177",
"0.6622524",
"0.6607059",
"0.6467548",
"0.64579827",
"0.64343286",
"0.6417334",
"0.64078",
"0.63583404",
"0.6355816",
"0.6352986",
"0.62550294",
"0.62258977",
"0.6156258",
"0.6152634",
"0.6128833",
"0.6111404",
"0.61037415",
"0.610277",
"0.60516196",
"0.6043324",
"0.6032172"
]
| 0.78425246 | 0 |
Test the TacDialogues class. | def test_tac_dialogues(self):
_, dialogue = self.tac_dialogues.create(
counterparty=COUNTERPARTY_AGENT_ADDRESS,
performative=TacMessage.Performative.REGISTER,
agent_name="some_agent_name",
)
assert dialogue.role == TacDialogue.Role.CONTROLLER
assert dialogue.self_address == self.skill.skill_context.agent_address | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_win(self):\n self.T.board[0] = ['x']*3\n assert self.T.tic_tac_toe(self.T.board)",
"def test_play_game(self):\r\n\r\n \r\n a_players = [RandomPlayer(1), RandomPlayer(2)]\r\n a_x_dist = 3\r\n a_y_dist = 3\r\n a_num_to_win = 1\r\n a_game = Game(a_players, a_x_dist, a_y_dist, a_num_to_win)\r\n\r\n #Game is played to competion\r\n a_game.play_game()\r\n\r\n a_history = a_game.get_history()\r\n\r\n #Go through each move and check to be sure it's valid\r\n for i in range(1,len(a_history)):\r\n #Get copy of the board\r\n prev_board = a_history[i-1]\r\n cur_board = a_history[i]\r\n\r\n #Check if the board chosen is in valid states\r\n self.assertTrue(cur_board in prev_board.get_states(a_players[0].get_id()) or cur_board in prev_board.get_states(a_players[1].get_id()),\\\r\n \"An invalid board state was added to the history\")\r\n\r\n if i == len(a_history) - 1:\r\n self.assertTrue(cur_board.check_win(a_num_to_win, a_players[0].get_id()) or cur_board.check_win(a_num_to_win, a_players[1].get_id()) or cur_board.check_tie())\r\n else: \r\n self.assertFalse(cur_board.check_win(a_num_to_win, a_players[0].get_id()) or cur_board.check_win(a_num_to_win, a_players[1].get_id()) or cur_board.check_tie())",
"def test_is_winner(self):\n TestGame = TTT_Game(\"John\", \"Jane\")\n\n # Check verticals\n TestGame.board = [1, 2, \"X\", 4, 5, \"X\", 7, 8, \"X\"]\n self.assertTrue(TestGame.is_winner(\"X\"))\n TestGame.board = [\"O\", 2, 3, \"O\", 5, \"X\", \"O\", 8, \"X\"]\n self.assertTrue(TestGame.is_winner(\"O\"))\n TestGame.board = [1, \"X\", \"O\", \"O\", \"X\", 6, 7, \"X\", \"X\"]\n self.assertTrue(TestGame.is_winner(\"X\"))\n\n # Check horizontals\n TestGame.board = [\"O\", \"O\", \"O\", \"O\", 5, \"X\", 7, 8, 9]\n self.assertTrue(TestGame.is_winner(\"O\"))\n TestGame.board = [1, 2, 3, \"X\", \"X\", \"X\", 7, 8, 9]\n self.assertTrue(TestGame.is_winner(\"X\"))\n TestGame.board = [1, 2, 3, \"O\", 5, 6, \"O\", \"O\", \"O\"]\n self.assertTrue(TestGame.is_winner(\"O\"))\n\n # Check diagonals\n TestGame.board = [\"O\", \"X\", 3, 4, \"O\", \"X\", \"X\", \"O\", \"O\"]\n self.assertTrue(TestGame.is_winner(\"O\"))\n TestGame.board = [1, 2, \"X\", 4, \"X\", 6, \"X\", 8, 9]\n self.assertTrue(TestGame.is_winner(\"X\"))",
"def test_turn_ai_and_players(lab):\n print('Test turn')\n lab.update_game()\n test_print(lab)\n print(\"Turn: {}\".format(lab.turn_count))\n print('Test completed')",
"def setUp(self):\n self.game = TTTBoard(3)",
"def TicTacToe(): #Written by Cody West\n current_board = [\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"] #Empty board\n players = 0 #Number of players\n human_turn = 0 #Indicates whether the human goes first or second (is 0 for two player games)\n turn = 1 #Turn number\n while players != 1 and players != 2: #While a valid number of players has not been chosen\n players = int(raw_input(\"How many players are there?\")) #Asks how many players there are\n if players < 1 or players > 2: #If the choice is not valid\n print(\"Please pick 1 or 2 players\") #Prints error message\n if players == 1: #If 1 player\n difficulty = 0 #Difficulty variable\n while difficulty != 1 and difficulty != 2 and difficulty != 3 and difficulty != 4: #While a valid difficulty has not been chose\n difficulty = int(raw_input(\"Pick a difficulty. 1 is easiest, 4 is hardest\")) #Ask for a difficulty\n if difficulty != 1 and difficulty != 2 and difficulty != 3 and difficulty != 4: #If difficulty choice is not valid\n print(\"Please pick a difficulty between 1 and 4\") #Prints error message\n while human_turn != 1 and human_turn != 2: #While a human turn has not been chosen\n human_turn = int(raw_input(\"Would you like to go first (1) or second (2)?\")) #Ask for human turn\n if human_turn != 1 and human_turn != 2: #If a valid turn is not chosen\n print(\"Please pick turn 1 or 2\") #Print error message\n if human_turn == 1: #If human goes first\n player1 = \"human\" #Player 1 is human\n player2 = \"AI\" #Player 2 is AI\n elif human_turn == 2: #If human goes second\n player1 = \"AI\" #Player 1 is AI\n player2 = \"human\" #Player 2 is human\n else: #If neither\n player1 = \"human\" #Player 1 is human\n player2 = \"human\" #Player 2 is human\n while turn < 10: #While the number of turns in Tic Tac Toe has not been exceeded\n if turn < 3: #For the first three turns\n draw_example_board() #Draw a board showing the slot numbers\n draw_board(current_board) #Draw current board\n ## You could write this logic much more compactly -- try to avoid having so many\n ## lines of code that look identical. You have four different update_board calls\n ## here where you could have just one.\n if turn%2 == 1: #If it's an odd numbered turn\n if player1 == \"human\":\n print(\"human\")\n update_board(current_board, get_input(current_board, turn), \"X\") #Update board with player 1's selection and X\n else:\n print(\"AI\")\n update_board(current_board, AI(current_board,\"X\",\"O\", difficulty), \"X\") #Update board with AI selection\n else:\n if player2 == \"human\":\n print(\"human\")\n update_board(current_board, get_input(current_board, turn), \"O\") #Update board with player 2's selection and X\n else:\n print(\"AI\")\n update_board(current_board, AI(current_board,\"O\",\"X\", difficulty), \"O\") #Update board with AI selection\n if check_victory(current_board) == \"done\":\n return \"whatever\"#Check victory\n turn = turn + 1 #Increase turn number",
"def test_human_move_acceptable(self):\n for i in range(9):\n self.ri.return_value = str(i)\n T = TicTacToe()\n assert self.T.human_move() == True\n T = None",
"def playttt():\n board = \" \" * 9\n print(\"Welcome to Tic-Tac-Toe, brought to you by GamesCrafters!\\n\")\n print(\"We've 'solved' the game, so you can see the value (win, lose, tie)\")\n print(\"of moves to make. Just type V whenever you want to see the values.\")\n prettyprint(board)\n moves = getmovesfromoracle(board)\n while(moves):\n move = input(\"\\nChoose your move (e.g., A1, B3, etc), V for values, Q for quit: \").upper()\n if (move == \"Q\"):\n break\n elif (move == \"U\"):\n print(\"http://nyc.cs.berkeley.edu:8080/gcweb/service/gamesman/puzzles/ttt/getNextMoveValues;board=\" + urlify(board) + \";width=3;height=3;pieces=3\")\n elif (move == \"V\"):\n print(\"\\nHere are the values for this position's moves (W=win, T=tie, L=lose)\")\n prettyprint(getmovevalues(moves))\n elif (move not in availablemoves(moves)):\n print(\"\\nPlease choose V or one of (without quotes): \" + str(availablemoves(moves)))\n else:\n board = domove(board, move)\n moves = getmovesfromoracle(board)\n prettyprint(board)\n print(\"Thanks for the game!\")",
"def test_brute_force_ttt(self):\n def simulate(moves, human_first): \n T = TicTacToe()\n for move in moves:\n self.ri.return_value = str(move)\n moves = [T.human_move, T.computer_move]\n for i in range(2):\n moves[(i + human_first) % 2]()\n winner = T.tic_tac_toe(T.board)\n if winner:\n return winner == T.computer or winner == 'cat'\n return True\n sys.stdout, tmp = open(os.devnull, 'w'), sys.stdout\n assert True == all(simulate(moves, True) for moves in combinations_with_replacement(range(9), 5))\n assert True == all(simulate(moves, False) for moves in combinations_with_replacement(range(9), 5))\n sys.stdout = tmp",
"def test_win(self):\n game = self.ending(['bw.wwwww'], 8, 1)\n game.man_move(0, 2)\n self.assertEqual(game.finish_state, (250, game.first_player, 'Win'))",
"def __init__(self, tictactoe, players,\n window=Window(name=\"TicTacToe\", size=(500, 500)),\n restart_at_end=False,\n telling=True):\n self.tictactoe = tictactoe\n self.players = players\n self.window = window\n self.default_size = window.size\n self.restart_at_end = restart_at_end\n # =>Easier to manipulate than having nasty infinities to deal with\n self.choice = None\n self.on = False\n self.telling = telling\n self.start_duration = 1\n self.end_duration = 1\n self.start_time = None\n self.end_time = None\n self.game_number = 0",
"def PlayTicTacToe(numPlayers):\n\tteams = {} # maps the teams onto players or computer\n\tif numPlayers == 0:\n\t\tteams['X'] = 'C'\n\t\tteams['O'] = 'C'\n\telif numPlayers == 1:\n\t\tteams['X'] = 'H'\n\t\tteams['O'] = 'C'\n\telse:\n\t\tteams['X'] = 'H'\n\t\tteams['O'] = 'H'\n\n\tnumberBoard = (\n\t\t\t('0', '1', '2'),\n\t\t\t('3', '4', '5'),\n\t\t\t('6', '7', '8')\n\t\t)\n\tprint('Thank you. The board is numbered like this:')\n\tprint(StringFromBoard(numberBoard))\n\tturn = 'X'\n\tboard = [\n\t\t\t[' ', ' ', ' '],\n\t\t\t[' ', ' ', ' '],\n\t\t\t[' ', ' ', ' ']\n\t\t]\n\tnextMover = 'X'\n\tgame = []\n\twhile True:\n\t\tindex = IndexBoard(board)\n\t\tgame.append('I {}'.format(index))\n\t\tnextPlayer = teams[nextMover]\n\t\tif nextPlayer == 'H':\n\t\t\tmove = GetNextMove(board, index, teams, nextMover)\n\t\telse:\n\t\t\tmove = GetComputerMove(board, index, nextMover)\n\t\t\tprint('The Computer has chosen {}.'.format(move))\n\t\tMove(board, nextMover, move)\n\t\tgame.append('M {} {}'.format(nextMover, move))\n\t\tprint(StringFromBoard(board))\n\n\t\tcanonicalBoard, index, rotations, flips = CanonicalizeBoard(board)\n\t\tif rotations > 0:\n\t\t\tprint('Rotate {} times'.format(rotations))\n\t\t\tgame.append('R {}'.format(rotations))\n\t\tif flips > 0:\n\t\t\tprint ('Flip Horizontally')\n\t\t\tgame.append('F {}'.format(flips))\n\t\tif rotations > 0 or flips > 0:\n\t\t\tboard = canonicalBoard\n\t\t\tprint(StringFromBoard(board))\n\t\t\n\t\tif IsWinner(board, nextMover):\n\t\t\tprint ('{} is the Winner!'.format(nextMover))\n\t\t\tgame.append('W {}'.format(nextMover))\n\t\t\tbreak\n\t\t\n\t\tif IsCatsGame(board):\n\t\t\tprint(\"No winner! Cat's game.\")\n\t\t\tgame.append('C')\n\t\t\tbreak\n\n\t\tif nextMover == 'X':\n\t\t\tnextMover = 'O'\n\t\telse:\n\t\t\tnextMover = 'X'\n\tLearnFromGames(game)\n\treturn game",
"def test_three_arms_two_winners(self):\n self._test_three_arms_two_winners()",
"def __init__(self):\n # create game object\n self.game = Game()\n self.players = (\"X's\", \"O's\")\n\n # define string constants for UI\n self.BG_COLOR = \"#DBF6E9\"\n self.FONT = \"Verdana\"\n self.PROMPT = \"{0}, it's your turn.\"\n self.SCORE_LABEL = \"{0}: {1}\"\n self.TIE_LABEL = \"Ties: {0}\"\n\n # create window and instructions at the top\n self.window = tk.Tk()\n self.window.title(\"Tic-tac-toe\")\n self.window.configure(padx=30, pady=30, bg=self.BG_COLOR)\n self.window.geometry(\"450x450\")\n self.instructions = self.create_label(self.window, self.PROMPT.format(self.players[self.game.whose_turn]))\n self.instructions.grid(row=0, column=0)\n # create score frame to hold results of previous games in this session\n self.score_frame = tk.Frame(self.window, bg=self.BG_COLOR)\n self.score_frame.grid(row=1, column=1, padx=20, pady=20, sticky='n')\n self.score_label = self.create_label(self.score_frame, 'Score')\n self.score_label.grid(row=0, column=0, sticky='w')\n self.player_0_score_label = self.create_label(self.score_frame,\n self.SCORE_LABEL.format(self.players[0], self.game.player_0_score))\n self.player_0_score_label.grid(row=1, column=0)\n self.player_1_score_label = self.create_label(self.score_frame,\n self.SCORE_LABEL.format(self.players[1], self.game.player_1_score))\n self.player_1_score_label.grid(row=2, column=0)\n self.num_ties_label = self.create_label(self.score_frame, self.TIE_LABEL.format(self.game.num_ties))\n self.num_ties_label.grid(row=3, column=0, sticky='w')\n # create game frame; each of the nine squares on the grid is represented as a button\n self.game_frame = tk.Frame(self.window)\n self.game_frame.grid(row=1, column=0, pady=20)\n self.button_list = self.create_buttons()\n self.place_buttons()\n\n self.window.mainloop()",
"def test_runGame(self):\n # this is tested by playing the game. No good way to unit test this.\n pass",
"def tic_tac_toe(board, player_1, player_2):\n # do an initial clear\n os.system('clear')\n winner = False\n cur_player = player_1\n player_num = 1\n # do Tic-Tac-Toe until we have found a winner\n while not winner:\n print_board(board)\n move = raw_input('\\n%s, where would you like to go? ' % (cur_player))\n row, col = parse_move(move)\n # if we couldn't parse the move then try again\n if row == None or col == None:\n os.system('clear')\n print \"I didn't recognize your move!\"\n print \"Make sure your move is a row and column with no spaces (A4)\\n\"\n continue\n # if they moved somewhere there is already a mark then try again\n if not valid_move(board, row, col):\n os.system('clear')\n print \"You can't move there! Try again.\\n\"\n continue\n # mark the move on the board\n make_move(board, player_num, row, col)\n # see if there is a winner\n winner = get_winner(board)\n # switch turns\n cur_player = player_2 if cur_player == player_1 else player_1\n player_num = 2 if player_num == 1 else 1\n os.system('clear')\n # the winner will either be 1 or 2. If 1 then outcome is True (for player 1)\n outcome = True if winner == 1 else False\n return outcome",
"async def tictactoe(self, ctx, opponent: discord.Member):\n\n if opponent == ctx.message.author:\n await ctx.send(\"**You cannot play against yourself!**\")\n return\n\n invitation = lambda d=False: [\n [\n Button(label=\"Decline\", style=ButtonStyle.red, disabled=d),\n Button(label=\"Accept\", style=ButtonStyle.green, disabled=d)\n ]\n ]\n\n msg = await ctx.send(f\"**{opponent.mention}, {ctx.message.author.mention} invited you to a game of TicTacToe!**\", components=invitation())\n\n try:\n\n invite = await bot.wait_for(\"button_click\", check=lambda res: res.user.id == opponent.id and res.message.id == msg.id, timeout=60)\n\n if invite.component.label == \"Decline\":\n await invite.respond(type=InteractionType.UpdateMessage, content=f\"**{opponent.mention} declined the invitation!**\", components=invitation(True))\n return\n \n else:\n await invite.respond(type=InteractionType.UpdateMessage, content=f\"**{opponent.mention} accepted the invitation!**\", components=invitation(True))\n await asyncio.sleep(1)\n pass\n \n except asyncio.TimeoutError:\n await msg.edit(type=InteractionType.UpdateMessage, content=f\"**Timed out!**\", components=invitation(True))\n return\n\n options = [\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]\n ]\n\n O = 1\n X = -1\n\n #the person who has the turn\n turn = random.choice([O, X])\n\n\n#----------------------------------------------------------------------------------------------------------------\n\n\n #updates the board to match the options\n def board(disabled: bool=False):\n\n board = [[0]*3 for i in range(3)]\n\n for i in range(3):\n for j in range(3):\n if options[i][j] == O:\n board[i][j] = Button(style=ButtonStyle.green, label=\"O\", id=f\"{i} {j}\", disabled=True)\n elif options[i][j] == X:\n board[i][j] = Button(style=ButtonStyle.red, label=\"X\", id=f\"{i} {j}\", disabled=True)\n else:\n board[i][j] = Button(style=ButtonStyle.grey, label=\"\\u200b\", id=f\"{i} {j}\", disabled=disabled)\n return board\n\n #check if there is a winner\n def has_won():\n\n #check horizontal\n for x in options:\n if sum(x) == 3 or sum(x) == -3:\n return True\n\n #check vertical\n for y in range(3):\n v = options[0][y] + options[1][y] + options[2][y]\n if v == 3 or v == -3:\n return True\n\n #check diagonals\n d = options[0][2] + options[1][1] + options[2][0]\n if d == 3 or d == -3:\n return True\n\n d = options[0][0] + options[1][1] + options[2][2]\n if d == 3 or d == -3:\n return True\n\n def is_tie():\n\n if not (\"0\" in str(options)) and not has_won():\n return True\n\n def get_player(team):\n\n if team == 1:\n return opponent\n else:\n return ctx.message.author\n\n\n#----------------------------------------------------------------------------------------------------------------\n\n\n await msg.edit(f\"**{get_player(turn).mention}({turn}) goes first**\", components=board())\n\n\n while True:\n try:\n\n #wait 60 seconds for the user who has this turn to react\n res = await bot.wait_for(\"button_click\", check=lambda res: res.user.id == get_player(turn).id and res.message.id == msg.id, timeout=60) \n\n #changes the selected option's value depending on who's turn it is\n options[int(res.component.id.split()[0])][int(res.component.id.split()[1])] = turn\n\n #if there is a winner\n if has_won():\n await res.respond(type=InteractionType.UpdateMessage, content=f\"**🎉 {get_player(turn).mention} is the winner! 
🎉**\", components=board(True))\n return\n elif is_tie():\n await res.respond(type=InteractionType.UpdateMessage, content=f\"**Draw!**\", components=board(True))\n return\n else:\n turn = -turn\n await res.respond(type=InteractionType.UpdateMessage, content=f\"**{get_player(turn).mention}'s turn**\", components=board())\n pass\n\n #if the player in turn times out\n except asyncio.TimeoutError:\n await msg.edit(f\"**Timed out! 🎉 {get_player(-turn).mention} is the winner! 🎉**\", components=board(True))\n return",
"def play(self, tictactoe):\n raise Exception(\"You implement this method to use it.\")",
"def test_init_game_2(self):\n utils.init_game()\n self.assertEqual(pg.display.get_caption()[0], utils.NAME)",
"def run_tests():\n \n test_constructor_positive()\n test_constructor_negative()\n test_game_move_positive()\n test_game_move_negative()\n test_game_move_edge()\n print(\"Congratulations ! You passed all the game test cases.\")",
"def test_switch_player(self):\n\n previous_player = self.controller.game_state.player\n\n #compare the current player to the previous player after calling the\n #flip_current_player() function\n self.controller.game_state.flip_current_player()\n self.assertNotEqual(self.controller.game_state.player, previous_player)",
"def test_gameAddText(self):\n # this is tested graphically, it is UI\n pass",
"def test_current_player(self):\n TestGame = TTT_Game(\"John\", \"Jane\")\n\n # if odd, returns \"John\"\n TestGame.turn_count = 1\n self.assertEqual(TestGame.current_player(), \"John\")\n TestGame.turn_count = 5\n self.assertEqual(TestGame.current_player(), \"John\")\n\n # if even, returns \"O\"\n TestGame.turn_count = 8\n self.assertEqual(TestGame.current_player(), \"Jane\")",
"def setup_game(self):",
"def testPlayModeAndSwitches(self):\n\t c = Controller()\n\t self.failUnless(c.isRecording)\n\t self.failIf(c.isPlayingBack)\n\t c.replay()\n\t self.failIf(c.isRecording)\n\t self.failUnless(c.isPlayingBack)",
"def test_legit_player(self):\n board = Board()\n player1 = LegitPlayer()\n player2 = LegitPlayer()\n player_guard1 = PlayerGuard(player1)\n player_guard2 = PlayerGuard(player2)\n\n # set ids\n p1id = uuid.uuid4()\n p2id = uuid.uuid4()\n player_guard1.set_id(p1id)\n player_guard2.set_id(p2id)\n\n # test methods don't error out\n player_guard1.start_of_game()\n player_guard2.start_of_game()\n board.place_worker(*player_guard1.place_worker(board))\n board.place_worker(*player_guard2.place_worker(board))\n board.place_worker(*player_guard2.place_worker(board))\n board.place_worker(*player_guard1.place_worker(board))\n player_guard1.play_turn(board)\n player_guard2.play_turn(board)\n player_guard1.end_of_game(\"legit player\")\n player_guard2.end_of_game(\"legit player\")",
"def test_current_symbol(self):\n TestGame = TTT_Game(\"John\", \"Jane\")\n\n # if odd, returns \"X\"\n TestGame.turn_count = 1\n self.assertEqual(TestGame.current_symbol(), \"X\")\n TestGame.turn_count = 3\n self.assertEqual(TestGame.current_symbol(), \"X\")\n\n # if even, returns \"O\"\n TestGame.turn_count = 4\n self.assertEqual(TestGame.current_symbol(), \"O\")",
"def test_actor_matches_activity(self):",
"def main():\n\tprint(\"Welcome to TicTacToe\")\n\tboard = Board()\n\twhile (not board.isOver()):\n\t\tprint(\"It is {0}'s turn\".format(board.current) + board.__str__())\n\t\tmove = input('Where would you like to go? : ').strip()\n\t\tif (move == 'q'):\n\t\t\tbreak\n\t\telif (board.makeMove(move) == 1):\n\t\t\tboard.switchPlayer()\n\t\telse:\n\t\t\tprint(\"I didn't understand your input, these are the valid inputs:\\nentering 'q' will quit out of the game.\\n\")\n\t\t\tprint(\"entering a number will place the peice in that box, the numbers are as follows:\\n \\n1|2|3\\n-----\\n4|5|6\\n-----\\n7|8|9\\n\")\n\tprint(board.__str__() + \"\\nGame Over\")\n\tif (board.isOver() is Piece.EX or board.isOver() is Piece.OH):\n\t\tprint(\"Player {0} wins!\".format(board.isOver())) \n\telse:\n\t\tprint(\"It was a draw\")",
"def test_game():\n \n # check that all of the functions are callable\n assert callable(g.AdventureGame)\n \n # Load up the rooms for the test game\n testGame = g.AdventureGame(tg.testRooms, tg.starting_room)\n \n # Test that the constructor properly loads the rooms\n assert testGame.rooms == tg.testRooms\n assert testGame.starting_room == tg.starting_room\n assert testGame.current_room == tg.starting_room"
]
| [
"0.7204975",
"0.639281",
"0.63556343",
"0.62928545",
"0.62355614",
"0.6217936",
"0.61023337",
"0.6055885",
"0.59701693",
"0.5949051",
"0.5938375",
"0.59304994",
"0.59215665",
"0.58301795",
"0.5829346",
"0.58051157",
"0.5797176",
"0.578909",
"0.5774417",
"0.57690454",
"0.5751079",
"0.5686772",
"0.5682659",
"0.56344",
"0.56254256",
"0.56169325",
"0.56093055",
"0.5589724",
"0.5578821",
"0.5577185"
]
| 0.7255627 | 0 |
Checks if a is a valid point. Points are tuples with two elements which are integer coordinates | def is_point(a):
return isinstance(a, tuple) and isinstance(a[0], int) and isinstance(a[1], int) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_point(self, value):\n if not isinstance(value, (list, tuple)):\n return \"Points must be a list of coordinate pairs\"\n elif not len(value) == 2:\n return \"Value (%s) must be a two-dimensional point\" % repr(value)\n elif not isinstance(value[0], (float, int)) or not isinstance(\n value[1], (float, int)\n ):\n return \"Both values (%s) in point must be float or int\" % repr(value)",
"def validate_coordinates_input(points: tuple) -> None:\n\n for coordinate in points:\n if not isinstance(coordinate, tuple):\n raise InvalidGroundValueError(\n f\"Object must be a tuple\"\n f\" with format like (1, 2), not {coordinate}\"\n )",
"def isPoint(point, widgetType = 'widget'):\n if not(isinstance(point, list) or isinstance(point, tuple)):\n raise pgUIException(str(point) + ' is not a valid tuple/list for ' +\n widgetType,\n code = 31)\n if len(point) != 2:\n raise pgUIException(str(point) + ' has to have two elements',\n code = 32)\n if not(isinstance(point[0], int)) or not(isinstance(point[1], int)):\n raise pgUIException(str(point) + ' is not a valid point for ' +\n widgetType + ' position',\n code = 33)\n if point[0] < 0 or point[1] < 0:\n raise pgUIException(str(point) +\n ' both coordinates have to be 0 or positive',\n code = 34)\n return True",
"def test_point__tuple(self):\n\n p = tuples.Tuple([\"x\", \"y\", \"z\", \"w\"], 4.3, -4.2, 3.1, 1)\n\n self.assertEqual(p.x, 4.3)\n self.assertEqual(p.y, -4.2)\n self.assertEqual(p.z, 3.1)\n self.assertEqual(p.w, 1)",
"def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)",
"def ispoint(x):\n if isvect(x) and x[3] > 0.0:\n return True\n return False",
"def _isPoint(self):\n return (self.width == 0 and self.height == 1) or (self.height == 0 and self.width == 1)",
"def _isPoint(self):\n return (self.width == 0 and self.height == 1) or (self.height == 0 and self.width == 1)",
"def __validatePoint(self, point):\n # print point\n if point[0] > self.scn_width:\n raise ValueError('X coordinate: %d out of range.' % point[0])\n if point[1] > self.scn_height:\n raise ValueError('Y coordinate: %d out of range.' % point[1])\n return point",
"def is_valid_point(self, P):\n x, y = P\n assert 0 <= x < self.fp and 0 <= y < self.fp, 'Point outside the group'\n LS = (y ** 2) % self.fp\n PS = (x ** 3 + self.a * x + self.b) % self.fp\n assert LS == PS, 'Point not valid - equation'",
"def _position_is_valid(position):\n\n # Make sure that...\n # position is a tuple\n # position's length is 2\n # every value in the tuple is an int\n # every int in the tuple is either 0, 1 or 2\n # if not, return False\n\n if not isinstance(position, tuple) \\\n or len(position) != 2 \\\n or not all(isinstance(x, int) for x in position) \\\n or any(x for x in position if not 0 <= x <= 2):\n return False\n\n return True",
"def validate_points(self, data):\n if data> 1:\n data = 1\n elif data < 0:\n data=0\n return data",
"def pointInputValid(C):\r\n P = point(0, 0, C)\r\n P.x = int( input(\"enter the x value for point:\") )\r\n P.y = int( input(\"enter the y value for point:\") )\r\n if not P.pointValid():\r\n print(\"The given point is not valid.\")\r\n return 0\r\n return P",
"def check_point(point,points):\n if point in points:\n return True\n else:\n return False",
"def __getPointXYs(self, raw_string):\n try:\n pointsRE = re.compile('^\\((\\d*\\D*, *\\D*\\d*)\\)\\D*\\((\\d*\\D*, *\\D*\\d*)\\)$')\n points = pointsRE.search(raw_string.strip()).groups()\n startPoint = (int(points[0].split(',')[0].strip()), int(points[0].split(',')[1].strip()))\n endPoint = (int(points[1].split(',')[0].strip()), int(points[1].split(',')[1].strip()))\n return self.__validatePoint(startPoint), self.__validatePoint(endPoint)\n except AttributeError:\n traceback.print_exc()\n raise ValueError('Failed to get point coordinates.')",
"def _is_positive_int_tuple(item):\n if not isinstance(item, tuple):\n return False\n for i in item:\n if not _is_positive_int(i):\n return False\n return True",
"def test_point(self):\n\n p = points.Point(4.3, -4.2, 3.1)\n\n self.assertEqual(p.x, 4.3)\n self.assertEqual(p.y, -4.2)\n self.assertEqual(p.z, 3.1)\n self.assertEqual(p.w, 1)",
"def valid_coordinates(self, x, y):\n return ((x >= 0) and (x < self.width) and\n (y >= 0) and (y < self.height))",
"def point_valid(self, pt, samples):\n\n\t cell_coords = self.get_cell_coords(pt)\n\t for idx in self.get_neighbours(cell_coords):\n\t nearby_pt = samples[idx]\n\t # Squared distance between or candidate point, pt, and this nearby_pt.\n\t distance2 = (nearby_pt[0]-pt[0])**2 + (nearby_pt[1]-pt[1])**2\n\t if distance2 < (self.r)**2:\n\t # The points are too close, so pt is not a candidate.\n\t return False\n\t # All points tested: if we're here, pt is valid\n\t return True",
"def test_make_point(self):\n\n self.assertEqual(sppasPoint(3., 0.005), sppasANTX.make_point(\"132300\"))\n with self.assertRaises(TypeError):\n sppasANTX.make_point(\"3a\")\n with self.assertRaises(TypeError):\n sppasANTX.make_point(\"3.\")",
"def __init__(self, point_a: AbstractPoint, point_b: AbstractPoint, point_c: AbstractPoint):\n if all(isinstance(entry, AbstractPoint) for entry in [point_a, point_b, point_c]):\n self.__point_a = point_a\n self.__point_b = point_b\n self.__point_c = point_c\n else:\n raise TypeError(\"Not all parameters are of type %s\" % type(AbstractPoint))",
"def isPositionValid(self, x, y):\n if x >= self._width:\n return False\n if y >= self._height:\n return False\n if x < 0:\n return False\n if y < 0:\n return False\n return not (x, y) in self._invalidPositions",
"def valid_point(self, row, col):\n return self.topdown_view[row][col] == 1.0",
"def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True",
"def HasPoint(self, vtkAMRBox, , , p_float_6, p_float_7, p_float_8):\n ...",
"def test_point_positive_on_one_line(self):\n a = Point(1, 0)\n b = Point(34, 0)\n c = Point(42, 0)\n\n self.assertTrue(Point.on_one_line(a, b, c),\n \"Test of Point.on_one_line(a, b, c) failed, returned value != True.\")\n d = Point(1, 2)\n e = Point(34, 43)\n f = Point(42, 54)\n\n self.assertFalse(Point.on_one_line(d, e, f),\n \"Test of Point.on_one_line(d, e, f) failed, returned value != False.\")\n\n self.assertTrue(Point.on_one_line(a), \"Test of Point.on_one_line(a) failed, returned value != True.\")",
"def isInternal(self, aPoint):\n if (aPoint.x >= self.pMin.x and aPoint.x <= self.pMax.x) \\\n and (aPoint.y >= self.pMin.y and aPoint.y <= self.pMax.y):\n return True\n else:\n return False",
"def _coerce_pointslike_arg(\n points: Union[NumericArray, VectorArray], copy: bool = False\n) -> Tuple[np.ndarray, bool]:\n if isinstance(points, collections.abc.Sequence):\n points = np.asarray(points)\n\n if not isinstance(points, np.ndarray):\n raise TypeError(\"Given points must be convertible to a numerical array.\")\n\n if points.ndim > 2:\n raise ValueError(\"Array of points must be 1D or 2D\")\n\n if points.ndim == 2:\n if points.shape[1] != 3:\n raise ValueError(\"Array of points must have three values per point (shape (n, 3))\")\n singular = False\n\n else:\n if points.size != 3:\n raise ValueError(\"Given point must have three values\")\n singular = True\n points = np.reshape(points, [1, 3])\n\n if copy:\n return points.copy(), singular\n return points, singular",
"def point_valid(pt):\n\n cell_coords = get_cell_coords(pt)\n for idx in get_neighbours(cell_coords):\n nearby_pt = samples[idx]\n # Squared distance between or candidate point, pt, and this nearby_pt.\n distance2 = (nearby_pt[0]-pt[0])**2 + (nearby_pt[1]-pt[1])**2\n if distance2 < r**2:\n # The points are too close, so pt is not a candidate.\n return False\n # All points tested: if we're here, pt is valid\n return True",
"def __getPointXY(self, raw_string):\n try:\n # print 'input:',str\n pointRE = re.compile('^\\((\\d*, *\\d*)\\)$')\n x, y = pointRE.search(raw_string.strip()).groups()[0].split(',')\n # print 'x: %s, y: %s' % (x,y)\n return self.__validatePoint((int(x), int(y.strip())))\n except AttributeError:\n raise ValueError('Failed to get point coordinates.')"
]
| [
"0.78360605",
"0.77310133",
"0.75255525",
"0.66908485",
"0.6647603",
"0.66428196",
"0.66331434",
"0.66331434",
"0.6566558",
"0.6560455",
"0.6509387",
"0.64592165",
"0.6357859",
"0.6274078",
"0.62555736",
"0.6234108",
"0.61824715",
"0.61678946",
"0.6166192",
"0.61394876",
"0.6135534",
"0.61253214",
"0.6120187",
"0.6041025",
"0.60330987",
"0.6020298",
"0.595471",
"0.5940862",
"0.59382236",
"0.5937725"
]
| 0.84881026 | 0 |
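Editorial note on the is_point record above: the stored document only checks that the value is a tuple whose first two elements are ints; it never checks the length, so a longer tuple of ints also passes, and a tuple with fewer than two elements raises IndexError instead of returning False. The sketch below is a stricter variant for illustration only; it is not part of the dataset and the name is_point_strict is invented here.

def is_point_strict(a):
    # require a tuple of exactly two integer coordinates
    return (
        isinstance(a, tuple)
        and len(a) == 2
        and all(isinstance(coord, int) for coord in a)
    )

assert is_point_strict((3, 4))
assert not is_point_strict((3, 4, 5))   # wrong length
assert not is_point_strict((3.0, 4))    # non-integer coordinate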
Return distance between a and b. | def distance(a, b):
ax, ay = a
bx, by = b
dx = bx - ax
dy = by - ay
return (abs(dx) + abs(dy) + abs(dx - dy)) / 2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))",
"def dist(a, b):\n return math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))",
"def distance(a, b):\n return (np.sum((a - b)**2))**0.5",
"def distance(self, a, b):\n raise NotImplementedError()",
"def distance(self, a, b):\n if not (a in self and b in self):\n raise RuntimeError(\n \"Can only compute distance for values within \"\n \"the space, not %s and %s.\" % (a, b)\n )\n return abs(a - b)",
"def distance(self, a, b):\n if not (a in self and b in self):\n raise RuntimeError(\n \"Can only compute distance for values within \"\n \"the space, not %s and %s.\" % (a, b)\n )\n return abs(a - b)",
"def _dist(a, b):\n return torch.pow(a - b, 2).sum(-1)",
"def distance(self, a, b):\n if not (a in self and b in self):\n raise RuntimeError(\n \"Can only compute distance for values within \"\n \"the space, not {} and {}.\".format(a, b)\n )\n return 1 if a != b else 0",
"def distance(a, b):\n return math.sqrt((b[0]-a[0])**2 + (b[1]-a[1])**2)",
"def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))",
"def distance(x: int, y: int, a: int, b: int) -> float:\n return ((x - a) ** 2 + (y - b) ** 2) ** .5",
"def distance(a, b):\n return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)",
"def distance(a, b):\n dx = a[0] - b[0]\n dy = a[1] - b[1]\n\n return math.sqrt(dx*dx + dy*dy)",
"def get_distance(self, b_a, b_b):\n dx = max(max(0, b_a[1] - b_b[3]), max(0, b_b[1] - b_a[3]))\n dy = max(max(0, b_a[0] - b_b[2]), max(0, b_b[0] - b_a[2]))\n return max(dx, dy)",
"def dist(a, b):\n return np.sum((a-b)**2.0)**.5",
"def distance(A, B):\n return abs(A - B)",
"def distance(A, B):\n return abs(A - B)",
"def distance(a, b):\n if len(a) != 2 or len(b) != 2:\n raise ValueError\n\n dx = float(a[0]) - float(b[0])\n dy = float(a[1]) - float(b[1])\n\n return ( dx**2 + dy**2 )**0.5",
"def distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)",
"def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)",
"def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))",
"def dist(a, b):\n x0, y0 = a # Destructuring assignment\n x1, y1 = b\n\n return math.sqrt((x1 - x0)**2 + (y1 - y0)**2)",
"def dist(a: Point, b: Point):\n return (a.x - b.x) ** 2 + (a.y - b.y) ** 2",
"def EuclideanDistance( self, a, b ):\n return sqrt( self.EuclideanDistanceSq(a,b) )",
"def eucl_dist(a, b):\n return np.sqrt( (a[0]-b[0])** 2 + (a[1]-b[1])** 2)",
"def distance(a, b):\n if len(a) > len(b):\n a = a[:len(b)]\n elif len(b) > len(a):\n b = b[:len(a)]\n\n ar = numpy.array(a)\n br = numpy.array(b)\n dist = numpy.linalg.norm(ar-br)\n\n return dist",
"def dist(a,b):\n dist = 0.\n for i in range(len(a)):\n dist += (b[i]-a[i])**2.\n\n dist = dist**.5\n return dist",
"def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )",
"def heuristic(self, a, b):\n return math.fabs(a[0] - b[0]) + math.fabs(a[1] - b[1])",
"def dist(a, b):\n x0, y0 = a # Destructuring assignment\n x1, y1 = b\n \"\"\" math.sqrt(x): Return the square root of x\"\"\"\n return math.sqrt((x1 - x0)**2 + (y1 - y0)**2)"
]
| [
"0.84512717",
"0.8359271",
"0.82396984",
"0.8216251",
"0.82030517",
"0.82030517",
"0.81532663",
"0.8082506",
"0.8056524",
"0.80535",
"0.80527997",
"0.8037395",
"0.80054075",
"0.7997625",
"0.7989338",
"0.7966094",
"0.7966094",
"0.79631495",
"0.79503185",
"0.78624356",
"0.78066504",
"0.77834684",
"0.7684105",
"0.76492816",
"0.7589675",
"0.7587376",
"0.7579107",
"0.7555146",
"0.75273186",
"0.75038934"
]
| 0.8451247 | 1 |
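The distance record that closes here uses the half-sum form (|dx| + |dy| + |dx - dy|) / 2, which is algebraically equal to max(|dx|, |dy|, |dx - dy|), the usual graph distance on a hexagonal grid in axial coordinates under the sign convention where (1, 1) and (-1, -1) are unit steps (the more common convention writes |dx + dy| instead). That hex-grid reading is an inference from the neighbouring num_points_* records, not something the dataset states; the check below is an illustration only. Note also that for integer inputs the value is always a whole number, although the division returns a float in Python 3.

def hex_distance(a, b):
    # same formula as the distance() document in this record
    ax, ay = a
    bx, by = b
    dx, dy = bx - ax, by - ay
    return (abs(dx) + abs(dy) + abs(dx - dy)) / 2

# half-sum form == max form for every offset in a small window
for dx in range(-3, 4):
    for dy in range(-3, 4):
        assert (abs(dx) + abs(dy) + abs(dx - dy)) / 2 == max(abs(dx), abs(dy), abs(dx - dy))

# the six unit neighbours of the origin under this convention
neighbours = [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (-1, -1)]
assert all(hex_distance((0, 0), n) == 1 for n in neighbours)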
Checks if a and b are within the given distance d | def in_distance(a, b, d):
return distance(a, b) <= d | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance(self, a, b):\n if not (a in self and b in self):\n raise RuntimeError(\n \"Can only compute distance for values within \"\n \"the space, not {} and {}.\".format(a, b)\n )\n return 1 if a != b else 0",
"def dwithin(a, b, distance, **kwargs):\n return lib.dwithin(a, b, distance, **kwargs)",
"def dist(self, one, two):\n return sum((one[0] != two[0], one[1] != two[1]))",
"def distance(self, a, b):\n if not (a in self and b in self):\n raise RuntimeError(\n \"Can only compute distance for values within \"\n \"the space, not %s and %s.\" % (a, b)\n )\n return abs(a - b)",
"def distance(self, a, b):\n if not (a in self and b in self):\n raise RuntimeError(\n \"Can only compute distance for values within \"\n \"the space, not %s and %s.\" % (a, b)\n )\n return abs(a - b)",
"def distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)",
"def dist(a, b):\n return math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))",
"def distance(a, b):\n dx = a[0] - b[0]\n dy = a[1] - b[1]\n\n return math.sqrt(dx*dx + dy*dy)",
"def distance(a, b):\n return (np.sum((a - b)**2))**0.5",
"def distance(a, b):\n ax, ay = a\n bx, by = b\n dx = bx - ax\n dy = by - ay\n return (abs(dx) + abs(dy) + abs(dx - dy)) / 2",
"def distance(self, a, b):\n raise NotImplementedError()",
"def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)",
"def distance(a, b):\n return math.sqrt((b[0]-a[0])**2 + (b[1]-a[1])**2)",
"def distance(a, b):\n return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)",
"def distance(a, b):\n if len(a) != 2 or len(b) != 2:\n raise ValueError\n\n dx = float(a[0]) - float(b[0])\n dy = float(a[1]) - float(b[1])\n\n return ( dx**2 + dy**2 )**0.5",
"def intersects(a, b, c, d):\n return ccw(a, c, d) != ccw(b, c, d) and ccw(a, b, c) != ccw(a, b, d)",
"def is_distance(x, y):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n\n # TODO\n raise NotImplementedError",
"def dist(a, b):\n x0, y0 = a # Destructuring assignment\n x1, y1 = b\n\n return math.sqrt((x1 - x0)**2 + (y1 - y0)**2)",
"def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))",
"def check_if_intersection(point_a, point_b, point_c, point_d):\n try:\n # get factors a and b of such that y = a + b*x describes line between point_a and point_b\n b = (point_b[1] - point_a[1]) / (point_b[0] - point_a[0])\n a = point_a[1] - b * point_a[0]\n # get factors c and d of such that y = c + d*x describes line between point_c and point_d\n d = (point_d[1] - point_c[1]) / (point_d[0] - point_c[0])\n c = point_c[1] - d * point_c[0]\n\n # calulate intersection by setting a + b*x = c + d*x\n x = (a - c) / (d - b)\n\n # check if x is between x coordinates of points a and b and between x coordinates of points c and d\n return (min(point_a[0], point_b[0]) < x) & (max(point_a[0], point_b[0]) > x) & \\\n (min(point_c[0], point_d[0]) < x) & (max(point_c[0], point_d[0]) > x)\n\n except ZeroDivisionError:\n # this mean point a and b have same x coordinate or point c and d have same c coordinate -> deal with this\n if (point_b[0] == point_a[0]) & (point_c[0] == point_d[0]): # both pairs have same x coordinate\n return point_a[0] == point_c[0]\n if point_b[0] == point_a[0]:\n # get factors c and d of such that y = c + d*x describes line between point_c and point_d\n d = (point_d[1] - point_c[1]) / (point_d[0] - point_c[0])\n c = point_c[1] - d * point_c[0]\n # get y value of connection between c and d at x = point_a[0]\n y = c + d*point_a[0]\n return (y > min(point_a[1], point_b[1])) & (y < max(point_a[1], point_b[1]))\n if point_c[0] == point_d[0]:\n # get factors a and b of such that y = a + b*x describes line between point_a and point_b\n b = (point_b[1] - point_a[1]) / (point_b[0] - point_a[0])\n a = point_a[1] - b * point_a[0]\n # get y value of connection between a and b at x = point_c[0]\n y = a + b*point_c[0]\n return (y > min(point_c[1], point_d[1])) & (y < max(point_c[1], point_d[1]))",
"def interval_intersect(a, b, c, d):\n if (c <= b) and (a <= d):\n return True\n else:\n return False",
"def dist(a, b):\n return np.sum((a-b)**2.0)**.5",
"def compare_distance(self, a, b):\n a_dist = int(a['distance'])\n b_dist = int(b['distance'])\n if a_dist < b_dist:\n return -1\n elif a_dist > b_dist:\n return 1\n else:\n return 0",
"def bond_check(bond_distance,bond_min=0,bond_max=1.5): # we can define the default min and max in the def\n if bond_distance >bond_min and bond_distance<bond_max:\n return True\n else:\n return False",
"def euclideanDistance(a, b):\n vec = [pow(a[i] - b[i], 2) for i in range(len(a)) if None not in [a[i],b[i]]]\n return (sum(vec) / len(vec)) if len(vec) > 0 else NaN",
"def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )",
"def bond_check(distance, minimum=0, maximum=1.5): # when variables are set equal to => default\n if distance > minimum and distance < maximum:\n return True\n return False",
"def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)",
"def direct_distance(a, b):\n\n if a[0] == b[0]:\n return abs(a[1] - b[1]) - 1\n if a[1] == b[1]:\n return abs(a[0] - b[0]) - 1\n return abs(a[0] - b[0]) - 1",
"def distance_checker(xyz1, xyz2):\n return math.sqrt((xyz1[0] - xyz2[0])**2 + (xyz1[1] - xyz2[1])**2 +\n (xyz1[2] - xyz2[2])**2)"
]
| [
"0.7185901",
"0.7166674",
"0.6720538",
"0.6656798",
"0.6656798",
"0.6601606",
"0.6598739",
"0.65757394",
"0.65753025",
"0.65734214",
"0.65687853",
"0.6567245",
"0.6496106",
"0.6494706",
"0.6463152",
"0.6428333",
"0.64235395",
"0.64084",
"0.63912725",
"0.63847333",
"0.63520175",
"0.6330312",
"0.6330219",
"0.62866443",
"0.6286208",
"0.627648",
"0.6271196",
"0.6270766",
"0.6268999",
"0.62197345"
]
| 0.88950545 | 0 |
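A minimal usage sketch for the in_distance record above, reusing the distance document from the earlier record; illustration only, not part of the dataset.

def distance(a, b):        # as in the earlier distance record
    ax, ay = a
    bx, by = b
    dx, dy = bx - ax, by - ay
    return (abs(dx) + abs(dy) + abs(dx - dy)) / 2

def in_distance(a, b, d):  # as in this record
    return distance(a, b) <= d

assert in_distance((0, 0), (2, 1), 2)        # exactly on the ring of radius 2
assert not in_distance((0, 0), (2, -1), 2)   # distance 3 under this convention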
Calculates how many points lie on the circle (ring) at distance d | def num_points_in_circle(d):
return 6 * d if d > 0 else 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_points_in_distance(d):\n return 1 + 3 * d * (d + 1)",
"def points_in_distance(c, d):\n points = set()\n for i in range(0, d + 1):\n points = points | points_in_circle(c, i)\n return points",
"def getCircleDiameter(self):\n segments = []\n for (i, p1) in enumerate(self.points):\n for p2 in self.points[i+1:]:\n segments.append(Segment(p1, p2))\n s = max(segments, key=lambda s: s.length)\n return Circle(*s.middle, radius=s.length/2)",
"def inside_circle(total_count):\n\n x = np.float32(np.random.uniform(size=total_count))\n y = np.float32(np.random.uniform(size=total_count))\n\n radii = ##\n\n count = ##\n\n return count",
"def points_in_circle(c, d):\n if d == 0:\n return set((c,))\n circle = set()\n x, y = (c[0] + d * directions[4][0], c[1] + d * directions[4][1])\n for m in directions:\n for i in range(1, d + 1):\n x, y = x + m[0], y + m[1]\n circle.add((x, y))\n return circle",
"def distance_to(self, circle):\n diff = tuple(map(sub, self.pos, circle.pos))\n return math.hypot(*diff)",
"def semidiameter(radius, distance):\n\n return np.arcsin(radius / distance)",
"def radius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, self.vertices[0])",
"def inside_circle(total_count):\n\n host_name = MPI.Get_processor_name()\n print(\"Rank {} generating {:n} samples on host {}.\".format(\n rank, total_count, host_name))\n x = np.float64(np.random.uniform(size=total_count))\n y = np.float64(np.random.uniform(size=total_count))\n\n radii = np.sqrt(x*x + y*y)\n\n count = len(radii[np.where(radii<=1.0)])\n\n return count",
"def inradius(self) -> npt.NDArray[np.float_]:\n return dist(self.center, cast(Segment, self.edges[0]).midpoint)",
"def latticepoints(circle_radius, pixel_size):\n\n numlatticepoints = 0\n npixels = int(circle_radius/float(pixel_size))\n for i in range(-npixels, npixels+1, 1):\n for j in range(-npixels, npixels+1, 1):\n if ((i*pixel_size)**2 + (j*pixel_size)**2) <= (np.sqrt(2.*float(npixels*pixel_size)**2))**2:\n #if ((m*pixel_size)**2 + (n*pixel_size)**2) <= npixels**2:\n numlatticepoints = numlatticepoints + 1\n\n return numlatticepoints",
"def great_circle_distance(pnt1, pnt2, radius):\n\t\t\tlat1 = radians(pnt1[0])\n\t\t\tlat2 = radians(pnt2[0])\n\t\t\tdLat = lat2 - lat1\n\t\t\tdLon = radians(pnt2[1]) - radians(pnt1[1])\n\t\t\ta = sin(dLat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dLon / 2.0) ** 2\n\t\t\treturn 2 * asin(min(1, sqrt(a))) * radius * 57.2957795",
"def get_circle_radius(self, point, center):\n x, y, z = point[:]\n x0, y0, z0 = center[:]\n return math.sqrt((x-x0)**2 + (y-y0)**2 + (z-z0)**2)",
"def radius(self):\n c = self.centroid()\n dmax = -np.inf\n for vertex in self.path.vertices:\n d = np.linalg.norm(vertex - c)\n if d > dmax:\n dmax = d\n return d",
"def calcPointDensity(number_NN, radius):\n \n D = (number_NN+1.0)/((4.0/3)*np.pi*pow(radius, 3))\n \n return D",
"def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d",
"def intersection_area(self, d, R, r):\n \n if d <= abs(R-r):\n # One circle is entirely enclosed in the other.\n return np.pi * min(R, r)**2\n if d >= r + R:\n # The circles don't overlap at all.\n return 0\n \n r2, R2, d2 = r**2, R**2, d**2\n alpha = np.arccos((d2 + r2 - R2) / (2*d*r))\n beta = np.arccos((d2 + R2 - r2) / (2*d*R))\n answer = (r2 * alpha + R2 * beta -\n 0.5 * (r2 * np.sin(2*alpha) + R2 * np.sin(2*beta)))\n return answer",
"def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))",
"def _calculate_distance(self):\n xy = list(zip(self.x, self.y))\n\n dist = [0]\n for i in range(1, len(xy)):\n dist.append(self.distance_between_two_points(xy[i-1], xy[i]))\n\n return np.array(dist).cumsum()",
"def amount(self):\n return len(self.circles)",
"def get_distance(self, point, cpoint):\n distance = 0.0\n for m, s in zip(point, cpoint):\n distance += pow(m - s, 2)\n distance = math.sqrt(distance)\n return distance",
"def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2",
"def distance_from_center(self, x: int, y: int) -> float:\n width, height = self.width, self.height\n dis = distance(x, y, width/2, height/2)\n return dis",
"def perimeter(points):\n return sum(get_distances(points))",
"def calc_dist(self, points): \n dist_x = [self._current_pose.position.x - p.pose.position.x for p in points]\n dist_y = [self._current_pose.position.y - p.pose.position.y for p in points]\n dist = np.hypot(dist_x,dist_y) \n if len(dist) > 0:\n return min(dist) \n else: \n return 0",
"def calDominationCount(p,visitedPoints):\n isDominated = utils.MultiThread(utils.dominating, zip([visitedPoints[k].mean for k in visitedPoints],repeat(p.mean)))\n dominationCount = sum(isDominated)\n print('Please _cutils.calDominantionCount(). This method is too slow.')\n return dominationCount",
"def dist(x,y,xc=0.,yc=0.):\n return sqrt((x-xc)**2+(y-yc)**2)",
"def points_on_circumference(center=(0, 0), r=50, n=100):\n\treturn [\n (\n center[0]+(cos(2 * pi / n * x) * r), \n center[1] + (sin(2 * pi / n * x) * r) \n\n ) for x in range(0, n + 1)]",
"def distance_from_cylinder(self, points, params, sqrt=False):\n # axis: 3 x 1, center: 1 x 3\n axis, center, radius = params\n center = center.reshape((1, 3))\n axis = axis.reshape((3, 1))\n\n v = points - center\n prj = (v @ axis) ** 2\n\n # this is going negative at some point! fix it. Numerical issues.\n # voilating pythagoras\n dist_from_surface = torch.sum(v * v, 1) - prj[:, 0]\n dist_from_surface = torch.clamp(dist_from_surface, min=1e-5)\n\n distance = torch.sqrt(dist_from_surface) - radius\n # distance.register_hook(self.print_norm)\n distance = distance ** 2\n\n if sqrt:\n distance = guard_sqrt(distance)\n\n if torch.sum(torch.isnan(distance)):\n import ipdb;\n ipdb.set_trace()\n if self.reduce:\n distance = torch.mean(distance)\n\n return distance",
"def points_on_circumference_with_per(center=(0, 0), r=50, n=100, per = 50):\n\n\t# circum_cnt is actual points on cicumference as a percentage of total \n\t# random points(n) = Percentage_of_Total_Points * n / 100\n\tcircum_cnt = int(per*n/100)\n\n\t# random_cnt is points inside the circle = Total random points - Points on Circum\n\trandom_cnt = n - circum_cnt\n\n\t# Append points on circumference\n\tfinal_pts = [\n\t\t(\n\t\t\tcenter[0]+(cos(2 * pi / circum_cnt * x) * r), \n\t\t\tcenter[1] + (sin(2 * pi / circum_cnt * x) * r) \n\t\t) for x in range(0, circum_cnt + 1)]\n\n\n\n\n\t# Generate random points inside circle\n\t# random points inside circle should have atleast 5 radius to be visible enough\n\tfor i in range(1,random_cnt+1):\n\t\tfinal_pts.append( (center[0]+ cos(2 * pi / circum_cnt * i) * random.randint(1,r-20),\n\t\t\t\t\t\t\tcenter[1] + sin(2 * pi / circum_cnt * i) * random.randint(1,r-20)))\n\n\n\treturn final_pts"
]
| [
"0.82408607",
"0.7174484",
"0.68130255",
"0.65756804",
"0.6550924",
"0.6432983",
"0.63694406",
"0.62871903",
"0.6249227",
"0.620468",
"0.62038755",
"0.6202209",
"0.6198283",
"0.6193179",
"0.6191535",
"0.6151975",
"0.6101965",
"0.60924065",
"0.6084547",
"0.6073051",
"0.60636437",
"0.6061378",
"0.60583425",
"0.60534984",
"0.6026429",
"0.60250974",
"0.60106575",
"0.5994906",
"0.59891945",
"0.5986214"
]
| 0.8592174 | 0 |
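For a hexagonal grid, 6 * d is the number of cells on the ring ("circle") at distance d from a cell, with the single centre cell for d = 0; this matches the points_in_circle negative stored in the record. Below is a brute-force cross-check, assuming the same distance convention as the earlier distance record; illustration only, not part of the dataset.

def hex_distance(a, b):
    ax, ay = a
    bx, by = b
    dx, dy = bx - ax, by - ay
    return (abs(dx) + abs(dy) + abs(dx - dy)) / 2

def ring_size(d, span=20):
    # count grid points at exactly distance d from the origin (span just bounds the search)
    return sum(
        1
        for x in range(-span, span + 1)
        for y in range(-span, span + 1)
        if hex_distance((0, 0), (x, y)) == d
    )

assert ring_size(0) == 1
assert all(ring_size(d) == 6 * d for d in range(1, 6))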
Calculates how many points lie within the given distance | def num_points_in_distance(d):
return 1 + 3 * d * (d + 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_points_in_circle(d):\n return 6 * d if d > 0 else 1",
"def length(self):\n points = [Point(v, crs=self.crs) for v in self.vertices]\n distances = [a.distance(b) for a, b in zip(points[:-1], points[1:])]\n return sum(distances)",
"def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))",
"def distance(self) -> int:\n return 0",
"def _calculate_distance(self):\n xy = list(zip(self.x, self.y))\n\n dist = [0]\n for i in range(1, len(xy)):\n dist.append(self.distance_between_two_points(xy[i-1], xy[i]))\n\n return np.array(dist).cumsum()",
"def num_points_sweep(self, start, stop, step):\r\n return(abs((stop - start)//step) + 1)",
"def calc_dist(self, points): \n dist_x = [self._current_pose.position.x - p.pose.position.x for p in points]\n dist_y = [self._current_pose.position.y - p.pose.position.y for p in points]\n dist = np.hypot(dist_x,dist_y) \n if len(dist) > 0:\n return min(dist) \n else: \n return 0",
"def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0",
"def calc_spot_size(self, distance):\n if distance < 1.2:\n return self.spot_width_close\n else:\n return distance * self.spot_width_scalar",
"def calculateDistance(point1, point2, dimension):\n distance=0\n # print 'p1: ' + str(point1) + 'p2: ' + str(point2) + str(dimension)\n for x in range(dimension - 1):\n distance += pow((point1[x] - point2[x]), 2)\n return math.sqrt(distance)",
"def get_distance(point_a, point_b):\n \n return np.sqrt(np.sum((point_a - point_b) ** 2, 1))",
"def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))",
"def n_elements(x, dist, var=None):\n n = dist/mdiff(x)\n if var == 'time':\n n = n/60\n return int(np.round(n))",
"def distance(self):\n _, _, costs = self.calculate_costs()\n return np.sum(costs)",
"def distance(x,y):\n return np.sqrt( np.power(np.array(x) - np.array(y), 2).sum() )",
"def distance_to(self, p):\n return (self - p).length()",
"def distance_to(self, p):\n return (self - p).length()",
"def dist_points(x,y):\n\n return abs(x[0]-y[0]) + abs(x[1]-y[1])",
"def total_distance(points):\n return sum([distance_lat_lon(point, points[index + 1]) for index, point in enumerate(points[:-1])])",
"def total_distance(self):\n distance = 0\n\n for segment in self.data:\n segment_distance = 0\n\n last_lon = None\n last_lat = None\n\n for point in segment:\n current_lon = point[\"lon\"]\n current_lat = point[\"lat\"]\n\n # in case data is missing skip point !\n if current_lon is None or current_lat is None:\n continue\n\n # the first valid element is processed, get distance\n if not (last_lon is None or last_lat is None):\n d = gpx_distance(last_lat, last_lon, current_lat, current_lon)\n segment_distance += d\n\n last_lon = current_lon\n last_lat = current_lat\n\n distance += segment_distance\n\n return distance",
"def distance(x, y, f):\n n = 0\n while x != y:\n x = f(x)\n n += 1\n return n",
"def countTotalDistance(path):\n current = path[0]\n totalDistance = 0\n\n for node in path[1:]:\n totalDistance += distance_func(current, node)\n current = node\n\n return totalDistance",
"def number_of_atoms_within_radius(self, distance_cutoff):\n n_atoms = 0\n atom_ids = []\n for contact in self.nearby_atoms:\n other_id = contact.atom_id_no_altloc()\n if (not other_id in atom_ids):\n if (contact.distance() < distance_cutoff):\n n_atoms += 1\n atom_ids.append(other_id) # check for alt confs.\n return n_atoms",
"def length(self) -> npt.NDArray[np.float_]:\n return dist(*self.vertices)",
"def distances(self):",
"def Points_Counting(self):\n return len(self.__traectory_list)",
"def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))",
"def distance_between_points(p1,p2):\n return math.sqrt((p2.x-p1.x)**2+(p2.y-p1.y)**2)",
"def distance_between_points(a: Point, b: Point) -> float:\n return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2)",
"def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d"
]
| [
"0.6829636",
"0.67380637",
"0.6653657",
"0.65781885",
"0.64964586",
"0.64594823",
"0.6406789",
"0.63200027",
"0.6301598",
"0.62999535",
"0.6289167",
"0.6283409",
"0.6279848",
"0.6279096",
"0.6256218",
"0.6254257",
"0.6254257",
"0.62347174",
"0.622541",
"0.6223266",
"0.6196775",
"0.6178062",
"0.6172857",
"0.61690587",
"0.6168127",
"0.6167313",
"0.61600727",
"0.6151967",
"0.61398035",
"0.61260325"
]
| 0.8255963 | 0 |
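1 + 3 * d * (d + 1) is the d-th centred hexagonal number, i.e. the centre cell plus all rings summed up: 1 + 6 * (1 + 2 + ... + d) = 1 + 6 * d * (d + 1) / 2. Below is a quick consistency check between this record's document and the num_points_in_circle document of the previous record; illustration only, not part of the dataset.

def num_points_in_circle(d):       # document of the previous record
    return 6 * d if d > 0 else 1

def num_points_in_distance(d):     # document of this record
    return 1 + 3 * d * (d + 1)

for d in range(10):
    assert num_points_in_distance(d) == sum(num_points_in_circle(i) for i in range(d + 1))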
Return a modelled PSF for the given model | def model_psf(self):
return self.model()(self._XGrid, self._YGrid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_SF_model(model_type):\n if model_type == \"BiLSTM\":\n return keras.models.load_model(SF_BiLSTM_model_path)\n elif model_type == \"MLP\":\n SF_model = SF_module().to(DEVICE)\n SF_model.load_state_dict(torch.load(SF_MLP_model_path))\n return SF_model",
"def get_model():\n model = ecole.scip.Model.from_file(str(DATA_DIR / \"bppc8-02.mps\"))\n model.disable_cuts()\n model.disable_presolve()\n model.set_param(\"randomization/permuteconss\", True)\n model.set_param(\"randomization/permutevars\", True)\n model.set_param(\"randomization/permutationseed\", 784)\n model.set_param(\"randomization/randomseedshift\", 784)\n model.set_param(\"randomization/lpseed\", 784)\n return model",
"def get_FSF(self):\n try:\n white = self.images['MUSE_WHITE']\n pixstep = white.wcs.get_step(unit=u.arcsec)[0]\n except Exception:\n pixstep = None\n\n try:\n fsfmodel = FSFModel.read(self.header, pixstep=pixstep)\n return fsfmodel\n except ValueError:\n # no model found\n return",
"def model_fn(model_dir):\n with open(os.path.join(model_dir, 'model.pkl'), 'rb') as pickle_file:\n model = pickle.load(pickle_file)\n return model",
"def create_model(self):\n model = solph.Model(self.es)\n return model",
"def get_model(self):\n if (\n self.params.model_str == 'optfixedsig'\n or self.params.model_str == 'sampfixedsig'\n ):\n return get_model_gp_fixedsig(print_status=self.verbose)\n elif self.params.model_str == 'opt' or self.params.model_str == 'samp':\n return get_model_gp(print_status=self.verbose)\n elif self.params.model_str == 'fixedparam':\n return None",
"def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model",
"def model_from_gdsfactory(\n component: Component, dirpath=gf.CONFIG[\"sparameters\"], **kwargs\n) -> Model:\n kwargs.pop(\"function_name\", \"\")\n kwargs.pop(\"module\", \"\")\n component = gf.call_if_func(component, **kwargs)\n pins, f, s = sim.read_sparameters_lumerical(component=component, dirpath=dirpath)\n\n def interpolate_sp(freq):\n return interpolate(freq, f, s)\n\n Model.pin_count = len(pins)\n m = Model()\n m.pins = PinList([Pin(component=m, name=pins[i]) for i, _ in enumerate(pins)])\n m.__setattr__(\"sparams\", (f, s))\n m.s_parameters = interpolate_sp\n m.freq_range = (m.sparams[0][0], m.sparams[0][-1])\n m.wavelengths = speed_of_light / np.array(f)\n m.s = s\n return m",
"def createModel(self):\n model_psp = self.getModelPsp()\n\n if not model_psp:\n log_func.warning(u'Not define model in <%s : %s>' % (self.getName(), self.getType()))\n return None\n\n model_name = self.newPassport().setAsStr(model_psp).name\n\n scheme = self.getScheme()\n if scheme:\n return scheme.getModel(model_name)\n else:\n log_func.warning(u'Error create data scheme object')\n return None",
"def get_model():\n model_folder = os.path.join(os.environ['CovidTools'], 'mod_split_model')\n model_path = os.path.join(model_folder, 'model.pt')\n if not os.path.exists(model_path):\n fs = Filesplit()\n fs.merge(input_dir=os.path.join(model_folder, 'parts'),\n output_file=os.path.join(model_path),\n cleanup=False)\n return torch.load(model_path)",
"def tff_model_fn():\n keras_model = load_model(FLAGS.batch_size)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n return simple_fedavg_tf.KerasModelWrapper(keras_model,\n test_data.element_spec, loss)",
"def _get_model():\n with open('models/catapp_gp_model.pickle', 'rb') as modelfile:\n model = pickle.load(modelfile)\n return model",
"def _model(self):\n common_scale = self.edp_par['common_scale'].value\n model = self.F_trans() * self.F_cont()\n # get F(h=1,k=0), which is used for normalization \n # common_scale is a common scaling factor => F(h=1,k=0) = 100*common_scale\n F_10 = model[(self.h==1)&(self.k==0)]\n model = model / np.absolute(F_10) * 100 * common_scale\n return model",
"def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model",
"def get_model(cls):\n if cls.model == None:\n with open(os.path.join(model_path, 'vdok3_rf.pkl'), 'rb') as inp:\n cls.model = pickle.load(inp)\n return cls.model",
"def getModelObj(self):\n model_psp = self.getModelPsp()\n\n if not model_psp:\n log_func.warning(u'Not define model in <%s : %s>' % (self.getName(), self.getType()))\n return None\n model_obj = self.getKernel().createByPsp(psp=model_psp)\n return model_obj",
"def smurf_predictor(context):\n return 'smurf'",
"def model(self, model_num = 0):\n return self.struct[model_num]",
"def generate_modelSED_photo_fit(sp=None,sfh_form=4,filters=None,add_igm_absorption=0,igm_type=0,params_fsps=None,DL_Gpc=0.0,cosmo='flat_LCDM',\n\tH0=70.0,Om0=0.3,params_val=None,interp_filters_waves=[],interp_filters_trans=[]):\n\n\tdef_params_fsps, params_assoc_fsps, status_log = list_params_fsps()\n\n\tformed_mass = pow(10.0,params_val['log_mass'])\n\n\t# input model parameters to FSPS:\n\tfor pp in range(len(params_fsps)):\n\t\tstr_temp = params_assoc_fsps[params_fsps[pp]]\n\t\tif status_log[params_fsps[pp]] == 0:\n\t\t\tsp.params[str_temp] = params_val[params_fsps[pp]]\n\t\telif status_log[params_fsps[pp]] == 1:\n\t\t\tsp.params[str_temp] = pow(10.0,params_val[params_fsps[pp]])\n\n\t# generate the SED:\n\tif sfh_form==0 or sfh_form==1:\n\t\tage = pow(10.0,params_val['log_age'])\n\t\twave, extnc_spec = sp.get_spectrum(peraa=True,tage=age) ## spectrum in L_sun/AA\n\t\tmass = sp.stellar_mass\n\t\tdust_mass0 = sp.dust_mass ## in solar mass/norm\n\telif sfh_form==2 or sfh_form==3 or sfh_form==4:\n\t\tt0 = pow(10.0,params_val['log_t0'])\n\t\ttau = pow(10.0,params_val['log_tau'])\n\t\tage = pow(10.0,params_val['log_age'])\n\t\talpha = pow(10.0,params_val['log_alpha'])\n\t\tbeta = pow(10.0,params_val['log_beta'])\n\t\tSFR_fSM,mass,wave,extnc_spec,dust_mass0 = csp_spec_restframe_fit(sp=sp,sfh_form=sfh_form,formed_mass=formed_mass,age=age,tau=tau,t0=t0,alpha=alpha,beta=beta)\n\n\t# redshifting\n\tredsh_wave,redsh_spec0 = cosmo_redshifting(DL_Gpc=DL_Gpc,cosmo=cosmo,H0=H0,Om0=Om0,z=params_val['z'],wave=wave,spec=extnc_spec)\n\n\t# IGM absorption:\n\tif add_igm_absorption == 1:\n\t\tif igm_type == 0:\n\t\t\ttrans = igm_att_madau(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\t\telif igm_type == 1:\n\t\t\ttrans = igm_att_inoue(redsh_wave,params_val['z'])\n\t\t\ttemp = redsh_spec0\n\t\t\tredsh_spec0 = temp*trans\n\n\t# normalize:\n\tnorm0 = formed_mass/mass\n\tredsh_spec = redsh_spec0*norm0\n\tdust_mass = dust_mass0*norm0\n\n\t# filtering:\n\tphoto_SED_flux = filtering_interp_filters(redsh_wave,redsh_spec,interp_filters_waves,interp_filters_trans)\n\n\treturn photo_SED_flux",
"def get_model(recompile=False, print_status=True):\n\n model_str = 'gp_fixedsig_all'\n\n base_path = pathlib.Path(__file__).parent\n relative_path_to_model = 'model_pkls/' + model_str + '.pkl'\n model_path = str((base_path / relative_path_to_model).resolve())\n\n if recompile:\n starttime = time.time()\n model = pystan.StanModel(model_code=get_model_code())\n buildtime = time.time() - starttime\n with open(model_path, 'wb') as f:\n pickle.dump(model, f)\n if print_status:\n print('[INFO] Time taken to compile = ' + str(buildtime) + ' seconds.')\n print('[INFO] Stan model saved in file ' + model_path)\n else:\n model = pickle.load(open(model_path, 'rb'))\n if print_status:\n print('[INFO] Stan model loaded from file {}'.format(model_path))\n return model",
"def get_model(model: str) -> Any:\n try:\n model_function = eval(model)\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. Accepted models from {tf}, {sm}, {tfa}, {tfc}')\n return model_function",
"def get_model():\n return UNISAL",
"def get_psp(dataset='pascal_voc', backbone='resnet50', pretrained=False,\n root='~/.mxnet/models', ctx=cpu(0), pretrained_base=True, **kwargs):\n acronyms = {\n 'pascal_voc': 'voc',\n 'pascal_aug': 'voc',\n 'ade20k': 'ade',\n 'coco': 'coco',\n 'citys': 'citys',\n }\n from ..data import datasets\n # infer number of classes\n model = PSPNet(datasets[dataset].NUM_CLASS, backbone=backbone,\n pretrained_base=pretrained_base, ctx=ctx, **kwargs)\n model.classes = datasets[dataset].CLASSES\n if pretrained:\n from .model_store import get_model_file\n model.load_parameters(get_model_file('psp_%s_%s'%(backbone, acronyms[dataset]),\n tag=pretrained, root=root), ctx=ctx)\n return model",
"def build_model():",
"def smurf_predictor(context : Context) -> str:\n return 'smurf'",
"def load_trained_model(filename = 'pricing_model.p'):\n # with ZipFile(\"model.zip\",\"r\") as w:\n # w.extractall()\n \n with open(filename, 'rb') as model:\n pricingmodel = pickle.load(model)\n \n # pricingmodel.Model_made = tf.keras.models.load_model(\"Model_made.h5\")\n # pricingmodel.Model_claim = tf.keras.models.load_model(\"Model_claim.h5\")\n \n \n return pricingmodel",
"def pick_model(self):\n return ConvModel(self.model_pmeter)",
"def init_model(model_type):\n if model_type == 'magnitude':\n model = Magnitude('../model/crawl-300d-2M.magnitude')\n elif model_type == 'gensim':\n model = KeyedVectors.load('../model/pre_trained_word2vec_embeddings.bin')\n else:\n print(\"Invalid model type.\")\n sys.exit(1)\n return model, model_type",
"def models(r, model):\n\n\tif model==\"PREM\":\n\t\treturn model_prem(r)\n\n\telif model==\"PREM_iso\":\n\t\treturn model_prem_iso(r)\n\n\telif model==\"ONELAYER\":\n\t\treturn model_onelayer(r)\n\n\telif model==\"ONELAYER_pert\":\n\t\treturn model_onelayer_pert(r)\n\n\telif model==\"GUTENBERG\":\n\t\treturn model_gutenberg(r)",
"def get_model(model_name: str, map_location=torch.device('cpu')):\n # model urls on Zenodo\n model_urls = {'ParallelNets': 'https://zenodo.org/record/7245516/files/ParallelNets.pth?download=1',\n 'UNetPath': 'https://zenodo.org/record/7245516/files/UNetPath.pth?download=1'}\n\n # check if model_name is supported\n if model_name not in ['ParallelNets', 'UNetPath']:\n raise ValueError(\"Model name needs to be 'ParallelNets' or 'UNetPath'.\")\n\n model_path = pkg_resources.resource_filename('crackpy', f'crack_detection/models/{model_name}.pth')\n\n # check if model folder exists\n origin, _ = os.path.split(model_path)\n if not os.path.exists(origin):\n os.makedirs(origin)\n\n if not os.path.exists(model_path):\n print(f\"Downloading {model_name}...\")\n torch.hub.download_url_to_file(model_urls[model_name], model_path)\n\n if model_name == 'ParallelNets':\n model = ParallelNets(in_ch=2, out_ch=1, init_features=64)\n model.load_state_dict(torch.load(model_path, map_location=map_location))\n else: # model_name == 'UNetPath'\n model = UNet(in_ch=2, out_ch=1, init_features=64)\n model.load_state_dict(torch.load(model_path, map_location=map_location))\n\n return model"
]
| [
"0.70333433",
"0.6461371",
"0.63930553",
"0.62396306",
"0.6135629",
"0.6135161",
"0.610893",
"0.6049868",
"0.6015454",
"0.59949577",
"0.5974333",
"0.5963697",
"0.5957364",
"0.59512144",
"0.59200984",
"0.5866675",
"0.5854476",
"0.5829802",
"0.57794815",
"0.57694125",
"0.5759047",
"0.57564276",
"0.5746606",
"0.574543",
"0.57450235",
"0.57158816",
"0.5686069",
"0.5661729",
"0.56588286",
"0.5658365"
]
| 0.673051 | 1 |
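A quick numeric sanity check of the two width conversions quoted in the fwhm docstring of the record that follows: Gaussian FWHM = 2 * sqrt(2 * ln 2) * sigma (about 2.3548 * sigma) and Moffat FWHM = 2 * gamma * sqrt(2**(1/alpha) - 1). The gamma and alpha values below are arbitrary illustration values, not taken from the dataset.

import math

# Gaussian: the 2.3548 factor is 2*sqrt(2*ln 2)
assert abs(2 * math.sqrt(2 * math.log(2)) - 2.3548) < 1e-4

# Moffat profile I(r) = (1 + (r / gamma)**2)**(-alpha): it reaches half its peak at r = FWHM / 2
gamma, alpha = 3.0, 2.5
fwhm = 2.0 * gamma * math.sqrt(2 ** (1 / alpha) - 1)
half_height = (1 + (fwhm / 2 / gamma) ** 2) ** (-alpha)
assert abs(half_height - 0.5) < 1e-9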
Extract the FWHM from the model of the star. The FWHM needs to be calculated for each model. For the Moffat, the FWHM is a function of the gamma and alpha parameters (in other words, the scaling factor and the exponent of the expression), while for a Gaussian FWHM = 2.3548 sigma. Unfortunately, our case is a 2D Gaussian, so a compromise between the two sigmas (sigma_x, sigma_y) must be reached. We will use the average of the two. | def fwhm(self):
model_dict = dict(zip(self.model().param_names, self.model().parameters))
if self.model_type == self._MOFFAT2D:
gamma, alpha = [model_dict[ii] for ii in ("gamma_0", "alpha_0")]
FWHM = 2.0 * gamma * np.sqrt(2 ** (1 / alpha) - 1)
FWHM_x, FWHM_y = None, None
elif self.model_type == self._GAUSSIAN2D:
sigma_x, sigma_y = [model_dict[ii] for ii in ("x_stddev_0", "y_stddev_0")]
FWHM = 2.3548 * np.mean([sigma_x, sigma_y])
FWHM_x, FWHM_y = 2.3548 * sigma_x, 2.3548 * sigma_y
return FWHM, FWHM_x, FWHM_y | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_psf_fwhm_inpix_gaussian(arr):\n\tmodel = fit_gaussian(arr)\n\n\tsigma = max(model.y_stddev, model.x_stddev)\n\tfwhm = 2.355 * sigma\n\n\treturn fwhm",
"def FWHM(self):\n # The width of the Lorentz profile\n fl = 2.0 * self[\"al\"]\n # Width of the Gaussian [2.35 = 2*sigma*sqrt(2*ln(2))]\n fd = 2.35482 * self['ad']\n return 0.5346 * fl + numpy.sqrt(0.2166 * (fl**2.) + fd**2.)",
"def calc_psf_fwhm_inpix_moffat(arr):\n\tmodel = fit_moffat(arr)\n\n\tfwhm = 2.* model.gamma * np.sqrt( 2.**(1./model.alpha) - 1. )\n\n\treturn fwhm",
"def fwhm(self):\n return self._get_mean_and_samples_attribute('fwhm')",
"def g2dfwhm(img):\n npix = img.shape[0]\n rowCen,colCen = adaptiveCentroid(img,1.1/scale)\n row,col = np.mgrid[0:npix,0:npix]\n row = row - rowCen\n col = col - colCen\n A0,sigmac0 = moments(img)\n sigmar0 = sigmac0\n rho0 = 0.\n B0 = 0.\n p0=np.array([sigmac0,sigmar0,rho0,A0, B0])\n def residualg2d(p,x,y,xc,yc,I):\n sigmax,sigmay,rho,A,B = p\n Ierr = np.sqrt(abs(I))+0.00001 # to avoid those = 0, add a small number \n res = (gaussian2d(x,y,xc,yc,sigmax,sigmay,rho,A,B) - I)/Ierr\n return res.flatten()\n p = leastsq(residualg2d,p0,args=(col,row,colCen,rowCen,img))[0]\n sigmac,sigmar,rho,A,B = p\n Mcc = sigmac**2\n Mrr = sigmar**2\n Mrc = rho**2*Mcc*Mrr\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n whiskerLength = np.sqrt(np.abs(M22))\n lambdap = 0.5*(M20 + abs(M22))\n lambdam = 0.5*(M20 - abs(M22))\n fwhm_g2d = np.sqrt(2.*np.log(2.))*(np.sqrt(lambdap)+np.sqrt(lambdam))\n #fwhm = np.sqrt(M20/2.)*2.35482*scale\n return A, B, whiskerLength, fwhm_g2d",
"def test_fwhm(self):\n m = self.sp.model\n bp = SpectralElement(\n Gaussian1D, mean=m.mean, amplitude=m.amplitude, stddev=m.stddev)\n assert_quantity_allclose(bp.fwhm(), 100 * u.AA, rtol=1e-3) # 0.1%",
"def sigma_to_fwhm(sigma):\n\n return sigma * 2 * numpy.sqrt(2 * numpy.log(2))",
"def fwhm_to_sigma(fwhm):\n\n return fwhm / 2 / numpy.sqrt(2 * numpy.log(2))",
"def measure_fwhm(image, plot=True, printout=True):\n\n # Find FWHM\n # ----------\n\n fitted_line = fit_gaussian2d(image)\n\n # Find fitted center\n x_mean, y_mean = [i.value for i in [fitted_line.x_mean, fitted_line.y_mean]]\n\n # Estimate FWHM using gaussian_sigma_to_fwhm\n x_fwhm = fitted_line.x_stddev * gaussian_sigma_to_fwhm\n y_fwhm = fitted_line.y_stddev * gaussian_sigma_to_fwhm\n\n # Find half max\n hm = fitted_line(x_mean, y_mean) / 2.\n\n # Find the mean of the x and y direction\n mean_fwhm = np.mean([x_fwhm, y_fwhm])\n mean_fwhm = int(np.round(mean_fwhm))\n\n # Print info about fit and FWHM\n # ------------------------------\n\n if printout:\n print(\"Image Max: {}\".format(image.max()))\n print(\"Amplitude: {}\".format(fitted_line.amplitude.value))\n print(\"Center: ({}, {})\".format(x_mean, y_mean))\n print(\"Sigma = ({}, {})\".format(fitted_line.x_stddev.value,\n fitted_line.y_stddev.value, ))\n print(\"Mean FWHM: {} Pix \".format(mean_fwhm))\n print(\"FWHM: (x={}, y={}) Pix \".format(x_fwhm, y_fwhm))\n\n if plot:\n\n fig, [ax0, ax1, ax2, ax3] = plot_fit(image, fitted_line)\n\n # Make x and y grid to plot to\n y_arange, x_arange = np.mgrid[:image.shape[0], :image.shape[1]]\n\n # Plot input image with FWHM and center\n # -------------------------------------\n\n ax0.imshow(image, cmap='gray_r')\n\n ax0.axvline(x_mean - x_fwhm / 2, c='c', linestyle=\"--\", label=\"X FWHM\")\n ax0.axvline(x_mean + x_fwhm / 2, c='c', linestyle=\"--\")\n\n ax0.axhline(y_mean - y_fwhm / 2, c='g', linestyle=\"--\", label=\"Y FWHM\")\n ax0.axhline(y_mean + y_fwhm / 2, c='g', linestyle=\"--\")\n\n ax0.set_title(\"Center and FWHM Plot\")\n ax0.legend()\n\n # Plot X fit\n # ----------\n\n ax2.axvline(x_mean, linestyle=\"-\", label=\"Center\")\n ax2.axvline(x_mean - x_fwhm / 2, c='c', linestyle=\"--\", label=\"X FWHM\")\n ax2.axvline(x_mean + x_fwhm / 2, c='c', linestyle=\"--\")\n ax2.axhline(hm, c=\"black\", linestyle=\"--\", label=\"Half Max\")\n\n ax2.legend()\n\n # Plot Y fit\n # ----------\n\n ax3.axvline(y_mean, linestyle=\"-\", label=\"Center\")\n ax3.axvline(y_mean - y_fwhm / 2, c='g', linestyle=\"--\", label=\"Y FWHM\")\n ax3.axvline(y_mean + y_fwhm / 2, c='g', linestyle=\"--\")\n ax3.axhline(hm, c=\"black\", linestyle=\"--\", label=\"Half Max\")\n\n ax3.legend()\n\n plt.show()\n\n return np.array([x_fwhm, y_fwhm])",
"def fwhm(self) -> float:\n return 2 * np.sqrt(2 * np.log(2)) * self.width",
"def fwhm(self, criteria='last'):\n return fwhm(self.x, self.y, self.data, criteria=criteria)",
"def fwhm(x, y, data, criteria='last'):\n # native calculation is a radius, \"HWHM\", *2 is FWHM\n return estimate_size(x=x, y=y, data=data, metric='fwhm', criteria=criteria) * 2",
"def fwhm(self):\n if not self.has_fwhm():\n if self.has_sepobjects():\n fwhm_pxl = self.sepobjects.get_fwhm_pxl(isolated_only=True,\n stars_only=True)\n self.set_fwhm(fwhm_pxl/self.units_to_pixels(\"arcsec\").value*\\\n units.arcsec)\n else:\n raise AttributeError(\"'fwhm' is not defined and no sepobjects loaded.\")\n return self._derived_properties[\"fwhm\"]",
"def _FWHMGauss(sigma, pixel=12):\n return sigma*2*np.sqrt(2*np.log(2))*pixel",
"def wfwhm(img,sigma):\n nrow,ncol=img.shape\n Isum = img.sum()\n Icol = img.sum(axis=0) # sum over all rows\n Irow = img.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = img*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n IWsum = IWmat.sum()\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/IWsum\n dcolmean = np.sum((colgrid-colmean)*IWcol)/IWsum\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mrr = np.sum(rowgrid**2*IWrow)/IWsum\n Mcc = np.sum(colgrid**2*IWcol)/IWsum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*IWmat)/IWsum\n Cm = np.matrix([[Mcc,Mrc],[Mrc,Mrr]])\n Cw = np.matrix([[sigma**2,0.],[0.,sigma**2]])\n Cimg = (Cm.I - Cw.I).I\n Mcc = Cimg[0,0]\n Mrr = Cimg[1,1]\n Mrc = Cimg[0,1]\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n e1 = M22.real/M20.real\n e2 = M22.imag/M20.real\n whiskerLength = np.sqrt(np.abs(M22))\n lambdap = 0.5*(M20 + abs(M22))\n lambdam = 0.5*(M20 - abs(M22))\n fwhmw = np.sqrt(2.*np.log(2.))*(np.sqrt(lambdap)+np.sqrt(lambdam))\n return e1,e2,whiskerLength,fwhmw",
"def calc_psf_fwhm(arr, mode='moffat'):\n\n\tif mode == 'moffat':\n\t\treturn calc_psf_fwhm_inpix_moffat(arr)\n\telif mode == 'gaussian':\n\t\treturn calc_psf_fwhm_inpix_gaussian(arr)\n\telse:\n\t\traise ValueError(\"mode not recognized\")",
"def _printFWHM(sigma_x, sigma_y, sigma_xerr, sigma_yerr, req=10.8):\n print(\"=\" * 60)\n print 'FWHM (requirement %.1f microns):' % req\n print round(np.sqrt(_FWHMGauss(sigma_x)*_FWHMGauss(sigma_y)), 2), ' +/- ', \\\n round(np.sqrt(_FWHMGauss(sigma_xerr)*_FWHMGauss(sigma_yerr)), 3) , ' microns'\n print 'x:', round(_FWHMGauss(sigma_x), 2), ' +/- ', round(_FWHMGauss(sigma_xerr), 3), ' microns'\n print 'y:', round(_FWHMGauss(sigma_y), 2), ' +/- ', round(_FWHMGauss(sigma_yerr), 3), ' microns'\n print(\"=\" * 60)",
"def gaussian(amp, fwhm, mean, x):\n return amp * np.exp(-4. * np.log(2) * (x-mean)**2 / fwhm**2)",
"def getFWHM(antenna, freq):\n diameter = getDiameter(antenna)\n lam = 299792458.0 / (freq * 1e9)\n fwhmo = lam / math.pi * 180.0 * 60.0\n fwhm = 1.22 * fwhmo / diameter\n return fwhm",
"def get_standard_deviation(fwhm):\n return fwhm / np.sqrt(8 * np.log(2))",
"def gaussian(amp, fwhm, mean):\n return lambda x: amp * np.exp(-4. * np.log(2) * (x-mean)**2 / fwhm**2)",
"def fwhmax_fwatmin(model, estimates, normalize_RFs=False, return_profiles=False):\n \n model = model.lower()\n x=np.linspace(-50,50,1000).astype('float32')\n\n prf = estimates['betas'] * np.exp(-0.5*x[...,np.newaxis]**2 / estimates['size']**2)\n vol_prf = 2*np.pi*estimates['size']**2\n\n if 'dog' in model or 'dn' in model:\n srf = estimates['sa'] * np.exp(-0.5*x[...,np.newaxis]**2 / estimates['ss']**2)\n vol_srf = 2*np.pi*estimates['ss']*2\n\n if normalize_RFs==True:\n\n if model == 'gauss':\n profile = prf / vol_prf\n elif model == 'css':\n #amplitude is outside exponent in CSS\n profile = (prf / vol_prf)**estimates['ns'] * estimates['betas']**(1 - estimates['ns'])\n elif model =='dog':\n profile = prf / vol_prf - \\\n srf / vol_srf\n elif 'dn' in model:\n profile = (prf / vol_prf + estimates['nb']) /\\\n (srf / vol_srf + estimates['sb']) - estimates['nb']/estimates['sb']\n else:\n if model == 'gauss':\n profile = prf\n elif model == 'css':\n #amplitude is outside exponent in CSS\n profile = prf**estimates['ns'] * estimates['betas']**(1 - estimates['ns'])\n elif model =='dog':\n profile = prf - srf\n elif 'dn' in model:\n profile = (prf + estimates['nb'])/(srf + estimates['sb']) - estimates['nb']/estimates['sb']\n\n\n half_max = np.max(profile, axis=0)/2\n fwhmax = np.abs(2*x[np.argmin(np.abs(profile-half_max), axis=0)])\n\n\n if 'dog' in model or 'dn' in model:\n\n min_profile = np.min(profile, axis=0)\n fwatmin = np.abs(2*x[np.argmin(np.abs(profile-min_profile), axis=0)])\n\n result = fwhmax, fwatmin\n else:\n result = fwhmax\n\n if return_profiles:\n return result, profile.T\n else:\n return result",
"def calc_fwhm_on_bright_star(image_file, print=True, fwhm_init=2.0):\n \n img = load_image(image_file)\n \n # Calculate the bacgkround\n bkg = photutils.Background(img, img.shape, filter_shape=(1,1), method='median')\n\n threshold = bkg.background + (30.0 * bkg.background_rms)\n\n sigma = 2.0 * gaussian_fwhm_to_sigma # FWHM = 2. pixels\n kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)\n kernel.normalize()\n segm = detect_sources(img, threshold, npixels=5, filter_kernel=kernel) \n\n props = source_properties(img, segm)\n tbl = properties_table(props)\n\n # Check for junk stars (cosmic rays)\n idx = np.where((tbl['semimajor_axis_sigma'] > 1) & (tbl['semiminor_axis_sigma'] > 1))[0]\n tbl = tbl[idx]\n \n tbl['image_name'] = image_file\n\n if print == True:\n reformat_source_table(tbl)\n print_source_table(tbl)\n \n return tbl",
"def kernel_gaussiano(image: np.ndarray, sigma: float, kind: str = 'low') -> np.ndarray:\n U, V = fourier_meshgrid(image)\n D = fourier_distance(U, V)\n H = np.exp( (-1.0 * D) / (2.0 * sigma**2) )\n \n if kind == 'high' or kind == 'highpass':\n H = 1.0 - H\n \n return H",
"def womgau(hop):\n import numpy as np\n import logging\n import matplotlib.pyplot as plt\n from scipy.optimize import curve_fit\n from tmath.wombat.womwaverange import womwaverange\n from tmath.wombat.womget_element import womget_element\n from tmath.wombat.inputter import inputter\n from tmath.wombat.inputter_single import inputter_single\n from tmath.wombat.gauss import gauss\n from tmath.wombat.gauss_cont import gauss_cont\n from tmath.wombat.yesno import yesno\n print(' ')\n logging.info('Object is {}'.format(hop[0].obname))\n print(' ')\n print('Spectrum runs from {} to {}'.format(hop[0].wave[0],hop[0].wave[-1]))\n print(' ')\n print('This routine expects the spectrum to be in flambda units.')\n print('It also expects a linear wavelength scale.')\n print(' ')\n print('Choose general region of spectrum\\n')\n nwave,nflux,mode=womwaverange(hop[0].wave,hop[0].flux,'none')\n print('\\nNow pick the exact range for the fit')\n waveint,fluxint,mode=womwaverange(nwave,nflux,mode)\n indexblue=womget_element(nwave, waveint[0])\n indexred=womget_element(nwave,waveint[-1])\n if (mode == 'w'):\n done = False\n while (not done):\n print(' ')\n wavecenter=inputter('Enter approximate center of Gaussian : ','float',False)\n indexcenter=womget_element(waveint,wavecenter)\n if (indexcenter <= 0) or (wavecenter > waveint[-1]):\n print('Bad central wavelength, try again')\n else:\n done = True\n else:\n done=False\n while (not done):\n print('Mark the approximate center of the Gaussian')\n pickcent=plt.ginput(1,timeout=-1)\n indexcenter=womget_element(waveint,pickcent[0][0])\n print('\\nApproximate center at {}'.format(waveint[indexcenter]))\n print('\\nIs this OK?')\n answer=yesno('y')\n if (answer == 'y'):\n done=True\n weights=np.sqrt(hop[0].var[indexblue:indexred+1])\n print(' ')\n continuum=inputter_single('Do you want to fit gaussian with (c)ontinuum, or (n)o continuum? 
','cn')\n if (continuum == 'c'):\n p=[fluxint[indexcenter], waveint[indexcenter],3.0,1.0,waveint[0]]\n result=curve_fit(gauss_cont,waveint,fluxint,sigma=weights,p0=p,absolute_sigma=True,full_output=True)\n else:\n p=[fluxint[indexcenter], waveint[indexcenter],3.0]\n result=curve_fit(gauss,waveint,fluxint,sigma=weights,p0=p,absolute_sigma=True,full_output=True)\n coefferr=np.sqrt(np.diag(result[1]))\n coeff=result[0]\n # make 'finer-grained' version of fit, 0.2A/pix for calculations\n wavecalc=np.arange(2*5*50*abs(coeff[2]))*0.2+coeff[1]-0.2*5*50*abs(coeff[2])\n calccenter=womget_element(wavecalc,coeff[1])\n if (continuum == 'c'):\n fluxcalc=gauss_cont(wavecalc,*coeff)\n fluxcont=wavecalc*coeff[3]+coeff[4]\n fluxgaussian=fluxcalc-fluxcont\n linecont=fluxcont[calccenter]\n else:\n fluxcalc=gauss(wavecalc,*coeff)\n \n \n deltafit=wavecalc[1]-wavecalc[0]\n calcindexblue=womget_element(wavecalc,waveint[0])\n calcindexred=womget_element(wavecalc,waveint[-1])\n sumfluxcalc=np.sum(fluxcalc[calcindexblue:calcindexred+1]*deltafit)\n sumallfluxcalc=np.sum(fluxcalc*deltafit)\n chi=(result[2]['fvec']**2).sum()\n redchi=chi/(len(waveint)-len(coeff))\n if (continuum == 'c'):\n sumfluxgaussian=np.sum(fluxgaussian[calcindexblue:calcindexred+1]*deltafit)\n sumallfluxgaussian=np.sum(fluxgaussian*deltafit)\n sumfluxcont=np.sum(fluxcont[calcindexblue:calcindexred+1]*deltafit)\n sumallfluxcont=np.sum(fluxcont*deltafit)\n sumallfluxcont_test=np.sum(fluxcont)\n # propagate uncertainty (from old version) not sure this is correct\n height_pct=coefferr[0]/coeff[0]\n sigma_pct=coefferr[2]/coeff[2]\n flux_pct=np.sqrt(height_pct**2+sigma_pct**2)\n sumfluxgaussiansig=sumfluxgaussian*flux_pct\n sumallfluxgaussiansig=sumallfluxgaussian*flux_pct\n plt.cla()\n plt.plot(nwave,nflux,drawstyle='steps-mid',color='k')\n plt.ylabel('Flux')\n plt.xlabel('Wavelength')\n xmin,xmax=plt.xlim()\n ymin,ymax=plt.ylim()\n plt.plot(wavecalc,fluxcalc,drawstyle='steps-mid',color='b')\n if (continuum == 'c'):\n plt.plot(wavecalc,fluxgaussian,drawstyle='steps-mid',color='r')\n plt.plot(wavecalc,fluxcont,drawstyle='steps-mid',color='g')\n plt.plot([waveint[0],waveint[0]],[ymin,ymax],color='k',linestyle='--')\n plt.plot([waveint[-1],waveint[-1]],[ymin,ymax],color='k',linestyle='--')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n logging.info('For object {} Gaussian fit'.format(hop[0].obname))\n if (continuum == 'c'):\n print('\\nData = Black, Fit = Blue, Continuum = Green, Fit-Continuum = Red\\n')\n else:\n print('\\nData = Black, Fit = Blue\\n')\n logging.info('Height {:16.8f}+/-{:16.8f}'.format(coeff[0],coefferr[0]))\n logging.info('Center {:16.8f}+/-{:16.8f}'.format(coeff[1],coefferr[1]))\n logging.info('Sigma {:16.8f}+/-{:16.8f}'.format(coeff[2],coefferr[2]))\n if (continuum == 'c'):\n FWHM = 2.35482*np.abs(coeff[2])\n rest_wave = input('Rest wavelength [N/A]: ') or None\n redshift = input('Redshift [N/A]: ') or None\n if rest_wave:\n rest_wave = float(rest_wave)\n w1 = (rest_wave - FWHM/2.)/(1.+float(redshift)) \n w2 = (rest_wave + FWHM/2.)/(1.+float(redshift)) \n c = 299792.458\n v1 = -1.*c*((rest_wave/w1)**2. - 1)/(1+((rest_wave/w1)**2.))\n v2 = -1.*c*((rest_wave/w2)**2. 
- 1)/(1+((rest_wave/w2)**2.))\n logging.info('Slope {:16.8f}+/-{:16.8f}'.format(coeff[3],coefferr[3]))\n logging.info('Y-intercept {:16.8f}+/-{:16.8f}'.format(coeff[4],coefferr[4]))\n logging.info('FWHM {:16.8f}+/-{:16.8f}'.format(2.35482*np.abs(coeff[2]),2.35482*coefferr[2]))\n logging.info('FWHM (velocity) {:16.8f} km/s'.format(v2-v1))\n logging.info('Flux between dotted lines (Gaussian): {:16.8f}+/-{:16.8f}'.format(sumfluxgaussian, sumfluxgaussiansig))\n logging.info('EW between dotted lines (Gaussian): {:16.8f}'.format(sumfluxgaussian/linecont))\n logging.info('Flux for full (Gaussian): {:16.8f}+/-{:16.8f}'.format(sumallfluxgaussian, sumallfluxgaussiansig))\n logging.info('EW for full (Gaussian): {:16.8f}'.format(sumallfluxgaussian/linecont))\n logging.info('Continuum flux at line center: {:16.8f}'.format(linecont))\n\n \n logging.info('Chi^2: {}'.format(chi))\n logging.info('Reduced chi^2: {}'.format(redchi))\n logging.info('All fluxes might need to be scaled by 1e-15')\n print(' ')\n return hop",
"def mean_sigma(h):\n h.Fit(\"gaus\", \"q\")\n result_fit = h.GetFunction(\"gaus\")\n mean = result_fit.GetParameter(1)\n sigma = result_fit.GetParameter(2)\n return mean, sigma",
"def estimateFWHM(imgID, side='blue'):\r\n\r\n iraf.unlearn('imexam')\r\n iraf.rimexam.fittype = \"gaussian\"\r\n iraf.delete('trace.xy', verify=\"no\")\r\n iraf.delete('fwhm.log', verify=\"no\")\r\n # extract the position of the trace\r\n f = open('database/ap%s%04d' % (side, imgID), 'r')\r\n dat = f.read()\r\n xy = dat.split('\\n')[5].split()[1:3]\r\n f.close()\r\n f = open('trace.xy', 'w')\r\n f.write('%s %s\\n' % (xy[0], xy[1]))\r\n f.close()\r\n # run imexam\r\n if side == 'blue':\r\n defkey = 'j'\r\n else:\r\n defkey = 'k'\r\n iraf.imexam('%s%04d' % (side, imgID), '1', logfile='fwhm.log', keeplog=\"yes\", defkey=defkey, imagecur='trace.xy', use_display=\"no\", autoredraw=\"no\")\r\n # load values\r\n f = open('fwhm.log', 'r')\r\n dat = f.read()\r\n fwhm = float(dat.split('\\n')[1].split('=')[4].split()[0])\r\n f.close()\r\n # cleanup\r\n os.unlink(\"fwhm.log\")\r\n os.unlink(\"trace.xy\")\r\n\r\n # update the header\r\n f = pyfits.open('%s%04d.spec.fits' % (side, imgID))\r\n #f[0].header.update('FWHM', np.round(fwhm, 2), 'FWHM estimate of the trace [pix]')\r\n f[0].header['FWHM']=np.round(fwhm, 2) #, 'FWHM estimate of the trace [pix]')\r\n f.writeto('%s%04d.spec.fits' % (side, imgID), clobber=True)\r\n f.close()\r\n if os.access('%s%04d_flux.spec.fits' % (side, imgID), os.F_OK):\r\n f = pyfits.open('%s%04d_flux.spec.fits' % (side, imgID))\r\n #f[0].header.update('FWHM', np.round(fwhm, 2), 'FWHM estimate of the trace [pix]')\r\n f[0].header['FWHM']= np.round(fwhm, 2)\r\n f.writeto('%s%04d_flux.spec.fits' % (side, imgID), clobber=True)\r\n f.close()",
"def gauss_kernel(n_fwhm,sigma):\n\n x_length = int(n_fwhm * sigma + 0.5) #Add 0.5 to approximate to nearest integer\n y_length = x_length\n \n \n x, y = mgrid[-x_length:x_length+1, -y_length:y_length+1]\n g = numpy.exp(-(x**2/(2*(float(sigma)**2))+y**2/(2*(float(sigma)**2))))\n return g / g.sum()",
"def test_height_fwhm_calculation(peakdata):\n # mu = 0\n # variance = 1.0\n # sigma = np.sqrt(variance)\n # x = np.linspace(mu - 20*sigma, mu + 20*sigma, 100.0)\n # y = norm.pdf(x, mu, 1)\n x = peakdata[0]\n y = peakdata[1]\n check_height_fwhm(x, y, lineshapes.voigt, models.VoigtModel())\n check_height_fwhm(x, y, lineshapes.pvoigt, models.PseudoVoigtModel())\n check_height_fwhm(x, y, lineshapes.pearson7, models.Pearson7Model())\n check_height_fwhm(x, y, lineshapes.moffat, models.MoffatModel())\n check_height_fwhm(x, y, lineshapes.students_t, models.StudentsTModel())\n check_height_fwhm(x, y, lineshapes.breit_wigner, models.BreitWignerModel())\n check_height_fwhm(x, y, lineshapes.damped_oscillator,\n models.DampedOscillatorModel())\n check_height_fwhm(x, y, lineshapes.dho,\n models.DampedHarmonicOscillatorModel())\n check_height_fwhm(x, y, lineshapes.expgaussian,\n models.ExponentialGaussianModel())\n check_height_fwhm(x, y, lineshapes.skewed_gaussian,\n models.SkewedGaussianModel())\n check_height_fwhm(x, y, lineshapes.donaich, models.DonaichModel())\n x = x-9 # Lognormal will only fit peaks with centers < 1\n check_height_fwhm(x, y, lineshapes.lognormal, models.LognormalModel())",
"def gwf(self, psi_w, H, J):\n\t if self.vw <= 0.:\n\t return 0.00001\n\t else:\n\t return self.GWMAX*exp(-(-psi_w/J)**H)"
]
| [
"0.7319097",
"0.71567017",
"0.7058549",
"0.70203537",
"0.6777484",
"0.67330366",
"0.65833426",
"0.6531526",
"0.651764",
"0.6465564",
"0.6410498",
"0.63757175",
"0.63664764",
"0.6298521",
"0.6227312",
"0.62063545",
"0.6195452",
"0.6123725",
"0.6082363",
"0.60621554",
"0.6030687",
"0.60101235",
"0.6007678",
"0.5977929",
"0.5969925",
"0.59672755",
"0.5959643",
"0.59154695",
"0.5910782",
"0.5898668"
]
| 0.7796027 | 0 |
Initialize a model with first guesses for the parameters. The user can select between several astropy models, e.g., 'Gaussian2D', 'Moffat2D'. We will use the data to get the first estimates of the parameters of each model. Finally, a Const2D model is added to account for the background or sky level around the star. | def _initialize_model(self):
max_value = self.data.max()
if self.model_type == self._GAUSSIAN2D:
model = models.Gaussian2D(
x_mean=self.x, y_mean=self.y, x_stddev=1, y_stddev=1
)
model.amplitude = max_value
# Establish reasonable bounds for the fitted parameters
model.x_stddev.bounds = (0, self._box / 4)
model.y_stddev.bounds = (0, self._box / 4)
model.x_mean.bounds = (self.x - 5, self.x + 5)
model.y_mean.bounds = (self.y - 5, self.y + 5)
elif self.model_type == self._MOFFAT2D:
model = models.Moffat2D()
model.x_0 = self.x
model.y_0 = self.y
model.gamma = 2
model.alpha = 2
model.amplitude = max_value
# Establish reasonable bounds for the fitted parameters
model.alpha.bounds = (1, 6)
model.gamma.bounds = (0, self._box / 4)
model.x_0.bounds = (self.x - 5, self.x + 5)
model.y_0.bounds = (self.y - 5, self.y + 5)
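        # A flat Const2D component models the local sky; its amplitude is seeded by fit_sky() and held fixed during the fit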
model += models.Const2D(self.fit_sky())
model.amplitude_1.fixed = True
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_model(self):\n # n_dims == n_hparams\n n_dims = len(self.searchspace.keys())\n\n if self.interim_results:\n n_dims += 1 # add one dim for augumented budget\n\n cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))\n\n other_kernel = Matern(\n length_scale=np.ones(n_dims),\n length_scale_bounds=[(0.01, 100)] * n_dims,\n nu=2.5,\n )\n base_model = GaussianProcessRegressor(\n kernel=cov_amplitude * other_kernel,\n normalize_y=True,\n noise=\"gaussian\",\n n_restarts_optimizer=2,\n )\n self.base_model = base_model",
"def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model",
"def setup_model(msid, t0, t1, model_spec, init):\n\n model = xija.ThermalModel(msid, start=t0, stop=t1, model_spec=model_spec)\n for key, value in init.items():\n if isinstance(value, dict):\n model.comp[key].set_data(value['data'], value['times'])\n else:\n model.comp[key].set_data(value)\n\n return model",
"def _initialize_model(rngs):\n init_model_state, init_params = model_def.init(\n rngs, *dummy_input, train=False, debug=False).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if config.get('init_head_bias', None) is not None:\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state",
"def initialize_model(self, initial_data):\n # EDIT THIS METHOD TO RETURN A MINIMAX MODEL ###\n return None",
"def initialize_model(self, init_model: Union[str, np.ndarray], pixel_mean: float):\n\n if (type(init_model) is str) and init_model == 'random':\n model = np.random.rand(*self.model_size)\n model = model * pixel_mean / model.mean()\n return model\n\n if (type(init_model) is str) and init_model == 'sum':\n model = self.frames.sum(0).reshape(*self.frame_size)\n model = model * pixel_mean / model.mean()\n model, mask = model_reshape(model, self.model_size)\n noise = np.where(mask == 1, 0, np.random.rand(*mask.shape)*pixel_mean*0.5)\n return model + noise\n\n if type(init_model) is np.ndarray:\n if not init_model.ndim == 2:\n raise ValueError(\"init_model has to be a 2D array.\")\n model, _ = model_reshape(init_model, self.model_size)\n model = model * pixel_mean / model.mean()\n return model\n raise ValueError(\"unknown initial model type. initial model can be 'random', 'sum', or a numpy array.\")",
"def _init_model_params(self):\n super()._init_model_params()\n\n if 'e' in self.init_params:\n if self.init_type == 'uniform':\n if self.nr_no_train_de == 0:\n self.B = [\n np.full(\n (self.n_states, self.n_features[i]), 1.0 / self.n_features[i])\n for i in range(self.n_emissions)\n ]\n else:\n check_if_attributes_set(self, attr='e')\n else:\n if self.nr_no_train_de == 0:\n self.B = [\n np.random.rand(self.n_states, self.n_features[i])\n for i in range(self.n_emissions)\n ]\n for i in range(self.n_emissions):\n normalise(self.B[i], axis=1)\n\n else:\n check_if_attributes_set(self, attr='e')",
"def init_model(model_type):\n if model_type == 'magnitude':\n model = Magnitude('../model/crawl-300d-2M.magnitude')\n elif model_type == 'gensim':\n model = KeyedVectors.load('../model/pre_trained_word2vec_embeddings.bin')\n else:\n print(\"Invalid model type.\")\n sys.exit(1)\n return model, model_type",
"def _initialize_model(rngs):\n init_model_state, init_params = nn.init(\n fn=init_fn, module=model_def)(rngs).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if (config.get('init_head_bias', None) is not None and\n 'output_projection' in init_params):\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state",
"def initialize_model_params():\n beta_0 = np.array([0., 0.])\n mu_0 = 0.\n return beta_0, mu_0",
"def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n 
print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())",
"def _initialize_model_params(self):\n\n if 'model' not in self._raw_data_dict:\n raise Error('The \"model\" key is not found in the configuration file. Looks like the parsed file is not '\n 'Object Detection API model configuration file.')\n params = list(self._raw_data_dict['model'].values())[0]\n for rule in mapping_rules:\n self._update_param_using_rule(params, rule)",
"def __init__(self,models,extraparams=None,outputcontraction=None,\n interpolation='linear',interpolationdirection='y',\n offgrid=None):\n from operator import isMappingType\n\n if len(models)==2 and isMappingType(models[1]):\n modtype = get_model_class(models[0])\n params = np.array(models[1].values())\n params = [dict([(models[1].keys()[i],v)] for v in t) for i,t in enumerate(params.T)]\n models = [modtype(**params[i]) for m in range(len(params))]\n\n params = None\n\n for m in models:\n if params is None:\n params = m.params\n else:\n if m.params != params:\n raise ValueError('model %s does not match parameters for other models'%m)\n\n if extraparams is not None:\n self._extraparams = {}\n for n,ps in extraparams.iteritems():\n arr = np.array(ps)\n if extraparams[n].size != len(models):\n raise ValueError('too many/few extra parameters for parameter %s'%n)\n self._extraparams[n] = arr\n else:\n self._extraparams = None\n\n self._params = params\n self.models = tuple(models)\n self._extraparams = extraparams\n\n self.outputcontraction = outputcontraction\n self.interpolation = interpolation\n self.interpolationdirection = interpolationdirection\n self.offgrid = offgrid",
"def initialize_model(self, positions, shifts_y, shifts_x):\n shifts_y = list(map(lambda x: x*-1, shifts_y))\n shifts_x = list(map(lambda x: x*-1, shifts_x))\n\n def list_shift(pos, c):\n return np.array([DeformationModel.calculate_shifts_from_coeffs(p[0],\n p[1], p[2], c) for p in pos])\n\n def residuals(c, shift, pos):\n return shift - list_shift(pos, c)\n\n c0y = [1] * 9\n res_y = optimize.leastsq(residuals, c0y, args=(shifts_y, positions))[0]\n\n c0x = [1] * 9\n res_x = optimize.leastsq(residuals, c0x, args=(shifts_x, positions))[0]\n\n result = np.concatenate((res_y, res_x), axis=0).reshape(2, 9)\n\n self.coeffs = result",
"def test_linear_fit_2d_model_set_fixed_parameters(self):\n init_model = models.Polynomial2D(\n degree=2,\n c1_0=[1, 2],\n c0_1=[-0.5, 1],\n n_models=2,\n fixed={\"c1_0\": True, \"c0_1\": True},\n )\n\n x, y = np.mgrid[0:5, 0:5]\n zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, zz)\n\n assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14)",
"def init_model(model):\n model(tf.random.uniform((1, 512, 512, 3)))",
"def run(self, X, Y, model):\n\n p0 = X.iloc[0] # read in the input info\n params = lmfit.Parameters() # empty parameter class\n success = True # check for success\n\n if model == 'Medlyn':\n min, max = self.param_space('g1')\n params.add('g1', p0.g1, min=min, max=max)\n min, max = self.param_space('sref')\n params.add('sref', p0.sref, min=min, max=max)\n\n if model == 'Eller':\n min, max = self.param_space('kmax')\n params.add('kmaxS1', p0.kmaxS1, min=min, max=max)\n\n if (model == 'ProfitMax') or (model == 'ProfitMax2'):\n min, max = self.param_space('kmax')\n params.add('kmax', p0.kmax, min=min, max=max)\n\n # the following models all require the Sperry kmax as an input!\n if model == 'Tuzet':\n min, max = self.param_space('g1')\n params.add('g1T', p0.g1T, min=min, max=max)\n\n if 'Tleaf' in X.columns: # vary g1 and kmax\n min, max = self.param_space('kmax')\n params.add('kmaxT', p0.kmax, min=min, max=max)\n\n else: # vary g1 and Pref, sref fixed\n min, max = self.param_space('PrefT', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PrefT):\n params.add('PrefT', p0.PrefT, min=min, max=max)\n\n else:\n params.add('PrefT', -p0.P88, min=min, max=max)\n\n if model == 'WUE-LWP':\n min, max = self.param_space('Lambda')\n params.add('Lambda', p0.Lambda, min=min, max=max)\n\n if model == 'CGain':\n min, max = self.param_space('Kappa')\n params.add('Kappa', p0.Kappa, min=min, max=max)\n\n if model == 'CMax':\n min, max = self.param_space('Alpha')\n params.add('Alpha', p0.Alpha, min=min, max=max)\n min, max = self.param_space('Beta')\n params.add('Beta', p0.Beta, min=min, max=max)\n\n if model == 'SOX-OPT':\n min, max = self.param_space('kmax')\n params.add('kmaxS2', p0.kmaxS2, min=min, max=max)\n\n if model == 'LeastCost':\n min, max = self.param_space('kmax')\n params.add('kmaxLC', p0.kmaxLC, min=min, max=max)\n min, max = self.param_space('Eta')\n params.add('Eta', p0.Eta, min=min, max=max)\n\n if model == 'CAP':\n min, max = self.param_space('krl')\n params.add('krlC', p0.krlC, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritC):\n params.add('PcritC', p0.PcritC, min=min, max=max)\n\n else:\n params.add('PcritC', -p0.P88, min=min, max=max)\n\n if model == 'MES':\n min, max = self.param_space('krl')\n params.add('krlM', p0.krlM, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritM):\n params.add('PcritM', p0.PcritM, min=min, max=max)\n\n else:\n params.add('PcritM', -p0.P88, min=min, max=max)\n\n if not os.path.isdir(self.opath): # create output dir\n os.makedirs(self.opath)\n\n # run the minimizer\n if self.method == 'emcee':\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, steps=self.steps,\n nwalkers=self.nchains, burn=self.burn,\n thin=self.thin, is_weighted=False,\n progress=False, nan_policy='omit')\n\n else:\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, nan_policy='omit')\n\n for param in out.params.values():\n\n if np.isclose(param.value, param.init_value):\n params[param.name] = lmfit.Parameter(name=param.name,\n value=1.5 *\n param.init_value)\n out = lmfit.minimize(fres, params,\n args=(model, X, Y, self.inf_gb,),\n method=self.method,\n nan_policy='omit')\n\n if not os.path.isfile(os.path.join(self.opath, '%s.txt' % (model))):\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'w+')\n\n else: # append to existing file\n txt = open(os.path.join(self.opath, 
'%s.txt' % (model)), 'a+')\n\n txt.write('\\n')\n txt.write(lmfit.fit_report(out))\n\n if not success:\n txt.write('\\n## Warning: had to fix first parameter value')\n\n txt.write('\\n')\n txt.close() # close text file\n\n return out.params.valuesdict()",
"def set_up_and_parameterise_model_for_experiment(self):\n self.experiment_unique_steps_to_model = {}\n for op_number, op in enumerate(self.experiment.unique_steps):\n new_model = self.model.new_copy()\n new_parameter_values = self.parameter_values.copy()\n\n if op.type != \"current\":\n # Voltage or power control\n # Create a new model where the current density is now a variable\n # To do so, we replace all instances of the current density in the\n # model with a current density variable, which is obtained from the\n # FunctionControl submodel\n # check which kind of external circuit model we need (differential\n # or algebraic)\n if op.type == \"voltage\":\n submodel_class = pybamm.external_circuit.VoltageFunctionControl\n elif op.type == \"power\":\n submodel_class = pybamm.external_circuit.PowerFunctionControl\n\n # Build the new submodel and update the model with it\n submodel = submodel_class(new_model.param, new_model.options)\n variables = new_model.variables\n submodel.variables = submodel.get_fundamental_variables()\n variables.update(submodel.variables)\n submodel.variables.update(submodel.get_coupled_variables(variables))\n variables.update(submodel.variables)\n submodel.set_rhs(variables)\n submodel.set_algebraic(variables)\n submodel.set_initial_conditions(variables)\n new_model.rhs.update(submodel.rhs)\n new_model.algebraic.update(submodel.algebraic)\n new_model.initial_conditions.update(submodel.initial_conditions)\n\n # Set the \"current function\" to be the variable defined in the submodel\n new_parameter_values[\"Current function [A]\"] = submodel.variables[\n \"Current [A]\"\n ]\n self.update_new_model_events(new_model, op)\n # Update parameter values\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n experiment_parameter_values = self.get_experiment_parameter_values(\n op, op_number\n )\n new_parameter_values.update(\n experiment_parameter_values, check_already_exists=False\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[repr(op)] = parameterised_model\n\n # Set up rest model if experiment has start times\n if self.experiment.initial_start_time:\n new_model = self.model.new_copy()\n # Update parameter values\n new_parameter_values = self.parameter_values.copy()\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n new_parameter_values.update(\n {\"Current function [A]\": 0, \"Ambient temperature [K]\": \"[input]\"},\n check_already_exists=False,\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[\n \"Rest for padding\"\n ] = parameterised_model",
"def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')",
"def init_model(model_filename, doGPU):\n # set model attributes list\n ##print(\"Model-dataset =\", model_ds_name)\n ##if model_ds_name == 'modelRAP':\n ## model_labels = loader_rapdataset_yiqiang.ATTRIBUTES\n ##elif model_ds_name == 'modelPETA':\n ## model_labels = loader_peta_dataset.ATTRIBUTES\n ##elif model_ds_name == 'modelRAPPETA':\n ## model_labels = [peta_label for rap_label,peta_label in loader_rap_plus_peta_dataset.ATTRIBUTES]\n ##else:\n ## print(\"ERROR: unknown model-dataset.\")\n ## sys.exit()\n model_labels = loader_rap_plus_peta_dataset.ATTRIBUTES\n assert (len(model_labels) == 49)\n\n # create model\n person.NO_ATTRIBUTES = len(model_labels) #TODO-elo: ugly, attr. nbr should be a parameter of person.Net.__init__()\n net = person.Net()\n if doGPU:\n net = person.Net().cuda()\n\n # load model\n print('loading model \"' + model_filename + '\"')\n person.load_model(net, model_filename)\n\n return net, model_labels",
"def model_setup(params):\n n_classes = len(classes_config.training_ids)\n if general_config.model_id == constants.ssdlite:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list)\n elif general_config.model_id == constants.ssd:\n model = resnet_ssd.SSD300(n_classes=n_classes)\n elif general_config.model_id == constants.ssd_modified:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list,\n out_channels=params.out_channels, width_mult=params.width_mult)\n model.to(general_config.device)\n\n return model",
"def _set_model(self):\n print(\"Setting up model...\")\n # Encoder\n inputs = Input(batch_shape=(None,) + self.input_shape)\n\n baseEncoder = self.createEncoder(inputs)\n baseEncoder = Dropout(self.drop)(baseEncoder)\n\n # Instantiate encoder layers\n Q_z_mean = Dense(self.latent_dim)\n Q_z_log_var = Dense(self.latent_dim)\n\n # Parameters for continous latent distribution\n z_mean = Q_z_mean(baseEncoder)\n z_log_var = Q_z_log_var(baseEncoder)\n self.encoder =Model(inputs, z_mean)\n\n # Sample from latent distributions\n\n encoding = Lambda(self._sampling_normal, output_shape=(self.latent_dim,))([z_mean, z_log_var])\n \n G_0 = Dense(8*self.kernel_init)(encoding)\n G_0 = Dropout(self.drop)(G_0)\n baseDecoder = self.createDecoder(G_0)\n\n self.model =Model(inputs, baseDecoder)\n # Store latent distribution parameters\n self.z_mean = z_mean\n self.z_log_var = z_log_var\n\n\n # Compile models\n #self.opt = RMSprop()\n self.model.compile(optimizer=self.opt, loss=self._vae_loss)\n self.model.summary()\n print(\"Completed model setup.\")",
"def initialize_model(self, config_param_vals = None):\n self._is_initialized = True\n\n self.fmu.instantiate()\n self.fmu.reset()\n self.fmu.setupExperiment(startTime=self.start_time)\n if config_param_vals is not None:\n self._apply_config(config_param_vals)\n self.fmu.enterInitializationMode()\n self.fmu.exitInitializationMode()\n\n return",
"def _set_model_parameters(self, verbose=False):\n from scipy.special import gamma\n\n z0 = self.z0\n\n # set parameters that are constants\n p_v, d_v, cs0, sigma, vout0 = (1, 2, 6.7, 0.1, 25.0)\n p_vB, d_vB, Mach0, p_M, d_M = (4, 2, 0.5, 1, 3)\n\n # calculate amplitudes that make the pdf integrate to 1\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n # store them in dictionaries\n self.cool_params = dict(A_v=A_v, p_v=p_v, d_v=d_v,\n A_cs=A_cs, cs0=cs0, sigma=sigma, vout0=vout0)\n self.hot_params = dict(A_vB=A_vB, p_vB=p_vB, d_vB=d_vB,\n A_M=A_M, Mach0=Mach0,p_M=p_M,d_M=d_M)\n # SN related parameters that set the reference values for loading factors\n self.params = dict(Esn=1.e51*au.erg, mstar=95.5*au.M_sun, vcool=200*au.km/au.s,\n Mej=10.*au.M_sun, ZSN=0.2, ZISM0=0.02)\n self.params['vej'] = np.sqrt(2.0*self.params['Esn']/self.params['Mej']).to('km/s')\n self.ref_params = dict(Mref=self.params['mstar'],\n pref=self.params['Esn']/(2*self.params['vcool']),\n Eref=self.params['Esn'],\n Zref=self.params['Mej']*self.params['ZSN'])\n\n # coefficients used in conversion from mass to other PDFs\n self.vp = (self.ref_params['pref']/self.params['mstar']).to('km/s').value\n self.vE = np.sqrt(self.ref_params['Eref']/self.params['mstar']).to('km/s').value\n self.Ze = (self.ref_params['Zref']/self.params['mstar']).cgs.value\n\n # parameters for scaling relations from Paper~I\n a = np.array(fit_alpha[z0])\n b = np.array(fit_beta[z0])\n\n self.scaling_params = dict(a=a, b=b)\n if z0 == '2H':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 7.5\n elif z0 == '500':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 8.5\n elif z0 == '1000':\n self.cool_params['vout0'] = 60\n self.cool_params['cs0'] = 10.0\n self.scaling_params['A'] = np.round(10.**(np.array(self.scaling_params['a'])),2)\n self.scaling_params['p'] = 1.+np.array(self.scaling_params['b'])\n self.enum=dict(M_cool=0, M_int=1, M_hot=2, M_total=3,\n p_cool=4, p_int=5, p_hot=6, p_total=7,\n E_cool=8, E_int=9, E_hot=10, E_total=11,\n Z_cool=12, Z_int=13, Z_hot=14, Z_total=15)\n\n # print parameters\n if verbose:\n self.show_parameters()",
"def test_linear_fit_model_set_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n yy = np.array([2 + x + 0.5 * x * x, -2 * x])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, yy)\n\n assert_allclose(fitted_model.c0, [2.0, 0.0], atol=1e-14)\n assert_allclose(fitted_model.c1, [1.0, -2.0], atol=1e-14)\n assert_allclose(fitted_model.c2, [0.5, 0.0], atol=1e-14)",
"def __init__(self, model, X_lower, X_upper):\n self.model = model\n self.X_upper = X_upper\n self.X_lower = X_lower",
"def init_model(self):\n pass",
"def standard_init(self, data):\n comm = self.comm\n H = self.H\n my_y = data['y']\n my_N, D = my_y.shape\n\n assert D == self.D\n\n # Calculate averarge W\n W_mean = parallel.allmean(my_y, axis=0, comm=comm) # shape: (D, )\n\n # Calculate data variance\n sigma_sq = parallel.allmean((my_y-W_mean)**2, axis=0, comm=comm) # shape: (D, )\n sigma_init = np.sqrt(sigma_sq).sum() / D # scalar\n\n # Initial W\n noise = sigma_init/4.\n W_init = W_mean + np.random.normal(scale=noise, size=[H, D]) # shape: (H, D)\n\n #Create and set Model Parameters, W columns have the same average!\n model_params = {\n 'W' : W_init, \n 'pi' : 1./H,\n 'sigma' : sigma_init\n }\n\n return model_params",
"def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)",
"def initialize(self):\n for key in self.parameter_dict:\n self.models[key] = self._create_model(key)"
]
| [
"0.6551731",
"0.6463214",
"0.6463214",
"0.635312",
"0.6221765",
"0.6221027",
"0.6211802",
"0.6145611",
"0.61099744",
"0.6089215",
"0.60518026",
"0.6050148",
"0.60214335",
"0.6004301",
"0.6001939",
"0.5985846",
"0.59553504",
"0.5951064",
"0.5933379",
"0.5914137",
"0.590727",
"0.58914447",
"0.58783215",
"0.58773786",
"0.5874676",
"0.5873079",
"0.58593243",
"0.58482015",
"0.58478427",
"0.58251476"
]
| 0.75738263 | 0 |
Fit the sky using a Ring2D model in which all parameters but the amplitude are fixed. | def fit_sky(self):
min_value = self.data.min()
ring_model = models.Ring2D(
min_value, self.x, self.y, self._box * 0.4, width=self._box * 0.4
)
ring_model.r_in.fixed = True
ring_model.width.fixed = True
ring_model.x_0.fixed = True
ring_model.y_0.fixed = True
fit_p = fitting.LevMarLSQFitter()
return fit_p(ring_model, self._XGrid, self._YGrid, self.data).amplitude | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_linear_fit_2d_model_set_fixed_parameters(self):\n init_model = models.Polynomial2D(\n degree=2,\n c1_0=[1, 2],\n c0_1=[-0.5, 1],\n n_models=2,\n fixed={\"c1_0\": True, \"c0_1\": True},\n )\n\n x, y = np.mgrid[0:5, 0:5]\n zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, zz)\n\n assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14)",
"def fit2d(xdata,ydata,zdata,degree=1,reject=0,plot=None,xr=None,yr=None,zr=None,xt=None,yt=None,zt=None,gdrange=None,pfit=None,log=False,size=5) :\n\n if gdrange is not None :\n gd = np.where((zdata > gdrange[0]) & (zdata < gdrange[1]))[0]\n xfit = xdata[gd]\n yfit = ydata[gd]\n zfit = zdata[gd]\n else :\n xfit = xdata\n yfit = ydata\n zfit = zdata\n\n # set up fitter and do fit\n if pfit is None :\n fit_p = fitting.LinearLSQFitter()\n p_init = models.Polynomial2D(degree=degree)\n pfit = fit_p(p_init, xfit, yfit, zfit)\n # rejection of points?\n if reject > 0 :\n gd=np.where(abs(zfit-pfit(xfit,yfit)) < reject)[0]\n bd=np.where(abs(zfit-pfit(xfit,yfit)) >= reject)[0]\n print('rejected ',len(xdata)-len(gd),' of ',len(xdata),' points')\n pfit = fit_p(p_init, xfit[gd], yfit[gd], zfit[gd])\n\n print('2D rms: ',(zfit-pfit(xfit,yfit)).std())\n \n if plot is not None :\n if log :\n zfit = 10.**zfit\n if xr is None : xr = [xfit.min(),xfit.max()]\n if yr is None : yr = [yfit.min(),yfit.max()]\n if zr is None : zr = [zfit.min(),zfit.max()]\n # plot data\n plots.plotc(plot,xfit,yfit,zfit,xr=xr,yr=yr,zr=zr,\n xt=xt,yt=yt,zt=zt,colorbar=True,size=size,linewidth=1)\n # create independent variable grid for model and display\n y, x = np.mgrid[yr[1]:yr[0]:200j, xr[1]:xr[0]:200j]\n if log :\n plot.imshow(10.**pfit(x,y),extent=[xr[1],xr[0],yr[1],yr[0]],\n aspect='auto',vmin=zr[0],vmax=zr[1], origin='lower',cmap='rainbow')\n else :\n plot.imshow(pfit(x,y),extent=[xr[1],xr[0],yr[1],yr[0]],\n aspect='auto',vmin=zr[0],vmax=zr[1], origin='lower',cmap='rainbow')\n #plt.show()\n\n return pfit",
"def _initialize_model(self):\n max_value = self.data.max()\n\n if self.model_type == self._GAUSSIAN2D:\n model = models.Gaussian2D(\n x_mean=self.x, y_mean=self.y, x_stddev=1, y_stddev=1\n )\n model.amplitude = max_value\n\n # Establish reasonable bounds for the fitted parameters\n model.x_stddev.bounds = (0, self._box / 4)\n model.y_stddev.bounds = (0, self._box / 4)\n model.x_mean.bounds = (self.x - 5, self.x + 5)\n model.y_mean.bounds = (self.y - 5, self.y + 5)\n\n elif self.model_type == self._MOFFAT2D:\n model = models.Moffat2D()\n model.x_0 = self.x\n model.y_0 = self.y\n model.gamma = 2\n model.alpha = 2\n model.amplitude = max_value\n\n # Establish reasonable bounds for the fitted parameters\n model.alpha.bounds = (1, 6)\n model.gamma.bounds = (0, self._box / 4)\n model.x_0.bounds = (self.x - 5, self.x + 5)\n model.y_0.bounds = (self.y - 5, self.y + 5)\n\n model += models.Const2D(self.fit_sky())\n model.amplitude_1.fixed = True\n return model",
"def test_linear_fit_2d_model_set_masked_values(self):\n init_model = models.Polynomial2D(1, n_models=2)\n x, y = np.mgrid[0:5, 0:5]\n z = np.ma.masked_array(\n [2 * x + 3 * y + 1, x - 0.5 * y - 2], mask=np.zeros_like([x, x])\n )\n\n z[0, 3, 1] = -1000.0 # throw off fit coefficients if unmasked\n z.mask[0, 3, 1] = True\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, z)\n\n assert_allclose(fitted_model.c0_0, [1.0, -2.0], atol=1e-14)\n assert_allclose(fitted_model.c1_0, [2.0, 1.0], atol=1e-14)\n assert_allclose(fitted_model.c0_1, [3.0, -0.5], atol=1e-14)",
"def fit(self, x, y):\n # *** START CODE HERE ***\n m, n = x.shape\n theta = self.theta\n if theta is None:\n theta = np.zeros(n)\n\n while True:\n loss = self.loss(x, y, theta, m, n)\n theta_new = self.update(x, y, theta, m, n)\n if self.verbose:\n print(\"Loss: \", loss, \" 1-norm: \", np.linalg.norm(theta_new - theta, ord=1))\n if np.linalg.norm(theta_new - theta, ord = 1) < self.eps:\n self.theta = theta_new\n break\n theta = theta_new\n\n self.theta = theta\n util.plot(x, y, self.theta, 'output/p01b')\n\n\n # *** END CODE HERE ***",
"def fit_noise_model(self):\n\n for term in self._term_data.values(): #perform all pairwise fits\n term.fit()\n \n for pair,pauli in self.layer.single_pairs:\n self._term_data[pauli].fit_single()\n pair_dat = self._term_data[pair]\n pair_dat.fidelity = pair_dat.fidelity**2/self._term_data[pauli].fidelity\n\n \n logger.info(\"Fit noise model with following fidelities:\") \n logger.info([term.fidelity for term in self._term_data.values()])\n\n #get noise model from fits\n self.nnls_fit()",
"def test_linear_fit_model_set_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n yy = np.array([2 + x + 0.5 * x * x, -2 * x])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, yy)\n\n assert_allclose(fitted_model.c0, [2.0, 0.0], atol=1e-14)\n assert_allclose(fitted_model.c1, [1.0, -2.0], atol=1e-14)\n assert_allclose(fitted_model.c2, [0.5, 0.0], atol=1e-14)",
"def test_linear_fit_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=1)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n y = 2 + x + 0.5 * x * x\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14)",
"def test_linear_fit_2d_model_set(self):\n\n init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)\n x = np.arange(10)\n y = np.arange(10)\n z_expected = init_model(x, y, model_set_axis=False)\n assert z_expected.shape == (2, 10)\n\n # Add a bit of random noise\n with NumpyRNGContext(_RANDOM_SEED):\n z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, z)\n assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected, rtol=1e-1)",
"def fit(self, X, y):\n self.centers = self._select_centers(X)\n self.ampls = self._select_ampl(y)\n G = self._calculate_interpolation_matrix(X)\n self.weights = np.dot(np.linalg.pinv(G), y)",
"def fit(self, x, y):\n for i in range(len(x)):\n for j in range(len(x[i])):\n\n # init probability\n if j == 0:\n self.init_probability[y[i][j]] += 1\n\n # transition probability\n else:\n self.transition[y[i][j], y[i][j - 1]] += 1\n\n # emission probability\n self.emission[x[i][j]][y[i][j]] += 1\n\n # laplace smoothing\n self.init_probability = (self.init_probability + 1) / np.sum(self.init_probability)\n self.transition = (self.transition + 1) / (np.sum(self.transition) + self.n_classes)\n self.emission = (self.emission + 1) / (np.sum(self.emission) + self.n_feature)",
"def fit(self, X, y):\n self.model_x = X\n self.model_y = y",
"def fit(self,X,y):\n\n d = X.shape[1]\n # 1. sketch the data\n self.B,a = self._sketch(X,method=self.fd_mode)\n #H = B.T@B + (self.alpha+a)*np.eye(d)\n #self.H = H\n self.H_inv = self._get_inv() #np.linalg.pinv(H)\n self.coef_ = self.H_inv@(X.T@y) #np.linalg.solve(H, X.T@y)\n self.is_fitted = True",
"def skydip(scans):\n title = Path(scans[0]).name + \" \".join([Path(scan).name.split(\"_\")[4] for scan in scans[1:]])\n\n signal = []\n std = []\n elevation = []\n\n for scan in scans:\n kd = KissData(scan)\n kd.read_data(list_data=[\"A_masq\", \"I\", \"Q\", \"F_tone\", \"F_tl_Az\", \"F_tl_El\"])\n\n # TODO: Why do we need copy here, seems that numpy strides are making\n # funny things here !\n\n F_tone = 1e3 * kd.F_tone.copy().mean(1)[:, np.newaxis] + kd.continuum\n signal.append(F_tone.mean(1))\n std.append(F_tone.std(1))\n elevation.append(kd.F_tl_El.mean())\n\n signal = np.array(signal)\n std = np.array(std)\n elevation = np.array(elevation)\n detectors = kd.list_detector\n\n # rearrange signal to be coherent with the fit ?\n signal_new = 2 * signal[:, 0][:, np.newaxis] - signal\n\n air_mass = 1.0 / np.sin(np.radians(elevation))\n\n def T(\n airm, const, fact, tau_f\n ): # signal definition for skydip model: there is -1 before B to take into account the increasing resonance to lower optical load\n return const + 270.0 * fact * (1.0 - np.exp(-tau_f * airm))\n\n popts = []\n pcovs = []\n for _sig, _std in zip(signal_new.T, std.T):\n P0 = (4e8, 1e8, 1.0)\n popt, pcov = curve_fit(T, air_mass, _sig, sigma=_sig, p0=P0, maxfev=100000)\n\n popts.append(popt)\n pcovs.append(pcovs)\n\n popts = np.array(popts)\n\n ndet = popts.shape[0]\n fig_skydip_fit, axes = plt.subplots(\n np.int(np.sqrt(ndet)), np.int(ndet / np.sqrt(ndet)), sharex=True\n ) # , sharey=True)\n for _sig, _std, popt, detector, ax in zip(signal_new.T, std.T, popts, detectors, axes.flatten()):\n ax.errorbar(air_mass, _sig, _std)\n ax.plot(air_mass, T(air_mass, *popt))\n ax.set_title(detector, pad=-15)\n ax.label_outer()\n\n fig_skydip_fit.suptitle(title)\n fig_skydip_fit.tight_layout()\n fig_skydip_fit.subplots_adjust(wspace=0, hspace=0)\n\n Ao, Bo, tau = popts.T\n\n fig_skydip_stat, axes = plt.subplots(1, 3)\n for (item, value), ax in zip({r\"$A_0$\": Ao, r\"$B_0$\": Bo, \"tau\": tau}.items(), axes):\n mean_value = np.nanmedian(value)\n std_value = mad_std(value, ignore_nan=True)\n range_value = np.array([-3, 3]) * std_value + mean_value\n ax.hist(value, range=range_value)\n ax.set_xlabel(item)\n fig_skydip_stat.suptitle(title)\n\n return fig_skydip_fit, fig_skydip_stat",
"def fit_quad_to_peak(x,y):\n def quad(B,x):\n return B[0] *(x -B[1]) ** 2 + B[2]\n\n beta = (0,np.mean(x),y[val_to_indx(x,np.mean(x))])\n\n data = sodr.Data(x,y)\n model = sodr.Model(quad)\n worker = sodr.ODR(data,model,beta)\n out = worker.run()\n\n\n \n ## plts.figure()\n ## plts.plot(x,y)\n ## plts.plot(x,quad(out.beta,x))\n ## plts.title(out.beta[1])\n return out",
"def makeFit(self):\n if not self.fitModel.params:\n return\n cs = self.spectrum\n self.worker.make_model_curve(cs, allData=csi.allLoadedItems)\n\n dfparams = cs.fitParams\n lcfRes = dfparams['lcf_result']\n self.fitR.setText('R={0:.5g}'.format(lcfRes['R']))\n self.updateFitResults()\n self.fitReady.emit()",
"def fit(self, X, y):\n self.model = self._initialize_model(X, y)\n self.model.optimize()",
"def fit(self, skydip):\n parameter_order = ['tau', 'offset', 'kelvin', 'tsky']\n self.parameters = {}\n self.errors = {}\n self.p_opt = None\n self.p_cov = None\n self.fitted_values = None\n self.data = None\n self.sigma = None\n self.elevation = None\n\n log.debug(\"Initial skydip values:\")\n log.debug(f\" Tsky = {self.initial_guess['tsky']}\")\n log.debug(f\" offset = {self.initial_guess['offset']}\")\n log.debug(f\" kelvin = {self.initial_guess['kelvin']}\")\n log.debug(f\" tau = {self.initial_guess['tau']}\")\n\n if self.el_range is not None:\n from_bin = max(0, skydip.get_bin(self.el_range.min))\n to_bin = min(skydip.data.size, skydip.get_bin(self.el_range.max))\n else:\n from_bin = 0\n to_bin = skydip.data.size\n\n self.init_parameters(skydip)\n\n data = skydip.data[from_bin:to_bin]\n weight = skydip.weight[from_bin:to_bin]\n valid = weight > 0\n data = data[valid]\n weight = weight[valid]\n\n if self.uniform_weights:\n sigma = None\n else:\n sigma = 1 / weight\n\n elevation = skydip.get_elevation(\n np.nonzero(valid)[0]).to('radian').value\n\n self.use_points = data.size\n\n p0 = []\n lower_bounds = np.zeros(4, dtype=float)\n upper_bounds = np.zeros(4, dtype=float)\n\n for i, parameter in enumerate(parameter_order):\n value = self.initial_guess[parameter]\n p0.append(value)\n if parameter in self.fit_for:\n lower_bounds[i] = self.bounds[parameter][0]\n upper_bounds[i] = self.bounds[parameter][1]\n else: # An attempt to fix parameters with curve_fit\n eps = abs(value - np.nextafter(value, 1))\n lower_bounds[i] = value - eps\n upper_bounds[i] = value + eps\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', OptimizeWarning)\n p_opt, p_cov = curve_fit(self.value_at, elevation, data,\n p0=p0, sigma=sigma,\n bounds=(lower_bounds, upper_bounds))\n self.p_opt = p_opt\n self.p_cov = p_cov\n self.data = data\n self.elevation = elevation\n self.sigma = sigma\n\n self.has_converged = np.isfinite(p_opt).all()\n if not self.has_converged: # pragma: no cover\n log.warning(\"Skydip fit did not converge!\")\n errors = np.sqrt(np.diag(p_cov))\n\n for i, parameter in enumerate(parameter_order):\n self.parameters[parameter] = p_opt[i]\n self.errors[parameter] = errors[i]\n\n self.fitted_values = self.fit_elevation(elevation)\n fit_weights = None if sigma is None else weight ** 2\n\n t_obs_rms = np.sqrt(np.average((data - self.fitted_values) ** 2,\n weights=fit_weights))\n self.rms = t_obs_rms / self.parameters['kelvin']",
"def make_model(self, incl, psi, PA=0.0, get_2d=True, int_kwargs={}, vel_kwargs={}, lw_kwargs=None):\n if PA: x_plane, y_plane = Rosenfeld2d._rotate_sky_plane(self.grid.XYZ[0], self.grid.XYZ[1], -PA)\n else: x_plane, y_plane = self.grid.XYZ[:2]\n\n cos_incl = np.cos(incl)\n sin_incl = np.sin(incl)\n y_plane_cos_incl = y_plane/cos_incl\n\n #**********************\n #ROSENFELD COEFFICIENTS\n fac = -2*np.sin(psi)**2\n A = np.cos(2*incl) + np.cos(2*psi)\n B = fac * 2*(sin_incl/cos_incl) * y_plane\n C = fac * (x_plane**2 + (y_plane_cos_incl)**2)\n t = self._get_t(A,B,C).T\n\n #****************************\n #ROSENFELD CONVERSION X<-->X'\n x_true_near = x_plane\n y_true_near = y_plane_cos_incl + t[1]*sin_incl\n \n x_true_far = x_plane\n y_true_far = y_plane_cos_incl + t[0]*sin_incl\n \n #np.hypot 2x faster than np.linalg.norm([x,y], axis=0)\n R_true_near = np.hypot(x_true_near, y_true_near) \n R_true_far = np.hypot(x_true_far, y_true_far)\n\n z_true_near = t[1] * cos_incl\n z_true_far = t[0] * cos_incl \n\n phi_true_near = np.arctan2(y_true_near, x_true_near) \n phi_true_far = np.arctan2(y_true_far, x_true_far) \n\n #****************************\n \n grid_true = {'near': [x_true_near, y_true_near, z_true_near, R_true_near, phi_true_near], \n 'far': [x_true_far, y_true_far, z_true_far, R_true_far, phi_true_far]}\n\n #*******************************\n #COMPUTE PROPERTIES ON TRUE GRID\n avai_kwargs = [vel_kwargs, int_kwargs, lw_kwargs]\n avai_funcs = [self.velocity_func, self.intensity_func, self.linewidth_func]\n true_kwargs = [isinstance(kwarg, dict) for kwarg in avai_kwargs]\n prop_kwargs = [kwarg for i, kwarg in enumerate(avai_kwargs) if true_kwargs[i]]\n prop_funcs = [func for i, func in enumerate(avai_funcs) if true_kwargs[i]]\n props = self._compute_prop(grid_true, prop_funcs, prop_kwargs)\n #Positive vel is positive along z, i.e. pointing to the observer, for that reason imposed a (-) factor to convert to the standard convention: (+) receding \n if true_kwargs[0]:\n ang_fac_near = -sin_incl * np.cos(phi_true_near)\n ang_fac_far = -sin_incl * np.cos(phi_true_far)\n props[0]['near'] *= ang_fac_near \n props[0]['far'] *= ang_fac_far\n \n #*************************************\n\n return [{side: prop[side].reshape(self.grid.Nodes[:2]) for side in ['near', 'far']} for prop in props]",
"def fit():\n pass",
"def sky_noise_weighting(file_name, sky_file_name):\n cs_data = spectra_analysis(file_name, sky_file_name)\n cube_data = cs_data['gd_shifted']\n sn_data = cs_data['sky_noise']\n wl_soln = wavelength_solution(file_name)\n\n sn_data_min = np.min(sn_data)\n in_wt = 1 / (sn_data - sn_data_min + 1)\n\n sky_regns = np.zeros((len(in_wt),2)) # storing regions of potential sky noise\n for i in range(len(in_wt)): \n data_acl = cube_data[i]\n data_sky = sn_data[i]\n data_prb = in_wt[i]\n \n if ( 0.00 <= np.abs(data_prb) <= 1.00 ):\n sky_regns[i][0] = data_prb\n sky_regns[i][1] = data_sky\n\n # finding max peak in the sky-noise data and fitting a Gaussian to that\n # x-axis data\n x_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n\n # Finding peaks with PeakUtils\n sky_peaks = peakutils.indexes(sn_data, thres=300, thres_abs=True)\n sky_peaks_x = peakutils.interpolate(x_range, sn_data, sky_peaks)\n\n if (sky_peaks_x.size != 0):\n sky_peak = sky_peaks_x[0]\n sky_peak_index = find_nearest(sky_peak, x_range)\n else:\n sky_peak = 6000\n sky_peak_index = 0\n\n sky_peak_loc = x_range[sky_peak_index]\n\n sky_peak_range = [sky_peak-100, sky_peak+100]\n sky_peak_range_loc = [find_nearest(x_range, x) for x in sky_peak_range]\n\n sky_rng_x = x_range[sky_peak_range_loc[0]:sky_peak_range_loc[1]]\n sky_rng_y = sn_data[sky_peak_range_loc[0]:sky_peak_range_loc[1]]\n\n sky_gauss_params = Parameters()\n sky_gauss_params.add('c', value=0)\n sky_gauss_params.add('i1', value=np.max(sky_rng_y), min=0.0)\n sky_gauss_params.add('mu', value=sky_peak_loc)\n sky_gauss_params.add('sigma1', value=3)\n\n sky_gauss_model = Model(sn_gauss)\n sky_gauss_rslt = sky_gauss_model.fit(sky_rng_y, x=sky_rng_x, \n params=sky_gauss_params)\n sky_gauss_best = sky_gauss_rslt.best_values\n\n sky_sigma = sky_gauss_best['sigma1']\n\n return {'inverse_sky': in_wt, 'sky_regions': sky_regns, 'sky_sigma': sky_sigma}",
"def test_linear_fit_model_set_masked_values(self):\n # NB. For single models, there is an equivalent doctest.\n\n init_model = models.Polynomial1D(degree=1, n_models=2)\n x = np.arange(10)\n y = np.ma.masked_array([2 * x + 1, x - 2], mask=np.zeros_like([x, x]))\n\n y[0, 7] = 100.0 # throw off fit coefficients if unmasked\n y.mask[0, 7] = True\n y[1, 1:3] = -100.0\n y.mask[1, 1:3] = True\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n\n assert_allclose(fitted_model.c0, [1.0, -2.0], atol=1e-14)\n assert_allclose(fitted_model.c1, [2.0, 1.0], atol=1e-14)",
"def plot_fitmodel(extdata, alax=False, yoffset=0, res=False, wavenum=False):\n # plot a fitted model if available\n if extdata.model:\n if extdata.model[\"type\"] == \"pow_elx\":\n # in this case, fitted amplitude must be multiplied by A(V) to get the \"combined\" model amplitude\n labeltxt = r\"$%5.2f \\lambda ^{-%5.2f} - %5.2f$\" % (\n extdata.model[\"params\"][0].value * extdata.model[\"params\"][3].value,\n extdata.model[\"params\"][2].value,\n extdata.model[\"params\"][3].value,\n )\n elif extdata.model[\"type\"] == \"pow_alax\":\n labeltxt = r\"$%5.3f \\,\\lambda^{-%5.2f}$\" % (\n extdata.model[\"params\"][0].value,\n extdata.model[\"params\"][2].value,\n )\n else:\n labeltxt = \"fitted model\"\n\n # obtain the model extinctions\n mod_ext = extdata.model[\"exts\"]\n\n # if the plot needs to be in A(lambda)/A(V), the model extinctions need to be converted to match the data\n if alax:\n mod_ext = (mod_ext / extdata.columns[\"AV\"][0]) + 1\n\n if wavenum:\n x = 1 / extdata.model[\"waves\"]\n else:\n x = extdata.model[\"waves\"]\n\n plt.plot(\n x,\n mod_ext + yoffset,\n \"-\",\n lw=3,\n color=\"crimson\",\n alpha=0.8,\n label=labeltxt,\n zorder=5,\n )\n plt.legend(loc=\"lower left\")\n\n # plot the residuals if requested\n if res:\n plt.setp(plt.gca().get_xticklabels(), visible=False)\n plt.axes([0.125, 0, 0.775, 0.11], sharex=plt.gca())\n plt.scatter(x, extdata.model[\"residuals\"], s=0.5, color=\"k\")\n plt.axhline(ls=\"--\", c=\"k\", alpha=0.5)\n plt.axhline(y=0.05, ls=\":\", c=\"k\", alpha=0.5)\n plt.axhline(y=-0.05, ls=\":\", c=\"k\", alpha=0.5)\n plt.ylim(-0.1, 0.1)\n plt.ylabel(\"residual\")\n\n else:\n warnings.warn(\n \"There is no fitted model available to plot.\",\n stacklevel=2,\n )",
"def _beam_fit_fn_2(z, d0, Theta):\n return d0**2 + (Theta*z)**2",
"def initialize_variables(self):\n super(D2Model, self).initialize_variables()\n\n s = \"::: initializing 2D variables :::\"\n print_text(s, cls=self)\n\n # Depth below sea level :\n class Depth(Expression):\n def eval(self, values, x):\n values[0] = abs(min(0, x[2]))\n self.D = Depth(element=self.Q.ufl_element())\n \n # Enthalpy model\n self.theta_surface = Function(self.Q, name='theta_surface')\n self.theta_float = Function(self.Q, name='theta_float')\n self.theta_app = Function(self.Q, name='theta_app')\n self.theta = Function(self.Q, name='theta')\n self.theta0 = Function(self.Q, name='theta0')\n self.W0 = Function(self.Q, name='W0')\n self.thetahat = Function(self.Q, name='thetahat')\n self.uhat = Function(self.Q, name='uhat')\n self.vhat = Function(self.Q, name='vhat')\n self.what = Function(self.Q, name='what')\n self.mhat = Function(self.Q, name='mhat')\n self.rho_b = Function(self.Q, name='rho_b')\n\n # Age model \n self.age = Function(self.Q, name='age')\n self.a0 = Function(self.Q, name='a0')\n\n # Surface climate model\n self.precip = Function(self.Q, name='precip')\n\n # Stokes-balance model :\n self.u_s = Function(self.Q, name='u_s')\n self.u_t = Function(self.Q, name='u_t')\n self.F_id = Function(self.Q, name='F_id')\n self.F_jd = Function(self.Q, name='F_jd')\n self.F_ib = Function(self.Q, name='F_ib')\n self.F_jb = Function(self.Q, name='F_jb')\n self.F_ip = Function(self.Q, name='F_ip')\n self.F_jp = Function(self.Q, name='F_jp')\n self.F_ii = Function(self.Q, name='F_ii')\n self.F_ij = Function(self.Q, name='F_ij')\n self.F_iz = Function(self.Q, name='F_iz')\n self.F_ji = Function(self.Q, name='F_ji')\n self.F_jj = Function(self.Q, name='F_jj')\n self.F_jz = Function(self.Q, name='F_jz')\n self.tau_iz = Function(self.Q, name='tau_iz')\n self.tau_jz = Function(self.Q, name='tau_jz')",
"def fit(self, X, y):\n self.__X = X\n self.__y = y\n self.__trained = True",
"def __init__(\r\n self,\r\n centre=30.0, # <- **PyAutoFit** recognises these constructor arguments\r\n normalization=1.0, # <- are the Exponential`s model parameters.\r\n rate=0.01,\r\n ):\r\n self.centre = centre\r\n self.normalization = normalization\r\n self.rate = rate",
"def fit_me(X, Y, n_components = 2, period = 24, model_type = 'lin', lin_comp = False, alpha = 0, name = '', save_to = '', plot=True, plot_residuals=False, plot_measurements=True, plot_margins=True, return_model = False, color = False, plot_phase = True, hold=False, x_label = \"\", y_label = \"\"):\n X_test = np.linspace(0, 100, 1000)\n\n if n_components == 0:\n X_fit = X\n X_fit_test = X_test\n lin_comp = True\n else:\n for i in range(n_components):\n n = i+1\n\n A = np.sin((X/(period/n))*np.pi*2)\n B = np.cos((X/(period/n))*np.pi*2) \n A_test = np.sin((X_test/(period/n))*np.pi*2)\n B_test = np.cos((X_test/(period/n))*np.pi*2)\n\n if not i:\n X_fit = np.column_stack((A, B))\n X_fit_test = np.column_stack((A_test, B_test)) \n else:\n X_fit = np.column_stack((X_fit, np.column_stack((A, B))))\n X_fit_test = np.column_stack((X_fit_test, np.column_stack((A_test, B_test))))\n\n \n X_fit_eval_params = X_fit_test\n \n if lin_comp and n_components:\n X_fit = np.column_stack((X, X_fit))\n X_fit_eval_params = np.column_stack((np.zeros(len(X_test)), X_fit_test))\n X_fit_test = np.column_stack((X_test, X_fit_test)) \n\n\n #if model_type == 'lin':\n X_fit = sm.add_constant(X_fit, has_constant='add')\n X_fit_test = sm.add_constant(X_fit_test, has_constant='add')\n X_fit_eval_params = sm.add_constant(X_fit_eval_params, has_constant='add')\n \"\"\"\n ###\n # fit\n ###\n \"\"\" \n if model_type == 'lin':\n model = sm.OLS(Y, X_fit)\n results = model.fit()\n elif model_type == 'poisson':\n model = sm.GLM(Y, X_fit, family=sm.families.Poisson())\n results = model.fit()\n elif model_type =='gen_poisson':\n model = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y, X_fit)\n results = model.fit()\n elif model_type == 'nb':\n #exposure = np.zeros(len(Y))\n #exposure[:] = np.mean(Y)\n #model = sm.GLM(Y, X_fit, family=sm.families.NegativeBinomial(), exposure = exposure)\n \n \n # https://towardsdatascience.com/negative-binomial-regression-f99031bb25b4\n # https://dius.com.au/2017/08/03/using-statsmodels-glms-to-model-beverage-consumption/#cameron\n if not alpha:\n train_model = sm.GLM(Y, X_fit, family=sm.families.Poisson())\n train_results = train_model.fit()\n\n df_train = pd.DataFrame()\n df_train['Y'] = Y\n df_train['mu'] = train_results.mu\n df_train['AUX_OLS_DEP'] = df_train.apply(lambda x: ((x['Y'] - x['mu'])**2 - x['Y']) / x['mu'], axis=1)\n ols_expr = \"\"\"AUX_OLS_DEP ~ mu - 1\"\"\"\n aux_olsr_results = smf.ols(ols_expr, df_train).fit()\n\n alpha=aux_olsr_results.params[0]\n #print(alpha)\n\n model = sm.GLM(Y, X_fit, family=sm.families.NegativeBinomial(alpha=alpha))\n \n results = model.fit()\n else:\n print(\"Invalid option\")\n return\n\n \n if model_type =='lin':\n Y_fit = results.fittedvalues\n else:\n Y_fit = results.predict(X_fit)\n \n \n if model_type in ['lin', 'poisson', 'nb']:\n statistics = calculate_statistics(X, Y, Y_fit, n_components, period, lin_comp)\n if model_type in ['poisson', 'nb']:\n statistics['count'] = np.sum(Y) \n else:\n RSS = sum((Y - Y_fit)**2)\n p = results.llr_pvalue\n statistics = {'p':p, 'RSS':RSS, 'count': np.sum(Y)}\n \n Y_test = results.predict(X_fit_test)\n Y_eval_params = results.predict(X_fit_eval_params)\n \n rhythm_params = evaluate_rhythm_params(X_test, Y_eval_params)\n \n \"\"\"\n ###\n # plot\n ###\n \"\"\"\n if plot:\n if plot_margins:\n if model_type == 'lin':\n sdev, lower, upper = wls_prediction_std(results, exog=X_fit_test, alpha=0.05)\n if color:\n plt.fill_between(X_test, lower, upper, color=color, alpha=0.1)\n else:\n plt.fill_between(X_test, lower, 
upper, color='#888888', alpha=0.1)\n else:\n res2 = copy.deepcopy(results)\n params = res2.params\n CIs = results.conf_int()\n \n #N = 512\n N = 1024\n \n if n_components == 1:\n #N2 = 8\n N2 = 10\n elif n_components == 2:\n #N2 = 6\n N2 = 8\n else: \n #N2 = 8 - n_components \n N2 = 10 - n_components \n \n \n P = np.zeros((len(params), N2))\n \n for i, CI in enumerate(CIs):\n P[i,:] = np.linspace(CI[0], CI[1], N2)\n \n amplitude_CI = [rhythm_params['amplitude']]\n mesor_CI = [rhythm_params['mesor']]\n acrophase_CI = [rhythm_params['acrophase']]\n \n param_samples = list(itertools.product(*P))\n N = min(N, len(param_samples))\n \n for i,p in enumerate(sample(param_samples, N)):\n res2.initialize(results.model, p) \n Y_test_CI = res2.predict(X_fit_test)\n \n rhythm_params_CI = evaluate_rhythm_params(X_test, Y_test_CI)\n amplitude_CI.append(rhythm_params_CI['amplitude'])\n mesor_CI.append(rhythm_params_CI['mesor'])\n acrophase_CI.append(rhythm_params_CI['acrophase'])\n \n \n \"\"\"\n if i == 0:\n Y_min = Y\n Y_max = Y\n else:\n Y_min = np.min(np.vstack([Y,Y_min]), axis=0)\n Y_max = np.max(np.vstack([Y,Y_max]), axis=0)\n \"\"\"\n if color and color != '#000000':\n plt.plot(X_test, Y_test_CI, color=color, alpha=0.05)\n else:\n plt.plot(X_test, Y_test_CI, color='#888888', alpha=0.05)\n \n \n #plt.fill_between(X_test, Y_min, Y_max, color='#888888', alpha=0.1)\n \n #amplitude_CI = (min(amplitude_CI), max(amplitude_CI))\n #mesor_CI = (min(mesor_CI), max(mesor_CI))\n #acrophase_CI = (min(acrophase_CI), max(acrophase_CI))\n \n rhythm_params['amplitude_CI'] = amplitude_CI\n rhythm_params['mesor_CI'] = mesor_CI\n rhythm_params['acrophase_CI'] = acrophase_CI\n \n \n ###\n if not color:\n color = 'black'\n\n if plot_measurements: \n if not hold: \n plt.plot(X,Y, 'ko', markersize=1, label = 'data', color=color)\n else:\n plt.plot(X,Y, 'ko', markersize=1, color=color)\n #plt.plot(X, results.fittedvalues, label = 'fit')\n \n if not hold:\n plt.plot(X_test, Y_test, 'k', label = 'fit', color=color)\n else:\n plt.plot(X_test, Y_test, 'k', label = name, color=color)\n #if color and not plot_margins: \n # plt.plot(X_test, Y_test, 'k', label = 'fit', color=color)\n #else:\n # plt.plot(X_test, Y_test, 'k', label = 'fit')\n \n if plot_measurements:\n X = X % period\n\n if model_type == 'lin': \n #plt.axis([min(min(X),0), 1.1*max(max(X),period), 0.9*min(min(Y), min(Y_test)), 1.1*max(max(Y), max(Y_test))])\n plt.axis([min(min(X),0), max(X), 0.9*min(min(Y), min(Y_test)), 1.1*max(max(Y), max(Y_test))])\n else:\n plt.axis([min(min(X),0), max(X), 0.9*min(min(Y), min(Y_test)), 1.1*max(max(Y), max(Y_test))])\n else:\n plt.axis([min(X_test), period, min(Y_test)*0.9, max(Y_test)*1.1])\n #plt.title(name + ', components=' + str(n_components) +' , period=' + str(period) + '\\np-value=' + str(statistics['p']) + ', p-value(gof)=' + str(statistics['p_reject']))\n #plt.title(name + ', components=' + str(n_components) +' , period=' + str(period) + '\\np-value=' + str(statistics['p']))\n if model_type == 'lin':\n if name: \n plt.title(name + ', p-value=' + \"{0:.5f}\".format(statistics['p']))\n else:\n plt.title('p-value=' + \"{0:.5f}\".format(statistics['p']))\n else:\n if name:\n plt.title(name + ', p-value=' + '{0:.3f}'.format(statistics['p']) + ' (n='+str(statistics['count'])+ ')') \n else:\n plt.title('p-value=' + '{0:.3f}'.format(statistics['p']) + ' (n='+str(statistics['count'])+ ')')\n if x_label:\n plt.xlabel(x_label)\n else:\n plt.xlabel('Time [h]')\n \n if y_label:\n plt.ylabel(y_label)\n elif model_type == 'lin':\n 
plt.ylabel('Measurements')\n else:\n plt.ylabel('Count')\n #fig = plt.gcf()\n #fig.set_size_inches(11,8) \n \n\n \n if not hold:\n if save_to:\n plt.savefig(save_to+'.png')\n plt.savefig(save_to+'.pdf')\n plt.close()\n else:\n plt.show()\n if plot_residuals:\n resid = results.resid\n fig = sm.qqplot(resid)\n plt.title(name)\n if save_to:\n plt.savefig(save_to+'_resid.pdf', bbox_inches='tight')\n plt.savefig(save_to+'_resid.png') \n plt.close()\n else:\n plt.show()\n \n if plot_phase:\n per = rhythm_params['period']\n amp = rhythm_params['amplitude']\n phase = rhythm_params['acrophase']\n if save_to:\n plot_phases([phase], [amp], [name], period=per, folder=\"\\\\\".join(save_to.split(\"\\\\\")[:-1]))\n else:\n plot_phases([phase], [amp], [name], period=per)\n\n if return_model: \n return results, statistics, rhythm_params, X_test, Y_test, model\n else: \n return results, statistics, rhythm_params, X_test, Y_test",
"def fitModel(self, params:lmfit.Parameters=None):\r\n if params is None:\r\n params = self.params\r\n self.initializeRoadRunnerModel()\r\n if self.parametersToFit is not None:\r\n self.optimizer = Optimizer.optimize(self.calcResiduals, params,\r\n self._fitterMethods, logger=self.logger,\r\n numRestart=self._numRestart)\r\n self.minimizerResult = self.optimizer.minimizerResult\r\n # Ensure that residualsTS and fittedTS match the parameters\r\n self.updateFittedAndResiduals(params=self.params)",
"def prepare_fg(\n self, times, wavelength, spectra, stellar, intensities, telluric, area=None\n ):\n\n if area is None:\n orb = Orbit(self.star, self.planet)\n area = orb.stellar_surface_covered_by_planet(times)\n\n model = stellar * telluric\n\n # Normalize the profile of the observations\n profile = np.nanmean(spectra, axis=1)\n model_profile = np.nanmean(model, axis=1)\n norm = profile / model_profile\n\n # Normalize the spectrum\n # model = stellar * telluric * norm[:, None]\n # profile = np.median(spectra, axis=0)\n # model_profile = np.median(model, axis=0)\n\n # nm = np.nanmedian(profile / model_profile)\n # norm *= nm\n\n # model = stellar * telluric * norm[:, None]\n # diff = spectra - model\n\n # model = np.nanmedian(spectra, axis=0)\n\n # f = -(\n # # np.nan_to_num(intensities) *\n # self.area_atmosphere\n # / self.area_planet\n # * area[:, None]\n # # * np.nan_to_num(telluric, nan=1)\n # * norm[:, None]\n # )\n # f = np.nan_to_num(intensities) * np.nan_to_num(telluric, nan=1) * norm[:, None]\n area *= self.area_atmosphere / self.area_planet\n f = -np.nan_to_num(intensities, nan=1) * area[:, None]\n if hasattr(f, \"to_value\"):\n f = f.to_value(1)\n\n # g = spectra - stellar * telluric * norm[:, None]\n # if self.n_sysrem is not None:\n # g = sysrem(g, self.n_sysrem)\n\n g = spectra\n if self.n_sysrem is not None:\n # Use SVD directly instead of Sysrem\n g = sysrem(spectra, self.n_sysrem)\n # u, s, vh = np.linalg.svd(spectra, full_matrices=False)\n # s[: self.n_sysrem] = 0\n # s[80:] = 0\n # ic = (u * s) @ vh\n # g = ic\n else:\n # g = spectra - stellar * telluric * norm[:, None]\n gen = np.random.default_rng()\n tmp = sysrem(spectra, 5)\n g = gen.normal(\n loc=np.nanmean(tmp), scale=np.nanstd(tmp), size=spectra.shape\n )\n # g *= np.nanstd() # std of random is 1 (in theory)\n\n # norm = np.nanstd(g, axis=0)\n # f /= norm\n # g /= norm\n\n # plt.imshow(g, aspect=\"auto\", origin=\"lower\")\n # plt.xlabel(\"Wavelength\")\n # plt.ylabel(\"Time\")\n # plt.title(f\"N_Sysrem: {self.n_sysrem}\")\n # plt.savefig(f\"spectra_sysrem_{self.n_sysrem}.png\")\n\n return wavelength, f, g"
]
| [
"0.6069153",
"0.6029275",
"0.59248644",
"0.5835233",
"0.5779711",
"0.56909484",
"0.5657765",
"0.56360203",
"0.55903643",
"0.5554314",
"0.553601",
"0.55332834",
"0.5531295",
"0.55231124",
"0.5509397",
"0.5460297",
"0.5444665",
"0.5421841",
"0.5399147",
"0.5375256",
"0.5364925",
"0.53632206",
"0.53075844",
"0.5303162",
"0.5288466",
"0.52685595",
"0.5264563",
"0.52598506",
"0.52403647",
"0.52370036"
]
| 0.7843751 | 0 |
Creates a dynamic environment given a json_path to an environment file, using property_name as the initial node to start reading from, and validating against a JSON schema file if one is given | def set_env_from_json_file(json_path, property_name=None, schema_path=None):
if schema_path: # if schema_path is provided
with open(schema_path, 'r') as schema_file:
json_schema = json.load(schema_file)
else:
json_schema = None
with open(json_path, 'r') as data:
if property_name:
try:
set_env_from_json(json.load(data)[property_name], json_schema)
except KeyError:
raise EnvironmentNotDefinedError(
"The property_name '{0}' is not defined in file '{1}'".format(
property_name, json_path
)
)
else:
set_env_from_json(json.load(data), json_schema) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_env_from_json(json_object, json_schema=None):\n if json_schema:\n validate(json_object, json_schema)\n\n environment = json_object\n\n # Prepend the command \"source\" to each one of the commands defined in the\n # command_prefixes property of the json file.\n prefixes = environment.pop('command_prefixes', [])\n sourced_prefixes = map(lambda p: 'source %s' % p, prefixes)\n\n env.update(environment)\n env.command_prefixes += sourced_prefixes",
"def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]",
"def load_envfile(instance):\n validate(ENVFILE_SCHEMA, instance)\n semantic_validate(instance)\n\n # At the moment the object model is mostly 1-to-1 with the configuration\n # format. In the future that might change; the idea is for the object model\n # to be an abstraction rather than exactly the same as config format, so\n # e.g. same object model might support two different versions of the config\n # format.\n\n # We do however make some minor changes.\n instance = freeze(instance)\n\n # 1. Drop unneeded fields:\n instance = instance.remove(\"Envfile-version\")\n instance = instance.transform([\"local\", \"templates\", match_any, \"type\"],\n discard)\n\n # 2. Some objects want to know their own name:\n def add_name(mapping):\n # Convert {a: {x: 1}} to {a: {name: a, x: 1}}:\n for key, value in mapping.items():\n mapping = mapping.set(key, value.set(\"name\", key))\n return mapping\n\n instance = instance.transform([\"local\", \"templates\"], add_name)\n instance = instance.transform([\"application\", \"requires\"], add_name)\n instance = instance.transform([\"application\", \"services\"], add_name)\n instance = instance.transform(\n [\"application\", \"services\", match_any, \"requires\"], add_name)\n\n return System.create(instance)",
"def generate_environment(self):\n try:\n if self._environment is None:\n self._environment = Environment.fromfilepath(self._environmentName,\n self._configuration.environment_file_path)\n except Exception:\n raise",
"def load_environment(path: Optional[str] = None):\n environment = deserialize_environment_from_file(path=path)\n EnvironmentProvider().environment = environment",
"def create_jinja_environment(template_path: str) -> Environment:\n\n environment = Environment(\n loader=FileSystemLoader(template_path), autoescape=select_autoescape()\n )\n environment.globals[\"env\"] = env\n\n return environment",
"def initFromFile(self):\n\n bootFilename = os.path.join(os.environ['CRAB3_BOOTSTRAP_DIR'], BOOTSTRAP_ENVFILE)\n if not os.path.isfile(bootFilename):\n msg = \"The CRAB3_BOOTSTRAP_DIR environment variable is set, but I could not find %s\" % bootFilename\n raise EnvironmentException(msg)\n else:\n with open(bootFilename) as fd:\n self.update(json.load(fd))",
"def read(cls, envvar=\"CONFIG_FILE\", filename=\"config.json\"):\n filename = os.environ.get(envvar, filename)\n directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n filename = directory + \"/\" + filename\n try:\n with open(filename, \"r\") as config_file:\n config = json.loads(config_file.read())\n except FileNotFoundError:\n config = {}\n\n return cls(config)",
"def init_config(cls, path):\n try:\n config_string = open(path).read()\n except EnvironmentError as ex:\n LOGGER.error('Could not load %s file, error: %s', path, ex)\n sys.exit()\n\n try:\n cls.config = json.loads(config_string)\n except ValueError as ex:\n LOGGER.error(' %s file is not valid json, error: %s', path, ex)\n sys.exit()",
"def _read_config(json_path, step):\n with open(json_path) as json_file:\n json_data = json.load(json_file)\n\n config = json_data[step]\n\n return giit.config.validate_config(config=config)",
"def load_json_obj(path: str) -> RAW_CFG:\n with fsspec.open(path) as json_file:\n return json.load(json_file)",
"def _create_environment(config):\n if isinstance(config.env, str):\n env = gym.make(config.env)\n else:\n env = config.env()\n if config.max_length:\n env = tools.wrappers.LimitDuration(env, config.max_length)\n env = tools.wrappers.RangeNormalize(env)\n env = tools.wrappers.ClipAction(env)\n env = tools.wrappers.ConvertTo32Bit(env)\n return env",
"def create_environment(cls, full_config):\n\n config = full_config['template']['devops_settings']\n environment = cls.create(config['env_name'])\n\n # create groups and drivers\n groups = config['groups']\n environment.add_groups(groups)\n\n # create address pools\n address_pools = config['address_pools']\n environment.add_address_pools(address_pools)\n\n # process group items\n for group_data in groups:\n group = environment.get_group(name=group_data['name'])\n\n # add l2_network_devices\n group.add_l2_network_devices(\n group_data.get('l2_network_devices', {}))\n\n # add network_pools\n group.add_network_pools(\n group_data.get('network_pools', {}))\n\n # Connect nodes to already created networks\n for group_data in groups:\n group = environment.get_group(name=group_data['name'])\n\n # add group volumes\n group.add_volumes(\n group_data.get('group_volumes', []))\n\n # add nodes\n group.add_nodes(\n group_data.get('nodes', []))\n\n return environment",
"def _load_json_schema(filename: str):\n relative_path = path.join('schemas', filename)\n absolute_path = path.join(path.dirname(__file__), relative_path)\n\n with open(absolute_path, 'r', encoding='utf-8') as schema_file:\n schema = json.loads(schema_file.read())\n\n return schema",
"def process_config(json_file):\n config, _ = get_config_from_json(json_file)\n print(\" THE Configuration of your experiment ..\")\n pprint(config)\n print(\" *************************************** \")\n try:\n config.summary_dir = os.path.join(\"experiments\", config.exp_name, \"summaries/\")\n config.checkpoint_dir = os.path.join(\"experiments\", config.exp_name, \"checkpoints/\")\n config.out_dir = os.path.join(\"experiments\", config.exp_name, \"out/\")\n create_dirs([config.summary_dir, config.checkpoint_dir, config.out_dir])\n except AttributeError as e:\n print(\"ERROR!!..Please provide the exp_name in json file..\")\n exit(-1)\n return config",
"def _load_json_schema(filename):\n\n relative_path = join('schemas', filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as schema_file:\n return json.loads(schema_file.read())",
"def load_file(self):\n self._check_setup()\n json_str = self.get_json_file()\n if json_str is None:\n return\n\n if not self._is_json_str():\n with open(json_str, 'r') as f:\n jf = json.load(f)\n else:\n jf = json.loads(json_str)\n\n\n self.jf = jf\n\n target = jf['target']\n if isinstance(target, str):\n target = eval(target)\n\n goal = jf['goal']\n if isinstance(goal, str):\n goal = eval(goal)\n\n self.gen_target_pos = np.array(target)\n self.gen_goal_pos = np.array(goal)\n\n if 'place_walls' in jf:\n self.place_walls = jf['place_walls']\n\n if self.get_is_rnd():\n self.rnd_map = jf['rnd']\n self.env_jf = jf['env']",
"def init_json(path='', json_data=''):\n if not path or not json:\n raise ValueError\n if path and json_data:\n raise ValueError\n data = {}\n if path:\n with open(path, 'r') as file:\n temp = file.read()\n data = json.loads(temp)\n elif json_data:\n data = json_data\n\n if data['city']:\n json_city = data['city']\n else:\n raise ValueError\n if data['country']:\n json_country = data['country']\n else:\n raise ValueError\n if data['list_of_streets']:\n json_list_of_streets = data['list_of_streets']\n else:\n raise ValueError\n\n if check(json_city, json_country, json_list_of_streets):\n return get_sample_data(json_city, json_country, json_list_of_streets)",
"def deserialize_file(cls, file_path='./data.json'):\n file_stream = open(file_path, 'rb')\n dct = json.load(file_stream, object_hook=CustomTypeDecoder)\n file_stream.close()\n env = Configuration.Configuration()\n env.update(dct)\n return env",
"def _load_json_schema(filename):\n\n relative_path = join(\"schemas\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n base_path = dirname(absolute_path)\n base_uri = 'file://{}/'.format(base_path)\n\n print(f\"base uri {base_uri}\")\n print(f\"base path {base_path}\")\n print(f\"relative_path {relative_path}\")\n print(f\"absolute_path {absolute_path}\")\n\n with open(absolute_path) as schema_file:\n return jsonref.loads(schema_file.read(), base_uri=base_uri, jsonschema=True)",
"def env_create_setup_parser(subparser):\n subparser.add_argument(\"create_env\", metavar=\"env\", help=\"name of environment to create\")\n subparser.add_argument(\n \"-d\", \"--dir\", action=\"store_true\", help=\"create an environment in a specific directory\"\n )\n subparser.add_argument(\n \"--keep-relative\",\n action=\"store_true\",\n help=\"copy relative develop paths verbatim into the new environment\"\n \" when initializing from envfile\",\n )\n view_opts = subparser.add_mutually_exclusive_group()\n view_opts.add_argument(\n \"--without-view\", action=\"store_true\", help=\"do not maintain a view for this environment\"\n )\n view_opts.add_argument(\n \"--with-view\",\n help=\"specify that this environment should maintain a view at the\"\n \" specified path (by default the view is maintained in the\"\n \" environment directory)\",\n )\n subparser.add_argument(\n \"envfile\",\n nargs=\"?\",\n default=None,\n help=\"optional init file; can be spack.yaml or spack.lock\",\n )",
"def from_json_file(cls, json_file:str):\n with open(json_file) as file:\n data = json.load(file)\n validate(data, schema)\n instance = cls.from_dict(data)\n return instance",
"def _create_config(env_path):\n s2e_yaml = 's2e.yaml'\n version_path = os.path.join(os.path.dirname(__file__), '..', 'dat', 'VERSION')\n\n with open(version_path, 'r', encoding='utf-8') as fp:\n context = {\n 'creation_time': str(datetime.datetime.now()),\n 'version': fp.read().strip(),\n }\n\n render_template(context, s2e_yaml, os.path.join(env_path, s2e_yaml))",
"def load_json(path, prop_name):\n data_file= open(path)\n data = json.load(data_file)\n prop = data['features'][0]['properties'][prop_name]\n return prop",
"def test_from_json():\n with open('test.json', 'w') as f:\n json.dump({'foo': 'bar'}, f)\n \n # build from config file\n conf = core.Config(path='test.json')\n\n assert conf['foo'] == 'bar'\n\n # remove test file\n os.remove('test.json')",
"def load(cls, path):\n d = util.load_json(path, cls.api_version)\n\n obj = cls(\n d['params'],\n Environment('', d['python'], d['requirements']),\n d['commit_hash'],\n d['date'])\n obj.add_times(d['results'])\n obj._filename = os.path.join(*path.split(os.path.sep)[-2:])\n return obj",
"def open_local(cls, filename, stage=None, for_env=False):\n module_dir = os.path.dirname(os.path.abspath(__file__))\n src_root_dir = os.path.abspath(os.path.join(module_dir, '..'))\n config_file_path = os.path.join(src_root_dir, filename)\n\n result = {}\n\n if os.path.isfile(config_file_path):\n print('Loading local configuration from: {0}'.format(config_file_path))\n\n if filename.endswith('.yml'):\n config = yaml.safe_load(cls._read_file(config_file_path))\n else:\n config = json.loads(cls._read_file(config_file_path)).get(stage)\n\n for key, value in config.items():\n parsed_value = value\n\n if str(value).startswith('$ref:'):\n filename = value.replace('$ref:', '')\n parsed_value = cls._read_file(filename)\n\n if for_env:\n if isinstance(parsed_value, list):\n if len(parsed_value) > 0:\n if isinstance(parsed_value[0], dict):\n parsed_value = json.dumps(parsed_value)\n else:\n parsed_value = ','.join(parsed_value)\n else:\n parsed_value = ''\n elif isinstance(parsed_value, dict):\n parsed_value = json.dumps(parsed_value)\n\n result[key] = parsed_value\n else:\n print('Configuration file not found at: {0}'.format(config_file_path))\n\n return result",
"def from_dict(dct, parent):\n result = Environment.__new__(Environment)\n result.namespace = dct.copy()\n result.parent = parent\n return result",
"def load_env(env_package, env_name, **kwargs):\n\n if env_package == 'NULL':\n env = gym.make(env_name)\n elif env_package == 'CUSTOMIZED':\n # The customized env must be gin.configurable\n env = env_name(**kwargs)\n else:\n pkg = getattr(pybullet_envs.bullet, env_package)\n env = getattr(pkg, env_name)(**kwargs)\n if not hasattr(env, '_cam_dist'):\n env._cam_dist = 6\n env._cam_yaw = 0\n env._cam_pitch = -30\n\n # Some pybullet_env do not have close() implemented, add close()\n def close():\n if hasattr(env, '_pybullet_client'):\n env._pybullet_client.resetSimulation()\n del env._pybullet_client\n\n env.close = close\n\n # Some pybullet env do not have seed() implemented, add seed()\n def seed(rand_seed):\n np.random.seed(rand_seed)\n random.seed(rand_seed)\n\n env.seed = seed\n return env",
"def _configure_vm_from_json(test_microvm, vm_config_file):\n test_microvm.create_jailed_resource(test_microvm.kernel_file,\n create_jail=True)\n test_microvm.create_jailed_resource(test_microvm.rootfs_file,\n create_jail=True)\n\n # vm_config_file is the source file that keeps the desired vmm\n # configuration. vm_config_path is the configuration file we\n # create inside the jail, such that it can be accessed by\n # firecracker after it starts.\n vm_config_path = os.path.join(test_microvm.path,\n os.path.basename(vm_config_file))\n with open(vm_config_file) as f1:\n with open(vm_config_path, \"w\") as f2:\n for line in f1:\n f2.write(line)\n test_microvm.create_jailed_resource(vm_config_path, create_jail=True)\n test_microvm.jailer.extra_args = {'config-file': os.path.basename(\n vm_config_file)}"
]
| [
"0.62821966",
"0.6093502",
"0.58737135",
"0.57207036",
"0.56639695",
"0.55425805",
"0.54886854",
"0.54484504",
"0.5266997",
"0.5259031",
"0.5169255",
"0.5167175",
"0.51394403",
"0.51284474",
"0.5121911",
"0.51147294",
"0.5093304",
"0.5088686",
"0.508787",
"0.5087083",
"0.50832677",
"0.50811285",
"0.508087",
"0.5077218",
"0.5065882",
"0.50421816",
"0.5039899",
"0.50321674",
"0.5030244",
"0.5006185"
]
| 0.790252 | 0 |
Creates a dynamic environment based on the contents of the given json_object and validates against a json_schema object if one is given. If 'command_prefixes' is present, each of the given paths is prepended with 'source' and added to the environment's command prefixes.

fabfile.py:
    from fabric.api import task
    from fabutils.env import set_env_from_json | def set_env_from_json(json_object, json_schema=None):
if json_schema:
validate(json_object, json_schema)
environment = json_object
# Prepend the command "source" to each one of the commands defined in the
# command_prefixes property of the json file.
prefixes = environment.pop('command_prefixes', [])
sourced_prefixes = map(lambda p: 'source %s' % p, prefixes)
env.update(environment)
env.command_prefixes += sourced_prefixes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_env_from_json_file(json_path, property_name=None, schema_path=None):\n if schema_path: # if schema_path is provided\n with open(schema_path, 'r') as schema_file:\n json_schema = json.load(schema_file)\n else:\n json_schema = None\n\n with open(json_path, 'r') as data:\n\n if property_name:\n try:\n set_env_from_json(json.load(data)[property_name], json_schema)\n\n except KeyError:\n raise EnvironmentNotDefinedError(\n \"The property_name '{0}' is not defined in file '{1}'\".format(\n property_name, json_path\n )\n )\n else:\n set_env_from_json(json.load(data), json_schema)",
"def from_json(cls, json_str: str) -> UpdateEnvironmentVariableRequest:\n return cls.from_dict(json.loads(json_str))",
"def _sdk_env(self, sdk_dir, target_arch):\n env = {}\n env_prefixes = {}\n\n if target_arch not in ('x86', 'x64', 'arm64'):\n raise ValueError('unknown architecture {!r}'.format(target_arch))\n\n data = self.m.step('read SetEnv json', [\n 'python3',\n self.resource('find_env_json.py'),\n '--sdk_root',\n sdk_dir,\n '--target_arch',\n target_arch,\n '--output_json',\n self.m.json.output(),\n ],\n step_test_data=lambda: self.m.json.test_api.output({\n 'env': {\n 'PATH': [['..', '..', 'win_sdk', 'bin', 'x64']],\n 'VSINSTALLDIR': [['..', '..\\\\']],\n },\n })).json.output.get('env')\n for key in data:\n # SDK cipd packages prior to 10.0.19041.0 contain entries like:\n # \"INCLUDE\": [[\"..\",\"..\",\"win_sdk\",\"Include\",\"10.0.17134.0\",\"um\"], and\n # recipes' Path() does not like .., ., \\, or /, so this is cumbersome.\n # What we want to do is:\n # [sdk_bin_dir.join(*e) for e in env[k]]\n # Instead do that badly, and rely (but verify) on the fact that the paths\n # are all specified relative to the root, but specified relative to\n # win_sdk/bin (i.e. everything starts with \"../../\".)\n #\n # For 10.0.19041.0 and later, the cipd SDK package json is like:\n # \"INCLUDE\": [[\"Windows Kits\",\"10\",\"Include\",\"10.0.19041.0\",\"um\"], so\n # we simply join paths there.\n results = []\n for value in data[key]:\n if value[0] == '..' and (value[1] == '..' or value[1] == '..\\\\'):\n results.append('%s' % sdk_dir.join(*value[2:]))\n else:\n results.append('%s' % sdk_dir.join(*value))\n\n # PATH is special-cased because we don't want to overwrite other things\n # like C:\\Windows\\System32. Others are replacements because prepending\n # doesn't necessarily makes sense, like VSINSTALLDIR.\n if key.lower() == 'path':\n env_prefixes[key] = results\n else:\n env[key] = ';'.join(results)\n\n return {'env': env, 'env_prefixes': env_prefixes}",
"def create_from_json(cls, config_json: str) -> 'ResolverOp':\n return cls.create(**json_utils.loads(config_json))",
"def from_dict(dct, parent):\n result = Environment.__new__(Environment)\n result.namespace = dct.copy()\n result.parent = parent\n return result",
"def _create_environment(config):\n if isinstance(config.env, str):\n env = gym.make(config.env)\n else:\n env = config.env()\n if config.max_length:\n env = tools.wrappers.LimitDuration(env, config.max_length)\n env = tools.wrappers.RangeNormalize(env)\n env = tools.wrappers.ClipAction(env)\n env = tools.wrappers.ConvertTo32Bit(env)\n return env",
"def __init__(self, environment_variables=None, var_prefix='PLATFORM_'):\n\n self._environmentVariables = os.environ if environment_variables is None else environment_variables\n self._varPrefix = var_prefix\n\n if self['ROUTES']:\n routes = self['ROUTES']\n self._routesDef = self.decode(routes)\n if self['RELATIONSHIPS']:\n relationships = self['RELATIONSHIPS']\n self._relationshipsDef = self.decode(relationships)\n self.register_formatter('pymongo', pymongo_formatter)\n self.register_formatter('pysolr', pysolr_formatter)\n self.register_formatter('postgresql_dsn', posgresql_dsn_formatter)\n\n if self['VARIABLES']:\n variables = self['VARIABLES']\n self._variablesDef = self.decode(variables)\n if self['APPLICATION']:\n application = self['APPLICATION']\n self._applicationDef = self.decode(application)",
"def env(parser, args):\n action = subcommand_functions[args.env_command]\n action(args)",
"def _build_env(target, *, orig=os.environ):\n overlay = dict(\n PYTHONPATH=_path_insert(orig.get('PYTHONPATH', ''), os.fspath(target)),\n PATH=_path_insert(orig.get('PATH', ''), os.fspath(target / 'bin')),\n )\n return {**orig, **overlay}",
"def wrapper_environment(args):\n\n return {\n ENVIRONMENT_KEY: json.dumps({\n 'verbose': args.verbose,\n 'cc': shlex.split(args.cc),\n 'cxx': shlex.split(args.cxx)\n })\n }",
"def _build_environment(envdict):\n lines = []\n for k, v in envdict.iteritems():\n if \" \" in v: # NOTE: per the spec, one might want to handle all 'whitespace' chars.\n v = v.replace(\"'\", \"''\")\n v = \"'%s'\" % v\n v = v.replace('\"', '\"\"')\n lines.append('%s=%s' % (k, v))\n return '\"%s\"' % ' '.join(lines)",
"def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):\n env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)\n # Set environment variables needed for inventory and ad hoc event\n # callbacks to work.\n env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)\n env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)\n env['INVENTORY_HOSTVARS'] = str(True)\n env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'\n env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'\n\n return env",
"def update_from_env(self):\n for key, value in os.environ.items():\n if not key.startswith(self._prefix):\n continue\n\n setting = key[len(self._prefix):]\n if setting not in self._default_settings:\n continue\n\n setting_value = getattr(self, setting)\n if isinstance(setting_value, bool):\n value = (value == 'True')\n elif isinstance(setting_value, (int, float)):\n value = type(setting_value)(value)\n elif isinstance(setting_value, (list, dict)):\n value = json.loads(value)\n\n setattr(self, setting, value)\n self._explicit_settings.add(setting)",
"def _handle_env_variables(envdict: Optional[dict[str, Any]] = None) -> None:\n envdict = cast(\n dict[str, Any], envdict if envdict is not None else os.environ)\n if \"S3_ENDPOINT_URL\" not in envdict:\n return\n endpoint_url = envdict[\"S3_ENDPOINT_URL\"]\n\n from fsspec.config import conf # pylint: disable=import-outside-toplevel\n conf[\"s3\"] = conf.get(\"s3\", {})\n conf[\"s3\"][\"client_kwargs\"] = conf[\"s3\"].get(\"client_kwargs\", {})\n client_kwargs = conf[\"s3\"][\"client_kwargs\"]\n if \"endpoint_url\" not in client_kwargs:\n client_kwargs[\"endpoint_url\"] = endpoint_url",
"def env_create_setup_parser(subparser):\n subparser.add_argument(\"create_env\", metavar=\"env\", help=\"name of environment to create\")\n subparser.add_argument(\n \"-d\", \"--dir\", action=\"store_true\", help=\"create an environment in a specific directory\"\n )\n subparser.add_argument(\n \"--keep-relative\",\n action=\"store_true\",\n help=\"copy relative develop paths verbatim into the new environment\"\n \" when initializing from envfile\",\n )\n view_opts = subparser.add_mutually_exclusive_group()\n view_opts.add_argument(\n \"--without-view\", action=\"store_true\", help=\"do not maintain a view for this environment\"\n )\n view_opts.add_argument(\n \"--with-view\",\n help=\"specify that this environment should maintain a view at the\"\n \" specified path (by default the view is maintained in the\"\n \" environment directory)\",\n )\n subparser.add_argument(\n \"envfile\",\n nargs=\"?\",\n default=None,\n help=\"optional init file; can be spack.yaml or spack.lock\",\n )",
"def create_venv(obj, venv_or_script: str,\n install_params: Iterable[str],\n clean: bool, update: bool) -> None:\n if not isinstance(obj, VenvConfig): # pragma: no cover\n raise TypeError(\"ctx.obj must be a VEnvConfig\")\n obj.create(venv_or_script, *install_params, clean=clean, update=update)",
"def load_json_and_arguments(config):\n account_id = config['account_sid']\n auth_token = config['auth_token']\n from_phone_number = config['from_phone_number']\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--p\", \"-phone\", type=str, help=\"A non-default phone-number to send an alert to\")\n parser.add_argument(\"command\", type=str, nargs=1)\n args = parser.parse_args()\n\n arg_command = args.command[0]\n arg_phone = args.p\n\n if arg_phone:\n to_phone_number = arg_phone\n else:\n to_phone_number = config['to_phone_number']\n\n if not is_valid_phone_number(to_phone_number):\n raise ValueError(\"Invalid to_phone_number in configuration file\")\n\n if not is_valid_phone_number(from_phone_number):\n raise ValueError(\"Invalid from_phone_number in configuration file\")\n\n execute_and_wait(account_id, auth_token, to_phone_number, from_phone_number, arg_command)",
"def from_dict(cls, obj: dict) -> UpdateEnvironmentVariableRequest:\n if obj is None:\n return None\n\n if not isinstance(obj, dict):\n return UpdateEnvironmentVariableRequest.parse_obj(obj)\n\n _obj = UpdateEnvironmentVariableRequest.parse_obj({\n \"value\": obj.get(\"value\")\n })\n return _obj",
"def setenv(args: Namespace) -> None:\n env = {}\n if not args.no_langkit_support:\n env = langkit_support_env_map(args)\n\n for cwd in selected_lib_roots(args):\n d = json.loads(subprocess.check_output(\n [sys.executable,\n \"./manage.py\",\n \"setenv\",\n f\"--build-mode={args.build_mode}\",\n \"-J\"],\n cwd=cwd\n ))\n\n for k, v in d.items():\n if k in env:\n env[k] = format_path(k, [env[k], v])\n else:\n env[k] = v\n\n if args.json:\n print(json.dumps(env))\n else:\n for k, v in env.items():\n print(format_setenv(k, v))",
"def initFromFile(self):\n\n bootFilename = os.path.join(os.environ['CRAB3_BOOTSTRAP_DIR'], BOOTSTRAP_ENVFILE)\n if not os.path.isfile(bootFilename):\n msg = \"The CRAB3_BOOTSTRAP_DIR environment variable is set, but I could not find %s\" % bootFilename\n raise EnvironmentException(msg)\n else:\n with open(bootFilename) as fd:\n self.update(json.load(fd))",
"def _get_input_from_env():\n try:\n params = {\n 'repo': {\n 'owner': os.environ['DRONE_REPO_OWNER'],\n 'name': os.environ['DRONE_REPO_NAME'],\n 'full_name': os.environ['DRONE_REPO'],\n 'link_url': os.environ['DRONE_REPO_LINK'],\n 'clone_url': os.environ['DRONE_REMOTE_URL']\n },\n 'build': {\n 'number': os.environ['DRONE_BUILD_NUMBER'],\n 'event': os.environ['DRONE_BUILD_EVENT'],\n 'branch': os.environ['DRONE_BRANCH'],\n 'commit': os.environ['DRONE_COMMIT'],\n 'ref': os.environ['DRONE_COMMIT_REF'],\n 'author': os.environ['DRONE_COMMIT_AUTHOR'],\n 'author_email': os.environ['DRONE_COMMIT_AUTHOR_EMAIL']\n },\n 'workspace': {\n 'root': os.environ['DRONE_WORKSPACE'],\n 'path': os.environ['DRONE_WORKSPACE']\n },\n 'vargs': {\n key[7:].lower(): value\n for key, value in os.environ.items()\n if key.startswith('PLUGIN_')\n }\n }\n except KeyError:\n raise ValueError(\n \"Envronment variables were misconfigured.\")\n return json.dumps(params)",
"def create_environment(cls, full_config):\n\n config = full_config['template']['devops_settings']\n environment = cls.create(config['env_name'])\n\n # create groups and drivers\n groups = config['groups']\n environment.add_groups(groups)\n\n # create address pools\n address_pools = config['address_pools']\n environment.add_address_pools(address_pools)\n\n # process group items\n for group_data in groups:\n group = environment.get_group(name=group_data['name'])\n\n # add l2_network_devices\n group.add_l2_network_devices(\n group_data.get('l2_network_devices', {}))\n\n # add network_pools\n group.add_network_pools(\n group_data.get('network_pools', {}))\n\n # Connect nodes to already created networks\n for group_data in groups:\n group = environment.get_group(name=group_data['name'])\n\n # add group volumes\n group.add_volumes(\n group_data.get('group_volumes', []))\n\n # add nodes\n group.add_nodes(\n group_data.get('nodes', []))\n\n return environment",
"def setup_schema(command, conf, vars):",
"def __init__(self, environment):\n with open('config.json') as f:\n self.config = eval(f.read())\n self.config = self.config[environment]",
"def _prepare_fullvm_restore_json(self, restore_option=None):\r\n\r\n if restore_option is None:\r\n restore_option = {}\r\n restore_option['paths'] = []\r\n\r\n if \"destination_vendor\" not in restore_option:\r\n restore_option[\"destination_vendor\"] = \\\r\n self._backupset_object._instance_object._vendor_id\r\n\r\n if restore_option['copy_precedence']:\r\n restore_option['copy_precedence_applicable'] = True\r\n\r\n # set all the restore defaults\r\n self._set_restore_defaults(restore_option)\r\n\r\n # set the setters\r\n self._backupset_object._instance_object._restore_association = self._subClientEntity\r\n self._json_restore_virtualServerRstOption(restore_option)\r\n self._json_restore_diskLevelVMRestoreOption(restore_option)\r\n self._json_vcenter_instance(restore_option)\r\n\r\n for _each_vm_to_restore in restore_option['vm_to_restore']:\r\n if not restore_option[\"in_place\"]:\r\n if (\"restore_new_name\" in restore_option and\r\n restore_option[\"restore_new_name\"] is not None):\r\n restore_option[\"new_name\"] = restore_option[\"restore_new_name\"] + _each_vm_to_restore\r\n else:\r\n restore_option[\"new_name\"] = \"Delete\" + _each_vm_to_restore\r\n else:\r\n restore_option[\"new_name\"] = _each_vm_to_restore\r\n self.set_advanced_vm_restore_options(_each_vm_to_restore, restore_option)\r\n\r\n # prepare json\r\n request_json = self._restore_json(restore_option=restore_option)\r\n self._virtualserver_option_restore_json[\"diskLevelVMRestoreOption\"][\"advancedRestoreOptions\"] = self._advanced_restore_option_list\r\n self._advanced_restore_option_list = []\r\n request_json[\"taskInfo\"][\"subTasks\"][0][\"options\"][\"restoreOptions\"][\"virtualServerRstOption\"] = self._virtualserver_option_restore_json\r\n request_json[\"taskInfo\"][\"subTasks\"][0][\"options\"][\"restoreOptions\"][\"volumeRstOption\"] = self._json_restore_volumeRstOption(\r\n restore_option)\r\n\r\n return request_json",
"def from_json(cls, json_request):\n kwargs = {}\n request_scope = json_request['scope']\n request_scope_context = request_scope['typeName']\n\n if request_scope_context == 'courseContext':\n kwargs['course_id'] = request_scope['definition']['courseId']\n elif request_scope_context == 'partnerContext':\n kwargs['partner_id'] = \\\n request_scope['definition']['partnerId']['maestroId']\n elif request_scope_context == 'groupContext':\n kwargs['group_id'] = request_scope['definition']['groupId']\n\n if json_request.get('interval'):\n kwargs['interval'] = [\n json_request['interval']['start'],\n json_request['interval']['end']\n ]\n\n return cls(\n export_type=json_request.get('exportType'),\n anonymity_level=json_request.get('anonymityLevel'),\n statement_of_purpose=json_request.get('statementOfPurpose'),\n schema_names=json_request.get('schemaNames'),\n ignore_existing=json_request.get('ignoreExisting'),\n **kwargs)",
"def valid_target_obj(target_obj, require_compile=True):\n\n schema = {\n \"type\": \"object\",\n \"properties\": {\n \"vars\": {\"type\": \"object\"},\n \"secrets\": {\n \"type\": \"object\",\n \"properties\": {\n \"gpg\": {\n \"type\": \"object\",\n \"properties\": {\n \"recipients\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"fingerprint\": {\"type\": \"string\"},\n },\n },\n },\n },\n \"required\": [\"recipients\"],\n },\n \"gkms\": {\n \"type\": \"object\",\n \"properties\": {\"key\": {\"type\": \"string\"}},\n \"required\": [\"key\"],\n },\n \"awskms\": {\n \"type\": \"object\",\n \"properties\": {\"key\": {\"type\": \"string\"}},\n \"required\": [\"key\"],\n },\n \"azkms\": {\n \"type\": \"object\",\n \"properties\": {\"key\": {\"type\": \"string\"}},\n \"required\": [\"key\"],\n },\n \"vaultkv\": {\n \"type\": \"object\",\n \"properties\": {\n \"VAULT_ADDR\": {\"type\": \"string\"},\n \"VAULT_NAMESPACE\": {\"type\": \"string\"},\n \"VAULT_SKIP_VERIFY\": {\"type\": \"string\"},\n \"VAULT_CLIENT_KEY\": {\"type\": \"string\"},\n \"VAULT_CLIENT_CERT\": {\"type\": \"string\"},\n \"auth\": {\"enum\": [\"token\", \"userpass\", \"ldap\", \"github\", \"approle\"]},\n \"engine\": {\"type\": \"string\"},\n \"mount\": {\"type\": \"string\"},\n },\n },\n \"vaulttransit\": {\n \"type\": \"object\",\n \"properties\": {\n \"VAULT_ADDR\": {\"type\": \"string\"},\n \"VAULT_NAMESPACE\": {\"type\": \"string\"},\n \"VAULT_SKIP_VERIFY\": {\"type\": \"string\"},\n \"VAULT_CLIENT_KEY\": {\"type\": \"string\"},\n \"VAULT_CLIENT_CERT\": {\"type\": \"string\"},\n \"auth\": {\"enum\": [\"token\", \"userpass\", \"ldap\", \"github\", \"approle\"]},\n \"engine\": {\"type\": \"string\"},\n \"mount\": {\"type\": \"string\"},\n \"key\": {\"type\": \"string\"},\n },\n },\n },\n \"additionalProperties\": False,\n },\n \"compile\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"input_paths\": {\"type\": \"array\"},\n \"input_type\": {\"type\": \"string\"},\n \"output_path\": {\"type\": \"string\"},\n \"output_type\": {\"type\": \"string\"},\n \"helm_values\": {\"type\": \"object\"},\n \"helm_values_files\": {\"type\": \"array\"},\n \"helm_params\": {\n \"type\": \"object\",\n \"properties\": {\"name\": {\"type\": \"string\"}},\n \"additionalProperties\": True,\n },\n \"input_params\": {\"type\": \"object\"},\n \"env_vars\": {\"type\": \"object\"},\n \"args\": {\"type\": \"array\"},\n \"suffix_remove\": {\"type\": \"boolean\"},\n \"suffix_stripped\": {\"type\": \"string\"},\n },\n \"required\": [\"input_type\", \"input_paths\", \"output_path\"],\n \"minItems\": 1,\n \"oneOf\": [\n {\n \"properties\": {\n \"input_type\": {\"enum\": [\"jsonnet\", \"kadet\", \"copy\", \"remove\"]},\n \"output_type\": {\"enum\": [\"yml\", \"yaml\", \"json\", \"plain\", \"toml\"]},\n },\n },\n {\"properties\": {\"input_type\": {\"enum\": [\"jinja2\", \"helm\", \"external\"]}}},\n ],\n },\n },\n \"validate\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"output_paths\": {\"type\": \"array\"},\n \"type\": {\"type\": \"string\", \"enum\": [\"kubernetes\"]},\n \"kind\": {\"type\": \"string\"},\n \"version\": {\"type\": \"string\"},\n },\n \"required\": [\"output_paths\", \"type\"],\n \"minItems\": 1,\n \"allOf\": [\n {\n \"if\": {\"properties\": {\"type\": {\"const\": \"kubernetes\"}}},\n \"then\": {\n \"properties\": {\n \"type\": {},\n \"kind\": 
{},\n \"output_paths\": {},\n \"version\": {},\n },\n \"additionalProperties\": False,\n \"required\": [\"kind\"],\n },\n },\n ],\n },\n },\n \"dependencies\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"chart_name\": {\"type\": \"string\"},\n \"type\": {\"type\": \"string\", \"enum\": [\"git\", \"http\", \"https\", \"helm\"]},\n \"output_path\": {\"type\": \"string\"},\n \"source\": {\"type\": \"string\"},\n \"subdir\": {\"type\": \"string\"},\n \"ref\": {\"type\": \"string\"},\n \"unpack\": {\"type\": \"boolean\"},\n \"version\": {\"type\": \"string\"},\n \"force_fetch\": {\"type\": \"boolean\"},\n \"submodules\": {\"type\": \"boolean\"},\n },\n \"required\": [\"type\", \"output_path\", \"source\"],\n \"additionalProperties\": False,\n \"allOf\": [\n {\n \"if\": {\"properties\": {\"type\": {\"enum\": [\"http\", \"https\"]}}},\n \"then\": {\n \"properties\": {\n \"type\": {},\n \"source\": {\"format\": \"uri\"},\n \"output_path\": {},\n \"unpack\": {},\n \"force_fetch\": {},\n },\n \"additionalProperties\": False,\n },\n },\n {\n \"if\": {\"properties\": {\"type\": {\"enum\": [\"helm\"]}}},\n \"then\": {\n \"properties\": {\n \"type\": {},\n \"source\": {\"format\": \"uri\"},\n \"output_path\": {},\n \"unpack\": {},\n \"chart_name\": {\"type\": \"string\"},\n \"version\": {\"type\": \"string\"},\n \"force_fetch\": {},\n },\n \"required\": [\"type\", \"output_path\", \"source\", \"chart_name\"],\n \"additionalProperties\": False,\n },\n },\n ],\n },\n },\n \"inventory\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"type\": {\"type\": \"string\", \"enum\": [\"git\", \"http\", \"https\"]},\n \"output_path\": {\"type\": \"string\"},\n \"source\": {\"type\": \"string\"},\n \"subdir\": {\"type\": \"string\"},\n \"ref\": {\"type\": \"string\"},\n \"unpack\": {\"type\": \"boolean\"},\n },\n \"required\": [\"type\", \"output_path\", \"source\"],\n \"additionalProperties\": False,\n \"allOf\": [\n {\n \"if\": {\"properties\": {\"type\": {\"enum\": [\"http\", \"https\"]}}},\n \"then\": {\n \"properties\": {\n \"type\": {},\n \"source\": {\"format\": \"uri\"},\n \"output_path\": {},\n \"unpack\": {},\n },\n \"additionalProperties\": False,\n },\n },\n ],\n },\n },\n },\n }\n if require_compile:\n schema[\"required\"] = [\"compile\"]\n\n try:\n jsonschema.validate(target_obj, schema, format_checker=jsonschema.FormatChecker())\n except jsonschema.exceptions.ValidationError as e:\n raise InventoryError(\n \"Invalid inventory structure\\n\\nError: {}\\nOn instance:\\n{}\".format(\n e.message, json.dumps(e.instance, indent=2, sort_keys=False)\n )\n )",
"def load_envfile(instance):\n validate(ENVFILE_SCHEMA, instance)\n semantic_validate(instance)\n\n # At the moment the object model is mostly 1-to-1 with the configuration\n # format. In the future that might change; the idea is for the object model\n # to be an abstraction rather than exactly the same as config format, so\n # e.g. same object model might support two different versions of the config\n # format.\n\n # We do however make some minor changes.\n instance = freeze(instance)\n\n # 1. Drop unneeded fields:\n instance = instance.remove(\"Envfile-version\")\n instance = instance.transform([\"local\", \"templates\", match_any, \"type\"],\n discard)\n\n # 2. Some objects want to know their own name:\n def add_name(mapping):\n # Convert {a: {x: 1}} to {a: {name: a, x: 1}}:\n for key, value in mapping.items():\n mapping = mapping.set(key, value.set(\"name\", key))\n return mapping\n\n instance = instance.transform([\"local\", \"templates\"], add_name)\n instance = instance.transform([\"application\", \"requires\"], add_name)\n instance = instance.transform([\"application\", \"services\"], add_name)\n instance = instance.transform(\n [\"application\", \"services\", match_any, \"requires\"], add_name)\n\n return System.create(instance)",
"def dump(\n template: str = EMPTY_STRING,\n prefixes: Optional[List[str]] = None,\n strict_keys: Optional[Set[str]] = None,\n source: str = EMPTY_STRING,\n strict_source: bool = False,\n) -> Dict[str, str]:\n if prefixes is None:\n prefixes = [] if source else [EMPTY_STRING]\n\n if strict_keys:\n _assert_envs_exist(strict_keys)\n\n store: Dict[str, str] = {}\n\n if source:\n # Loading env values from source template file:\n store.update(_source(source, strict_source))\n\n if template:\n # Loading env values from template file:\n store.update(_parse(template))\n\n # Loading env variables from `os.environ`:\n for prefix in prefixes:\n store.update(_preload_existing_vars(prefix))\n\n # Sort keys and keep them ordered:\n return OrderedDict(sorted(store.items()))",
"def env_activate_setup_parser(subparser):\n shells = subparser.add_mutually_exclusive_group()\n shells.add_argument(\n \"--sh\",\n action=\"store_const\",\n dest=\"shell\",\n const=\"sh\",\n help=\"print sh commands to activate the environment\",\n )\n shells.add_argument(\n \"--csh\",\n action=\"store_const\",\n dest=\"shell\",\n const=\"csh\",\n help=\"print csh commands to activate the environment\",\n )\n shells.add_argument(\n \"--fish\",\n action=\"store_const\",\n dest=\"shell\",\n const=\"fish\",\n help=\"print fish commands to activate the environment\",\n )\n shells.add_argument(\n \"--bat\",\n action=\"store_const\",\n dest=\"shell\",\n const=\"bat\",\n help=\"print bat commands to activate the environment\",\n )\n\n view_options = subparser.add_mutually_exclusive_group()\n view_options.add_argument(\n \"-v\",\n \"--with-view\",\n action=\"store_const\",\n dest=\"with_view\",\n const=True,\n default=True,\n help=\"update PATH etc. with associated view\",\n )\n view_options.add_argument(\n \"-V\",\n \"--without-view\",\n action=\"store_const\",\n dest=\"with_view\",\n const=False,\n default=True,\n help=\"do not update PATH etc. with associated view\",\n )\n\n subparser.add_argument(\n \"-p\",\n \"--prompt\",\n action=\"store_true\",\n default=False,\n help=\"decorate the command line prompt when activating\",\n )\n\n env_options = subparser.add_mutually_exclusive_group()\n env_options.add_argument(\n \"--temp\",\n action=\"store_true\",\n default=False,\n help=\"create and activate an environment in a temporary directory\",\n )\n env_options.add_argument(\n \"-d\", \"--dir\", default=None, help=\"activate the environment in this directory\"\n )\n env_options.add_argument(\n metavar=\"env\",\n dest=\"activate_env\",\n nargs=\"?\",\n default=None,\n help=\"name of environment to activate\",\n )"
]
| [
"0.5949223",
"0.55071974",
"0.50784063",
"0.5027287",
"0.48285085",
"0.4828165",
"0.48247337",
"0.4818228",
"0.47885552",
"0.47781834",
"0.46954954",
"0.46735176",
"0.46518135",
"0.46491793",
"0.46348986",
"0.4574233",
"0.45632145",
"0.45630446",
"0.45593148",
"0.45418176",
"0.45370033",
"0.4515989",
"0.45056343",
"0.45030397",
"0.44796756",
"0.44679415",
"0.44437772",
"0.44385168",
"0.44299424",
"0.44286883"
]
| 0.82398796 | 0 |
The POST on `/institution` should create an institution | def test_create(self, mock_decorator):
response = self.client.post(
'/api/bce_institutions/0802145Y',
content_type='application/json',
headers={'Authorization': 'Bearer token'},
data=json.dumps({
'is_institution': True
}))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.data.decode('utf8'))
self.assertEqual(
response_json,
{'institution': {'uai': '0802145Y', 'is_institution': True}}
)
self.assertEqual(BceInstitution.query.count(), 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(uai, is_institution=True):\n institution = BceInstitution(\n uai=uai,\n is_institution=is_institution)\n\n return institution.save()",
"def test_perform_create(self):\n\n response = self.client.post(reverse('action-list'), data=self.data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['name'], self.data['name'])\n self.assertTrue(len(response.data['institution']), self.data['institution'])",
"def KLP_Institution_Management_Create(request):\n\tbuttonType = request.POST.get('form-buttonType')\n #before Institution_Mangement.objects.all()\n\tKLP_Institution_Management_Create = KLP_Institution_Management(queryset = Institution_Management.objects.filter(pk=0), permitted_methods = ('GET', 'POST'), responder = TemplateResponder(template_dir = 'viewtemplates', template_object_name = 'InstitutionManagement',extra_context={'buttonType':buttonType}), receiver = XMLReceiver(),)\n\tresponse = KLP_Institution_Management_Create.responder.create_form(request,form_class=Institution_Management_Form)\n\t\n\treturn HttpResponse(response)",
"def institutions_post(session, user_inst): # pylint:disable=unused-argument, too-many-branches\n name = request.headers.get('name')\n webpage = request.headers.get('webpage')\n address = request.headers.get('address')\n username = request.headers.get('username')\n publickey = request.headers.get('publickey')\n description = request.headers.get('description')\n short = request.headers.get('short')\n latitude = request.headers.get('latitude')\n longitude = request.headers.get('longitude')\n\n if not user_inst.group == \"support\":\n return jsonify({'error': 'Forbidden'}), 403\n if None in [name, address, latitude, longitude, publickey]:\n return jsonify({'error': 'Missing parameter'}), 400\n\n try:\n # pylint: disable=unbalanced-tuple-unpacking\n latitude, longitude = check_params_float([latitude, longitude])\n check_name_length(name, 256)\n except ValueError:\n return jsonify({\"error\": \"bad argument\"}), 400\n\n if description:\n try:\n description = b64decode(description).decode(\"latin-1\")\n except (TypeError, binascii.Error):\n return jsonify({\"error\": \"bad base64 encoding\"}), 400\n\n if short:\n try:\n short = b64decode(short).decode(\"latin-1\")\n except (TypeError, binascii.Error):\n return jsonify({\"error\": \"bad base64 encoding\"}), 400\n\n if webpage is not None and not validators.url(webpage):\n return jsonify({'error': 'webpage is not a valid url'}), 400\n\n owner_inst: User = session.query(User).filter(User.usernameUser == username).one_or_none()\n if owner_inst is None:\n return jsonify({'error': 'username not found'}), 400\n\n # check if name is already taken\n if session.query(Institution).filter(Institution.nameInstitution == name).first():\n return jsonify({'error': 'name already exists'}), 400\n\n try:\n vouch_check = voucher_constructor_check(publickey)\n if vouch_check:\n return jsonify({'error': 'milestone error: ' + vouch_check}), 400\n\n sc_address = voucher_constructor(publickey)\n\n session.add(\n Institution(\n nameInstitution=name,\n webpageInstitution=webpage,\n addressInstitution=address,\n publickeyInstitution=publickey,\n descriptionInstitution=description,\n latitude=latitude,\n longitude=longitude,\n scAddress=sc_address,\n user=owner_inst,\n shortDescription=short\n ))\n session.commit()\n return jsonify({'status': 'Institution wurde erstellt'}), 201\n finally:\n session.rollback()\n session.close()",
"def institutions_patch(session, user_inst): # pylint:disable=too-many-branches\n institution_id = request.headers.get('id')\n name = request.headers.get('name')\n webpage = request.headers.get('webpage')\n address = request.headers.get('address')\n description = request.headers.get('description')\n short = request.headers.get('short')\n latitude = request.headers.get('latitude')\n longitude = request.headers.get('longitude')\n\n if institution_id is None:\n return jsonify({'error': 'Missing parameter'}), 400\n\n try:\n check_params_int([institution_id])\n check_params_float([latitude, longitude])\n except ValueError:\n return jsonify({\"error\": \"bad argument\"}), 400\n if None in [latitude, longitude] and any([latitude, longitude]):\n return jsonify({\"error\": \"bad geo argument\"}), 400\n\n if webpage is not None and not validators.url(webpage):\n return jsonify({'error': 'webpage is not a valid url'}), 400\n\n try:\n if name: # check if name is already taken\n if session.query(Institution).filter(Institution.nameInstitution == name).one_or_none():\n return jsonify({'error': 'name already exists'}), 400\n\n institution = session.query(Institution).get(institution_id)\n if institution is None:\n return jsonify({'error': 'Institution does not exist'}), 404\n\n # check user permission\n owner = session.query(Institution)\n owner = owner.filter(Institution.user == user_inst, Institution.idInstitution == institution_id).one_or_none()\n\n if owner is None:\n return jsonify({'error': 'no permission'}), 403\n\n if name:\n if len(name) > 256:\n return jsonify({\"error\": \"bad name argument\"}), 400\n institution.nameInstitution = name\n if address:\n institution.addressInstitution = address\n if webpage:\n institution.webpageInstitution = webpage\n if description:\n try:\n description = b64decode(description).decode(\"latin-1\")\n except (TypeError, binascii.Error):\n return jsonify({\"error\": \"bad base64 encoding\"}), 400\n institution.descriptionInstitution = description\n if short:\n try:\n short = b64decode(short).decode(\"latin-1\")\n except (TypeError, binascii.Error):\n return jsonify({\"error\": \"bad base64 encoding\"}), 400\n institution.shortDescription = short\n if latitude and longitude:\n institution.latitude = latitude\n institution.longitude = longitude\n\n session.commit()\n return jsonify({'status': 'Institution wurde bearbeitet'}), 201\n finally:\n session.rollback()\n session.close()",
"def institution(institution):\n\n try:\n logging.info(f\"Process a request for an institution resource\\nurl: {request.url}\")\n\n params = dict({\"institution_id\": institution})\n logging.info(f\"Parameters: {params}\")\n\n if not valid_institution_params(params):\n logging.error(f\"valid_institution_params returned false for {params}\")\n return Response(\n get_http_error_response_json(\n \"Bad Request\", \"Parameter Error\", \"Invalid parameter passed\"\n ),\n headers={\"Content-Type\": \"application/json\"},\n status=400,\n )\n\n inst_collection_link = get_collection_link(cosmosdb_database_id, cosmosdb_inst_collection_id)\n dataset_collection_link = get_collection_link(cosmosdb_database_id, cosmosdb_dataset_collection_id)\n\n # Initialise dataset helper - used for retrieving latest dataset version\n dsh = DataSetHelper(client, dataset_collection_link)\n version = dsh.get_highest_successful_version_number()\n\n # Intialise an InstitutionFetcher\n institution_fetcher = InstitutionFetcher(client, inst_collection_link)\n institution = institution_fetcher.get_institution(version=version, **params)\n\n if institution:\n logging.info(f\"Found an institution {institution}\")\n \n return Response(\n institution,\n headers={\"Content-Type\": \"application/json\"},\n status=200,\n )\n\n return Response(\n get_http_error_response_json(\n \"Not Found\", \"institution\", \"Institution was not found.\"\n ),\n headers={\"Content-Type\": \"application/json\"},\n status=404,\n )\n\n except Exception as e:\n logging.error(traceback.format_exc())\n\n # Raise so Azure sends back the HTTP 500\n raise e",
"def add_institute(self, institute_obj):\n internal_id = institute_obj[\"internal_id\"]\n display_name = institute_obj[\"display_name\"]\n\n # Check if institute already exists\n if self.institute(institute_id=internal_id):\n raise IntegrityError(\"Institute {0} already exists in database\".format(display_name))\n\n LOG.info(\n \"Adding institute with internal_id: {0} and \"\n \"display_name: {1}\".format(internal_id, display_name)\n )\n\n insert_info = self.institute_collection.insert_one(institute_obj)\n ##TODO check if insert info was ok\n LOG.info(\"Institute saved\")",
"def post(self):\n site = Site(\n site_address=request.json['address'],\n site_city=request.json['city'],\n site_zip_code=request.json['zip_code'],\n site_country=request.json['country']\n )\n db.session.add(site)\n try:\n db.session.commit()\n except OperationalError:\n raise InternalServerError(description='Site table does not exists')\n except IntegrityError:\n raise Conflict(description=site.__repr__() + ' already exists')\n return {'message': 'Resource created'}, 201",
"def post(self):\n try:\n # Create the organization\n if request.headers['Content-Type'] == \"application/json\":\n payload = request.get_json(silent=True)\n elif request.form:\n payload = request.data.to_dict()\n else:\n payload = request.get_json(force=True)\n organization = Organization(**payload)\n organization.save()\n response = organization.serialize()\n return make_response(jsonify(response)), 201\n\n except Exception as e:\n response = {\n \"message\": str(e)\n }\n return make_response(jsonify(response)), 500",
"def test_post(self):\n user = self.make_user()\n school_year = SchoolYearFactory(school__admin=user)\n\n with self.login(user):\n response = self.post(\"reports:bundle\", school_year.pk)\n\n self.response_302(response)\n assert school_year.bundle_set.count() == 1",
"def test_office_creation(self):\n url = '/api/v1/consultorios/'\n data = {\n \"hospital\": \"Angeles Roma\",\n \"office\": \"306\"\n }\n request = self.client.post(url, data)\n\n self.assertEqual(request.status_code, status.HTTP_201_CREATED)",
"def post():\n\n title = request.form[\"title\"]\n description = request.form[\"description\"]\n is_valid = request.form[\"is_valid\"]\n company_id = request.form[\"company_id\"]\n city_id = request.form[\"city_id\"]\n start_date = request.form[\"start_date\"]\n add_date = request.form[\"add_date\"]\n sector = request.form[\"sector\"]\n contract_type_id = request.form[\"contract_type_id\"]\n experience = request.form[\"experience\"]\n formation = request.form[\"formation\"]\n try:\n elements = Advertisements().post( title, description, company_id, city_id, start_date, add_date, sector, contract_type_id, experience, formation)\n result = jsonify(elements)\n result.statut_code = 201\n return result\n except Exception as identifier:\n return abort(500, identifier)",
"def create_candidate(self, data, header):\n return self.client.post(\n path='/api/v2/office/1/register/', data=json.dumps(data), content_type='application/json', headers=header)",
"def post(self):\n data = request.json\n create_ue(data)\n return None, 201",
"def test_creating_new_dietitian(self):\n\n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\"}\n\n dietitian_id = create_new_dietitian_account(form_data)\n\n self.assertEqual(2, dietitian_id)",
"def post(self):\n request_user = User.get_by_id(token_auth.current_user())\n if request_user.role != 1:\n return {\n \"Error\": \"Only admin users can create organisations.\",\n \"SubCode\": \"OnlyAdminAccess\",\n }, 403\n\n try:\n organisation_dto = NewOrganisationDTO(request.get_json())\n if request_user.username not in organisation_dto.managers:\n organisation_dto.managers.append(request_user.username)\n organisation_dto.validate()\n except DataError as e:\n current_app.logger.error(f\"error validating request: {str(e)}\")\n return {\"Error\": str(e), \"SubCode\": \"InvalidData\"}, 400\n\n try:\n org_id = OrganisationService.create_organisation(organisation_dto)\n return {\"organisationId\": org_id}, 201\n except OrganisationServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400",
"def test_new_create_resgate_successful(self):\n payload = {\n 'value': 500,\n 'user': self.user\n }\n\n response = self.client.post(RESGATE_URL, payload)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def post_student():\n\n first_name = request.form.get('first_name')\n last_name = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first_name, last_name, github)\n\n flash(\"Successfully added new student.\")\n\n return redirect(\"/student?github={}\".format(github))",
"def create_new_banks():\n\n\tcity = request.form.get('bankCity', '')\n\tname = request.form.get('bankName', '')\n\taddress = request.form.get('bankAddress', '')\n\tinfo = dict(city=city, name=name, address=address)\n\t# print(info)\n\tbank = Bank(city, name, address)\n\tres = bank.save()\n\t# print('res=%d' % res)\n\treturn send_result(info, res, status=\"True\")",
"def add_nurse(request):\n if request.POST:\n post = request.POST\n username = post.get(\"username\")\n first_name = post.get(\"first_name\")\n last_name = post.get(\"last_name\")\n email = post.get(\"email\")\n password = post.get(\"password\")\n chosen_hospitals = post.getlist(\"chosen_hospitals\")\n\n new_user = User.objects.create_user(\n username=username,\n password=password,\n first_name=first_name,\n last_name=last_name,\n email=email\n )\n\n new_user_profile = UserProfile.objects.create(\n user=new_user,\n status=UserStatus.objects.get(pk=2)\n )\n\n if new_user:\n for chosen_hospital in chosen_hospitals:\n HospitalStaff.objects.create(user_profile=new_user_profile, hospital=Hospital.objects.get(pk=chosen_hospital))\n\n return redirect('add_nurse')\n\n hospitals = Hospital.objects.all()\n return render(request, 'add_nurse.html', {'hospitals': hospitals})",
"def authenticate(self, request):\n\n try:\n payload = jwt.decode(\n jwe.decrypt(request.body, settings.JWE_SECRET),\n settings.JWT_SECRET,\n options={'verify_exp': False},\n algorithm='HS256',\n )\n except (jwt.InvalidTokenError, TypeError):\n raise AuthenticationFailed\n\n data = json.loads(payload['data'])\n provider = data['provider']\n\n institution = Institution.load(provider['id'])\n if not institution:\n raise AuthenticationFailed('Invalid institution id specified \"{}\"'.format(provider['id']))\n\n username = provider['user'].get('username')\n fullname = provider['user'].get('fullname')\n given_name = provider['user'].get('givenName')\n family_name = provider['user'].get('familyName')\n middle_names = provider['user'].get('middleNames')\n suffix = provider['user'].get('suffix')\n\n # use given name and family name to build full name if not provided\n if given_name and family_name and not fullname:\n fullname = given_name + ' ' + family_name\n\n # institution must provide `fullname`, otherwise we fail the authentication and inform sentry\n if not fullname:\n message = 'Institution login failed: fullname required' \\\n ' for user {} from institution {}'.format(username, provider['id'])\n sentry.log_message(message)\n raise AuthenticationFailed(message)\n\n # `get_or_create_user()` guesses names from fullname\n # replace the guessed ones if the names are provided from the authentication\n user, created = get_or_create_user(fullname, username, reset_password=False)\n if created:\n if given_name:\n user.given_name = given_name\n if family_name:\n user.family_name = family_name\n if middle_names:\n user.middle_names = middle_names\n if suffix:\n user.suffix = suffix\n user.update_date_last_login()\n\n # Relying on front-end validation until `accepted_tos` is added to the JWT payload\n user.accepted_terms_of_service = timezone.now()\n\n # save and register user\n user.save()\n user.register(username)\n\n # send confirmation email\n send_mail(\n to_addr=user.username,\n mail=WELCOME_OSF4I,\n mimetype='html',\n user=user,\n domain=DOMAIN,\n osf_support_email=OSF_SUPPORT_EMAIL,\n storage_flag_is_active=waffle.flag_is_active(request, features.STORAGE_I18N),\n )\n\n if not user.is_affiliated_with_institution(institution):\n user.affiliated_institutions.add(institution)\n user.save()\n\n return user, None",
"def post(project_id):\n try:\n integration = request.get_json()\n integration_id = IntegrationService.create(project_id, integration)\n\n return {\"model_id\": project_id, \"integration_id\": integration_id}, 200\n except Exception:\n current_app.logger.error(traceback.format_exc())\n\n return err(500, \"Failed to save integration source to DB\"), 500",
"def test_adding_new_patient(self):\n\n data = {\"dietitian_id\": 1, \"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n result = self.client.post(\"/patient/new-patient\", data=data,\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"registered new patient\", result.data)\n\n data = {\"dietitian_id\": 1, \"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n result = self.client.post(\"/patient/new-patient\", data=data,\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"email address already exists\", result.data)",
"def test_perform_create(self):\n data = {\n 'name': 'Jane Joe',\n 'crm': 1234,\n 'email': '[email protected]',\n 'phone': '+55998754128'\n }\n response = self.unath_client.post(reverse('doctor-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.post(reverse('doctor-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def add_student():\n if request.method == 'POST':\n db.add_student(request.form)\n return redirect('/registry')\n else:\n return render_template('add.html')",
"def test_add_foreign_site_failure(self):\n url = reverse(\"organisations:sites\", kwargs={\"org_pk\": self.organisation.id})\n\n data = {\n \"name\": \"regional site\",\n \"address\": {\n \"address\": \"a street\",\n \"country\": \"GB\",\n },\n }\n\n response = self.client.post(url, data, **self.gov_headers)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(Site.objects.filter(organisation=self.organisation).count(), 1)",
"def post(self):\n reg = self.request.get('registry')\n region_name = self.request.get('region')\n if reg and len(reg) > 0 and reg.isalnum() and validate_region(region_name):\n region = get_region_id(region_name)\n # Create Registry on IOT Core\n iot = IOT()\n success, message = iot.create_registry(region,reg)\n if success:\n # Add registry to Datastore\n ds = Datastore()\n status = ds.add_registry(reg, region_name)\n self.response.headers['Content-Type'] = 'text/plain'\n if status:\n self.response.write('Registry Added')\n else:\n self.response.write('Registry already exists')\n else:\n self.response.write(message)\n else:\n self.response.write('invalid parameters: ' + reg + \" \" + region_name )",
"def test_create_risk_profile_using_post(self):\n pass",
"def create(owner):\n data = request_content(request)\n resource = logic.resource.create(owner, data)\n return redirect(url_for('.get', owner=owner, \n resource=resource.name))",
"def test_create_patient(self):\n url = reverse('patient:patient-list')\n data = {\n \"birth_date\": \"1980-05-21\",\n \"patient_name\": \"testpatient2\",\n \"status\": \"A\",\n \"gender\": \"M\",\n \"patient_contact\" : \"+12342134523\"\n }\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Patient.objects.count(), 2)"
]
| [
"0.6681688",
"0.6539136",
"0.64875925",
"0.6482373",
"0.6411193",
"0.6297357",
"0.59500855",
"0.58625144",
"0.578303",
"0.5740273",
"0.56996286",
"0.56490153",
"0.5545814",
"0.55356705",
"0.549819",
"0.5479565",
"0.54430705",
"0.5442307",
"0.54372895",
"0.54182273",
"0.538496",
"0.5361247",
"0.5361126",
"0.5360082",
"0.53376335",
"0.533234",
"0.5332334",
"0.5327288",
"0.53115505",
"0.52977586"
]
| 0.6618002 | 1 |
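test_create above only pins down the client side of the POST on `/api/bce_institutions/<uai>`. As a rough illustration of the server side it implies, a Flask-style handler could look like the sketch below; the blueprint name, the view function name, and the omission of the auth decorator (mocked away in the test) are assumptions, and BceInstitutionRepository is simply the persistence helper that the later PUT test calls directly.

```python
from flask import Blueprint, jsonify, request

bp = Blueprint('bce_institutions', __name__)  # hypothetical blueprint name


@bp.route('/api/bce_institutions/<uai>', methods=['POST'])
def create_bce_institution(uai):
    # The Bearer-token decorator exercised via mock_decorator is left out here.
    payload = request.get_json()
    institution = BceInstitutionRepository.create(
        uai=uai, is_institution=payload['is_institution'])
    return jsonify({'institution': {
        'uai': institution.uai,
        'is_institution': institution.is_institution,
    }}), 200
```

Note that the test asserts a 200 rather than the more conventional 201 for resource creation, so the sketch follows the test.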
The PUT on `/institution` should update an institution's status if it is updated from False to True | def test_update_true(self, mock_decorator):
BceInstitutionRepository.create(
uai='0802145Z', is_institution=False)
response = self.client.put(
'/api/bce_institutions/0802145Z',
content_type='application/json',
headers={'Authorization': 'Bearer token'},
data=json.dumps({
'is_institution': True,
'id_esr': 4
})
)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.data.decode('utf8'))
self.assertEqual(
response_json,
{'institution':
{'uai': '0802145Z', 'is_institution': True}}
)
institution = BceInstitutionRepository.get(uai='0802145Z')
self.assertEqual(institution.is_institution, True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_false_success(self, mock_decorator, mock_request):\n BceInstitutionRepository.create(\n uai='0802145Z', is_institution=True)\n headers = {'Authorization': 'Bearer token'}\n response = self.client.put(\n '/api/bce_institutions/0802145Z',\n content_type='application/json',\n headers=headers,\n data=json.dumps({\n 'is_institution': False,\n 'id_esr': 4\n })\n )\n self.assertEqual(mock_request.called, True)\n url = ((os.getenv('INSTITUTION_URL')) + 'institutions/4',)\n args, kwargs = mock_request.call_args\n self.assertEqual(args, url)\n self.assertEqual(response.status_code, 200)\n\n institution = BceInstitutionRepository.get(uai='0802145Z')\n self.assertEqual(institution.is_institution, False)",
"def update(self, uai, is_institution):\n institution = self.get(uai)\n if not institution:\n return None\n institution.is_institution = is_institution\n return institution.save()",
"def test_update_false_failed(self, mock_decorator, mock_request):\n BceInstitutionRepository.create(uai='0802145Z', is_institution=True)\n headers = {'Authorization': 'Bearer token'}\n response = self.client.put(\n '/api/bce_institutions/0802145Z',\n content_type='application/json',\n headers=headers,\n data=json.dumps({\n 'is_institution': False,\n 'id_esr': 4\n })\n )\n\n self.assertEqual(mock_request.called, True)\n url = ((os.getenv('INSTITUTION_URL')) + 'institutions/4',)\n args, kwargs = mock_request.call_args\n self.assertEqual(args, url)\n self.assertEqual(response.status_code, 400)\n\n institution = BceInstitutionRepository.get(uai='0802145Z')\n self.assertEqual(institution.is_institution, True)",
"def institutions_patch(session, user_inst): # pylint:disable=too-many-branches\n institution_id = request.headers.get('id')\n name = request.headers.get('name')\n webpage = request.headers.get('webpage')\n address = request.headers.get('address')\n description = request.headers.get('description')\n short = request.headers.get('short')\n latitude = request.headers.get('latitude')\n longitude = request.headers.get('longitude')\n\n if institution_id is None:\n return jsonify({'error': 'Missing parameter'}), 400\n\n try:\n check_params_int([institution_id])\n check_params_float([latitude, longitude])\n except ValueError:\n return jsonify({\"error\": \"bad argument\"}), 400\n if None in [latitude, longitude] and any([latitude, longitude]):\n return jsonify({\"error\": \"bad geo argument\"}), 400\n\n if webpage is not None and not validators.url(webpage):\n return jsonify({'error': 'webpage is not a valid url'}), 400\n\n try:\n if name: # check if name is already taken\n if session.query(Institution).filter(Institution.nameInstitution == name).one_or_none():\n return jsonify({'error': 'name already exists'}), 400\n\n institution = session.query(Institution).get(institution_id)\n if institution is None:\n return jsonify({'error': 'Institution does not exist'}), 404\n\n # check user permission\n owner = session.query(Institution)\n owner = owner.filter(Institution.user == user_inst, Institution.idInstitution == institution_id).one_or_none()\n\n if owner is None:\n return jsonify({'error': 'no permission'}), 403\n\n if name:\n if len(name) > 256:\n return jsonify({\"error\": \"bad name argument\"}), 400\n institution.nameInstitution = name\n if address:\n institution.addressInstitution = address\n if webpage:\n institution.webpageInstitution = webpage\n if description:\n try:\n description = b64decode(description).decode(\"latin-1\")\n except (TypeError, binascii.Error):\n return jsonify({\"error\": \"bad base64 encoding\"}), 400\n institution.descriptionInstitution = description\n if short:\n try:\n short = b64decode(short).decode(\"latin-1\")\n except (TypeError, binascii.Error):\n return jsonify({\"error\": \"bad base64 encoding\"}), 400\n institution.shortDescription = short\n if latitude and longitude:\n institution.latitude = latitude\n institution.longitude = longitude\n\n session.commit()\n return jsonify({'status': 'Institution wurde bearbeitet'}), 201\n finally:\n session.rollback()\n session.close()",
"def updatestatus(self):\n self.status = self.query()\n if self.status['success']:\n return True\n else:\n return False",
"def put(self, request):\r\n new_status = request.body\r\n\r\n if not new_status in [\"success\", \"failure\"]:\r\n return HttpResponseBadRequest()\r\n\r\n else:\r\n # Configure all views to respond with the new status\r\n PaymentFakeView.PAYMENT_STATUS_RESPONSE = new_status\r\n return HttpResponse()",
"def institution(institution):\n\n try:\n logging.info(f\"Process a request for an institution resource\\nurl: {request.url}\")\n\n params = dict({\"institution_id\": institution})\n logging.info(f\"Parameters: {params}\")\n\n if not valid_institution_params(params):\n logging.error(f\"valid_institution_params returned false for {params}\")\n return Response(\n get_http_error_response_json(\n \"Bad Request\", \"Parameter Error\", \"Invalid parameter passed\"\n ),\n headers={\"Content-Type\": \"application/json\"},\n status=400,\n )\n\n inst_collection_link = get_collection_link(cosmosdb_database_id, cosmosdb_inst_collection_id)\n dataset_collection_link = get_collection_link(cosmosdb_database_id, cosmosdb_dataset_collection_id)\n\n # Initialise dataset helper - used for retrieving latest dataset version\n dsh = DataSetHelper(client, dataset_collection_link)\n version = dsh.get_highest_successful_version_number()\n\n # Intialise an InstitutionFetcher\n institution_fetcher = InstitutionFetcher(client, inst_collection_link)\n institution = institution_fetcher.get_institution(version=version, **params)\n\n if institution:\n logging.info(f\"Found an institution {institution}\")\n \n return Response(\n institution,\n headers={\"Content-Type\": \"application/json\"},\n status=200,\n )\n\n return Response(\n get_http_error_response_json(\n \"Not Found\", \"institution\", \"Institution was not found.\"\n ),\n headers={\"Content-Type\": \"application/json\"},\n status=404,\n )\n\n except Exception as e:\n logging.error(traceback.format_exc())\n\n # Raise so Azure sends back the HTTP 500\n raise e",
"def test_status_endpoint(self):\n Org(id='test1').put()\n response = self.app.get('/adapter/test1/status')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json['connected'], False)\n self.assertEqual(response.json['synced'], False)\n self.assertEqual(response.json['updating'], False)\n self.assertEqual(response.json['synced_at'], None)\n\n Org(id='test2', status=2).put()\n response = self.app.get('/adapter/test2/status')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json['connected'], True)\n\n Org(id='test3', status=2).put()\n OrgChangeset(org_uid='test3', publish_job_finished=True, publish_job_failed=False).put()\n response = self.app.get('/adapter/test3/status')\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json['synced'], True)",
"def _update_status(self, status: dict):\n with generate_retry_session() as session:\n session.headers.update({\n 'Authorization': 'Bearer {}'.format(self.platform_auth_token)\n })\n url = '{}/training/definitions/{}/jobs/{}/status'.format(\n ORGANIZATION_ENDPOINT, self.job_definition_name, self.training_job_id)\n res = session.put(url, json=status)\n res.raise_for_status()",
"def test_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )",
"def test_update_review_status(self):\n risk = factories.RiskFactory()\n new_value = all_models.Review.STATES.REVIEWED\n\n self.api.put(risk, risk.id, {\n \"review_status\": new_value,\n \"review_status_display_name\": \"some status\"\n })\n\n risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertEquals(risk.review_status, new_value)",
"def test_partial_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )",
"def update_status(request):\n task_id = request.POST.get('task_id', 0)\n new_status = request.POST.get('new_status', 0)\n\n search_task = task_models.Task.query.filter(task_models.Task.id == task_id).first()\n if not search_task:\n return HttpResponse(simplejson.dumps({'success': False}))\n\n search_task.update(user=request.user, status=new_status, lastModifiedBy=request.user.id,\n lastModified=str(datetime.utcnow()))\n\n return JsonResponse({\n 'status': new_status,\n 'lastModifiedBy': request.user.id,\n 'lastModified': str(datetime.utcnow())\n })",
"def update_status(request_id, status):\n pass",
"def api_can_update(self):\n person1 = User(name=\"test person1\",\n bio=\"test person1\",\n contact_info=\"test person\")\n person2 = User(name=\"test person2\",\n bio=\"test person2\",\n contact_info=\"test person\")\n person1.save()\n person2.save()\n # update_person = self.client.put(\n # reverse('details', kwargs={'pk': person1.id}),\n # person2, format='json'\n # )\n self.assertEqual(self.client.get('/api/guru'), 200)",
"def update_status(request):\n return 0",
"def update_status(self, value, incident_id):\n payload = {\"incident\":{\"state\": value}}\n response = self.session.put(\n \"{0}/incidents/{1}.json\".format(self.uri, incident_id),\n json=payload\n )\n return response.status_code",
"def test_update(self):\n user = self.custodian_1_user\n user_client = self.custodian_1_client\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n new_first_name = \"New First Name\"\n data = {\n \"first_name\": new_first_name,\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_2_client],\n \"allowed\": [self.admin_client, user_client]\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.patch(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n new_first_name += '1'\n data['first_name'] = new_first_name\n self.assertEqual(\n client.patch(url, data, format='json').status_code,\n status.HTTP_200_OK\n )\n user.refresh_from_db()\n self.assertEqual(user.first_name, new_first_name)",
"def _update_status(self):\n self._db_update({'status': self.status})",
"def test_api_object_update_public(self, api_object):\n attrs_dict = {'status': 'CREATING'}\n api_object.update_public_attrs(attrs_dict)\n assert api_object.status == 'CREATING'",
"def test_deprecated_update_bs(self):\n with app.test_client() as client:\n self.login_client(client)\n\n res = client.put(\n '/v1/sim/configs/ae',\n data=json.dumps({}),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(data['message'], 'Method Not Allowed.')\n self.assertEqual(data['status'], 'fail')\n self.assertEqual(res.status_code, 405)",
"def updateResourceDef(url, user, pWd, resourceName, resJson):\n \n print(\"\\tupdating resource for catalog:-\" + url + \" resource=\" + \n resourceName + ' user=' + user)\n print(\"\\t\" + json.dumps(resJson))\n apiURL = url + '/access/1/catalog/resources/' + resourceName\n print(\"\\turl=\" + apiURL)\n header = {\"Accept\": \"application/json\", \"Content-Type\": \"application/json\"} \n tResp = requests.put(apiURL, data=json.dumps(resJson), headers=header, \n auth=HTTPBasicAuth(user, pWd))\n print(\"\\tresponse=\" + str(tResp.status_code))\n if tResp.status_code == 200:\n # valid - return the jsom\n print(\"\\tyay - update resource worked...\")\n print(tResp)\n return tResp.status_code\n else:\n # not valid\n print(\"\\tdarn - update resource failed...\")\n print(tResp)\n return tResp.status_code",
"def change_status(self, inf, status):\n self.interfaces[inf]['status'] = status",
"def taco_test_put_update(self):\n body = '{ \"id\": 400, \"name\": \"item4\", \"content\": \"after test update\" }'\n env = self.get_env('PUT', '/item/4', body=body)\n webapi_start(env, lambda status, response_headers: self.assertEqual(status, '204'))",
"def test_update_status_period(self):\n prev_status = self.test_period.status\n self.test_period.status = 'FINALIZED'\n self.test_period.save()\n employee_payments = EmployeePayment.objects.filter(employer=self.test_user_employer.profile.employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period.id})\n self.client.force_login(self.test_user_employer)\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('id'), self.test_period.id, response_json)\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_user_employer.profile.employer).count(),\n employee_payments)\n self.test_period.status = prev_status\n self.test_period.save()",
"def test_update_person_not_authenticated(self):\n\n data = {'first_name': 'Daenerys'}\n response = self.client.patch(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def _put(self, path, data):\n return self._api.put_status(path, data, headers={\"Hawkular-Tenant\": self.tenant_id,\n \"Content-Type\": \"application/json\"})",
"def updateStatus(self, status):\n pass",
"def mark_successful(self):\r\n self.require_item()\r\n\r\n url = '{0}/mark_successful'.format(self.get_url())\r\n request = http.Request('PUT', url)\r\n\r\n return request, parsers.parse_empty",
"def test_set_deprecated_status(self):\n control = factories.ControlFactory()\n self.assertIsNone(control.end_date)\n\n self.api.put(control, control.id, {\n \"status\": all_models.Control.DEPRECATED,\n })\n\n control = db.session.query(all_models.Control).get(control.id)\n self.assertIsNotNone(control.end_date)"
]
| [
"0.68200177",
"0.6680396",
"0.65138537",
"0.58143884",
"0.57722396",
"0.569369",
"0.55494267",
"0.55452234",
"0.5404221",
"0.5379043",
"0.5347192",
"0.53412354",
"0.5306662",
"0.52892303",
"0.528411",
"0.5245017",
"0.5228866",
"0.52209485",
"0.52191067",
"0.52098113",
"0.52023035",
"0.51984787",
"0.5188232",
"0.51773304",
"0.51629937",
"0.51288116",
"0.509552",
"0.50888354",
"0.5083535",
"0.50822055"
]
| 0.7161717 | 0 |
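Like test_create, test_update_true only fixes the HTTP contract. A matching PUT handler, again just a hedged sketch reusing the assumed `bp` blueprint and imports from the previous example, might be:

```python
@bp.route('/api/bce_institutions/<uai>', methods=['PUT'])
def update_bce_institution(uai):
    payload = request.get_json()
    # BceInstitutionRepository.update is assumed to return None for an unknown uai.
    institution = BceInstitutionRepository.update(
        uai=uai, is_institution=payload['is_institution'])
    if institution is None:
        return jsonify({'error': 'unknown institution'}), 404
    return jsonify({'institution': {
        'uai': institution.uai,
        'is_institution': institution.is_institution,
    }}), 200
```

The `id_esr` field the test sends is ignored in this sketch; the neighbouring negative snippets suggest it only matters when `is_institution` flips to False and an external institutions service has to be notified, a branch the True case above never reaches.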
Overwrite the network's weights with a specified list of tensors or change weights along directions with a step size. | def set_weights(net, weights, directions=None, step=None):
if directions is None:
# You cannot specify a step length without a direction.
for (p, w) in zip(net.parameters(), weights):
p.data.copy_(w.type(type(p.data)))
else:
assert step is not None, 'If a direction is specified then step must be specified as well'
if len(directions) == 2:
dx = directions[0]
dy = directions[1]
changes = [d0*step[0] + d1*step[1] for (d0, d1) in zip(dx, dy)]
else:
changes = [d*step for d in directions[0]]
for (p, w, d) in zip(net.parameters(), weights, changes):
p.data = w + torch.Tensor(d).type(type(w)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_weights(self, weights):\n tuples = []\n for layer in self.layers:\n num_param = len(layer.weights)\n layer_weights = weights[:num_param]\n for sw, w in zip(layer.weights, layer_weights):\n tuples.append((sw, w))\n weights = weights[num_param:]\n K.batch_set_value(tuples)",
"def _set_weights(self, weights):\r\n self.weights = weights.reshape(self.output_size, self.input_size+1)",
"def step(self, layers, d_weights, d_biases, lr):\n for layer, d_W, d_b in zip(layers, d_weights, d_biases):\n layer.update(-lr * d_W, -lr * d_b)",
"def adjust_weights(self, weights_deltas):\n for layer_index in xrange(len(self.w)):\n self.w[layer_index] = np.add(self.w[layer_index], weights_deltas[layer_index])",
"def set_weights(self, weights):\n params = self.weights\n if len(params) != len(weights):\n raise ValueError('You called `set_weights(weights)` on layer \"' +\n self.name + '\" with a weight list of length ' +\n str(len(weights)) + ', but the layer was expecting ' +\n str(len(params)) + ' weights. Provided weights: ' +\n str(weights)[:50] + '...')\n if not params:\n return\n weight_value_tuples = []\n param_values = K.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError('Layer weight shape ' + str(pv.shape) +\n ' not compatible with '\n 'provided weight shape ' + str(w.shape))\n weight_value_tuples.append((p, w))\n K.batch_set_value(weight_value_tuples)",
"def update_weights(net, input_values, desired_output, neuron_outputs, r=1):\n raise NotImplementedError",
"def setWeights(self, w):\n raise NotImplementedError",
"def update_weights(self):\n self._weights = self._weights + self.update_weights_value\n self.weights_clipping()",
"def update_weights(self):\n self._weights = self._weights + self.update_weights_value",
"def set_weights(self, weights):\r\n self.weights = weights",
"def set_weights(self, weights):\n self.model.set_weights(weights)",
"def update_network(self, a, batch_size):\n for layer in self.layers:\n layer.weights_update(a, self.alpha, self.l2_lambda, batch_size)\n a = layer.a",
"def change_weight(self, new_weight_arr):\n self.weights = new_weight_arr",
"def doWeights_batch(self, index, o, odelta, weights, deltas, LR, M):\n\n deltas[index] += o*odelta",
"def update_weights(self):\n\t\tpass",
"def set_weights(self, w):\n self.nn.set_param_values(w)",
"def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)",
"def setWeights(self, weights):\n self._call_java('setWeights', weights)",
"def update_weights(self, gradient_steps):\n for gradient_step in range(int(gradient_steps)):\n states, actions, rewards, dones, new_states = tf.numpy_function(\n self.concat_buffer_samples, [], self.batch_dtypes\n )\n self.update_critic_weights(states, actions, new_states, dones, rewards)\n if gradient_step % self.policy_delay == 0:\n self.update_actor_weights(states)\n self.sync_target_models()",
"def update_weights(self, weight_delta):\n\n self._weights = math_util.vector_sum(self._weights, weight_delta)",
"def weights(self, weights):\n\n self._weights = weights",
"def set_weights(self, entry=None):\n if entry is None:\n entry = []\n for pos in entry:\n self._q_neuron.x(self._weights[int(pos)])",
"def update_speed_weights_step(self):\n \n weights_list = [self.W_speed_east, self.W_speed_west,self.W_speed_north,self.W_speed_south]\n speed_input_list = [self.speed_inputs_east,self.speed_inputs_west,\n self.speed_inputs_north,self.speed_inputs_south]\n \n if self.use_eight_directions is True:\n weights_list+=[self.W_speed_north_east,\n self.W_speed_north_west,self.W_speed_south_east,self.W_speed_south_west]\n \n speed_input_list+=[self.speed_inputs_north_east,self.speed_inputs_north_west, \n self.speed_inputs_south_east,self.speed_inputs_south_west]\n\n \n for weights,speed_input in zip(weights_list,speed_input_list):\n \n \n weight_update=speed_input*(self.rr[:self.N_e]-self.input_mean)*(self.rr_e_trace.T-self.input_mean)\n weights+=self.learn_rate_speed_weights*weight_update\n\n\n # normalize to fixed mean of incoming and outgoing weights\n weights-=(weights.mean(axis=1)-self.W_av_star)[:,np.newaxis]\n weights-=(weights.mean(axis=0)-self.W_av_star)[np.newaxis,:]\n \n # clip weights \n np.clip(weights,0,self.W_max_e,out=weights)",
"def doWeights(self, index, o, odelta, weights, oldDeltas, LR, M):\n\n delta = LR*o*odelta + M*oldDeltas[index]\n weights[index] += delta\n oldDeltas[index] = delta",
"def updateWeights(self, LR, M):\n assert self._batch\n for i,(dws,odws) in enumerate(zip(self._wDeltas, self._oldWDeltas)):\n ws = self._layer.weightsAt(i)\n for j, (dw, odw) in enumerate(zip(dws, odws)):\n dw = LR*dw + M*odw\n ws[j] += dw\n odws[j] = dw\n dws[j] = 0.0",
"def set_weights(self, weights):\n\n self._cnn_model.set_weights(weights)",
"def _set_weights(self, weights):\n i = iter(weights)\n\n for param in self.params:\n param.set_value(i.next())",
"def _set_weights(self, weights):\n i = iter(weights)\n\n for param in self.params:\n param.set_value(i.next())",
"def set_weights(self, weights):\n self._weights = weights\n self.normalize_weights() ########################added\n #self.get_weights()",
"def transfer_weights(self):\n W, target_W = self.model.get_weights(), self.target_model.get_weights()\n for i in range(len(W)):\n target_W[i] = self.tau * W[i] + (1 - self.tau)* target_W[i]\n self.target_model.set_weights(target_W)"
]
| [
"0.70630175",
"0.67701286",
"0.6765941",
"0.6713401",
"0.6688865",
"0.6647569",
"0.66165936",
"0.65347296",
"0.6495384",
"0.6468586",
"0.6456321",
"0.63830173",
"0.63485736",
"0.63249886",
"0.6295442",
"0.62903076",
"0.62822104",
"0.62651116",
"0.62480175",
"0.62394434",
"0.62287074",
"0.62047046",
"0.61601335",
"0.61416143",
"0.6138344",
"0.61058444",
"0.60855687",
"0.60855687",
"0.6084909",
"0.6061942"
]
| 0.71305776 | 0 |
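The record above ends with the perturbation branch of a `set_weights`-style helper: it either copies stored weights back into the model or offsets them along one or two direction lists. Below is a minimal, hedged usage sketch of that pattern; the tiny `nn.Linear` model, the random direction, and the step value are illustrative assumptions, not part of the original record.

import torch
import torch.nn as nn

net = nn.Linear(4, 2)                                   # stand-in model (assumption)
weights = [p.data.clone() for p in net.parameters()]    # snapshot of the current weights
directions = [[torch.randn_like(w) for w in weights]]   # one direction with matching shapes

step = 0.5                                              # scalar step along the single direction
changes = [d * step for d in directions[0]]
for p, w, d in zip(net.parameters(), weights, changes):
    p.data = w + d.to(w.dtype)                          # mirrors p.data = w + torch.Tensor(d).type(type(w))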
Overwrite the network's state_dict or change it along directions with a step size. | def set_states(net, states, directions=None, step=None):
    if directions is None:
        net.load_state_dict(states)
    else:
        assert step is not None, 'If direction is provided then the step must be specified as well'
        if len(directions) == 2:
            dx = directions[0]
            dy = directions[1]
            changes = [d0*step[0] + d1*step[1] for (d0, d1) in zip(dx, dy)]
        else:
            changes = [d*step for d in directions[0]]
        new_states = copy.deepcopy(states)
        assert (len(new_states) == len(changes))
        for (k, v), d in zip(new_states.items(), changes):
            d = torch.tensor(d)
            v.add_(d.type(v.type()))
        net.load_state_dict(new_states) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def updateSimState(self):\n self.sim_state = {k: v for k,v in self.state.iteritems()}",
"def setStepSize(self, step_size):\n assert isinstance(step_size, int)\n self.step_size = step_size\n self.step_directions = [np.array([i[0], i[1]]) for i in [(0,0),\n (0,step_size),\n (0,-step_size),\n (step_size, 0),\n (-step_size,0)]]",
"def set_state(self, new_state):\n if self.state_size != len(new_state):\n raise()\n \n self.index = 0\n self.mt = [0] * self.state_size\n \n for i in range(self.state_size):\n self.mt[i] = new_state[i]",
"def upgrade_state_dict_named(self, state_dict, name):\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = \"{}.embed_positions.weights\".format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict[\n \"{}.embed_positions._float_tensor\".format(name)\n ] = torch.FloatTensor(1)\n\n if f\"{name}.output_projection.weight\" not in state_dict:\n if self.share_input_output_embed:\n embed_out_key = f\"{name}.embed_tokens.weight\"\n else:\n embed_out_key = f\"{name}.embed_out\"\n if embed_out_key in state_dict:\n state_dict[f\"{name}.output_projection.weight\"] = state_dict[\n embed_out_key\n ]\n if not self.share_input_output_embed:\n del state_dict[embed_out_key]\n\n for i in range(self.num_layers):\n # update layer norms\n layer_norm_map = {\n \"0\": \"self_attn_layer_norm\",\n \"1\": \"encoder_attn_layer_norm\",\n \"2\": \"final_layer_norm\",\n }\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layers.{}.layer_norms.{}.{}\".format(name, i, old, m)\n if k in state_dict:\n state_dict[\n \"{}.layers.{}.{}.{}\".format(name, i, new, m)\n ] = state_dict[k]\n del state_dict[k]\n\n version_key = \"{}.version\".format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict",
"def upgrade_state_dict_named(self, state_dict, name):\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = \"{}.embed_positions.weights\".format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict[\n \"{}.embed_positions._float_tensor\".format(name)\n ] = torch.FloatTensor(1)\n\n if f\"{name}.output_projection.weight\" not in state_dict:\n if self.share_input_output_embed:\n embed_out_key = f\"{name}.embed_tokens.weight\"\n else:\n embed_out_key = f\"{name}.embed_out\"\n if embed_out_key in state_dict:\n state_dict[f\"{name}.output_projection.weight\"] = state_dict[\n embed_out_key\n ]\n if not self.share_input_output_embed:\n del state_dict[embed_out_key]\n\n for i in range(self.num_layers):\n # update layer norms\n layer_norm_map = {\n \"0\": \"self_attn_layer_norm\",\n \"1\": \"encoder_attn_layer_norm\",\n \"2\": \"final_layer_norm\",\n }\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layers.{}.layer_norms.{}.{}\".format(name, i, old, m)\n if k in state_dict:\n state_dict[\n \"{}.layers.{}.{}.{}\".format(name, i, new, m)\n ] = state_dict[k]\n del state_dict[k]\n\n version_key = \"{}.version\".format(name)\n if item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict",
"def upgrade_state_dict_named(self, state_dict, name):\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = \"{}.embed_positions.weights\".format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict[\n \"{}.embed_positions._float_tensor\".format(name)\n ] = torch.FloatTensor(1)\n\n for i in range(self.num_layers):\n # update layer norms\n layer_norm_map = {\n \"0\": \"self_attn_layer_norm\",\n \"1\": \"encoder_attn_layer_norm\",\n \"2\": \"final_layer_norm\",\n }\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layers.{}.layer_norms.{}.{}\".format(name, i, old, m)\n if k in state_dict:\n state_dict[\n \"{}.layers.{}.{}.{}\".format(name, i, new, m)\n ] = state_dict[k]\n del state_dict[k]\n\n version_key = \"{}.version\".format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict",
"def forgiving_state_restore(net, loaded_dict):\n net_state_dict = net.state_dict()\n new_loaded_dict = {}\n for k in net_state_dict:\n if k in loaded_dict and net_state_dict[k].size() == loaded_dict[k].size():\n new_loaded_dict[k] = loaded_dict[k]\n else:\n print(\"Skipped loading parameter\", k)\n # logging.info(\"Skipped loading parameter %s\", k)\n net_state_dict.update(new_loaded_dict)\n net.load_state_dict(net_state_dict)\n return net",
"def set_states(self, state_dict):\n self.trainer.get_model().load_state_dict(state_dict)",
"def upgrade_state_dict(self, state_dict):\n self.upgrade_state_dict_named(state_dict, \"\")",
"def __setstate__(self, state):\n self.__dict__.update(state)",
"def upgrade_state_dict_named(self, state_dict, name):\n layer_norm_map = {\"0\": \"self_attn_layer_norm\", \"1\": \"final_layer_norm\"}\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layer_norms.{}.{}\".format(name, old, m)\n if k in state_dict:\n state_dict[\"{}.{}.{}\".format(name, new, m)] = state_dict[k]\n del state_dict[k]",
"def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)",
"def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)",
"def set_sim_state_fn(state_dict: dict):\n # state_dicts = [deepcopy(state_dict) for j in range(num_cpu)]\n sim_env.set_env_state(state_dict)",
"def reintegrate_state(self, state_chunks):\n num_chunks = len(state_chunks.keys())\n\n # Get the bounds\n bounds = self.chunk_bounds(num_chunks)\n\n # Now reset the master state vector\n for cnum, bnds in bounds.items():\n self.state[dict(location=slice(bnds[0],bnds[1]))] = state_chunks[cnum].state",
"def update_step_size(self):\n self.setSingleStep(10 ** self.step_exponent)\n self.update_format_string()",
"def load_state_dict(self, state_dict):\n self.XY_net.load_state_dict(state_dict['XY_net'])\n self.XY_optimizer_minee.load_state_dict(\n state_dict['XY_optimizer_minee'])\n self.X_net.load_state_dict(state_dict['X_net'])\n self.X_optimizer_minee.load_state_dict(state_dict['X_optimizer_minee'])\n self.Y_net.load_state_dict(state_dict['Y_net'])\n self.Y_optimizer_minee.load_state_dict(state_dict['Y_optimizer_minee'])\n self.X = state_dict['X']\n self.Y = state_dict['Y']\n if 'lr' in state_dict:\n self.lr = state_dict['lr']\n if 'batch_size' in state_dict:\n self.batch_size = state_dict['batch_size']\n if 'ref_batch_factor' in state_dict:\n self.ref_batch_factor = state_dict['ref_batch_factor']",
"def update_to_state(self, game_state):\n pass",
"def init_states(self, batch_size: int) -> NestedMap:\n raise NotImplementedError('Abstract method')",
"def upgrade_state_dict_named(self, state_dict, name):\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = \"{}.embed_positions.weights\".format(name)\n if weights_key in state_dict:\n print(\"deleting {0}\".format(weights_key))\n del state_dict[weights_key]\n state_dict[\n \"{}.embed_positions._float_tensor\".format(name)\n ] = torch.FloatTensor(1)\n for i in range(self.num_layers):\n # update layer norms\n self.layers[i].upgrade_state_dict_named(\n state_dict, \"{}.layers.{}\".format(name, i)\n )\n\n version_key = \"{}.version\".format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n return state_dict",
"def __setstate__(self, state: Dict[str, Any]) -> None:\n self.__dict__ = state.copy()\n # Once state is ingested - repopulate, NOT recursing.\n # Child segments will do it for themselves on unpickling.\n self.set_as_parent(recurse=False)",
"def set_state(self, state_dict):\n super().set_state(copy.deepcopy(state_dict))\n self._client_state = copy.deepcopy(state_dict.get(\"_client_state\"))\n self._trials_map = copy.deepcopy(state_dict.get(\"_trials_map\"))",
"def update_state(self, dstate):\n pass",
"def update_params(self, loss, step_size=0.5, first_order=False):\n grads = torch.autograd.grad(loss, self.parameters(),\n create_graph=not first_order)\n updated_params = OrderedDict()\n for (name, param), grad in zip(self.named_parameters(), grads):\n updated_params[name] = param - step_size * grad\n\n return updated_params",
"def set_layer_size(self, layer_size, layer_ind):\n assert(not self._is_build)\n assert(layer_ind < self.num_layers-1)\n self._layer_sizes[layer_ind] = layer_size",
"def update_target_network(self) -> NoReturn:\n self.target.load_state_dict(self.model.state_dict())",
"def set_step_size(self, step_size):\r\n # Max step size for each unit is based on one byte of SIGNED data -> range = 2**8 / 2 = 128\r\n # if step_size < 12.8:\r\n # if self._current_units != 'A':\r\n # self.set_units('A')\r\n # time.sleep(1)\r\n # step_size_angstrom = int(step_size * 10)\r\n # cmd = struct.pack('>B', 55) + struct.pack('>b', step_size_angstrom) # note: lower case b here since we want a signed step_size byte (sign indicates direction)\r\n if step_size < 128:\r\n if self._current_units != 'nm':\r\n self.set_units('nm')\r\n cmd = struct.pack('>B', 55) + struct.pack('>b', int(step_size))\r\n else:\r\n raise ValueError('Step size should be less than 128 nm. Overriding this is possible by sending size in micron units - needs this code to be updated.')\r\n self.send(cmd)",
"def set_state(self, state):\n\n self.model = self.model_creator(self.config)\n self.epoch = state[\"epoch\"]\n self.model.set_weights(state[\"weights\"])",
"def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)",
"def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)"
]
| [
"0.62555414",
"0.5988356",
"0.59602517",
"0.58462554",
"0.5836205",
"0.57758117",
"0.5772188",
"0.5750633",
"0.5728731",
"0.57174855",
"0.5712029",
"0.5693783",
"0.5693783",
"0.56853724",
"0.5677654",
"0.56629467",
"0.56623954",
"0.56580573",
"0.5647897",
"0.5638729",
"0.5623061",
"0.562053",
"0.56034017",
"0.55855536",
"0.5556401",
"0.5549984",
"0.55370224",
"0.5505586",
"0.54926175",
"0.54926175"
]
| 0.6162025 | 1 |
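A minimal sketch of how the `set_states` helper above could be exercised, assuming a small PyTorch module and a random direction built from its state_dict; all names below are illustrative.

import copy
import torch
import torch.nn as nn

net = nn.Linear(4, 2)                                         # stand-in model (assumption)
states = copy.deepcopy(net.state_dict())                      # reference state to perturb around
direction = [torch.randn_like(v) for v in states.values()]    # one tensor per state_dict entry

step = 0.1
new_states = copy.deepcopy(states)
for (k, v), d in zip(new_states.items(), direction):
    v.add_((d * step).to(v.dtype))                            # in-place offset, as in v.add_(d.type(v.type()))
net.load_state_dict(new_states)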
Produce a direction from 'weights' to 'weights2'. | def get_diff_weights(weights, weights2):
    return [w2 - w for (w, w2) in zip(weights, weights2)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_target_direction(net, net2, dir_type='states'):\n\n assert (net2 is not None)\n # direction between net2 and net\n if dir_type == 'weights':\n w = get_weights(net)\n w2 = get_weights(net2)\n direction = get_diff_weights(w, w2)\n elif dir_type == 'states':\n s = net.state_dict()\n s2 = net2.state_dict()\n direction = get_diff_states(s, s2)\n\n return direction",
"def connect_both(node1, node2, weight):\n connect_one_way(node1, node2, weight)\n connect_one_way(node2, node1, weight)",
"def normalize_directions_for_weights(direction, weights, norm='filter', ignore='biasbn'):\n assert(len(direction) == len(weights))\n for d, w in zip(direction, weights):\n if d.dim() <= 1:\n if ignore == 'biasbn':\n d.fill_(0) # ignore directions for weights with 1 dimension\n else:\n d.copy_(w) # keep directions for weights/bias that are only 1 per node\n else:\n normalize_direction(d, w, norm)",
"def l2(weights):\n\treturn np.sqrt(np.sum(weights * weights))",
"def _right_weights(self, x1, x2, d, p):\n w1 = self._phi_int(x1, d, p)\n w2 = self._phi_int(x2, d, p)\n z1 = self._xphi_int(x1, d, p)\n z2 = self._xphi_int(x2, d, p)\n return -x1 / (x2 - x1) * (w2 - w1) + 1 / (x2 - x1) * (z2 - z1)",
"def backward(self, weights):\n\n return self.lambd * weights",
"def merge2(x, y, weight=0.5):\n z = normalize(weight * x + (1-weight) * y)\n return z",
"def get_weight(self, val1, val2):\n\n\t\tnode1 = self.get_node(val1)\n\t\tnode2 = self.get_node(val2)\n\n\t\treturn node1.get_weight(node2)",
"def add_edge(self, n1, n2, weight=0):\n self.add_node(n1)\n self.add_node(n2)\n self.dict[n1][n2] = weight",
"def connect_one_way(node1, node2, weight):\n node1.add_or_update_neighbour(node2, weight)",
"def init_weight(n1, n2):\n\n w = tf.Variable(tf.truncated_normal([n1, n2], stddev=1.0 / np.sqrt(n2)))\n\n return w",
"def weight_rotate(weight):\n weight = weight.permute(1, 2, 3, 0)\n return weight",
"def weight_to_line(w):\n b = -(w[0] / w[2])\n m = -(w[1] / w[2])\n \n return b, m",
"def _mutate_weights(self, weights):\n return weights + normal(loc=0, scale=self.standard_deviation, size=weights.shape[0])",
"def init_weights2(net):\n\tfor m in net.modules():\n\t\tif isinstance(m, nn.Conv2d):\n\t\t\tnn.init.xavier_uniform_(m.weight)\n\t\t\tif m.bias is not None:\n\t\t\t\tnn.init.constant_(m.bias, 0)\n\t\t\t\n\t\telif isinstance(m, nn.BatchNorm2d):\n\t\t\tnn.init.constant_(m.weight, 1)\n\t\t\tnn.init.constant_(m.bias, 0)\n\t\t\n\t\telif isinstance(m, nn.Linear):\n\t\t\tnn.init.xavier_uniform_(m.weight)\n\n\t\t\tif m.bias is not None:\n\t\t\t\tnn.init.constant_(m.bias, 0)\n\n\treturn net",
"def initialize_weights(self):\n w1 = np.random.uniform(-1.0, 1.0, size = self.n_hidden * (self.n_features + 1)).reshape(self.n_hidden, (self.n_features + 1))/(self.n_features + 1)\n w2 = np.random.uniform(-1.0, 1.0, size=self.n_output*(self.n_hidden+1)).reshape(self.n_output, self.n_hidden+ 1)/(self.n_hidden + 1)\n return w1, w2",
"def set_edge_weight(self, vertex1, vertex2, weight):\n if not self.is_weighted():\n print(\"WARNING: Graph is NOT weighted!\")\n return None\n self._graph[vertex1][vertex2] = weight\n if self.is_directed():\n self._graph[vertex2][vertex1] = weight\n return True",
"def get_weight(self, start_direction, current_weight, **kwargs):\n return self.weights.get(start_direction, self.default_weight)",
"def pair_weights(self, labels, ranks):\n del ranks # Unused.\n return tf.abs(_apply_pairwise_op(tf.subtract, labels))",
"def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1",
"def get_edge_weight(self, vertex1, vertex2):\n if not self.is_weighted():\n print(\"WARNING: Graph is NOT weighted!\")\n return None\n if self.adjacent(vertex1, vertex2):\n return self._graph[vertex1][vertex2]",
"def coupling_W2(coupling_1, coupling_2, source, target, epsilon):\n cost_matrix = coupling_to_coupling_cost_matrix(source, target)\n return ot.sinkhorn2(coupling_1.flatten(), coupling_2.flatten(), cost_matrix, epsilon)",
"def _extract_weights(self,W):\n wl1_size = self._D*self._hidden_layer_size\n bl1_size = self._hidden_layer_size\n \n wl2_size = self._hidden_layer_size*self._output_size\n bl2_size = self._output_size\n\n \n weights_L1 = W[0:wl1_size].reshape((self._D,self._hidden_layer_size))\n bias_L1 = W[wl1_size:wl1_size+bl1_size]\n \n start_l2 = wl1_size+bl1_size\n\n weights_L2 = W[start_l2: start_l2 + wl2_size].reshape((self._hidden_layer_size,self._output_size))\n bias_L2 = W[start_l2 + wl2_size : start_l2 + wl2_size + bl2_size]\n \n \n \n return weights_L1,bias_L1,weights_L2,bias_L2",
"def backward_step(nx, nh, ndlogp, w2):\n dw2 = np.einsum('nh,nk->kh', nh, ndlogp)\n dh = np.einsum('nk,kh->nh', ndlogp, w2)\n dh[nh <= 0] = 0\n dw1 = np.einsum('nh,ni->hi', dh, nx)\n return dw1, dw2",
"def add_edge(self, val1, val2, weight, directional=False):\n\t\tnode1 = self.get_node(val1)\n\t\tnode2 = self.get_node(val2)\n\n\t\tnode1.add_edge(node2, weight)\n\t\tif not directional:\n\t\t\tnode2.add_edge(node1, weight)",
"def create_complete_graph(pair_weights, flip_weights=True):\n g = nx.Graph()\n for k, v in pair_weights.items():\n wt_i = -v if flip_weights else v\n g.add_edge(k[0], k[1], attr_dict={\"distance\": v, \"weight\": wt_i})\n return g",
"def update_speed_weights_step(self):\n \n weights_list = [self.W_speed_east, self.W_speed_west,self.W_speed_north,self.W_speed_south]\n speed_input_list = [self.speed_inputs_east,self.speed_inputs_west,\n self.speed_inputs_north,self.speed_inputs_south]\n \n if self.use_eight_directions is True:\n weights_list+=[self.W_speed_north_east,\n self.W_speed_north_west,self.W_speed_south_east,self.W_speed_south_west]\n \n speed_input_list+=[self.speed_inputs_north_east,self.speed_inputs_north_west, \n self.speed_inputs_south_east,self.speed_inputs_south_west]\n\n \n for weights,speed_input in zip(weights_list,speed_input_list):\n \n \n weight_update=speed_input*(self.rr[:self.N_e]-self.input_mean)*(self.rr_e_trace.T-self.input_mean)\n weights+=self.learn_rate_speed_weights*weight_update\n\n\n # normalize to fixed mean of incoming and outgoing weights\n weights-=(weights.mean(axis=1)-self.W_av_star)[:,np.newaxis]\n weights-=(weights.mean(axis=0)-self.W_av_star)[np.newaxis,:]\n \n # clip weights \n np.clip(weights,0,self.W_max_e,out=weights)",
"def add_edge(self, n1, n2, weight):\n self.edges[n1.identifier][n2.identifier] = weight\n self.edges[n2.identifier][n1.identifier] = weight",
"def add_edge(self, n1, n2, weight):\n self.edges[n1.identifier][n2.identifier] = weight",
"def add_weighted_path(self, path, weights, scale=1):\n mn, mx = float(min(weights)), max(weights)\n n = len(weights)\n weights_normed = np.array(weights)\n #weights_normed = (weights - mn) / (mx - mn)\n\n last_w = weights_normed[0]\n lat1 = (path[0][0] + path[1][0]) / 2.\n lon1 = (path[0][1] + path[1][1]) / 2.\n\n newpath = [path[0], [lat1, lon1]]\n\n for i in xrange(1, n - 1):\n lat1 = (path[i - 1][0] + path[i][0]) / 2.\n if np.sign(path[i - 1][1]) == np.sign(path[i][1]):\n lon1 = (path[i - 1][1] + path[i][1]) / 2.\n else:\n lat1, lon1 = path[i - 1]\n\n lat2 = (path[i][0] + path[i + 1][0]) / 2.\n\n if np.sign(path[i][1]) == np.sign(path[i + 1][1]):\n lon2 = (path[i][1] + path[i + 1][1]) / 2.\n else:\n lat2, lon2 = path[i + 1]\n\n if weights_normed[i] == last_w:\n newpath.append([lat1, lon1])\n newpath.append(path[i])\n newpath.append([lat2, lon2])\n else:\n col = val2hex(last_w, scale)\n self.add_path(newpath, \"#\" + col)\n newpath = []\n newpath.append([lat1, lon1])\n newpath.append(path[i])\n newpath.append([lat2, lon2])\n last_w = weights_normed[i]\n\n if weights_normed[- 1] != last_w:\n col = val2hex(last_w, scale)\n self.add_path(newpath, \"#\" + col)\n newpath = []\n\n lat1 = (path[- 2][0] + path[- 1][0]) / 2.\n lon1 = (path[- 2][1] + path[- 1][1]) / 2.\n newpath.append([lat1, lon1])\n newpath.append(path[-1])\n col = val2hex(weights_normed[-1], scale)\n self.add_path(newpath, \"#\" + col)"
]
| [
"0.607518",
"0.6047125",
"0.60434145",
"0.5932767",
"0.5829164",
"0.5781955",
"0.57382554",
"0.5580101",
"0.55623007",
"0.5560823",
"0.5552634",
"0.5545572",
"0.5509103",
"0.54875374",
"0.54456526",
"0.5415965",
"0.54091614",
"0.53876656",
"0.5335502",
"0.52996564",
"0.5299604",
"0.5290811",
"0.52884954",
"0.52452374",
"0.5242079",
"0.52384907",
"0.5234298",
"0.5231328",
"0.52293813",
"0.5192214"
]
| 0.63707787 | 0 |
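The one-liner above defines the direction between two sets of weights. A short, hedged sketch of its typical use, with two throwaway models standing in for a trained model and a second checkpoint:

import torch.nn as nn

net_a, net_b = nn.Linear(4, 2), nn.Linear(4, 2)               # two illustrative checkpoints (assumption)
weights = [p.data for p in net_a.parameters()]
weights2 = [p.data for p in net_b.parameters()]

# direction pointing from net_a's weights toward net_b's weights
direction = [w2 - w for (w, w2) in zip(weights, weights2)]
print([tuple(d.shape) for d in direction])                    # same shapes as the parameters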
Rescale the direction so that it has a norm similar to that of the corresponding model weights at each level. | def normalize_direction(direction, weights, norm='filter'):
    if norm == 'filter':
        # Rescale the filters (weights in group) in 'direction' so that each
        # filter has the same norm as its corresponding filter in 'weights'.
        for d, w in zip(direction, weights):
            d.mul_(w.norm()/(d.norm() + 1e-10))
    elif norm == 'layer':
        # Rescale the layer variables in the direction so that each layer has
        # the same norm as the layer variables in weights.
        direction.mul_(weights.norm()/direction.norm())
    elif norm == 'weight':
        # Rescale the entries in the direction so that each entry has the same
        # scale as the corresponding weight.
        direction.mul_(weights)
    elif norm == 'dfilter':
        # Rescale the entries in the direction so that each filter direction
        # has the unit norm.
        for d in direction:
            d.div_(d.norm() + 1e-10)
    elif norm == 'dlayer':
        # Rescale the entries in the direction so that each layer direction has
        # the unit norm.
        direction.div_(direction.norm()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def norm(self):\r\n old_origin = np.array(self.origin)\r\n self.origin = [0, 0, 0]\r\n old_origin[0] = old_origin[0] / self.x[0]\r\n old_origin[1] = old_origin[1] / self.y[1]\r\n old_origin[2] = old_origin[2] / self.z[2]\r\n self.data = ndimage.shift(self.data, -old_origin, mode='wrap')",
"def renorm(self):\n self.U /= (np.sum(np.abs(self.U)**2)*self.dx)**0.5",
"def normalize(self):\n self.vector /= np.linalg.norm(self.vector)",
"def normalize(self):\n if self.norm():\n self._ar = self._ar / self.norm()",
"def renorm(self):\n self.data -= self.data.min()\n self.data /= self.data.max()\n return self",
"def normalize( self ):\n self.set_magnitude( 1.0 )\n return self",
"def _normalize(self):\r\n self.dataframe['norm_intensity'] = self.dataframe['intensity']\r\n self.dataframe['norm_intensity'] -= self.dataframe['norm_intensity'].min()\r\n self.dataframe['norm_intensity'] /= self.dataframe['norm_intensity'].max() * 0.01",
"def normalize(self):\r\n max = np.amax(self.matrix)\r\n min = np.amin(self.matrix)\r\n\r\n self.matrix = ((self.matrix - min) / (max - min))",
"def edit_scale(scale, direction):\n if direction in (up, shift_up, plus):\n scale = scale*2\n elif direction in (down, shift_down, minus):\n scale = scale/2\n return scale",
"def normalize(self):\n self._data /= self.norm()",
"def normalize(self):\n return (1. / abs(self)) * self",
"def _init_norm(self):\n with tf.name_scope('init_norm'):\n flat = tf.reshape(self.v, [-1, self.layer_depth])\n self.g.assign(\n tf.reshape(tf.linalg.norm(flat, axis=0), (self.layer_depth,)))",
"def model_normalize_(self, ref_point: 'ModelParameters', order=2):\n for parameter in self.parameters:\n parameter *= (ref_point.model_norm(order) / self.model_norm())",
"def normalize(self):\n\n if not self.magnitude():\n return Vector(0, 0)\n\n l = 1 / self.magnitude()\n return self.scale(l)",
"def normalize(self, m1=0., m2=1.):\n self.img = self.img - self.img.min()\n self.img = self.img / self.img.max()\n\n self.img = self.img * (m2 - m1) + m1",
"def normalize(self):\n self.desc += \", normalize\"\n self._vecs /= np.linalg.norm(self._vecs, axis=1)[:, np.newaxis]\n self.reindex()",
"def get_normalized_direction(self, direction):\n return round(self.normal_joystick_slope * direction + self.normal_joystick_intercept, 2)",
"def normalize(self, mel_db: np.ndarray) -> np.ndarray:\n mel_norm = ((mel_db - self.ref_level_db) - self.min_level_db) / (\n -self.min_level_db\n )\n if self.symmetric_norm:\n # Symmetric norm\n mel_norm = ((2 * self.max_norm) * mel_norm) - self.max_norm\n if self.clip_norm:\n mel_norm = np.clip(mel_norm, -self.max_norm, self.max_norm)\n else:\n # Asymmetric norm\n mel_norm = self.max_norm * mel_norm\n if self.clip_norm:\n mel_norm = np.clip(mel_norm, 0, self.max_norm)\n\n return mel_norm",
"def normalize(self):\n if self.normed:\n return\n self._normalize()",
"def _normalise(self):\n if not self.is_unit():\n n = self.norm\n if n > 0:\n self.q = self.q / n",
"def normalize(self):\n self.number_of_vectors = self.values.shape[0]\n norm_2 = np.linalg.norm(self.values, axis=1)\n norm_1 = np.sum(self.values_planar, axis=1)\n norm_2 = np.repeat(norm_2, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_1 = np.repeat(norm_1, self.number_of_objectives).reshape(\n self.number_of_vectors, self.number_of_objectives\n )\n norm_2[norm_2 == 0] = np.finfo(float).eps\n self.values = np.divide(self.values, norm_2)\n self.values_planar = np.divide(self.values_planar, norm_1)",
"def apply_direction_scale( vectors, direction, scale ):\n \"\"\"\n scaling is defined as:\n \n [p'][1 + (k - 1)n.x^2, (k - 1)n.x n.y^2, (k - 1)n.x n.z ]\n S(n,k) = [q'][(k - 1)n.x n.y, 1 + (k - 1)n.y, (k - 1)n.y n.z ]\n [r'][(k - 1)n.x n.z, (k - 1)n.y n.z, 1 + (k - 1)n.z^2 ]\n \n where:\n v' is the resulting vector after scaling\n v is the vector to scale\n n is the direction of the scaling\n n.x is the x component of n\n n.y is the y component of n\n n.z is the z component of n\n k is the scaling factor\n \"\"\"\n scaleMinus1 = scale - 1\n matrix = numpy.array(\n [\n # m1\n [\n # m11 = 1 + (k - 1)n.x^2\n 1 + scaleMinus1 * (direction[ 0 ]**2),\n # m12 = (k - 1)n.x n.y^2\n scaleMinus1 * direction[ 0 ] * direction[ 1 ]**2,\n # m13 = (k - 1)n.x n.z\n scaleMinus1 * direction[ 0 ] * direction[ 2 ]\n ],\n # m2\n [\n # m21 = (k - 1)n.x n.y\n scaleMinus1 * direction[ 0 ] * direction[ 1 ],\n # m22 = 1 + (k - 1)n.y\n 1 + scaleMinus1 * direction[ 1 ],\n # m23 = (k - 1)n.y n.z\n scaleMinus1 * direction[ 1 ] * direction[ 2 ]\n ],\n # m3\n [\n # m31 = (k - 1)n.x n.z\n scaleMinus1 * direction[ 0 ] * direction[ 2 ],\n # m32 = (k - 1)n.y n.z\n scaleMinus1 * direction[ 1 ] * direction[ 2 ],\n # m33 = 1 + (k - 1)n.z^2\n 1 + scaleMinus1 * direction[ 2 ]**2\n ]\n ],\n dtype = numpy.float\n )\n \n return numpy.dot( vectors, matrix )",
"def update_normal(self):\n options = self.get_direction_options()\n if self.is_at_intersection() or self.last_position == (self.rect.centerx, self.rect.centery):\n self.direction = self.get_chase_direction(options)\n if self.direction == 'u' and 'u' in options:\n self.rect.centery -= self.speed\n elif self.direction == 'l' and 'l' in options:\n self.rect.centerx -= self.speed\n elif self.direction == 'd' and 'd' in options:\n self.rect.centery += self.speed\n elif self.direction == 'r' and 'r' in options:\n self.rect.centerx += self.speed\n self.change_eyes(self.direction or 'r') # default look direction to right\n self.image = self.norm_images.next_image()",
"def norm(self):",
"def norm(self) -> \"Vector\":\n self.values = tuple(self/self.mag())\n return self",
"def set_normalize(self, new_normalize=False):\n self.normalize = new_normalize",
"def _scale(self, normalize, mat):\n mat = mat.astype(float)\n if normalize:\n mat = sklearn_norm(mat,\n feature_range=(0, 1),\n axis=0,\n copy=True)\n else:\n return mat\n return mat",
"def normalize(self):\n n = 1.0 / self.norm()\n self.mV = [ x * n for x in self.mV ]\n return self",
"def _assure_normalized(self):\n for iwann in range(self.nwann):\n norm = np.trace(\n self.wannR[:, :, iwann].conj().T @ self.wannR[:, :, iwann])\n #print(f\"Norm {iwann}: {norm}\")",
"def model_norm(self, order=2) -> float:\n # L-n norm of model where we treat the model as a flat other\n return math.pow(sum([\n torch.pow(layer, order).sum().item()\n for layer in self.parameters\n ]), 1.0 / order)"
]
| [
"0.6777673",
"0.66808975",
"0.65217906",
"0.64889526",
"0.6374746",
"0.6235027",
"0.6218624",
"0.61755383",
"0.61742705",
"0.61604995",
"0.61592346",
"0.61486703",
"0.61451757",
"0.61233693",
"0.61096954",
"0.6099822",
"0.60399234",
"0.6030842",
"0.60131377",
"0.60000825",
"0.5992732",
"0.59926254",
"0.596698",
"0.59590644",
"0.595607",
"0.5947895",
"0.59468323",
"0.59163064",
"0.59062505",
"0.59020054"
]
| 0.69959813 | 0 |
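To make the 'filter' branch above concrete, here is a small hedged example on a single convolutional weight tensor; the shapes are arbitrary assumptions.

import torch

w = torch.randn(8, 3, 3, 3)              # 8 filters of a conv layer (assumed shape)
d = torch.randn_like(w)                  # random direction with the same shape

# filter-wise normalization: each filter in d takes the norm of the matching filter in w
for d_f, w_f in zip(d, w):
    d_f.mul_(w_f.norm() / (d_f.norm() + 1e-10))

print(torch.allclose(d[0].norm(), w[0].norm()))   # True (up to the 1e-10 guard)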
Set up the h5 file to store the directions. | def setup_direction(args, dir_file, net):
    print('-------------------------------------------------------------------')
    print('setup_direction')
    print('-------------------------------------------------------------------')
    # Set the env var to prevent file locking on the h5py file for newer h5py versions
    os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
    # Skip if the direction file already exists
    if exists(dir_file):
        if args.no_resume:
            os.remove(dir_file)
        else:
            f = h5py.File(dir_file, 'r')
            if (args.y and 'ydirection' in f.keys()) or 'xdirection' in f.keys():
                f.close()
                print("%s is already set up" % dir_file)
                return
            f.close()
    # Create the plotting directions
    f = h5py.File(dir_file, 'w')  # create the direction file ('w' overwrites any existing file)
    if not args.dir_file:
        print("Setting up the plotting directions...")
        if args.model_file2:
            net2 = model_loader.load(args.dataset, args.model, args.model_file2)
            xdirection = create_target_direction(net, net2, args.dir_type)
        else:
            xdirection = create_random_direction(net, args.dir_type, args.xignore, args.xnorm)
        h5_util.write_list(f, 'xdirection', xdirection)
        if args.y:
            if args.same_dir:
                ydirection = xdirection
            elif args.model_file3:
                net3 = model_loader.load(args.dataset, args.model, args.model_file3)
                ydirection = create_target_direction(net, net3, args.dir_type)
            else:
                ydirection = create_random_direction(net, args.dir_type, args.yignore, args.ynorm)
            h5_util.write_list(f, 'ydirection', ydirection)
    f.close()
    print("direction file created: %s" % dir_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup(self):\n print(\"Looking for \", self.filename)\n if os.path.exists(self.filename):\n n, ext = os.path.splitext(self.filename)[:2]\n if ext == \".h5\" or ext == \".hdf5\":\n with h5py.File(self.filename, \"r\") as file:\n keys = list(file.keys())\n self.data = file[keys[0]].value\n print(\"Behavior Data length is \", self.data.shape[2])\n\n else:\n raise FileNotFoundError",
"def ToH5(self,h5File=None):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n if h5File == None:\r\n h5File=self.h5File\r\n\r\n #Delete .h5 File if exists\r\n if os.path.exists(h5File): \r\n logger.debug(\"{0:s}{1:s}: Delete ...\".format(logStr,h5File)) \r\n os.remove(h5File)\r\n\r\n #Determine .h5 BaseKey\r\n\r\n relPath2XmlromCurDir=os.path.normpath(os.path.relpath(os.path.normpath(self.xmlFile),start=os.path.normpath(os.path.curdir))) # ..\\..\\..\\..\\..\\3S\\Modelle\\....XML\r\n #print(repr(relPath2XmlromCurDir)) # '..\\\\..\\\\..\\\\..\\\\..\\\\3S\\\\Modelle\\\\....XML'\r\n h5KeySep='/'\r\n h5KeyCharForDot='_'\r\n h5KeyCharForMinus='_'\r\n relPath2XmlromCurDirH5BaseKey=re.sub('\\.',h5KeyCharForDot,re.sub(r'\\\\',h5KeySep,re.sub('-',h5KeyCharForMinus,re.sub('.xml','',relPath2XmlromCurDir,flags=re.IGNORECASE))))\r\n #__/__/__/__/__/3S/Modelle/...\r\n\r\n warnings.filterwarnings('ignore',category=pd.io.pytables.PerformanceWarning) #your performance may suffer as PyTables will pickle object types that it cannot map directly to c-types \r\n warnings.filterwarnings('ignore',category=tables.exceptions.NaturalNameWarning) #\\lib\\site-packages\\tables\\path.py:100: NaturalNameWarning: object name is not a valid Python identifier: '3S'; it does not match the pattern ``^[a-zA-Z_][a-zA-Z0-9_]*$``; you will not be able to use natural naming to access this object; using ``getattr()`` will still work, though)\r\n \r\n #Write .h5 File\r\n logger.debug(\"{0:s}pd.HDFStore({1:s}) ...\".format(logStr,h5File)) \r\n with pd.HDFStore(h5File) as h5Store: \r\n #for tableName,table in self.dataFrames.items():\r\n for tableName in sorted(self.dataFrames.keys()):\r\n table=self.dataFrames[tableName]\r\n h5Key=relPath2XmlromCurDirH5BaseKey+h5KeySep+tableName \r\n logger.debug(\"{0:s}{1:s}: Writing DataFrame {2:s} with h5Key={3:s}\".format(logStr,h5File,tableName,h5Key)) \r\n try:\r\n h5Store.put(h5Key,table)#,format='table') \r\n except Exception as e:\r\n logger.error(\"{0:s}{1:s}: Writing DataFrame {2:s} with h5Key={3:s} FAILED!\".format(logStr,h5File,tableName,h5Key)) \r\n raise e\r\n \r\n\r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n \r\n finally:\r\n h5Store.close()\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))",
"def setUp(self):\n\n super().setUp()\n self.h5fname = self._getTempFileName()\n self.h5file = tb.open_file(\n self.h5fname, self.open_mode, title=self._getName(),\n **self.open_kwargs)",
"def generate_file(self, hdf5_file_name) -> None:\n self.f = h5py.File(hdf5_file_name, \"w\")\n\n print(Fore.GREEN + \"Generating simulation condition list...\")\n self.generate_simulation_condition_map()\n\n print(Fore.GREEN + \"Generating parameter list...\")\n self.generate_parameter_list()\n\n print(Fore.GREEN + \"Generating fixed parameters matrix...\")\n self.generate_fixed_parameter_matrix()\n\n print(Fore.GREEN + \"Generating measurement matrix...\")\n self.generate_measurement_matrices()\n\n print(Fore.GREEN + \"Handling scaling parameters...\")\n self.generate_hierarchical_optimization_data()\n\n print(Fore.GREEN + \"Copying default AMICI options...\")\n self.copy_amici_options()\n\n print(Fore.GREEN + \"Writing default optimization options...\")\n self.write_optimization_options()",
"def _loadHDF5File(self, filename):\n matfile = h5py.File(filename)\n\n self.StokesI = np.transpose(matfile['StokesI'][:,:])\n self.StokesQ = np.transpose(matfile['StokesQ'][:,:])\n self.StokesU = np.transpose(matfile['StokesU'][:,:])\n self.StokesV = np.transpose(matfile['StokesV'][:,:])\n self.detectorPosition = matfile['detectorPosition'][:,0]\n self.detectorDirection = matfile['detectorDirection'][:,0]\n self.detectorVisang = matfile['detectorVisang'][0,0]\n\n try: self.wall = matfile['wall'][:,:]\n except KeyError: pass\n\n try: self.separatrix = matfile['separatrix'][:,:]\n except KeyError: pass",
"def on_init(self):\n self.model.maze.initialize(os.path.join(\n config.value['src']['data'], 'maze.csv'))",
"def _readHDF5(self):\n\n h5 = h5py.File(self.pointInputFile, 'r')\n self.coords = h5['geometry/vertices'][:]\n self.stations = h5['stations'][:]\n self.dispRaw = h5['vertex_fields/displacement'][self.timeStep,:,:]\n h5.close()\n\n self.numStations = self.coords.shape[0]\n\n return",
"def save_as_hdf5(self, filename):",
"def setup(self):\n EGS5.setup(self)\n if not len(self.inputs):\n raise Exception(\"Missing required input LHE file.\")",
"def _generate_testdata_h5(cls, test_filepath):\n # Generate some test data\n data = numpy.indices( (10, 100, 200, 3) )\n assert data.shape == (4, 10, 100, 200, 3)\n data = data.astype( numpy.uint32 )\n cls.original_data = data\n\n # Choose names\n cls.dvid_dataset = \"datasetA\"\n cls.data_uuid = \"abcde\"\n cls.data_name = \"indices_data\"\n cls.volume_location = \"/datasets/{dvid_dataset}/volumes/{data_name}\".format( **cls.__dict__ )\n cls.node_location = \"/datasets/{dvid_dataset}/nodes/{data_uuid}\".format( **cls.__dict__ )\n cls.voxels_metadata = voxels.VoxelsMetadata.create_default_metadata(data.shape, data.dtype, \"cxyzt\", 1.0, \"\")\n\n # Write to h5 file\n with H5MockServerDataFile( test_filepath ) as test_h5file:\n test_h5file.add_node( cls.dvid_dataset, cls.data_uuid )\n test_h5file.add_volume( cls.dvid_dataset, cls.data_name, data, cls.voxels_metadata )\n\n test_h5file.add_node( \"datasetB\", \"12345\" )\n test_h5file.add_volume( \"datasetB\", cls.data_name, data, cls.voxels_metadata )",
"def test_hdf5_topo_view():\n skip_if_no_h5py()\n import h5py\n\n # save random data to HDF5\n handle, filename = tempfile.mkstemp()\n dataset = random_one_hot_topological_dense_design_matrix(\n np.random.RandomState(1), num_examples=10, shape=(2, 2), channels=3,\n axes=('b', 0, 1, 'c'), num_classes=3)\n with h5py.File(filename, 'w') as f:\n f.create_dataset('topo_view', data=dataset.get_topological_view())\n f.create_dataset('y', data=dataset.get_targets())\n\n # instantiate Train object\n trainer = yaml_parse.load(topo_view_yaml % {'filename': filename})\n trainer.main_loop()\n\n # cleanup\n os.remove(filename)",
"def setup():\n if not os.path.isfile(etymology_file):\n page = re.compile(r'index.php\\?l=\\w+&p=\\d+&allowed_in_frame=0.html')\n pages = list(find_files(directory=site, pattern=page, recursive=False))\n etymology = etymologies(pages)\n dump(etymology, etymology_file)\n for affix, dictionary in affixes(etymology):\n affix_file = os.path.join('resources', '{}.json'.format(affix))\n if not os.path.isfile(affix_file):\n dump(dictionary, affix_file)",
"def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):\n n_total = n_train + n_valid + n_test\n splits = create_splits(n_train, n_valid, n_test)\n hdf5_file.attrs['split'] = H5PYDataset.create_split_array(splits)\n vlen_dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))\n hdf5_file.create_dataset('encoded_images', shape=(n_total,),\n dtype=vlen_dtype)\n hdf5_file.create_dataset('targets', shape=(n_total, 1), dtype=numpy.int16)\n hdf5_file.create_dataset('filenames', shape=(n_total, 1), dtype='S32')",
"def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)",
"def setup(self):\n EventGenerator.setup(self)\n\n if self.egs5_dir is None:\n self.egs5_dir = self.get_install_dir()\n logger.debug(\"Using EGS5 from install dir: \" + self.egs5_dir)\n\n ## data directory\n self.egs5_data_dir = os.path.join(self.egs5_dir, \"data\")\n ## config directory\n self.egs5_config_dir = os.path.join(self.egs5_dir, \"config\")\n\n logger.debug(\"egs5_data_dir=%s\" % self.egs5_data_dir)\n logger.debug(\"egs5_config_dir=%s\" % self.egs5_config_dir)\n\n if os.path.exists(\"data\"):\n os.unlink(\"data\")\n os.symlink(self.egs5_data_dir, \"data\")\n\n if os.path.exists(\"pgs5job.pegs5inp\"):\n os.unlink(\"pgs5job.pegs5inp\")\n os.symlink(self.egs5_config_dir + \"/src/esa.inp\", \"pgs5job.pegs5inp\")\n\n logger.debug(\"Reading run parameters: {}\".format(self.run_params))\n ## run parameters\n self.run_param_data = RunParameters(self.run_params)\n\n # Set target thickness from job parameter or use the default from run parameters\n if self.target_thickness is not None:\n self.target_z = self.target_thickness\n logger.debug(\"Target thickness set from job param: {}\".format(self.target_z))\n else:\n self.target_z = self.run_param_data.get(\"target_z\")\n logger.debug(\"Target thickness set from run_params: {}\".format(self.target_z))\n\n ebeam = self.run_param_data.get(\"beam_energy\")\n electrons = self.run_param_data.get(\"num_electrons\") * self.bunches\n\n seed_data = \"%d %f %f %d\" % (self.seed, self.target_z, ebeam, electrons)\n logger.debug(\"Seed data (seed, target_z, ebeam, electrons): {}\".format(seed_data))\n seed_file = open(\"seed.dat\", 'w')\n seed_file.write(seed_data)\n seed_file.close()",
"def writeH5Dataset( self, foldername, time, nameConvention = \"grid\" ):\n filename = \"{0}/{1}_{2:06}.h5\".format(foldername,nameConvention,time)\n file = h5py.File(filename,'w',driver='mpio',comm=self.global_comm)\n dset = file.create_dataset(\"dset\",self._layout.fullShape, dtype = self._f.dtype)\n slices = tuple([slice(s,e) for s,e in zip(self._layout.starts,self._layout.ends)])\n dset[slices]=self._f[:]\n attr_data = np.array(self._layout.dims_order)\n dset.attrs.create(\"Layout\", attr_data, (self._nDims,), h5py.h5t.STD_I32BE)\n file.close()",
"def main():\n parser = ArgumentParser(description=\"write to a file\")\n\n parser.add_argument(\"-i\",\"--input\", type=setup.is_valid_h5_file, required=True, nargs='+',\n help=\"path(s) of HDF5 master file(s)\")\n\n parser.add_argument(\"-b\",\"--beamcenter\", nargs=2, required=True,\n help=\"beam center in X and Y (two arguments)\")\n\n parser.add_argument(\"-r\",\"--oscillation\", type=float, default=1,\n help=\"oscillation angle per well, default = 1\")\n\n parser.add_argument(\"-d\",\"--distance\", type=float, default=100,\n help=\"detector distance in mm\")\n\n parser.add_argument(\"-w\",\"--wavelength\", type=float, default=1.216,\n help=\"Wavelength in Angstrom, default is 1.216\")\n\n parser.add_argument(\"-f\",\"--framesperdegree\", type=int, default=5,\n help=\"Number of frames per degree, default is 5\")\n\n parser.add_argument(\"-t\",\"--totalframes\", type=int, default=0,\n help=\"Total number of frames to be processed, default all\")\n\n parser.add_argument(\"--output\", default=os.getcwd(),\n help=\"Use this option to change output directory, default pwd\")\n\n parser.add_argument(\"-sg\",\"--spacegroup\", type=int, default=0,\n help=\"Space group\")\n\n parser.add_argument(\"-u\",\"--unitcell\", type=str, default=\"50 50 50 90 90 90\",\n help=\"unit cell\")\n\n argslist = parser.parse_args()\n for masterfile in argslist.input:\n master1= Master(argslist,masterfile)\n master1.printDataWells()",
"def generate_setups(self,filename=DEFAULT_FILENAME):\n \n self._create_main_shape()\n self._create_margin_shape()\n\n for section, setup in self.setups.iteritems():\n self._generate_section_structures(setup['distance'],\n setup['radius'],\n setup['structure'],\n section)\n self.write(filename)",
"def setUp(self):\n self._file = 'test.g3'\n write_example_file(self._file)",
"def transition_to_static(self, h5_filepath):\n print(\"transition to static\")",
"def create_devh5(self):\n if os.path.exists(self.dev_h5_path):\n print(\"[LOGGING]: \" + self.dev_h5_path + \" exists!\")\n return\n\n with h5py.File(self.dev_h5_path, 'w') as f:\n\n # create a group: f['train']\n train = f.create_group('train')\n self.extract_fea_for_datagroup(train, mode='train')\n\n # f['test']\n test = f.create_group('test')\n self.extract_fea_for_datagroup(test, mode='test')\n\n f.close()",
"def _setup_h5(self, data_gen_parms):\n\n '''\n Build the group structure down to the channel group\n '''\n # Set up the basic group structure\n root_grp = VirtualGroup('')\n root_parms = dict()\n root_parms['translator'] = 'FAKEBEPS'\n root_parms['data_type'] = data_gen_parms['data_type']\n root_grp.attrs = root_parms\n\n meas_grp = VirtualGroup('Measurement_')\n chan_grp = VirtualGroup('Channel_')\n\n meas_grp.attrs.update(data_gen_parms)\n\n # Create the Position and Spectroscopic datasets for the Raw Data\n ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals = self._build_ancillary_datasets()\n\n raw_chunking = calc_chunks([self.n_pixels,\n self.n_spec_bins],\n np.complex64(0).itemsize,\n unit_chunks=[1, self.n_bins])\n\n ds_raw_data = VirtualDataset('Raw_Data', data=None,\n maxshape=[self.n_pixels, self.n_spec_bins],\n dtype=np.complex64,\n compression='gzip',\n chunking=raw_chunking,\n parent=meas_grp)\n\n chan_grp.add_children([ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals,\n ds_raw_data])\n meas_grp.add_children([chan_grp])\n root_grp.add_children([meas_grp])\n\n hdf = HDFwriter(self.h5_path)\n hdf.delete()\n h5_refs = hdf.write(root_grp)\n\n # Delete the MicroDatasets to save memory\n del ds_raw_data, ds_spec_inds, ds_spec_vals, ds_pos_inds, ds_pos_vals\n\n # Get the file and Raw_Data objects\n h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]\n h5_chan_grp = h5_raw.parent\n\n # Get the Position and Spectroscopic dataset objects\n h5_pos_inds = get_h5_obj_refs(['Position_Indices'], h5_refs)[0]\n h5_pos_vals = get_h5_obj_refs(['Position_Values'], h5_refs)[0]\n h5_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_refs)[0]\n h5_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_refs)[0]\n\n # Link the Position and Spectroscopic datasets as attributes of Raw_Data\n link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)\n\n '''\n Build the SHO Group\n '''\n sho_grp = VirtualGroup('Raw_Data-SHO_Fit_', parent=h5_chan_grp.name)\n\n # Build the Spectroscopic datasets for the SHO Guess and Fit\n sho_spec_starts = np.where(h5_spec_inds[h5_spec_inds.attrs['Frequency']].squeeze() == 0)[0]\n sho_spec_labs = get_attr(h5_spec_inds, 'labels')\n ds_sho_spec_inds, ds_sho_spec_vals = build_reduced_spec_dsets(h5_spec_inds,\n h5_spec_vals,\n keep_dim=sho_spec_labs != 'Frequency',\n step_starts=sho_spec_starts)\n\n sho_chunking = calc_chunks([self.n_pixels,\n self.n_sho_bins],\n sho32.itemsize,\n unit_chunks=[1, 1])\n ds_sho_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n ds_sho_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n\n sho_grp.add_children([ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals])\n\n # Write the SHO group and datasets to the file and delete the MicroDataset objects\n h5_sho_refs = hdf.write(sho_grp)\n del ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals\n\n # Get the dataset handles for the fit and guess\n h5_sho_fit = get_h5_obj_refs(['Fit'], h5_sho_refs)[0]\n h5_sho_guess = get_h5_obj_refs(['Guess'], h5_sho_refs)[0]\n\n # Get the dataset handles for the SHO Spectroscopic datasets\n h5_sho_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_sho_refs)[0]\n h5_sho_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_sho_refs)[0]\n\n # Link the Position and Spectroscopic 
datasets as attributes of the SHO Fit and Guess\n link_as_main(h5_sho_fit, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n\n '''\n Build the loop group\n '''\n loop_grp = VirtualGroup('Fit-Loop_Fit_', parent=h5_sho_fit.parent.name)\n\n # Build the Spectroscopic datasets for the loops\n loop_spec_starts = np.where(h5_sho_spec_inds[h5_sho_spec_inds.attrs['DC_Offset']].squeeze() == 0)[0]\n loop_spec_labs = get_attr(h5_sho_spec_inds, 'labels')\n ds_loop_spec_inds, ds_loop_spec_vals = build_reduced_spec_dsets(h5_sho_spec_inds,\n h5_sho_spec_vals,\n keep_dim=loop_spec_labs != 'DC_Offset',\n step_starts=loop_spec_starts)\n\n # Create the loop fit and guess MicroDatasets\n loop_chunking = calc_chunks([self.n_pixels, self.n_loops],\n loop_fit32.itemsize,\n unit_chunks=[1, 1])\n ds_loop_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n ds_loop_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n # Add the datasets to the loop group then write it to the file\n loop_grp.add_children([ds_loop_fit, ds_loop_guess, ds_loop_spec_inds, ds_loop_spec_vals])\n h5_loop_refs = hdf.write(loop_grp)\n\n # Delete the MicroDatasets\n del ds_loop_spec_vals, ds_loop_spec_inds, ds_loop_guess, ds_loop_fit\n\n # Get the handles to the datasets\n h5_loop_fit = get_h5_obj_refs(['Fit'], h5_loop_refs)[0]\n h5_loop_guess = get_h5_obj_refs(['Guess'], h5_loop_refs)[0]\n h5_loop_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_loop_refs)[0]\n h5_loop_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_loop_refs)[0]\n\n # Link the Position and Spectroscopic datasets to the Loop Guess and Fit\n link_as_main(h5_loop_fit, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n link_as_main(h5_loop_guess, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n\n self.h5_raw = USIDataset(h5_raw)\n self.h5_sho_guess = USIDataset(h5_sho_guess)\n self.h5_sho_fit = USIDataset(h5_sho_fit)\n self.h5_loop_guess = USIDataset(h5_loop_guess)\n self.h5_loop_fit = USIDataset(h5_loop_fit)\n self.h5_spec_vals = h5_spec_vals\n self.h5_spec_inds = h5_spec_inds\n self.h5_sho_spec_inds = h5_sho_spec_inds\n self.h5_sho_spec_vals = h5_sho_spec_vals\n self.h5_loop_spec_inds = h5_loop_spec_inds\n self.h5_loop_spec_vals = h5_loop_spec_vals\n self.h5_file = h5_raw.file\n\n return",
"def setUp(self):\n\n self.hw = HMMERWrapper\n\n modpath = os.path.abspath(os.path.dirname(__file__))\n self.seqfile = os.path.join(modpath, 'data', 'P00929.fasta')\n self.badfile = os.path.join(modpath, 'data', 'bad.fasta')",
"def to_hdf5(self, filename):\n\n f = h5py.File(filename, 'w')\n f['xyz'] = self.xyz\n f.close()\n\n return",
"def test_hdf5_load_all():\n skip_if_no_h5py()\n import h5py\n\n # save random data to HDF5\n handle, filename = tempfile.mkstemp()\n dataset = random_one_hot_dense_design_matrix(np.random.RandomState(1),\n num_examples=10, dim=5,\n num_classes=3)\n with h5py.File(filename, 'w') as f:\n f.create_dataset('X', data=dataset.get_design_matrix())\n f.create_dataset('y', data=dataset.get_targets())\n\n # instantiate Train object\n trainer = yaml_parse.load(load_all_yaml % {'filename': filename})\n trainer.main_loop()\n\n # cleanup\n os.remove(filename)",
"def setup(self, path_to_conf_file):\n\n self.track = Track.SENSORS\n self.num_frames = 0\n\n with open(path_to_conf_file, 'r') as f:\n config = yaml.safe_load(f)\n\n for key, value in config.items():\n setattr(self, key, value)\n\n self.device = torch.device('cuda')\n\n self.image_model = CameraModel(config).to(self.device)\n self.image_model.load_state_dict(torch.load(self.main_model_dir))\n self.image_model.eval()\n\n self.vizs = []\n\n self.waypointer = None\n\n if self.log_wandb:\n wandb.init(project='carla_evaluate')\n \n self.steers = torch.tensor(np.linspace(-self.max_steers,self.max_steers,self.num_steers)).float().to(self.device)\n self.throts = torch.tensor(np.linspace(0,self.max_throts,self.num_throts)).float().to(self.device)\n\n self.prev_steer = 0\n self.lane_change_counter = 0\n self.stop_counter = 0",
"def load_directions(dir_file):\n\n xdirection = h5_util.read_list(f, 'xdirection')\n ydirection = h5_util.read_list(f, 'ydirection')\n directions = [xdirection, ydirection]\n\n return directions",
"def _init_h5_out(self, fout, save_hybrid_meta=True):\n dsets = []\n shapes = {}\n attrs = {}\n chunks = {}\n dtypes = {}\n\n for dset, data in self.profiles.items():\n dsets.append(dset)\n shapes[dset] = data.shape\n chunks[dset] = None\n attrs[dset] = {Outputs.UNIT_ATTR: \"MW\"}\n dtypes[dset] = data.dtype\n\n meta = self.hybrid_meta.copy()\n for c in meta.columns:\n try:\n meta[c] = pd.to_numeric(meta[c])\n except ValueError:\n pass\n\n Outputs.init_h5(fout, dsets, shapes, attrs, chunks, dtypes,\n meta, time_index=self.hybrid_time_index)\n\n if save_hybrid_meta:\n with Outputs(fout, mode='a') as out:\n hybrid_meta = to_records_array(self.hybrid_meta)\n out._create_dset('meta', hybrid_meta.shape,\n hybrid_meta.dtype, data=hybrid_meta)",
"def _set_guess(self, h5_guess):\n '''\n Get the Spectroscopic and Position datasets from `self.h5_main`\n '''\n self._sho_spec_inds = self.h5_main.h5_spec_inds\n self._sho_spec_vals = self.h5_main.h5_spec_vals\n self._sho_pos_inds = self.h5_main.h5_pos_inds\n\n '''\n Find the Spectroscopic index for the DC_Offset\n '''\n fit_ind = np.argwhere(get_attr(self._sho_spec_vals, 'labels') == self._fit_dim_name).squeeze()\n self._fit_spec_index = fit_ind\n self._fit_offset_index = 1 + fit_ind\n\n '''\n Get the group and projection datasets\n '''\n self._h5_group = h5_guess.parent\n self.h5_projected_loops = self._h5_group['Projected_Loops']\n self.h5_loop_metrics = self._h5_group['Loop_Metrics']\n self._met_spec_inds = self._h5_group['Loop_Metrics_Indices']\n\n self.h5_guess = h5_guess",
"def export_to_hdf5(cls, h5_file, model, loads):\n #encoding = model._encoding\n #comments = []\n sid = []\n node = []\n cid = []\n mag = []\n xyz = []\n for load in loads:\n #comments.append(loads.comment)\n sid.append(load.sid)\n node.append(load.node)\n cid.append(load.cid)\n mag.append(load.mag)\n xyz.append(load.xyz)\n\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('sid', data=sid)\n h5_file.create_dataset('node', data=node)\n h5_file.create_dataset('cid', data=cid)\n h5_file.create_dataset('mag', data=mag)\n h5_file.create_dataset('xyz', data=xyz)"
]
| [
"0.66554785",
"0.5904285",
"0.58772445",
"0.5786827",
"0.57170254",
"0.5689569",
"0.56485534",
"0.56033355",
"0.5592535",
"0.55645245",
"0.5550623",
"0.5537372",
"0.55245066",
"0.552284",
"0.54868084",
"0.545466",
"0.5440785",
"0.5435956",
"0.541684",
"0.5396151",
"0.5393677",
"0.53798103",
"0.5351827",
"0.5351355",
"0.5341887",
"0.53208303",
"0.52969635",
"0.5288162",
"0.5284552",
"0.52770907"
]
| 0.6418381 | 1 |
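Since `setup_direction` above writes the directions under the keys 'xdirection' and (optionally) 'ydirection', a quick hedged check of a finished file only needs h5py; the file name below is illustrative.

import h5py

# inspect a direction file produced by setup_direction (path is an assumption)
with h5py.File('model_300.t7_weights_xignore=biasbn_xnorm=filter.h5', 'r') as f:
    print(list(f.keys()))                # expect ['xdirection'] or ['xdirection', 'ydirection']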
Name the direction file that stores the random directions. | def name_direction_file(args):
    if args.dir_file:
        assert exists(args.dir_file), "%s does not exist!" % args.dir_file
        return args.dir_file
    dir_file = ""
    file1, file2, file3 = args.model_file, args.model_file2, args.model_file3
    # name for xdirection
    if file2:
        # 1D linear interpolation between two models
        assert exists(file2), file2 + " does not exist!"
        if file1[:file1.rfind('/')] == file2[:file2.rfind('/')]:
            # model_file and model_file2 are under the same folder
            dir_file += file1 + '_' + file2[file2.rfind('/')+1:]
        else:
            # model_file and model_file2 are under different folders
            prefix = commonprefix([file1, file2])
            prefix = prefix[0:prefix.rfind('/')]
            dir_file += file1[:file1.rfind('/')] + '_' + file1[file1.rfind('/')+1:] + '_' + \
                        file2[len(prefix)+1: file2.rfind('/')] + '_' + file2[file2.rfind('/')+1:]
    else:
        dir_file += file1
    dir_file += '_' + args.dir_type
    if args.xignore:
        dir_file += '_xignore=' + args.xignore
    if args.xnorm:
        dir_file += '_xnorm=' + args.xnorm
    # name for ydirection
    if args.y:
        if file3:
            assert exists(file3), "%s does not exist!" % file3
            if file1[:file1.rfind('/')] == file3[:file3.rfind('/')]:
                dir_file += file3
            else:
                # model_file and model_file3 are under different folders
                dir_file += file3[:file3.rfind('/')] + '_' + file3[file3.rfind('/')+1:]
        else:
            if args.yignore:
                dir_file += '_yignore=' + args.yignore
            if args.ynorm:
                dir_file += '_ynorm=' + args.ynorm
            if args.same_dir:  # ydirection is the same as xdirection
                dir_file += '_same_dir'
    # index number
    if args.idx > 0: dir_file += '_idx=' + str(args.idx)
    dir_file += ".h5"
    return dir_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\"",
"def filename(self):\n translator = {ord(\" \"): \"_\", ord(\",\"): None}\n return f'{self._full_name.translate(translator)}.txt'",
"def generate_name():\n return random.choice(ADJECTIVES) + \"_\" + random.choice(TOOLS)",
"def _make_random_file(self, dir, num_chars=10000):\n filename = os.path.join(dir, \"f-%d\" % random.randint(1, 2**63 - 1))\n content = \"\".join([random.choice(\"0123456789abcdefghijklmnopqrstuvwxyz\\n\") for _ in range(num_chars)])\n with open(filename, \"w\") as f:\n f.writelines(content)\n return filename",
"def rand_dir():\n\n return random.choice([\"U\", \"D\", \"L\", \"R\"])",
"def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))",
"def construct_name_file(size_sample, randomness, pos_equal_neg, kernel):\n if randomness:\n randomness = \"rand\"\n else:\n randomness = \"nrand\"\n\n if pos_equal_neg:\n pos_equal_neg = \"pos-neg-eq\"\n else:\n pos_equal_neg = \"pos-neg-neq\"\n\n return \"{}_{}_{}_{}.json\".format(size_sample, randomness, pos_equal_neg, kernel)",
"def generate_rand_name() -> str:\n suf = \"\".join(random.choices(string.ascii_uppercase + string.digits, k=6))\n return f\"exporters_{suf}\"",
"def generateRandomInput(filename, num_people, travel_db):\n import random\n routes = []\n for i in range(num_people):\n route = travel_db.randomRoute()\n route.insert(0,\"Person \" + str(i)) # Add a name for each route.\n routes.append(route)\n if FileHandler.writeRoutesCSV(filename,routes): # If it's successful writing the file\n print(\"File {0} created successfully with {1} people.\".format(filename, num_people))\n else:\n print(\"File {0} could not be created.\".format(filename))",
"def generateFileName(self):\n return 'Covid' + self.map_type + '.html'",
"def generate_name(self, name):\n return \"{}/{}.{}\".format(self.name, self._layer_counter, name)",
"def generate_direction(self):\n random_enum = random.randint(1, 4)\n random_direction = flow_processing_input.Direction(random_enum)\n assert isinstance(random_direction, flow_processing_input.Direction)\n return random_direction",
"def generate_direction(self):\n random_enum = random.randint(1, 4)\n random_direction = flow_processing_input.Direction(random_enum)\n assert isinstance(random_direction, flow_processing_input.Direction)\n return random_direction",
"def generateName(self):\n\n weights = self.w_firsts if self.use_weights else None\n first = random.choices(self.firsts, weights)[0]\n\n weights = self.w_middles if self.use_weights else None\n middle = random.choices(self.middles, weights)[0]\n\n weights = self.w_lasts if self.use_weights else None\n last = random.choices(self.lasts, weights)[0]\n\n print('{0} {1} {2}\\n{0} {2}'.format(first, middle, last))\n print('{0}{1}{2}'.format(first[0].upper(), middle[0].upper(), last[0].upper()))\n print()",
"def generate_random_name(filename):\n ext = filename.split('.')[-1]\n rns = [random.randint(0, len(LETTER_SET) - 1) for _ in range(3)]\n name = ''.join([LETTER_SET[rn] for rn in rns])\n return \"{new_fn}.{ext}\".format(new_fn=name, ext=ext)",
"def create_savename(self):\n \n savename = self.config.get('grid', 'dir') + self.fname.split('/')[-1]\n newsuffix = '_gridded_%ix%ix%i.nc' % (self.nx, self.ny, self.nz)\n savename = savename.replace('.nc', newsuffix)\n \n return savename",
"def change_world_name(file, name):\n with open(file, 'w') as f:\n world_name = name\n f.write(world_name)",
"def generateFilename(self, name):\n return self.context.generateUniqueId(type_name='Module')",
"async def filename_generator(self):\n chars = list(string.ascii_letters+string.digits)\n name = ''\n for i in range(random.randint(9, 25)):\n name += random.choice(chars)\n\n if name not in self.player['audio_files']:\n return name\n\n return await self.filename_generator()",
"def define_name(self):\n\n self._name = 'dwi-noddi'",
"def file(self):\n\n dlos_filename = super(DlosPhotoz, self).file()\n\n photoz_str = 'DLOS_photoz_'\n \n file_name = photoz_str.join( \n dlos_filename.split('DLOS_')\n ) \n\n return file_name",
"def generate_director_name():\n return movie_director_surnames[random.randint(0, len(movie_director_surnames) - 1)] + \" \" + movie_director_lastnames[random.randint(0, len(movie_director_lastnames) - 1)]",
"def name_generator():\n firsts = [\"Albrecht\", \"Lysa\", \"Yvette\", \"Jésus\", \"Amanitus\"]\n lasts = [\"Andersson\", \"Natt och Dag\", \"av Pommern\", \"Krusmynta\"]\n\n random.seed()\n first = firsts[random.randint(0, len(firsts)-1)]\n last = lasts[random.randint(0, len(lasts)-1)]\n\n name = first + \" \" + last\n return name",
"def _get_random_name(self, base_name):\n return base_name + '_' + self.__id_generator()",
"def init_data_file_name():\n now = datetime.datetime.now().isoformat().split('.')[0].replace(':', '-')\n filename = 'show-commands-' + now + \".txt\"\n return filename",
"def random_filename():\n\n return ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))",
"def __newFileName(self):\n now = datetime.now()\n dateTimeAppend = now.strftime('%y%m%d_%H%M%S')\n self.__fileName = '{}/{}_{}.wav'.format(RECORDING,\n FILE_NAME_PREFIX, \n dateTimeAppend)",
"def create_random_file_name():\n\n def random_file_name_factory():\n length = random.randint(10, 15)\n chars = string.ascii_letters + string.digits + \"-_\"\n return f\"{''.join(random.choice(chars) for _ in range(length))}.jpg\"\n\n return random_file_name_factory",
"def makefilename(self):\n fp= (pathlib.Path(self.vr_folder).expanduser()/(time.strftime(self.vr_filename))).with_suffix('')\n fp.parent.mkdir(parents=True, exist_ok=True)\n print('files setup', str(fp))\n return fp",
"def generate(self, name):\n return ''"
]
| [
"0.60667473",
"0.60138476",
"0.59846383",
"0.59264207",
"0.5895491",
"0.5847575",
"0.5836146",
"0.58264697",
"0.58220315",
"0.57896686",
"0.5780307",
"0.57518154",
"0.57518154",
"0.57246995",
"0.57121605",
"0.5672118",
"0.5657772",
"0.56308764",
"0.56201655",
"0.56112003",
"0.5607141",
"0.56001365",
"0.55944896",
"0.5580524",
"0.55639994",
"0.5542674",
"0.5534426",
"0.55163",
"0.5510888",
"0.5504352"
]
| 0.64361393 | 0 |
Load direction(s) from the direction file. | def load_directions(dir_file):
f = h5py.File(dir_file, 'r')
if 'ydirection' in f.keys(): # If this is a 2D plot
xdirection = h5_util.read_list(f, 'xdirection')
ydirection = h5_util.read_list(f, 'ydirection')
directions = [xdirection, ydirection]
else:
directions = [h5_util.read_list(f, 'xdirection')]
return directions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_directions(dir_file):\n\n xdirection = h5_util.read_list(f, 'xdirection')\n ydirection = h5_util.read_list(f, 'ydirection')\n directions = [xdirection, ydirection]\n\n return directions",
"def load_full(self):\n\t\tfor filename in self.FILENAMES:\n\t\t\tself.load(filename)\n\t\tself.reverse_dicts()",
"def _load(self, directory):\n pass",
"def fetch_direction(file_list, column_name):\r\n start_time_aggrigating_vector_components = time.time()\r\n res_dir_x_list, res_dir_y_list, mag, direction = draw_dirs2(file_list, column_name)\r\n all_in_x = np.zeros_like(mag[0])\r\n all_in_y = np.zeros_like(mag[0])\r\n all_mag = np.zeros_like(mag[0])\r\n for i in range(len(res_dir_x_list)):\r\n all_in_x = np.add(all_in_x, res_dir_x_list[i])\r\n all_in_y = np.add(all_in_y, res_dir_y_list[i])\r\n res_x = all_in_x.ravel()\r\n res_y = all_in_y.ravel()\r\n data = pd.read_csv(file_list[0])\r\n data['res_x'] = res_x\r\n data['res_y'] = res_y\r\n print(\"For aggregating vector components %s seconds\" % (time.time() - start_time_aggrigating_vector_components))\r\n return data",
"def setup_direction(args, dir_file, net):\n print('-------------------------------------------------------------------')\n print('setup_direction')\n print('-------------------------------------------------------------------')\n \n # Setup env for preventing lock on h5py file for newer h5py versions\n os.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\"\n \n # Skip if the direction file already exists\n if exists(dir_file):\n if args.no_resume:\n os.remove(dir_file)\n else: \n f = h5py.File(dir_file, 'r')\n if (args.y and 'ydirection' in f.keys()) or 'xdirection' in f.keys():\n f.close()\n print (\"%s is already setted up\" % dir_file)\n return\n f.close()\n\n # Create the plotting directions\n f = h5py.File(dir_file,'w') # create file, fail if exists\n if not args.dir_file:\n print(\"Setting up the plotting directions...\")\n if args.model_file2:\n net2 = model_loader.load(args.dataset, args.model, args.model_file2)\n xdirection = create_target_direction(net, net2, args.dir_type)\n else:\n xdirection = create_random_direction(net, args.dir_type, args.xignore, args.xnorm)\n h5_util.write_list(f, 'xdirection', xdirection)\n\n if args.y:\n if args.same_dir:\n ydirection = xdirection\n elif args.model_file3:\n net3 = model_loader.load(args.dataset, args.model, args.model_file3)\n ydirection = create_target_direction(net, net3, args.dir_type)\n else:\n ydirection = create_random_direction(net, args.dir_type, args.yignore, args.ynorm)\n h5_util.write_list(f, 'ydirection', ydirection)\n\n f.close()\n print (\"direction file created: %s\" % dir_file)",
"def load(self, path):\n pass",
"def load(self, path):\n pass",
"def load_dis_data(self, filename):\n logger.info('load data')\n self.distance, self.data_size = {}, 1\n for line in open(path + filename, 'r'):\n x1, x2, d = line.strip().split(' ')\n x1, x2, d = int(x1), int(x2), float(d)\n self.data_size = max(x2 + 1, self.data_size)\n self.max_dis = max(self.max_dis, d)\n self.min_dis = min(self.min_dis, d)\n self.distance[(x1, x2)] = d\n self.master = np.zeros(self.data_size, dtype=int)\n logger.info('load accomplish')",
"def load_gloves(self, dir):\n self.word2vec = {}\n glove_file = os.path.join(dir, 'glove.6B.'+str(self.dim_embed)+'d.txt')\n with open(glove_file, encoding=\"utf8\") as f:\n for line in f:\n l = line.split()\n self.word2vec[l[0]] = [float(x) for x in l[1:]]\n self.word2vec[\"<RARE>\"] = [0. for i in range(self.dim_embed)]\n self.word2vec[\"<EMPTY>\"] = [0. for i in range(self.dim_embed)]",
"def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()",
"def loadWaypoints(self, fname=\"gauze_pts.p\"):\n\n\t\tlst = []\n\t\tf3 = open(fname, \"rb\")\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tpos2 = pickle.load(f3)\n\t\t\t\tlst.append(pos2)\n\t\t\texcept EOFError:\n\t\t\t\tf3.close()\n\t\t\t\tbreak\n\n\t\tpts = np.matrix(lst)\n\t\tpts = self.interpolation(pts, self.interpolation_factor)\n\t\tself.waypoints = pts",
"def load(path):\n pass",
"def set_direction(self, dir):\n if dir == 0:\n self.direction = [0, -1]\n elif dir == 1:\n self.direction = [1, 0]\n elif dir == 2:\n self.direction = [0, 1]\n elif dir == 3:\n self.direction = [-1, 0]",
"def load(self):\r\n self.read(self.filename)",
"def load(self, dirname=None):\n self.genio.load(dirname)\n logging.info(f'Loaded word vectorizations at {dirname}')",
"def load_from_file(self):\n with open(self.filename) as infile:\n for x, line in enumerate(infile):\n for y, c in enumerate(line):\n if c == path_char:\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == start_char:\n self.start = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == end_char:\n self.end = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == '0':\n self.wall0.append(Position(y * size_sprite, x * size_sprite))\n elif c == '1':\n self.wall1.append(Position(y * size_sprite, x * size_sprite))\n elif c == '2':\n self.wall2.append(Position(y * size_sprite, x * size_sprite))\n elif c == '3':\n self.wall3.append(Position(y * size_sprite, x * size_sprite))\n elif c == '4':\n self.wall4.append(Position(y * size_sprite, x * size_sprite))\n elif c == '5':\n self.wall5.append(Position(y * size_sprite, x * size_sprite))\n elif c == '6':\n self.wall6.append(Position(y * size_sprite, x * size_sprite))\n elif c == '7':\n self.wall7.append(Position(y * size_sprite, x * size_sprite))\n elif c == '8':\n self.wall8.append(Position(y * size_sprite, x * size_sprite))\n elif c == '9':\n self.wall9.append(Position(y * size_sprite, x * size_sprite))\n # -tc- Le placement aléatoire des objets se fait bien une seule fois,\n # -tc- je ne vois pas de soucis ici\n self.objects_to_find = sample(self.paths, 3)\n # -tc- Ne pas utiliser print pour débugger mais un debugger\n print(self.paths)\n\n # -tc- return inutile et pas utilisé. Ce n'est pas comme cela qu'on procède pour retourner \n # -tc- plusieurs valeurs.\n return self.paths and self.wall0 and self.wall1 and self.wall2 and self.wall3 and self.wall4 and self.wall5 and self.wall6 and self.wall7 and self.wall8 and self.wall9 and self.objects_to_find and self.start and self.end",
"def load_d(prefix):\n vel_x = np.genfromtxt(file('%s_x.csv' % prefix), delimiter=',')\n vel_y = np.genfromtxt(file('%s_y.csv' % prefix), delimiter=',')\n\n # make a 3d height x width x 2 matrix to hold the vectors\n vel = np.zeros(list(vel_x.shape) + [2])\n vel[:, :, 0] = vel_y # note, this y here is correct--and it's important it be this order\n vel[:, :, 1] = vel_x\n return vel",
"def load_data(self):\n\t\ti = 0\n\n\t\tpaths = glob.glob(self.file_path+'/rollout_*')\n\t\tself.rollouts = []\n\n\n\t\tfor path in paths:\n\t\t\tdata_point = np.load(path,encoding='latin1')\n\t\t\tself.rollouts.append(data_point)\n\n\t\treturn paths",
"def __read_pond_file(self, pondfile):\r\n self.currents = []\r\n with open(pondfile, 'r') as infile:\r\n reader = csv.reader(infile)\r\n start_end = [int(v) for v in next(reader)]\r\n self.start_state = tuple(start_end[:2])\r\n self.end_state = tuple(start_end[2:])\r\n for row in reader:\r\n self.currents.append(row)\r\n self.currents = self.currents[::-1]",
"def loader(self):\n\n with open(self.source, 'rb') as labels_file:\n self.distance_map = pd.read_pickle(labels_file)\n\n return self.distance_map",
"def _load_road(self) -> road_module.Road:\n seed = str(\n self.param.seed\n if self.param.seed != \"__no_value__\"\n else self.param.default_seed\n )\n\n return road_module.load(self.param.road, seed=seed)",
"def Load_File(file_list_pos):\n\t#Check size of input file\n\tfile_info=os.stat(file_list_pos)\n\tif file_info.st_size >= 250000000:\n\t\tprint \"\\n***WARNING: FILE SIZE EXCEEDS 250 MB***\" \n\t\tprint \"CONSIDER USING --rnd_sample TO SPEED UP PROCESSING\"\n\t\n\t#Opens the binned ld file\n\tldFile = open(file_list_pos)\n\t#Loads the distance between the pairs as well as the different linkage statistics into numpy nd arrays\n\tBPDist,r2Pear,D,DPrime,r2GLS=np.loadtxt(ldFile, usecols=(2,3,4,5,6), unpack=True)\n\n\t#Sets the x to the distance between pairs and the response data to the r^2 value (can also change data to D, DPrime, and r2GLS)\n\tx=BPDist\n\t\n\t#Choose data type matching specified option\n\tif args.data_type == 'r2Pear':\n\t\tdata=r2Pear\n\telif args.data_type == 'D':\n\t\tdata=D\n\telif args.data_type == 'DPrime':\n\t\tdata=DPrime\n\telif args.data_type == 'r2GLS':\n\t\tdata=r2GLS\n\tAxis_Data=[x,data]\n\treturn Axis_Data",
"def load_rooms(self, filename):\n with open(filename, \"r\") as f:\n roomss = []\n for line in f:\n line = line.strip()\n\n # Add id, name and description to each room object\n if line.isdigit():\n id = line\n line = f.readline()\n line = line.strip()\n name = line\n line = f.readline()\n line = line.strip()\n description = line\n room = Room(id, name, description)\n roomss.append(room)\n\n # Add the connected routes to the room\n elif line.isupper():\n line = line.split()\n direction = line[0]\n room_number = line[1]\n\n # Add multiple routes to a direction if needed\n if not direction in roomss[-1].connection:\n roomss[-1].connection[direction] = [room_number]\n else:\n roomss[-1].connection[direction].append(room_number)\n return roomss",
"def load(self):\n self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word = pickle.load(open(self.save_file, 'rb'))",
"def load(self):\n for i in range(8):\n image = \"pacman_\" + str(i) + \".png\"\n\n if i in range(2):\n self.animated_right.append(pygame.image.load(os.path.join(self.dir, image)))\n if i in range(2, 4):\n self.animated_up.append(pygame.image.load(os.path.join(self.dir, image)))\n if i in range(4, 6):\n self.animated_left.append(pygame.image.load(os.path.join(self.dir, image)))\n if i in range(6, 8):\n self.animated_down.append(pygame.image.load(os.path.join(self.dir, image)))\n\n # Initialize position on the screen\n self.rect = self.animated_right[0].get_rect()\n self.rect.x, self.rect.y = self.x, self.y\n self.image = self.animated_right[0]",
"def load(self, path: str):\n pass",
"def load(self):\n super(YacoFile, self).load(self._filename)",
"def read_relations(db, openfile):\n pass",
"def loadWorld(self, filename):\n worldFile = open(filename, 'r');\n\n for line in worldFile:\n info = line.split(' ');\n if(info[0]==\"WIDTH\"):\n self.mWidth = int(info[1]);\n elif info[0] == \"HEIGHT\":\n self.mHeight = int(info[1]);\n elif info[0] == \"SPACE\":\n if info[1] == \"rect\":\n self.mSpaces += [Rect( int(info[2]), int(info[3]),int(info[4]),int(info[5]) )];\n elif info[1] == \"circle\":\n self.mSpaces += [Rect( int(info[2]), int(info[3]),int(info[4]) )];\n elif info[0] == \"OBSTACLE\":\n if info[1] == \"rect\":\n self.mObstMgr.addObstacle( Rect( int(info[2]), int(info[3]),int(info[4]),int(info[5]) ));\n pass\n elif info[1] == \"circle\":\n self.mObstMgr.addObstacle( Circle( int(info[2]), int(info[3]),int(info[4]) ));\n pass\n pass",
"def load(self):\n file = os.path.join(\"./data\", self.name + \".map\")\n with open(file) as fp:\n lines = fp.readlines()\n self.row, self.col = map(int, lines[0].split())\n self.default = int(lines[1]) # デフォルト値\n for line in lines[2:]: # マップデータを読み込む\n line = line.rstrip() # 改行除去\n self.map.append([int(x) for x in list(line)])"
]
| [
"0.73147833",
"0.5536552",
"0.55358",
"0.53545064",
"0.5216563",
"0.51911306",
"0.51911306",
"0.5178756",
"0.51715374",
"0.51498336",
"0.5132414",
"0.51317346",
"0.5125808",
"0.5114883",
"0.51074964",
"0.5095933",
"0.5092058",
"0.50822014",
"0.5078533",
"0.50670224",
"0.50472164",
"0.5045303",
"0.5005688",
"0.5002841",
"0.50013965",
"0.50004065",
"0.49972215",
"0.49960446",
"0.49920675",
"0.49860582"
]
| 0.67186105 | 1 |
Simulates the game compute_func takes a board and a list of legal moves s and returns a move | def start(compute_func):
board = gen_board()
turn = 1
while True:
print("\nTurn:", turn)
print_board(board)
lm = legal_moves(board)
print("Legal: ", *[DIRECTIONS[i] for i in lm])
chosen_move = compute_func(board,lm)
print("Chosen: " + DIRECTIONS[chosen_move])
board, res = perform_turn(board, chosen_move)
turn += 1
if res==TURN_ILLEGAL:
print("ILLEGAL MOVE! Press enter.")
turn -= 1
input()
elif res==TURN_GAME_OVER:
print("\nTurn:", turn)
print_board(board)
print("Whoops. Dead :)")
break | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def computer_move(board,move,player):\r\n com_execution(board, move, player)",
"def mm_move(board, player):\n moves = []\n results = []\n best_score = None\n best_move = None\n \n opponet = op_player(player)\n \n if board.check_win() != None:\n \n if board.check_win() == provided.PLAYERX:\n return SCORES[provided.PLAYERX] , (-1, -1)\n \n if board.check_win() == provided.PLAYERO:\n return SCORES[provided.PLAYERO] , (-1, -1)\n \n if board.check_win() == provided.DRAW:\n return SCORES[provided.DRAW] , (-1, -1)\n \n free_steps = board.get_empty_squares()\n \n for step in free_steps:\n clone = board.clone() \n clone.move(step[0],step[1],player)\n temp = mm_move(clone,opponet)\n \n if temp != None:\n if temp[0] == SCORES[player]: \n return temp[0] , step \n else: \n results.append(temp)\n moves.append(step)\n \n for result, move in zip(results, moves): \n if result[0] * SCORES[player] > best_score:\n best_score = result[0]\n best_move = move\n return best_score, best_move",
"def legal_moves(self, player, board):\r\n #go through the whole board and check whether the piece is on the board or not\r\n #num/row size - num%col == num2/row size - num@%col\r\n #num/row size + num%col\r\n moves = list()\r\n opp = self.opponent(player)\r\n #print(board)\r\n for i in self.squares():\r\n if board[i] == core.EMPTY:\r\n for d in core.DIRECTIONS:\r\n endPt = self.find_bracket(i, player, board, d)\r\n if endPt!= None:\r\n moves.append(i)\r\n break\r\n\r\n return moves",
"def execution(move,legal,board,player):\r\n \r\n if player == 1:\r\n if move in legal:\r\n for i in range(0,len(board.white)):\r\n if board.white[i] == move[0]:\r\n board.white[i] = move[1]\r\n if len(move) == 3:\r\n board.black.remove(move[-1])\r\n\r\n else:\r\n print(\"Illegal move, please input a legal move\")\r\n human_move(board,player)\r\n else:\r\n if move in legal:\r\n if len(move) == 3:\r\n board.white.remove(move[-1])\r\n for i in range(0,len(board.black)):\r\n if board.black[i] == move[0]:\r\n board.black[i] = move[1]\r\n else:\r\n print(\"Illegal move, please input a legal move\")\r\n human_move(board,player)",
"def get_possible_moves(board):\n\n possible_moves = []\n\n ret_tuple_left = move_left(board)\n ret_tuple_right = move_right(board)\n ret_tuple_up = move_up(board)\n ret_tuple_down = move_down(board)\n\n if ret_tuple_left[0]:\n possible_moves.append(ret_tuple_left[1])\n if ret_tuple_right[0]:\n possible_moves.append(ret_tuple_right[1])\n if ret_tuple_up[0]:\n possible_moves.append(ret_tuple_up[1])\n if ret_tuple_down[0]:\n possible_moves.append(ret_tuple_down[1])\n\n return possible_moves",
"def result(board, action):\n # Ensure manipulations of hypothetical board don't alter current board values\n possible_board = copy.deepcopy(board)\n current_player = player(possible_board)\n\n # Generate boards for all possible moves by current player\n if action in actions(possible_board):\n possible_board[action[0]][action[1]] = current_player\n return possible_board\n\n raise Exception(\"Invalid move.\")",
"def execute_move(move, board):\n\n if move == UP:\n return game.merge_up(board)\n elif move == DOWN:\n return game.merge_down(board)\n elif move == LEFT:\n return game.merge_left(board)\n elif move == RIGHT:\n return game.merge_right(board)\n else:\n sys.exit(\"No valid move\")",
"def execute_move(move, board):\n\n if move == UP:\n return game.merge_up(board)\n elif move == DOWN:\n return game.merge_down(board)\n elif move == LEFT:\n return game.merge_left(board)\n elif move == RIGHT:\n return game.merge_right(board)\n else:\n sys.exit(\"No valid move\")",
"def execute_move(move, board):\n\n if move == UP:\n return game.merge_up(board)\n elif move == DOWN:\n return game.merge_down(board)\n elif move == LEFT:\n return game.merge_left(board)\n elif move == RIGHT:\n return game.merge_right(board)\n else:\n sys.exit(\"No valid move\")",
"def mc_move(board, player, trials):\n grid_of_scores = [[0 for dummy_i in range(board.get_dim())] for dummy_j in range(board.get_dim())]\n test_board = board.clone()\n for dummy_i in range(trials):\n mc_trial(test_board, player)\n mc_update_scores(grid_of_scores, test_board, player)\n test_board = board.clone()\n best_move = get_best_move(board, grid_of_scores) \n return best_move",
"def move(self, board, tracks_left, state): # pylint: disable=W0613\n\n #After, look at all the possible moves, and compute their impact on the total by\n # aggregating eval_move (which doesn't modify state). Return the best one.\n possible_moves = list(state.fast_get_moves(self.hub, tracks_left))\n values = []\n for move in possible_moves:\n value = []\n tempdistances = copy.deepcopy(state.distances_left)\n tempdistances = state.eval_move(self.num, move, tempdistances)\n for i in range(0, len(self.cities)):\n total = tempdistances[self.num][self.cities[i]]\n value.append(total)\n values.append(value)\n best_move = 0\n for i in range(0, len(possible_moves)):\n if sum(values[i]) < sum(values[best_move]):\n best_move = i\n return possible_moves[best_move]",
"async def move(self, board, valid_actions):\n self._move = None\n output_move_row = Value('d', -1)\n output_move_column = Value('d', 0)\n try:\n # await self.search(board, valid_actions) \n p = Process(\n target=self.search, \n args=(\n self._color, board, valid_actions, \n output_move_row, output_move_column))\n p.start()\n while p.is_alive():\n await asyncio.sleep(0.1)\n self._move = np.array([output_move_row.value,output_move_column.value],dtype=np.int32)\n except asyncio.CancelledError as e:\n print('The previous player is interrupted by a user or a timer.')\n except Exception as e:\n print(type(e).__name__)\n print('move() Traceback (most recent call last): ')\n traceback.print_tb(e.__traceback__)\n finally:\n p.kill()\n self._move = np.array(\n [output_move_row.value, output_move_column.value],\n dtype=np.int32)\n return self.best_move",
"def evaluate(self, board):",
"def action(self, board, vals):\n\t\t# Get all the moves\n\t\tall_moves = get_all_moves(board, vals, self.which)\n\n\t\tif (len(all_moves) == 0):\n\t\t\treturn []\n\n\t\t# Get all the boards for those moves\n\t\tboard = np.copy(board)\n\t\tboards = np.zeros([len(all_moves), 28])\n\t\ti = 0\n\t\tfor move in all_moves:\n\t\t\tboards[i] = update_by_moves(board, move)\n\n\t\t# Get the predicted probabilities for each board\n\t\tprobs = self.model(boards)\n\n\t\t# Take the move with argmax board probs\n\t\tbest_move = all_moves[np.argmax(probs)]\n\n\t\treturn best_move",
"def _policy(self, gameboard):\r\n valid_moves = self._all_valid_moves(gameboard)\r\n _reflex_ = Reflex(self.color)\r\n best_move = None\r\n moves = []\r\n \r\n # step 1, check going to win\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_.check_going_to_win(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n\r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n \r\n # step 2, check opponent 4\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_._alter_check_opponent_4(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n \r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n\r\n # step 3, check opponent 3\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_.check_opponent_3(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n \r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n\r\n # step 4, winning blocks\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n temp = _reflex_.check_winning_blocks(position, gameboard)\r\n if len(temp) != 0:\r\n moves += temp\r\n\r\n if len(moves) > 0:\r\n moves = list(set(moves))\r\n moves.sort(key=lambda x: x[2], reverse=True)\r\n max_count = moves[0][2]\r\n new_moves = []\r\n\r\n for t in moves:\r\n if t[2] < max_count:\r\n break\r\n else:\r\n new_moves.append((t[0], t[1]))\r\n\r\n moves = new_moves.copy()\r\n\r\n if len(moves) > 0:\r\n idx = np.random.choice(len(moves), 1)[0]\r\n best_move = moves[idx]\r\n return best_move\r\n\r\n # step 5, random pick one\r\n idx = np.random.choice(len(valid_moves), 1)[0]\r\n return valid_moves[idx]",
"def execute_move(board, move):\n\n player, spike_index, fields_to_move = Judge._validate_move(move)\n\n board.set_player_perspective(player)\n \n if spike_index == OUT_OF_BAR_SPECIAL_MOVE:\n dest_spike_index = fields_to_move - 1\n board.remove_checker_from_bar()\n else:\n dest_spike_index = spike_index + fields_to_move\n board.pop_player_checker(spike_index)\n\n if dest_spike_index >= len(INITIAL_SPIKES_STATE):\n return board\n\n board.push_player_checker(dest_spike_index)\n\n return board",
"def get_move(self, board, possible_moves, player_1_or_2):\n\n # Given a Tic-Tac-Toe 3x3 board position where 1 => current player's square,\n # -1 => opponent's square, 0 => blank square,\n # this will return the current player's best move [as the x and y indexes into \n # the board array.]\n # The second input parameter, player_1_or_2, is 1 or -1 to indicate which player's\n # move it is. \n \n print('RL ~ Current player 1 or 2 (= -1):', player_1_or_2)\n \n print('RL ~ Current board: ')\n print(board)\n \n print('RL ~ possible_moves:', possible_moves)\n\n next_move = () \n\n # This will be the best move i.e. the move with the current\n # value of highest winning probability except when it is making exploratory\n # (as opposed to greedy) moves.\n\n next_move = self.board_position_states.get_next_move(board, possible_moves, self.current_player)\n\n next_move_location_tuple = possible_moves[next_move]\n board[next_move_location_tuple] = self.current_player\n\n self.list_board_positions_moved_to.append(board.copy()) # This board that we are\n # appending here could be changed by the next line of code, for example.\n # Hence we need to make a copy\n\n board[next_move_location_tuple] = 0 # undo the move in case it affects the calling method.\n\n return next_move",
"def moves(self, board_state):\n # pos_moves = generate_moves(board_state) # Naive moves function here\n blacks = board_state.search_board('B')\n # Generate the possible moves required to kill the first black piece\n # on the board\n pos_moves = sorted_generate_moves_piece(board_state, blacks[0])\n return pos_moves",
"def result(board, action):\n\n # Create completely new board\n temp_board = copy.deepcopy(board)\n # Location of move to be made\n row_index = action[0]\n col_index = action[1]\n\n # Check for valid action\n if not 0 <= row_index <= 2 or not 0 <= col_index <= 2:\n raise Exception(\"Invalid Action\")\n\n # Make move and update board\n if board[row_index][col_index] is None:\n temp_board[row_index][col_index] = player(board)\n else:\n raise Exception(\"Invalid Action\")\n\n return temp_board",
"def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves",
"def get_next_moves(board, player):\r\n\r\n if player == 'hare':\r\n moves = []\r\n next_moves = []\r\n\r\n (row_from, col_from) = get_hare_positions(board)\r\n moves = possible_moves_list(row_from, col_from)\r\n\r\n for move in moves:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_from, col_from, row_to, col_to):\r\n \"\"\" if move is allowed then add to list of next moves\"\"\"\r\n next_moves.append(move)\r\n\r\n return next_moves\r\n\r\n else:\r\n \"\"\" for individual hounds\r\n get next moves\"\"\"\r\n moves = []\r\n next_moves_hound1 = []\r\n next_moves_hound2 = []\r\n next_moves_hound3 = []\r\n\r\n (row_hound_1, col_hound_1), (row_hound_2, col_hound_2), (row_hound_3, col_hound_3) = get_hound_positions(board)\r\n moves_hound1 = possible_moves_list(row_hound_1, col_hound_1)\r\n moves_hound2 = possible_moves_list(row_hound_2, col_hound_2)\r\n moves_hound3 = possible_moves_list(row_hound_3, col_hound_3)\r\n\r\n for move in moves_hound1:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_1, col_hound_1, row_to, col_to):\r\n next_moves_hound1.append(move)\r\n\r\n for move in moves_hound2:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_2, col_hound_2, row_to, col_to):\r\n next_moves_hound2.append(move)\r\n\r\n for move in moves_hound3:\r\n row_to = move[0]\r\n col_to = move[1]\r\n\r\n if is_legal_move(player, row_hound_3, col_hound_3, row_to, col_to):\r\n next_moves_hound3.append(move)\r\n\r\n return (next_moves_hound1, next_moves_hound2, next_moves_hound3)",
"def test_perform_move(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertFalse(p.perform_move(\"taco\"))\n self.assertTrue(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,0],[7,8,6]])\n self.assertFalse(p.perform_move('right'))\n p = hw.create_tile_puzzle(2, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('up'))\n self.assertFalse(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,0,4],[5,6,3,7]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertFalse(p.perform_move('down'))\n self.assertFalse(p.perform_move('left'))\n self.assertEqual(p.get_board(), [[0,1,2,3]])",
"def mm_move(board, player): \r\n if board.check_win() != None:\r\n score = SCORES[board.check_win()]\r\n return score, (-1,-1)\r\n else:\r\n best_score = -2\r\n score_list = []\r\n move_list = []\r\n for each_cell in board.get_empty_squares():\r\n passboard = board.clone()\r\n passboard.move(each_cell[0], each_cell[1], player) \r\n other_player = provided.switch_player(player)\r\n nextmove = mm_move(passboard, other_player)\r\n score_list.append(nextmove[0])\r\n move_list.append(nextmove[1])\r\n if nextmove[0] == SCORES[player]:\r\n return nextmove[0], each_cell\r\n #print score_list\r\n #print move_list\r\n #print \"\"\r\n if player == provided.PLAYERX:\r\n best_score = max(score_list)\r\n else:\r\n best_score = min (score_list)\r\n best_move = move_list[score_list.index(best_score)]\r\n return best_score, best_move",
"def evaluate(self, board):\r\n\r\n self_moves = self.find_possible_moves(board, self.my_color)\r\n opponent_moves = self.find_possible_moves(board, self.opponent_color)\r\n\r\n mobility = 0 # Mobility captures Self's profit in amount of available moves\r\n disk_parity = 0 # Disk parity captures Self's profit in raw disk amount\r\n corners = 0 # Corners captures Self's profit in occupied corners\r\n corner_proximity = 0 # Corner proximity captures the risk of giving away a free corner\r\n stability = 0 # Stability captures Self's profit in unflippable disks\r\n\r\n # Calculating mobility heuristic\r\n self_immediate_mobility = len(self_moves)\r\n opponent_immediate_mobility = len(opponent_moves)\r\n\r\n if self_immediate_mobility + opponent_immediate_mobility != 0:\r\n mobility = 100 * (self_immediate_mobility - opponent_immediate_mobility) / (self_immediate_mobility + opponent_immediate_mobility)\r\n\r\n # Calculate disk parity heuristic\r\n self_disks = self.get_disk_count(self.my_color, board)\r\n opponent_disks = self.get_disk_count(self.opponent_color, board)\r\n\r\n disk_parity = 100 * (self_disks - opponent_disks) / (self_disks + opponent_disks)\r\n\r\n # Calculating corner heuristic\r\n corners_list = [(0,0), (0,7), (7,0), (7,7)]\r\n self_corners = 0\r\n opponent_corners = 0\r\n\r\n for corner in corners_list:\r\n if board[corner[0]][corner[1]] == self.my_color:\r\n self_corners += 1\r\n if board[corner[0]][corner[1]] == self.opponent_color:\r\n opponent_corners += 1\r\n\r\n if self_corners + opponent_corners != 0:\r\n corners = 100 * (self_corners - opponent_corners) / (self_corners + opponent_corners)\r\n\r\n # Calculating corner proximity heuristic\r\n corners_proximity_list = [(0, 1), (1, 0), (1, 1), (0, 6), (1, 6), (1, 7), (6, 0), (6, 1), (7, 1), (6, 6), (7, 6), (6, 7)]\r\n self_corner_proximity = 0\r\n opponent_corner_proximity = 0\r\n\r\n for cell in corners_proximity_list:\r\n if board[cell[0]][cell[1]] == self.my_color:\r\n self_corner_proximity += 1\r\n if board[cell[0]][cell[1]] == self.opponent_color:\r\n opponent_corner_proximity += 1\r\n\r\n if self_corner_proximity + opponent_corner_proximity != 0:\r\n corner_proximity = 100 * (self_corner_proximity - opponent_corner_proximity) / (self_corner_proximity + opponent_corner_proximity)\r\n\r\n # Calculating stability heuristic\r\n self_stability = self.get_stable_disks(board, self.my_color, (0, 0)) + \\\r\n self.get_stable_disks(board, self.my_color, (0, 7)) + \\\r\n self.get_stable_disks(board, self.my_color, (7, 0)) + \\\r\n self.get_stable_disks(board, self.my_color, (7, 7))\r\n\r\n opponent_stability = self.get_stable_disks(board, self.opponent_color, (0, 0)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (0, 7)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (7, 0)) + \\\r\n self.get_stable_disks(board, self.opponent_color, (7, 7))\r\n\r\n if self_stability + opponent_stability != 0:\r\n stability = 100 * (self_stability - opponent_stability) / (self_stability + opponent_stability)\r\n\r\n # Calculating the final value\r\n disk_total = self.get_disk_count(self.my_color, board) + self.get_disk_count(self.opponent_color, board)\r\n\r\n # In early-game, focus on maximal mobility and stability. 
Avoid amassing too many disks.\r\n if disk_total < 15:\r\n heuristic_value = 30 * corners - \\\r\n 15 * corner_proximity + \\\r\n 30 * mobility + \\\r\n 30 * stability\r\n\r\n # In mid-game, focus on capturing corners and further building stability\r\n elif disk_total < 45:\r\n heuristic_value = 30 * corners - \\\r\n 15 * corner_proximity + \\\r\n 20 * mobility + \\\r\n 35 * stability\r\n\r\n # In late-game, focus on getting as many discs as possible\r\n else:\r\n heuristic_value = 30 * corners + \\\r\n 15 * mobility + \\\r\n 30 * stability + \\\r\n 35 * disk_parity\r\n\r\n return heuristic_value",
"def move(self, board):\n\n # We record all game positions to feed them into the NN for training with the corresponding updated Q\n # values.\n self.board_position_log.append(board.getState().copy())\n\n nn_input = self.board_state_to_nn_input(board.getState())\n probs, _ = self.get_valid_probs([nn_input], self.q_net, [board])\n probs = probs[0]\n # print(probs)\n # print(type(probs))\n # print(probs.shape)\n # input()\n # print(probs)\n # Most of the time our next move is the one with the highest probability after removing all illegal ones.\n # Occasionally, however we randomly chose a random move to encourage exploration\n if (self.training is True) and \\\n ((self.game_counter < self.pre_training_games) or (np.random.rand(1) < self.random_move_prob)):\n available = []\n for index in range(6):\n if probs[index] != -1.0:\n available.append(index)\n randomOne = random.randint(0,len(available)-1)\n move = available[randomOne]\n else:\n move = np.argmax(probs)\n # We record the action we selected as well as the Q values of the current state for later use when\n # adjusting NN weights.\n self.action_log.append(move)\n\n # We execute the move and return the result\n board.makeMove(move)\n return board.getState(), board.isOver()",
"def mc_move(board, player, trials):\n scored_board = [[0 for dummy_col in range(board.get_dim())] for dummy_row in range(board.get_dim())]\n\n for dummy_num in range(trials):\n clone_board = board.clone()\n mc_trial(clone_board, player)\n mc_update_scores(scored_board, clone_board, player)\n\n return get_best_move(board, scored_board)",
"def solve(self, board: List[List[str]]) -> None:",
"def mc_move(board, player, trials):\n scores = [[0 for dummy_i in range(board.get_dim())] \n for dummy_i in range(board.get_dim())]\n \n for dummy_i in range(trials):\n clone_board = board.clone()\n mc_trial(clone_board, player)\n mc_update_scores(scores, clone_board, player)\n \n return get_best_move(board, scores)",
"def mc_move(board, player, trials):\n dim = board.get_dim()\n \n temp_list = [0 for val in range(dim)] \n scores = [list(temp_list) for val in range(dim)]\n \n for trial in range(trials):\n new_board = board.clone()\n mc_trial(new_board, player)\n mc_update_scores(scores, new_board, player)\n\n return get_best_move(board, scores)",
"def get_available_moves(self, board):\n pass"
]
| [
"0.6738075",
"0.6715203",
"0.6689361",
"0.66600084",
"0.6643982",
"0.65430266",
"0.64409846",
"0.64409846",
"0.64409846",
"0.64378023",
"0.643162",
"0.6431567",
"0.63864464",
"0.63557446",
"0.6352269",
"0.6314513",
"0.6314211",
"0.6296794",
"0.629567",
"0.62893003",
"0.6285323",
"0.6277404",
"0.62588364",
"0.6246642",
"0.62403387",
"0.62222195",
"0.6221998",
"0.6216694",
"0.62099373",
"0.6203407"
]
| 0.6749187 | 0 |
Set an instance attribute indicating the device's pairing status | def _get_pairing_status(self):
try:
self.is_paired = is_paired(ignore_errors=False)
except BackendDown:
LOG.error('Cannot complete device updates due to backend issues.')
self.backend_down = True
if self.is_paired:
LOG.info('Device is paired') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_device_attributes_on_backend(self):\n if self.is_paired:\n LOG.info('Sending updated device attributes to the backend...')\n try:\n api = DeviceApi()\n api.update_version()\n except Exception:\n self._notify_backend_down()",
"def _update(self, device=None):\n self._attr_available = True\n self.schedule_update_ha_state(True)",
"def activate_pair(device_pair):\n command = 'pair_activate \"%s\"' % (device_pair.identifier,)\n _run_command(command)",
"def set_vpn_state(self, status):\n if hasattr(self, status):\n self.change_to(getattr(self, status))",
"def test_device_state_attributes(self):\n self.port.data = {\"v_rms\": 1.25, \"i_rms\": 2.75}\n assert {\"volts\": 1.2, \"amps\": 2.8} == self.switch.device_state_attributes",
"def update(self):\n self._state = status\n attributes['host'] = host\n attributes['port'] = port\n self.custom_attributes = attributes",
"def update(self):\n bondState = self._bond.getDeviceState(self._deviceId)\n if 'power' in bondState:\n self._state = True if bondState['power'] == 1 else False\n if self._state and bondState['speed'] in self._speed_name_by_value:\n self._attributes['current_speed'] = self._speed_name_by_value[bondState['speed']]\n else:\n self._attributes['current_speed'] = SPEED_OFF\n\n if 'direction' in bondState:\n if bondState['direction'] == Directions.REVERSE:\n self._attributes['current_direction'] = \"reverse\"\n else:\n self._attributes['current_direction'] = \"forward\"",
"def _async_update_attrs(self) -> None:\n self._attr_is_on = self._device.light_on\n if self._device.light_brightness is not None:\n self._attr_brightness = int(min(255, self._device.light_brightness * 16))",
"def interpret_attributes(self, msg_data):\n struct = OrderedDict([('sequence', 8),\n ('short_addr', 16),\n ('endpoint', 8),\n ('cluster_id', 16),\n ('attribute_id', 16),\n ('attribute_status', 8),\n ('attribute_type', 8),\n ('attribute_size', 'len16'),\n ('attribute_data', 'raw'),\n ('end', 'rawend')])\n msg = self.decode_struct(struct, msg_data)\n device_addr = msg['short_addr']\n endpoint = msg['endpoint']\n cluster_id = msg['cluster_id']\n attribute_id = msg['attribute_id']\n attribute_size = msg['attribute_size']\n attribute_data = msg['attribute_data']\n self.set_device_property(device_addr, endpoint, ZGT_LAST_SEEN, strftime('%Y-%m-%d %H:%M:%S'))\n\n if msg['sequence'] == b'00':\n ZGT_LOG.debug(' - Sensor type announce (Start after pairing 1)')\n elif msg['sequence'] == b'01':\n ZGT_LOG.debug(' - Something announce (Start after pairing 2)')\n\n # Device type\n if cluster_id == b'0000':\n if attribute_id == b'0005':\n self.set_device_property(device_addr, endpoint, 'type', attribute_data.decode())\n ZGT_LOG.info(' * type : {}'.format(attribute_data))\n ## proprietary Xiaomi info including battery\n if attribute_id == b'ff01' and attribute_data != b'':\n struct = OrderedDict([('start', 16), ('battery', 16), ('end', 'rawend')])\n raw_info = unhexlify(self.decode_struct(struct, attribute_data)['battery'])\n battery_info = int(hexlify(raw_info[::-1]), 16)/1000\n self.set_device_property(device_addr, endpoint, 'battery', battery_info)\n ZGT_LOG.info(' * Battery info')\n ZGT_LOG.info(' * Value : {} V'.format(battery_info))\n # Button status\n elif cluster_id == b'0006':\n ZGT_LOG.info(' * General: On/Off')\n if attribute_id == b'0000':\n if hexlify(attribute_data) == b'00':\n self.set_device_property(device_addr, endpoint, ZGT_STATE, ZGT_STATE_ON)\n ZGT_LOG.info(' * Closed/Taken off/Press')\n else:\n self.set_device_property(device_addr, endpoint, ZGT_STATE, ZGT_STATE_OFF)\n ZGT_LOG.info(' * Open/Release button')\n elif attribute_id == b'8000':\n clicks = int(hexlify(attribute_data), 16)\n self.set_device_property(device_addr, endpoint, ZGT_STATE, ZGT_STATE_MULTI.format(clicks))\n ZGT_LOG.info(' * Multi click')\n ZGT_LOG.info(' * Pressed: {} times'.format(clicks))\n # Movement\n elif cluster_id == b'000c': # Unknown cluster id\n if attribute_id == b'ff05':\n if hexlify(attribute_data) == b'01f4':\n ZGT_LOG.info(' * Rotation horizontal')\n elif attribute_id == b'0055':\n ZGT_LOG.info(' * Rotated: %s°' % (unpack('!f', attribute_data)[0]))\n elif cluster_id == b'0012': # Unknown cluster id\n if attribute_id == b'0055':\n if hexlify(attribute_data) == b'0000':\n ZGT_LOG.info(' * Shaking')\n elif hexlify(attribute_data) in [b'0100', b'0101', b'0102', b'0103', b'0104', b'0105']:\n ZGT_LOG.info(' * Sliding')\n else:\n ZGT_LOG.info(' * Rotating vertical')\n if hexlify(attribute_data) in [b'0050', b'0042',\n b'0044', b'0060',\n b'0045', b'0068',\n b'0041', b'0048',\n\n b'0063', b'005c',\n b'0059', b'004b',\n b'005d', b'006b',\n b'005a', b'0053',\n\n b'004a', b'0051',\n b'0054', b'0062',\n b'0069', b'004d',\n b'006c', b'0065',]:\n ZGT_LOG.info(' * Rotated: 90°')\n if hexlify(attribute_data) in [b'0080', b'0083',\n b'0081', b'0084',\n b'0085', b'0082',]:\n ZGT_LOG.info(' * Rotated: 180°')\n # Illuminance Measurement\n elif cluster_id == b'0400':\n # MeasuredValue\n if attribute_id == b'0000':\n illuminance = int.from_bytes(attribute_data, 'big', signed=True)\n self.set_device_property(device_addr, endpoint, ZGT_ILLUMINANCE_MEASUREMENT, illuminance)\n # MinMeasuredValue\n elif attribute_id == 
b'0001':\n if attribute_data == b'FFFF':\n ZGT_LOG.info('Minimum illuminance is unused.')\n else:\n illuminance = int.from_bytes(attribute_data, 'big', signed=True)\n ZGT_LOG.info('Minimum illuminance is ', illuminance)\n # MaxMeasuredValue\n elif attribute_id == b'0002':\n if attribute_data == b'FFFF':\n ZGT_LOG.info('Maximum illuminance is unused.')\n else:\n illuminance = int.from_bytes(attribute_data, 'big', signed=True)\n ZGT_LOG.info('Maximum illuminance is ', illuminance)\n # Tolerance\n elif attribute_id == b'0003':\n illuminance = int.from_bytes(attribute_data, 'big', signed=True)\n ZGT_LOG.info('Illuminance tolerance is ', illuminance)\n # Sensor type\n elif attribute_id == b'0004':\n sensor_type = 'Unknown'\n if attribute_data == b'00':\n sensor_type = 'Photodiode'\n elif attribute_data == b'01':\n sensor_type = 'CMOS'\n elif b'02' <= attribute_data <= b'3F':\n sensor_type = 'Reserved'\n elif b'40' <= attribute_data <= b'FE':\n sensor_type = 'Reserved for manufacturer'\n ZGT_LOG.info('Sensor type is: ', sensor_type)\n # Temperature\n elif cluster_id == b'0402':\n temperature = int.from_bytes(attribute_data, 'big', signed=True) / 100\n #temperature = int(hexlify(attribute_data), 16) / 100\n self.set_device_property(device_addr, endpoint, ZGT_TEMPERATURE, temperature)\n ZGT_LOG.info(' * Measurement: Temperature'),\n ZGT_LOG.info(' * Value: {} °C'.format(temperature))\n # Atmospheric Pressure\n elif cluster_id == b'0403':\n ZGT_LOG.info(' * Atmospheric pressure')\n pressure = int(hexlify(attribute_data), 16)\n if attribute_id == b'0000':\n self.set_device_property(device_addr, endpoint, ZGT_PRESSURE, pressure)\n ZGT_LOG.info(' * Value: {} mb'.format(pressure))\n elif attribute_id == b'0010':\n self.set_device_property(device_addr, endpoint, ZGT_DETAILED_PRESSURE, pressure/10)\n ZGT_LOG.info(' * Value: {} mb'.format(pressure/10))\n elif attribute_id == b'0014':\n ZGT_LOG.info(' * Value unknown')\n # Humidity\n elif cluster_id == b'0405':\n humidity = int(hexlify(attribute_data), 16) / 100\n self.set_device_property(device_addr, endpoint, ZGT_HUMIDITY, humidity)\n ZGT_LOG.info(' * Measurement: Humidity')\n ZGT_LOG.info(' * Value: {} %'.format(humidity))\n # Presence Detection\n elif cluster_id == b'0406':\n # Only sent when movement is detected\n if hexlify(attribute_data) == b'01':\n self.set_device_property(device_addr, endpoint, ZGT_EVENT, ZGT_EVENT_PRESENCE)\n ZGT_LOG.debug(' * Presence detection')\n\n ZGT_LOG.info(' FROM ADDRESS : {}'.format(msg['short_addr']))\n ZGT_LOG.debug(' - Source EndPoint : {}'.format(msg['endpoint']))\n ZGT_LOG.debug(' - Cluster ID : {}'.format(msg['cluster_id']))\n ZGT_LOG.debug(' - Attribute ID : {}'.format(msg['attribute_id']))\n ZGT_LOG.debug(' - Attribute type : {}'.format(msg['attribute_type']))\n ZGT_LOG.debug(' - Attribute size : {}'.format(msg['attribute_size']))\n ZGT_LOG.debug(' - Attribute data : {}'.format(hexlify(msg['attribute_data'])))",
"async def set_bit(self, instance, value):\n print(f\"Server: {'set_bit'} Got 'put' request from outside: new value is {value} and type {type(value)}\")\n if self.device is not None:\n self.device.set_bit_server(value)\n else:\n print('device is None')",
"async def _set_watch_pair(self, pair: str):\n\n if pair not in self.market.pairs:\n if pair in self.market.extra_base_pairs:\n self.market.extra_base_pairs.remove(pair)\n\n self.market.pairs.append(pair)\n self.watch_only_pairs.append(pair)\n self.log.info('Setting watch-only pair {}.', pair, stack_depth=1)",
"async def async_update(self) -> None:\n try:\n status = await self._device.command(\"status_102_0\")\n except pyaehw4a1.exceptions.ConnectionError as library_error:\n _LOGGER.warning(\n \"Unexpected error of %s: %s\", self._unique_id, library_error\n )\n self._attr_available = False\n return\n\n self._attr_available = True\n\n self._on = status[\"run_status\"]\n\n if status[\"temperature_Fahrenheit\"] == \"0\":\n self._attr_temperature_unit = UnitOfTemperature.CELSIUS\n else:\n self._attr_temperature_unit = UnitOfTemperature.FAHRENHEIT\n\n self._current_temperature = int(status[\"indoor_temperature_status\"], 2)\n\n if self._on == \"1\":\n device_mode = status[\"mode_status\"]\n self._attr_hvac_mode = AC_TO_HA_STATE[device_mode]\n\n fan_mode = status[\"wind_status\"]\n self._fan_mode = AC_TO_HA_FAN_MODES[fan_mode]\n\n swing_mode = f'{status[\"up_down\"]}{status[\"left_right\"]}'\n self._swing_mode = AC_TO_HA_SWING[swing_mode]\n\n if self._attr_hvac_mode in (HVACMode.COOL, HVACMode.HEAT):\n self._target_temperature = int(status[\"indoor_temperature_setting\"], 2)\n else:\n self._target_temperature = None\n\n if status[\"efficient\"] == \"1\":\n self._preset_mode = PRESET_BOOST\n elif status[\"low_electricity\"] == \"1\":\n self._preset_mode = PRESET_ECO\n elif status[\"sleep_status\"] == \"0000001\":\n self._preset_mode = PRESET_SLEEP\n elif status[\"sleep_status\"] == \"0000010\":\n self._preset_mode = \"sleep_2\"\n elif status[\"sleep_status\"] == \"0000011\":\n self._preset_mode = \"sleep_3\"\n elif status[\"sleep_status\"] == \"0000100\":\n self._preset_mode = \"sleep_4\"\n else:\n self._preset_mode = PRESET_NONE\n else:\n self._attr_hvac_mode = HVACMode.OFF\n self._fan_mode = None\n self._swing_mode = None\n self._target_temperature = None\n self._preset_mode = None",
"def __init__(\n self,\n data: ProtectData,\n device: ProtectAdoptableDeviceModel,\n description: ProtectSwitchEntityDescription,\n ) -> None:\n super().__init__(data, device, description)\n\n if self.device.is_privacy_on:\n extra_state = self.extra_state_attributes or {}\n self._previous_mic_level = extra_state.get(ATTR_PREV_MIC, 100)\n self._previous_record_mode = extra_state.get(\n ATTR_PREV_RECORD, RecordingMode.ALWAYS\n )\n else:\n self._previous_mic_level = self.device.mic_volume\n self._previous_record_mode = self.device.recording_settings.mode",
"def update(self) -> None:\n self._status = self._get_status()\n if self._device_info is None:\n self._device_info = self._get_device_info()",
"def affection_status_switch_on(self):\n self._affection_status_switch = True",
"def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True",
"def __init__(self, device_pair_identifier, device_pair_info):\n\n self.raw_info = device_pair_info\n self.identifier = device_pair_identifier\n self.watch_udid = device_pair_info[\"watch\"][\"udid\"]\n self.phone_udid = device_pair_info[\"phone\"][\"udid\"]",
"def set_pair(self, pair: Pair):\n if pair != self.pair:\n self.pair = pair\n self.load_candles()",
"def __setattr__(self, attr, value):\n\t\treturn setattr(self.__instance, attr, value)",
"def __init__(\n self,\n unique_id: str,\n coordinator: RoborockDataUpdateCoordinator,\n entity_description: RoborockSwitchDescription,\n initial_value: bool,\n ) -> None:\n self.entity_description = entity_description\n super().__init__(unique_id, coordinator.device_info, coordinator.api)\n self._attr_is_on = initial_value",
"def __set__(self, instance, val):\n raise AttributeError(\"Can't set attribute\")",
"def device_state_attributes(self): # Can be remove from 0.99\n return self._attr",
"def check_device_state(self):",
"def update(self) -> None:\n self._state = b'\\x00' == self._device.readCharacteristic(self._handle)\n print(\"state\", self._state)",
"def device_state_attributes(self):\n return self._hass.data[DATA_UPCOMING]",
"def _ensure_device_is_paired(self):\n if not self.is_paired and not self.backend_down:\n LOG.info('Device not paired, invoking the pairing skill')\n payload = dict(utterances=[\"pair my device\"], lang=\"en-us\")\n self.bus.emit(Message(\"recognizer_loop:utterance\", payload))",
"def __setattr__(self, attr, value):\r\n return setattr(self.__instance, attr, value)",
"def __init__(\n self,\n data: ProtectData,\n device: ProtectAdoptableDeviceModel,\n description: ProtectSwitchEntityDescription,\n ) -> None:\n super().__init__(data, device, description)\n self._attr_name = f\"{self.device.display_name} {self.entity_description.name}\"\n self._switch_type = self.entity_description.key",
"def __setattr__(self, attr, value):\n return setattr(self.__instance, attr, value)",
"def __setattr__(self, attr, value):\n return setattr(self.__instance, attr, value)"
]
| [
"0.5751612",
"0.56873393",
"0.56726533",
"0.56146926",
"0.5601535",
"0.5590141",
"0.54604495",
"0.5445805",
"0.54274327",
"0.5426972",
"0.5362671",
"0.5336365",
"0.53292793",
"0.53007454",
"0.5300508",
"0.52943027",
"0.5283178",
"0.5253588",
"0.5251245",
"0.52454257",
"0.5233007",
"0.5219339",
"0.521667",
"0.5215683",
"0.52129066",
"0.52026415",
"0.51942664",
"0.51915133",
"0.5183085",
"0.5183085"
]
| 0.64097786 | 0 |
Force a sync of the local clock with the Network Time Protocol. The NTP sync is only forced on Raspberry Pi based devices. The assumption being that these devices are only running Mycroft services. We don't want to sync the time on a Linux desktop device, for example, because it could have a negative impact on other software running on that device. | def _update_system_clock(self):
if self.platform in RASPBERRY_PI_PLATFORMS:
LOG.info('Updating the system clock via NTP...')
if self.is_paired:
# Only display time sync message when paired because the prompt
# to go to home.mycroft.ai will be displayed by the pairing
# skill when pairing
self.enclosure.mouth_text(dialog.get("message_synching.clock"))
self.bus.wait_for_response(
Message('system.ntp.sync'),
'system.ntp.sync.complete',
15
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def updateRTCFromNTP(strip, start = False):\r\n wifi = connectToWifi(strip, start)\r\n try:\r\n ntptime.settime()\r\n except OSError:\r\n for i in range(0, 3):\r\n ledFlash(strip, LED_COLOR_RED, 0.5)\r\n print(\"Can not connect to NTP server\")\r\n machine.reset()\r\n localTime = getLocalTime() \r\n print(\"Local time after synchronization:%s\" %str(localTime))\r\n disconnectFromWifi(wifi)",
"def set_clock():\n import package\n package.install(\"ntpdate\")\n sudo(\"ntpdate 0.fi.pool.ntp.org 1.fi.pool.ntp.org 2.fi.pool.ntp.org\")",
"def _sync_clock(self, ignore_errors=False):\n try:\n utils.sync_clock(ignore_errors=ignore_errors)\n # Sync the system hardware clock from the software clock,\n # as they are independent and the HW clock can still drift\n # with long running ramdisks.\n utils.execute('hwclock', '-v', '--systohc')\n except (processutils.ProcessExecutionError,\n errors.CommandExecutionError) as e:\n msg = 'Failed to sync hardware clock: %s' % e\n LOG.error(msg)\n if CONF.fail_if_clock_not_set or not ignore_errors:\n raise errors.ClockSyncError(msg)",
"async def test_sync_time_local(self):\n xknx = XKNX()\n self.datetime = DateTime(\n xknx,\n \"TestDateTime\",\n group_address=\"1/2/3\",\n broadcast_type=\"TIME\",\n )\n with patch(\"time.localtime\") as mock_time:\n mock_time.return_value = time.struct_time([2017, 1, 7, 9, 13, 14, 6, 0, 0])\n await self.datetime.sync()\n\n telegram = xknx.telegrams.get_nowait()\n assert telegram.destination_address == GroupAddress(\"1/2/3\")\n assert len(telegram.payload.value.value) == 3\n assert telegram.payload.value.value == (0xE9, 0x0D, 0x0E)",
"def ntp_time_updatetime(localport):\r\n\r\n try:\r\n ip = getmyip()\r\n except Exception, e:\r\n raise TimeError, str(e)\r\n\r\n timeservers = [\"time-a.nist.gov\", \"time-b.nist.gov\", \"time-a.timefreq.bldrdoc.gov\", \"time-b.timefreq.bldrdoc.gov\", \"time-c.timefreq.bldrdoc.gov\", \"utcnist.colorado.edu\", \"time.nist.gov\", \"time-nw.nist.gov\", \"nist1.symmetricom.com\", \"nist1-dc.WiTime.net\", \"nist1-ny.WiTime.net\", \"nist1-sj.WiTime.net\", \"nist1.aol-ca.symmetricom.com\", \"nist1.aol-va.symmetricom.com\", \"nist1.columbiacountyga.gov\", \"nist.expertsmi.com\", \"nist.netservicesgroup.com\"]\r\n\r\n listenhandle = recvmess(ip,localport, _time_decode_NTP_packet)\r\n mycontext['ntp_time_got_time'] = False\r\n\r\n # I'm going to get the time from up to 5 sources and then use the median\r\n mycontext['ntp_time_received_times'] = []\r\n\r\n # always close the handle before returning...\r\n try: \r\n # try five random servers times...\r\n for servername in random_sample(timeservers,5):\r\n\r\n # this sends a request, version 3 in \"client mode\"\r\n ntp_request_string = chr(27)+chr(0)*47\r\n try: \r\n sendmess(servername,123, ntp_request_string, ip, localport) # 123 is the NTP port\r\n except Exception:\r\n # most likely a lookup error...\r\n continue\r\n\r\n # wait for 5 seconds for a response before retrying\r\n for junkiterations in range(10):\r\n sleep(.5)\r\n\r\n if mycontext['ntp_time_got_time']:\r\n # If we've had a response, we sleep one second, choose the time,\r\n # and then quit\r\n sleep(1)\r\n\r\n # set the time...\r\n _time_choose_NTP_time_to_settime()\r\n\r\n # clean-up and return\r\n stopcomm(listenhandle)\r\n return\r\n \r\n \r\n finally:\r\n stopcomm(listenhandle)\r\n\r\n # Failure, tried servers without luck...\r\n raise TimeError, \"Time Server update failed. Perhaps retry later...\"",
"def do_sync(self):\n # Synch up the station's clock if it's been more than clock_check\n # seconds since the last check:\n now_ts = time.time()\n if now_ts - self.last_synch_ts >= self.clock_check:\n self.last_synch_ts = now_ts\n try:\n console_time = self.engine.console.getTime()\n if console_time is None: return\n # getTime can take a long time to run, so we use the current\n # system time\n diff = console_time - time.time()\n syslog.syslog(syslog.LOG_INFO, \n \"engine: Clock error is %.2f seconds (positive is fast)\" % diff)\n if abs(diff) > self.max_drift:\n try:\n self.engine.console.setTime()\n except NotImplementedError:\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Station does not support setting the time\")\n except NotImplementedError:\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Station does not support reading the time\")",
"def service_syncTime(self, context, server=None):\n # TODO lock\n # if self.lock.can_acquire\n # default servers in /etc/ntp.conf will be used if not previously configured\n newtime = yield deferToThread(self.doSyncTime, server)\n\n # try/finally to sync secondary cluster item asap,\n # and generate the event later whatever happens.\n try:\n yield self.ha_time_sync()\n finally:\n pass\n # event = AuditEvent.fromNTPSync(context)\n # self.core.audit.emit(event)\n\n returnValue(newtime)",
"def sync_time(self, event=None):\n if self.collect: return\n time_obj= localtime()\n serial_time = strftime(\"t%Y,%m,%d,%H,%M,%S\", time_obj)\n print(serial_time)\n self.system_timestamp = f\"\\nSystem start time is: {serial_time}\"\n print(serial_time.encode(encoding=\"ascii\"))\n self.ser.write(serial_time.encode(encoding=\"ascii\"))",
"def test_execute_clock_sync(self):\n self.assert_enter_command_mode()\n\n self.assert_execute_resource(ProtocolEvent.CLOCK_SYNC)\n\n # get the time from the driver\n check_new_params = self.instrument_agent_client.get_resource([Parameter.CLOCK])\n # convert driver's time from formatted date/time string to seconds integer\n instrument_time = time.mktime(time.strptime(check_new_params.get(Parameter.CLOCK).lower(), \"%Y/%m/%d %H:%M:%S\"))\n\n # need to convert local machine's time to date/time string and back to seconds to 'drop' the DST attribute so test passes\n # get time from local machine\n lt = time.strftime(\"%d %b %Y %H:%M:%S\", time.gmtime(time.mktime(time.localtime())))\n # convert local time from formatted date/time string to seconds integer to drop DST\n local_time = time.mktime(time.strptime(lt, \"%d %b %Y %H:%M:%S\"))\n\n # Now verify that the time matches to within 5 seconds\n self.assertLessEqual(abs(instrument_time - local_time), 5)",
"def set_ntp_time(a_offset = 0):\n import ntptime\n import machine\n import utime\n\n t = ntptime.time() + a_offset\n tm = utime.localtime(t)\n tm = tm[0:3] + (0,) + tm[3:6] + (0,)\n machine.RTC().datetime(tm)",
"def reset_sync_time(self):\n self._sync_timestamp = 0",
"def test_execute_clock_sync_autossample_mode(self):\n self.assert_initialize_driver(DriverProtocolState.AUTOSAMPLE)\n\n # command the instrument to sync clock.\n self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.CLOCK_SYNC)\n\n reply = self.driver_client.cmd_dvr('get_resource', Parameter.CLOCK)\n \n # convert driver's time from formatted date/time string to seconds integer\n instrument_time = time.mktime(time.strptime(reply.get(Parameter.CLOCK).lower(), \"%Y/%m/%d %H:%M:%S\"))\n\n # need to convert local machine's time to date/time string and back to seconds to 'drop' the DST attribute so test passes\n # get time from local machine\n lt = time.strftime(\"%d %b %Y %H:%M:%S\", time.gmtime(time.mktime(time.localtime())))\n # convert local time from formatted date/time string to seconds integer to drop DST\n local_time = time.mktime(time.strptime(lt, \"%d %b %Y %H:%M:%S\"))\n\n # Now verify that the time matches to within 5 seconds\n self.assertLessEqual(abs(instrument_time - local_time), 5)",
"def get_ntp(self):\n return self.mycam.devicemgmt.GetNTP()",
"def SyncTimeWithFactoryServer():\n time_sanitizer = None\n if _HAS_PLUGIN_CONTROLLER:\n time_sanitizer = plugin_controller.GetPluginRPCProxy('time_sanitizer')\n if time_sanitizer is not None:\n time_sanitizer.SyncTimeWithFactoryServer(force=True)\n return True\n return False",
"def desired_ntp(task):\n\n config = replace_ntp(task)\n task.run(task=napalm_configure, configuration=config, replace=True)",
"def test_execute_clock_sync_command_mode(self):\n self.assert_initialize_driver()\n\n # command the instrument to sync clock.\n self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.CLOCK_SYNC)\n\n reply = self.driver_client.cmd_dvr('get_resource', Parameter.CLOCK)\n \n # convert driver's time from formatted date/time string to seconds integer\n instrument_time = time.mktime(time.strptime(reply.get(Parameter.CLOCK).lower(), \"%Y/%m/%d %H:%M:%S\"))\n\n # need to convert local machine's time to date/time string and back to seconds to 'drop' the DST attribute so test passes\n # get time from local machine\n lt = time.strftime(\"%d %b %Y %H:%M:%S\", time.gmtime(time.mktime(time.localtime())))\n # convert local time from formatted date/time string to seconds integer to drop DST\n local_time = time.mktime(time.strptime(lt, \"%d %b %Y %H:%M:%S\"))\n\n # Now verify that the time matches to within 5 seconds\n self.assertLessEqual(abs(instrument_time - local_time), 5)",
"def system_to_ntp_time(date):\n return date + NTP_DELTA",
"def ntp_to_system_time(date):\n return date - NTP_DELTA",
"async def test_sync_time_custom(self):\n xknx = XKNX()\n self.datetime = DateTime(\n xknx,\n \"TestDateTime\",\n group_address=\"1/2/3\",\n group_address_state=\"1/2/4\",\n broadcast_type=\"TIME\",\n localtime=False,\n )\n assert self.datetime.has_group_address(GroupAddress(\"1/2/4\"))\n await self.datetime.sync()\n\n telegram = xknx.telegrams.get_nowait()\n assert telegram.destination_address == GroupAddress(\"1/2/4\")\n assert isinstance(telegram.payload, GroupValueRead)",
"def on_CheckNodeSyncNTP_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError",
"def test_clock_external():\n clock = Clock(time=0.0)\n assert not clock.realtime\n assert clock.time == 0.0\n clock.update()\n assert clock.time == 0.0\n clock.update(time=0.1)\n assert clock.time == 0.1\n clock.update()\n assert clock.time == 0.1\n clock.update(time=0.0)\n assert clock.time == 0.0\n clock.reset()\n assert clock.realtime\n assert clock.time > 0",
"def SyncDate(dut=None):\n\n if not dut:\n dut = device_utils.CreateDUTInterface()\n\n if not dut.link.IsLocal():\n now = datetime.datetime.utcnow()\n # set DUT time\n dut.CheckCall(['date', '-u', '{:%m%d%H%M%Y.%S}'.format(now)], log=True)",
"def init_ntp_pck(num_of_digits_to_fill_up: int = 12) -> NTP:\n ntp = NTP()\n ntp.ref = ntp_time_now()\n ntp.sent = ntp_time_now()\n ntp.orig = ntp_time_now()\n ntp.recv = ntp_time_now()\n raw_ntp = RawNTP(ntp)\n\n f_ref = raw_ntp.reference_timestamp()\n f_trans = raw_ntp.transmit_timestamp()\n f_orig = raw_ntp.origin_timestamp()\n f_recv = raw_ntp.receive_timestamp()\n\n for i in range(num_of_digits_to_fill_up):\n pos = 64 - i\n f_ref = f_ref[:pos - 1] + str(random.randint(0, 1)) + f_ref[pos:]\n f_trans = f_trans[:pos - 1] + str(random.randint(0, 1)) + f_trans[pos:]\n f_orig = f_orig[:pos - 1] + str(random.randint(0, 1)) + f_orig[pos:]\n f_recv = f_recv[:pos - 1] + str(random.randint(0, 1)) + f_recv[pos:]\n\n assert len(f_ref) == 64\n assert len(f_trans) == 64\n assert len(f_orig) == 64\n assert len(f_recv) == 64\n\n raw_ntp.set_reference_timestamp(f_ref)\n raw_ntp.set_transmit_timestamp(f_trans)\n raw_ntp.set_origin_timestamp(f_orig)\n raw_ntp.set_receive_timestamp(f_recv)\n ntp = raw_ntp.ntp()\n return ntp",
"def init_ntp_client_pck(num_of_digits_to_fill_up: int = 12):\n ntp = NTP()\n ntp.sent = ntp_time_now()\n ntp.ref = 0\n ntp.orig = 0\n ntp.recv = 0\n raw_ntp = RawNTP(ntp)\n f_trans = raw_ntp.transmit_timestamp()\n\n for i in range(num_of_digits_to_fill_up):\n pos = 64 - i\n f_trans = f_trans[:pos - 1] + str(random.randint(0, 1)) + f_trans[pos:]\n\n assert len(f_trans) == 64\n\n raw_ntp.set_transmit_timestamp(f_trans)\n ntp = raw_ntp.ntp()\n return ntp",
"def _sync_clock(self, date_time_param, prompts, timeout, delay=1, time_format=\"%d %b %Y %H:%M:%S\"):\n prompt = self._wakeup(timeout=timeout, delay=delay)\n\n # lets clear out any past data so it doesnt confuse the command\n self._linebuf = ''\n self._promptbuf = ''\n\n log.debug(\"Set time format(%s) '%s''\", time_format, date_time_param)\n str_val = get_timestamp_delayed(time_format)\n log.debug(\"Set time value == '%s'\", str_val)\n self._set_params({date_time_param: str_val}, True)\n\n return True",
"def sync_time(self):\n return self.get_sync_time()",
"async def test_sync_datetime(self):\n xknx = XKNX()\n self.datetime = DateTime(\n xknx, \"TestDateTime\", group_address=\"1/2/3\", broadcast_type=\"DATETIME\"\n )\n\n with patch(\"time.localtime\") as mock_time:\n mock_time.return_value = time.struct_time([2017, 1, 7, 9, 13, 14, 6, 0, 0])\n await self.datetime.sync()\n\n assert xknx.telegrams.qsize() == 1\n telegram = xknx.telegrams.get_nowait()\n assert telegram.destination_address == GroupAddress(\"1/2/3\")\n assert len(telegram.payload.value.value) == 8\n assert telegram.payload.value.value == (\n 0x75,\n 0x01,\n 0x07,\n 0xE9,\n 0x0D,\n 0x0E,\n 0x20,\n 0x80,\n )",
"def clock(strip):\r\n times = determineTimes()\r\n fadeInterval = (FADE_TIME * 60) /STEPS\r\n while True:\r\n hourMin = getLocalTime()[3:5]\r\n if hourMin == times[0]:\r\n lightsOn(strip, fadeInterval)\r\n if hourMin == times[1]: \r\n lightsOff(strip, fadeInterval)\r\n if hourMin == (0, 0) or hourMin == (6, 0) or hourMin == (12, 0) or hourMin == (18, 0):\r\n updateRTCFromNTP(strip)\r\n utime.sleep(30)",
"def tcp_time_updatetime(localport):\r\n\r\n # Get the ips and ports of servers hosting time_server.repy, retrying nine\r\n # times if there is an exception.\r\n gotval = False\r\n attemptretrieval = 0\r\n while attemptretrieval < 2:\r\n try:\r\n serveraddresses = advertise_lookup(\"time_server\")\r\n except Exception:\r\n attemptretrieval = attemptretrieval + 1\r\n sleep(2) # Look up the value again in 10 seconds\r\n else:\r\n if serveraddresses != [] and serveraddresses[0] != '':\r\n gotval = True\t # Successfully obtained the value\r\n break\r\n else:\r\n attemptretrieval = attemptretrieval + 1\r\n\r\n\r\n if not gotval:\r\n raise Exception(\"Unable to locate any servers running time_server.repy\")\r\n\r\n\r\n timelength = 25 # Max length of string, representing the time, to be received\r\n shuffledserveraddresses = random_sample(serveraddresses,min(len(serveraddresses),5))\r\n\r\n # Open a connection with a random server hosting time_server.repy\r\n timeobtained = False\r\n serverindex = 0\r\n while serverindex < len(shuffledserveraddresses):\r\n remoteaddress = shuffledserveraddresses[serverindex].split(':')\r\n remoteip = remoteaddress[0]\r\n remoteport = int(remoteaddress[1])\r\n\r\n try:\r\n sockobject = timeout_openconn(remoteip,remoteport)\r\n except Exception:\r\n serverindex +=1\r\n else:\r\n timeobtained = True\r\n break\r\n\r\n\r\n if not timeobtained:\r\n raise Exception(\"Unable to open connection with any of the \",len(shuffledserveraddresses),\"servers running time_server.repy.\")\r\n\r\n\r\n currenttime =''\r\n while '$' not in currenttime:\r\n currenttime += sockobject.recv(20)\r\n sockobject.close()\r\n currenttime = float(currenttime[:-1])\r\n\r\n # finally, set the time\r\n time_settime(currenttime)\r\n\r\n return shuffledserveraddresses[serverindex]",
"def set_time_sync_smart_mode_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetTimeSyncSmartModeEnabled', self.handle, bEnabled)"
]
| [
"0.72237253",
"0.6886432",
"0.66464496",
"0.6514424",
"0.65060776",
"0.6419905",
"0.6269265",
"0.62354267",
"0.6102252",
"0.60891914",
"0.60587496",
"0.5958736",
"0.5929074",
"0.59208417",
"0.5900496",
"0.58931005",
"0.5816147",
"0.5742855",
"0.57249767",
"0.57112616",
"0.5704643",
"0.56318945",
"0.5607347",
"0.5571163",
"0.5497278",
"0.5476714",
"0.5459628",
"0.54286283",
"0.5426473",
"0.54044515"
]
| 0.7669739 | 0 |
Indicate to the user that skills are being loaded. | def _display_skill_loading_notification(self):
self.enclosure.eyes_color(189, 183, 107) # dark khaki
self.enclosure.mouth_text(dialog.get("message_loading.skills")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loadallskills(self):\r\n for skill in os.listdir( os.path.join( es.getAddonPath( info.basename ), \"skills\" )):\r\n es.load(\"%s/skills/%s\" % (info.basename, skill))",
"def required_skills(self, required_skills):\n\n self._required_skills = required_skills",
"def load_skill_list(skills_to_load):\n if exists(SKILLS_DIR):\n # checking skills dir and getting all priority skills there\n skill_list = [folder for folder in filter(\n lambda x: os.path.isdir(os.path.join(SKILLS_DIR, x)),\n os.listdir(SKILLS_DIR)) if folder in skills_to_load]\n for skill_folder in skill_list:\n skill = {\"id\": hash(os.path.join(SKILLS_DIR, skill_folder))}\n skill[\"path\"] = os.path.join(SKILLS_DIR, skill_folder)\n # checking if is a skill\n if not MainModule + \".py\" in os.listdir(skill[\"path\"]):\n continue\n # getting the newest modified date of skill\n last_mod = _get_last_modified_date(skill[\"path\"])\n skill[\"last_modified\"] = last_mod\n # loading skill\n skill[\"loaded\"] = True\n skill[\"instance\"] = load_skill(\n create_skill_descriptor(skill[\"path\"]),\n ws, skill[\"id\"])\n loaded_skills[skill_folder] = skill",
"def skills(self):\n if \"skills\" in self._prop_dict:\n return self._prop_dict[\"skills\"]\n else:\n return None",
"def get_initial(self):\n initial = super(SkillsView, self).get_initial()\n ai = get_ai(\n self.request.session.get('token', False),\n self.kwargs['aiid']\n )\n initial = {\n 'skills': ai['linked_bots']\n }\n return initial",
"def load(self, skillName):\r\n es.load(\"%s/skills/%s\" % (info.basename, skillName))",
"def _skills_manager_dispatch():\n global ws\n ws.emit(Message(\"skill_manager\", {}))",
"def getSkills(self):\n return self.skills",
"def requires_matching_skills(self):\n return self._requires_matching_skills",
"def test_skills_updated(self):\n assert self.agent_config.skills == {self.new_skill_id}",
"def skill(self):\n return self._get(\"skill\")",
"def check_if_enough_skill_points(self):\r\n for skill_string in self.__skills:\r\n if (self.__skills[skill_string].points_to_up >\r\n self.__skill_points):\r\n self.skill_up_disable(skill_string)",
"def on_load(self):\n pass",
"def on_load(self):\n pass",
"def load_skills():\n\n Skill.query.delete()\n\n # get all the qualifications text from postings\n postings = db.session.query(Posting.qualifications).all()\n # combine qualifications into a list\n all_skills = []\n with open('filler.txt') as filler:\n del_words = filler.read()\n for post in postings:\n words = post.qualifications.lower().split()\n # iterate through a list of those skills\n for word in words:\n word = word.strip(\"-()/\\,.:;* 1234567890\")\n # check to see if that word isn't in our filler document\n # if not, add it to the table\n if word not in del_words and word not in all_skills:\n all_skills.append(word)\n skill = Skill(skill=word)\n db.session.add(skill)\n db.session.commit()",
"def __init__(self):\r\n self.skills = {}\r\n self.orderedSkills = []",
"def candidate_skills(self, source_object: Dict) -> CandidateSkillYielder:\n pass",
"def test_skills_updated(self):\n assert self.skill_config.skills == {self.new_skill_id}",
"def skills():\n with app.app_context():\n results = Skill.query.all()\n return SkillsResponse(skills=results).json(), 200",
"def use_skill(self, g, i, x, y):\n # @ param g a reference to the game engine\n # @ param i the index of the skill (basically what skill)\n # @ param x the x target coordinate in game pixels\n # @ param y the y target coordinate in game pixels\n if self.attackTimer < self.attackDelay:\n print(\"attack on CD\")\n return\n \n if self.skill[i].skillAttr == 0:\n g.fire_skill_sound.play()\n elif self.skill[i].skillAttr == 1:\n g.ice_skill_sound.play()\n elif self.skill[i].skillAttr == 2:\n g.lightning_skill_sound.play()\n elif self.skill[i].skillAttr == 3:\n g.poison_skill_sound.play()\n \n \n if self.skill[i].skillKey == 0: #Aura\n #turn the aura on/off\n if self.skill[i].active == False:\n #print(\"aura on\")\n self.skill[i].active = True\n else:\n self.skill[i].active = False\n #print(\"aura off\")\n \n elif self.skill[i].skillKey == 1: #Missile\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n target = Target(x, y)\n center_x = self.rect.x + (self.rect.width / 2)\n center_y = self.rect.y + (self.rect.height / 2)\n #bullet types: fire 5, ice 6, lightning 7\n #skill types: fire 0, ice 1, lightning 2\n g.bullets.append(self.bulletFactory.createBullet(g, self.skill[i].skillAttr + 5, 0, self.attack, 1024, target, center_x, center_y))\n #print(\"missile\")\n\n elif self.skill[i].skillKey == 2: #Breath\n #for each creep in the AoE cone, do damage.\n if self.mana[0] > self.skill[i].skillCost:\n self.mana[0] -= self.skill[i].skillCost\n self.attackTimer = 0\n #get low and high angle (-45 degrees and +45 degrees from player -> point angle)\n lowAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) - 3.1415 / 2.0\n highAngle = math.atan2(y - self.rect.centery, x - self.rect.centerx) + 3.1415 / 2.0\n for creep in g.creeps:\n #get angle to creep\n creepAngle = math.atan2(creep.rect.centery - self.rect.centery, creep.rect.centerx - self.rect.centerx)\n \n #if angle to the creep is between the two angles\n if creepAngle > lowAngle and creepAngle < highAngle:\n #and the distance to the creep is below the skill's range\n if ( (creep.rect.centerx - self.rect.centerx) ** 2 + (creep.rect.centery - self.rect.centery) ** 2 ) ** 0.5 < 4 * 24:\n creep.take_damage( self.attack )\n #print(\"breath\")\n #apply debuffs, based on type\n if self.skill[i].skillAttr == 0: #fire\n creep.applyBurning()\n elif self.skill[i].skillAttr == 1: #frost\n creep.applyChilled()\n elif self.skill[i].skillAttr == 2: #lightning\n creep.applyShocked()",
"def get_skills(self):\n return self.skills[:]",
"def unstudied_skills(self):\n undone = [task for task in self.skills if task not in self.done_skills]\n print(\"Hello \"+ str(self.user_name)+\" :)\\nHere are the skills that are still incomplete: \")\n for task in undone:\n num = 1\n print(\"[\"+str(num)+\"]:\"+ str(task))\n num += 1",
"def on_load(self):",
"def get_skill(self, other_card):\n ## YOUR CODE IS HERE ##",
"def get_skill(self, other_card):\n ## YOUR CODE IS HERE ##",
"def get_skill(self, other_card):\n ## YOUR CODE IS HERE ##",
"def get_skill(self, other_card):\n ## YOUR CODE IS HERE ##",
"def on_launch(launch_request, session):\r\n\r\n print(\"on_launch requestId=\" + launch_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # Dispatch to your skill's launch\r\n return skill_information()",
"def skills_manager(message):\n global skills_manager_timer\n\n if connected():\n if skills_manager_timer is None:\n pass\n # Install default skills and look for updates via Github\n LOG.debug(\"==== Invoking Mycroft Skill Manager: \" + MSM_BIN)\n install_default_skills(False)\n\n # Perform check again once and hour\n skills_manager_timer = Timer(3600, _skills_manager_dispatch)\n skills_manager_timer.daemon = True\n skills_manager_timer.start()",
"async def skill(self, ctx, *, skill: str):\n\n try:\n skill = self.get_entry('Skill', skill.lower())\n except RuntimeError as e:\n return await ctx.send(e)\n\n name = skill['Name']\n\n embed = discord.Embed(title=name)\n embed.set_thumbnail(url='attachment://skill.png')\n embed.add_field(name='Learned', value=skill['Class/Rank'], inline=False)\n embed.add_field(name='Effect', value=skill['Effect'])\n\n await ctx.send(file=discord.File(f'xenox/skills/{name}.png', 'skill.png'), embed=embed)"
]
| [
"0.6233247",
"0.6201158",
"0.6122491",
"0.60886675",
"0.60022455",
"0.5993554",
"0.59113675",
"0.58863217",
"0.5861418",
"0.57072824",
"0.561377",
"0.55963355",
"0.5585389",
"0.5585389",
"0.5572549",
"0.55570704",
"0.5554645",
"0.5545811",
"0.5543801",
"0.5529301",
"0.5514039",
"0.54425806",
"0.5409322",
"0.539311",
"0.539311",
"0.539311",
"0.539311",
"0.53868896",
"0.5376887",
"0.5373167"
]
| 0.68717 | 0 |
Determine if device is paired, if not automatically start pairing. Pairing cannot be performed if there is no connection to the back end. So skip pairing if the backend is down. | def _ensure_device_is_paired(self):
if not self.is_paired and not self.backend_down:
LOG.info('Device not paired, invoking the pairing skill')
payload = dict(utterances=["pair my device"], lang="en-us")
self.bus.emit(Message("recognizer_loop:utterance", payload)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_pairing_status(self):\n try:\n self.is_paired = is_paired(ignore_errors=False)\n except BackendDown:\n LOG.error('Cannot complete device updates due to backend issues.')\n self.backend_down = True\n\n if self.is_paired:\n LOG.info('Device is paired')",
"def bt_is_paired(self):\n is_paired = False\n try:\n self.show_more_for_paired_devices()\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.paired_device_list, 5)\n pair_device_list = self.find_elements(self.driver.appium_driver,\n self.paired_device_list, 0)\n\n logger.debug('Checks if the phone is paired with the any devices')\n if len(pair_device_list) > 0:\n if pair_device_list[0].text.upper() == \"PAIR NEW DEVICE\":\n return False\n\n logger.debug(\n \"phone {} paired with some bluetooth device\".format(\n self.phone_info.bluetooth_name))\n is_paired = True\n\n except Exception as e:\n logger.warning(\"Need to attempt pair before is_paired\")\n return is_paired",
"def is_paired(ignore_errors=True):\n global _paired_cache\n if _paired_cache:\n # NOTE: This assumes once paired, the unit remains paired. So\n # un-pairing must restart the system (or clear this value).\n # The Mark 1 does perform a restart on RESET.\n return True\n\n api = DeviceApi()\n _paired_cache = api.identity.uuid and check_remote_pairing(ignore_errors)\n\n return _paired_cache",
"def is_paired(self, phone):\n bt_util = BTUtils()\n target_addr = self.dut.bluetooth_address\n return bt_util.android_device_in_paired_state(phone, target_addr)",
"def bt_is_paired_to(self, paired_bluetooth_device):\n is_paired_with_device = False\n try:\n bt_is_paired = self.bt_is_paired()\n if not bt_is_paired:\n return is_paired_with_device\n\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.paired_device_list, 10)\n pair_element = self.find_elements(self.driver.appium_driver,\n self.paired_device_list, 0)\n\n\n for index in range(len(pair_element)):\n if self.is_same_bluetooth_name(pair_element[index],\n paired_bluetooth_device):\n is_paired_with_device = True\n break\n except Exception as e:\n logger.warning(\"Need to attempt pair or unpair before is_paired.\")\n logger.warning(repr(e))\n # raise\n return is_paired_with_device",
"def has_been_paired():\n # This forces a load from the identity file in case the pairing state\n # has recently changed\n id = IdentityManager.load()\n return id.uuid is not None and id.uuid != \"\"",
"def bt_try_connect(self, bluetooth_device_name_to_connect,\n contact_sharing=False): # TODO: Need to update to\n # use the new/refactored bt_connect() design from above.\n is_bluetooth_connect = False\n try:\n is_already_connected = self.bt_is_connected_to(\n bluetooth_device_name_to_connect)\n if is_already_connected is True:\n is_bluetooth_connect = True\n else:\n is_bt_paired = self.bt_is_paired_to(\n bluetooth_device_name_to_connect)\n if contact_sharing:\n if is_bt_paired:\n if self.phone_info.os_version.startswith('9') or self.phone_info.os_version.startswith('10'):\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver,\n self.previously_paired_device_button, 5)\n self.find_element(self.driver.appium_driver,\n self.previously_paired_device_button,\n 0).click()\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver, self.paired_device_list,\n 10)\n pair_element = self.find_elements(\n self.driver.appium_driver, self.paired_device_list,\n 1)\n for index in range(len(pair_element)):\n if self.is_same_bluetooth_name(pair_element[index],\n bluetooth_device_name_to_connect):\n pair_element[index].click()\n # self._bt_swipe_and_connect(pair_element,\n # index) # Not sure if this is required for\n # tests to work? I can get my Nexus6P (\n # Android 6.0.1) and iPhone 7 Plus (iOS\n # 10.3.2) to work without it... (So far)\n is_bluetooth_connect = True\n self._go_to_connected_device_screen(\n no_of_back_click=1)\n return is_bluetooth_connect\n else:\n if is_bt_paired:\n self.bt_unpair(bluetooth_device_name_to_connect)\n self.bt_radio('off')\n self.bt_radio('on')\n\n try:\n if '8.1' in self.phone_info.os_version or self.phone_info.os_version.startswith('9') or self.phone_info.os_version.startswith('10'):\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver,\n self.bluetooth_pair_new_device_in_android_8_1_button,\n 10)\n self.find_element(self.driver.appium_driver,\n self.bluetooth_pair_new_device_in_android_8_1_button,\n 2).click()\n sleep(10)\n except:\n logger.debug(\"Pair new device option is not available\")\n is_device_found = False\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.bluetooth_pair_device,\n 5)\n element_list = self.find_elements(self.driver.appium_driver,\n self.bluetooth_pair_device,\n 1)\n # Wait till bluetooth device found in list and click when it\n # is visible in list\n for retry in range(1):\n if retry == 0:\n for index in range(len(element_list)):\n element_text = element_list[index].text\n # For some reason my iPhone 6 (iOS 11.1.1) is\n # getting stuck here because one of the\n # element's text is None.\n # So adding bit to ignore that.\n if type(element_text) is not str:\n logger.warn(\n \"Found pairing list element's text was \"\n \"None! Ignoring for now.\")\n continue\n if self.is_same_bluetooth_name(element_list[index],\n bluetooth_device_name_to_connect):\n element_list[index].click()\n # self._bt_swipe_and_connect(element_list,\n # index) # Not sure if this is required for\n # tests to work? I can get my Nexus6P (\n # Android 6.0.1) and iPhone 7 Plus (iOS\n # 10.3.2) to work without it... 
(So far)\n logger.debug(\"Connecting to \" +\n bluetooth_device_name_to_connect)\n is_device_found = True\n # NOTE: Removed a bunch of stuff after this...\n break\n else:\n is_device_found = self._bt_retry_to_connect(\n bluetooth_device_name_to_connect)\n if is_device_found == False:\n if '8.1' in self.phone_info.os_version \\\n or self.phone_info.os_version.startswith('9') or self.phone_info.os_version.startswith('10'):\n self.driver.appium_driver.back()\n self.bt_radio('off')\n self.bt_radio('on')\n self.perform_bottom_to_up_swipe(\n self.driver.appium_driver)\n logger.debug(\"Retries count : \" + str(retry))\n sleep(1)\n else:\n # The below can become strangely slow (take ~12\n # seconds) randomly, so skipping it...\n # is_bt_button_visible = self.__verify_current_screen()\n # logger.debug(\"The BT button is visible? {\n # }\".format(is_bt_button_visible))\n # if not is_bt_button_visible:\n # self.__retry_to_bt_connect(\n # bluetooth_device_name_to_connect)\n break\n if is_device_found:\n is_bluetooth_connect = True\n else:\n self.take_screenshot(self.driver.appium_driver,\n 'bt_connect')\n logger.error(\"Not connecting to given mobile Device\")\n except Exception as e:\n if '8.1' in self.phone_info.os_version or \\\n self.phone_info.os_version.startswith('9') or self.phone_info.os_version.startswith('10'):\n self.driver.appium_driver.back()\n self.take_screenshot(self.driver.appium_driver, 'bt_connect')\n logger.error(\n \"Connection is not successfully with bluetooth device\")\n logger.error(repr(e))\n return is_bluetooth_connect",
"def pair(self, phone, companion_app=True):\n bt_util = BTUtils()\n target_addr = self.dut.bluetooth_address\n if bt_util.android_device_in_connected_state(phone, target_addr):\n self.logger.info('Already paired and connected, skipping pairing.')\n else:\n if bt_util.android_device_in_paired_state(phone, target_addr):\n self.logger.info(\n 'Device is paired but not connected, unpair first.')\n if not bt_util.bt_unpair(phone, self.dut):\n raise TestActsError('Unable to unpair the device')\n bt_util.bt_pair_and_connect(phone, self.dut)\n self.logger.info('DEVICE PAIRED')\n if companion_app:\n profiles = PROFILES_CONNECTED.copy()\n profiles.update(COMP_PROFILE_CONNECTED)\n else:\n profiles = PROFILES_CONNECTED\n self.logger.info(profiles)\n if not bt_util.check_device_bt(device=self.dut, profiles=profiles):\n raise TestActsError('Dut BT status check failed.')\n else:\n return True",
"def pair(self, mac_address):\n try:\n out = self.get_output(\"pair \" + mac_address, 4)\n except BluetoothctlError, e:\n print(e)\n return None\n else:\n res = self.child.expect([\"Failed to pair\", \"Pairing successful\", pexpect.EOF])\n success = True if res == 1 else False\n return success",
"def get_paired_devices(self):\n try:\n out = self.get_output(\"paired-devices\")\n except BluetoothctlError, e:\n print(e)\n return None\n else:\n paired_devices = []\n for line in out:\n device = self.parse_device_info(line)\n if device:\n paired_devices.append(device)\n\n return paired_devices",
"def check_connection():\n if connected():\n ws.emit(Message('mycroft.internet.connected'))\n # check for pairing, if not automatically start pairing\n if not is_paired():\n # begin the process\n payload = {\n 'utterances': [\"pair my device\"],\n 'lang': \"en-us\"\n }\n ws.emit(Message(\"recognizer_loop:utterance\", payload))\n else:\n thread = Timer(1, check_connection)\n thread.daemon = True\n thread.start()",
"def check_remote_pairing(ignore_errors):\n try:\n DeviceApi().get()\n return True\n except HTTPError as e:\n if e.response.status_code == 401:\n return False\n error = e\n except Exception as e:\n error = e\n\n LOG.warning('Could not get device info: {}'.format(repr(error)))\n\n if ignore_errors:\n return False\n\n if isinstance(error, HTTPError):\n if connected():\n raise BackendDown from error\n else:\n raise InternetDown from error\n else:\n raise error",
"def bt_unpair(self, target):\n is_target_unpaired = False\n try:\n # if phone is not paired with any device, return True\n any_paired_device = self.find_element(self.driver.appium_driver, self.paired_device_list, 0)\n wait(2)\n if any_paired_device is None:\n logger.info(\"There is no paired device.\")\n return True\n\n logger.debug('Unpair {} Bluetooth device'.format(\n target))\n\n # enter paired bluetooth device page\n target_detail_tab_xpath = '//android.widget.TextView[@text=\"{}\"]' \\\n '/../../descendant-or-self::android.widget.LinearLayout/android.widget.LinearLayout/' \\\n 'android.widget.ImageView[@resource-id=\"com.coloros.wirelesssettings:id/deviceDetails\"]'\\\n .format(target)\n try:\n self.driver.appium_driver.find_element_by_xpath(target_detail_tab_xpath).click()\n except:\n logger.error(\"Cannot find the target detail tab.\")\n return False\n else:\n logger.error(\"Found target detail tab and cliked it.\")\n\n bt_unpair_button = self.find_element(self.driver.appium_driver, self.bt_unpair_button, 0)\n\n fail_cnt = 0\n while bt_unpair_button is None and fail_cnt < 5:\n wait(2)\n fail_cnt += 1\n bt_unpair_button = self.find_element(self.driver.appium_driver, self.bt_unpair_button, 0)\n\n if fail_cnt == 5:\n logger.error(\"Cannot find bt_unpair_button.\")\n return False\n else:\n logger.info('Found unpair button and clicking it.')\n bt_unpair_button.click()\n is_target_unpaired = True\n\n except Exception as e:\n self.take_screenshot(self.driver.appium_driver, 'bt_unpair')\n logger.warning(\"Need to attempt pair before unpair\")\n logger.warning(repr(e))\n return is_target_unpaired",
"def check_for_activate(self):\n try:\n # Attempt to activate. If the user has completed pairing on the,\n # backend, this will succeed. Otherwise it throws and HTTPError()\n\n token = self.data.get(\"token\")\n login = self.api.activate(self.state, token) # HTTPError() thrown\n\n # When we get here, the pairing code has been entered on the\n # backend and pairing can now be saved.\n # The following is kinda ugly, but it is really critical that we\n # get this saved successfully or we need to let the user know that\n # they have to perform pairing all over again at the website.\n try:\n IdentityManager.save(login)\n except Exception as e:\n self.log.debug(\"First save attempt failed: \" + repr(e))\n time.sleep(2)\n try:\n IdentityManager.save(login)\n except Exception as e2:\n # Something must be seriously wrong\n self.log.debug(\"Second save attempt failed: \" + repr(e2))\n self.abort_and_restart()\n\n if mycroft.audio.is_speaking():\n # Assume speaking is the pairing code. Stop TTS of that.\n mycroft.audio.stop_speaking()\n\n self.enclosure.activate_mouth_events() # clears the display\n\n # Notify the system it is paired\n self.gui.show_page(\"pairing_done.qml\", override_idle=False)\n self.bus.emit(Message(\"mycroft.paired\", login))\n\n self.pairing_performed = True\n with self.pair_dialog_lock:\n if self.mycroft_ready:\n # Tell user they are now paired\n self.speak_dialog(self.paired_dialog)\n mycroft.audio.wait_while_speaking()\n else:\n self.speak_dialog(\"wait.for.startup\")\n mycroft.audio.wait_while_speaking()\n\n # Un-mute. Would have been muted during onboarding for a new\n # unit, and not dangerous to do if pairing was started\n # independently.\n self.bus.emit(Message(\"mycroft.mic.unmute\", None))\n\n # Send signal to update configuration\n self.bus.emit(Message(\"configuration.updated\"))\n\n # Allow this skill to auto-update again\n self.reload_skill = True\n except HTTPError:\n # speak pairing code every 60th second\n with self.counter_lock:\n if self.count == 0:\n self.speak_code()\n self.count = (self.count + 1) % 6\n\n if time.monotonic() > self.time_code_expires:\n # After 20 hours the token times out. Restart\n # the pairing process.\n with self.counter_lock:\n self.count = -1\n self.data = None\n self.handle_pairing()\n else:\n # trigger another check in 10 seconds\n self.__create_activator()\n except Exception as e:\n self.log.debug(\"Unexpected error: \" + repr(e))\n self.abort_and_restart()",
"def bt_start_discovery(self):\n is_start_discovery = False\n try:\n is_bluetooth_on = self.bt_radio('on')\n if '8.1' in self.phone_info.os_version:\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.bluetooth_pair_new_device_in_android_8_1_button,\n 10)\n self.find_element(self.driver.appium_driver,\n self.bluetooth_pair_new_device_in_android_8_1_button,\n 2).click()\n is_bluetooth_on = True\n if is_bluetooth_on:\n logger.debug(\"Bluetooth discovery Stared on {}\".format(\n self.phone_info.bluetooth_name))\n is_start_discovery = True\n else:\n logger.debug(\"Bluetooth discovery not Stared on {}\".format(\n self.phone_info.bluetooth_name))\n except Exception as e:\n logger.error(\"Trun on Bluetooth Button is not Visible\")\n logger.error(repr(e))\n return is_start_discovery",
"def bt_get_pairlist(self):\n logger.info('Getting the list of paired bluetooth devices.')\n\n bluetooth_paired_device_list = []\n try:\n\n # if there is no paired device, this will be an empty list, not None.\n paired_device_text_element_list = self.find_elements(self.driver.appium_driver,\n self.paired_device_text,\n 0)\n\n if len(paired_device_text_element_list) != 0:\n logger.info(\"phone is paired with %d device(s).\" % len(paired_device_text_element_list))\n bluetooth_paired_device_list = self._get_bluetooth_paired_device_list(paired_device_text_element_list)\n else:\n logger.info(\"Phone is not connected with any device.\")\n\n except Exception as e:\n logger.warning(repr(e))\n # raise\n return bluetooth_paired_device_list",
"def pair(self, mac_address):\n try:\n self.send(f\"pair {mac_address}\", 4)\n except Exception as e:\n logger.error(e)\n return False\n else:\n res = self.process.expect(\n [\"Failed to pair\", \"Pairing successful\", pexpect.EOF]\n )\n return res == 1",
"def getPairConDevices():\n \n # Enable bluetooth service if not enabled\n changeBluetoothService(enable=True)\n \n # List available bluetooth devices\n blueDevices = execCommand('bluetoothctl devices')\n \n # parse available devices to list\n availDevices = list()\n for device in blueDevices.split('\\n'):\n if 'Device' in device:\n deviceList = list()\n deviceList.append(device[25:])\n deviceList.append(device[7:24])\n availDevices.append(deviceList)\n \n # check paired and connected devices\n pairedDevices = list()\n connectedDevices = list()\n for device in availDevices:\n deviceInfo = execCommand('bluetoothctl info {}'.format(device[1]))\n if 'Paired: yes' in deviceInfo:\n pairedDevices.append(device)\n if 'Connected: yes' in deviceInfo:\n connectedDevices.append(device)\n \n return pairedDevices, connectedDevices",
"def requires_pairing(cls) -> bool:\n return False",
"def get_paired_devices(self):\n paired_devices = []\n try:\n out = self.get_output(\"paired-devices\")\n except Exception as e:\n logger.error(e)\n else:\n for line in out:\n device = self.parse_device_info(line)\n if device:\n paired_devices.append(device)\n return paired_devices",
"def test_pairing_metric(self):\n time_bonds = []\n for n in range(self.iterations):\n start_time = get_current_epoch_time()\n self.log.info(\"Pair bluetooth iteration {}.\".format(n + 1))\n if (not pair_pri_to_sec(\n self.android_devices[0],\n self.android_devices[1],\n attempts=1,\n auto_confirm=False)):\n self.log.error(\"Failed to bond devices.\")\n return False\n end_time = get_current_epoch_time()\n time_bonds.append((start_time, end_time))\n # A device bond will trigger a number of system routines that need\n # to settle before unbond\n time.sleep(2)\n for ad in self.android_devices:\n if not clear_bonded_devices(ad):\n return False\n # Necessary sleep time for entries to update unbonded state\n time.sleep(2)\n bonded_devices = ad.droid.bluetoothGetBondedDevices()\n if len(bonded_devices) > 0:\n self.log.error(\"Failed to unbond devices: {}\".format(\n bonded_devices))\n return False\n end_time = get_current_epoch_time()\n bluetooth_logs, bluetooth_logs_ascii = \\\n self.collect_bluetooth_manager_metrics_logs(\n [self.android_devices[0]])\n bluetooth_log = bluetooth_logs[0]\n bluetooth_log_ascii = bluetooth_logs_ascii[0]\n asserts.assert_equal(\n len(bluetooth_log.pair_event), 8, extras=bluetooth_log_ascii)\n for pair_event in bluetooth_log.pair_event:\n t = pair_event.event_time_millis\n asserts.assert_true(start_time <= t <= end_time,\n \"Event time %d not within limit [%d, %d]\" %\n (t, start_time, end_time))\n device_info = pair_event.device_paired_with\n asserts.assert_true(device_info, \"Device info is none\")\n asserts.assert_equal(device_info.device_type, self.android_devices[\n 0].bluetooth_proto_module.DeviceInfo.DEVICE_TYPE_BREDR,\n \"Device type does not match\")",
"def create_bond(device_address=None, adapter_address=None):\n con = pexpect.spawn('sudo bluetoothctl')\n con.expect(\"bluetooth\", timeout=1)\n \n print(\"selecting adapter ...\")\n con.sendline(\"select \" + adapter_address.upper())\n\n #check to see if already paired\n print(\"checking if bond exists already ...\")\n no_bond=False\n try:\n con.sendline(\"paired-devices\")\n con.expect(device_address.upper(), timeout=1)\n except(pexpect.TIMEOUT):\n no_bond = True\n else:\n print(\"bond already exists for %s\" % (device_address.upper()))\n print(\"successfully quiting bluetoothctl since bond is already formed\")\n con.sendline(\"quit\") \n return(0) \n \n con.sendline(\"select \" + adapter_address.upper())\n \n print(\"registering agent ...\")\n try:\n con.sendline(\"agent NoInputNoOutput\")\n con.expect(['Agent registered', 'Agent is already registered'], timeout=1)\n con.sendline(\"default-agent\")\n con.expect(\"Default agent request successful\", timeout=1)\n except(pexpect.TIMEOUT):\n print(\"unable to register agent\")\n return(1)\n\n print(\"enabling pairing ...\")\n try:\n con.sendline(\"pairable on\")\n con.expect(\"Changing pairable on succeeded\", timeout=1)\n except(pexpect.TIMEOUT):\n print(\"unable to turn pairing on\")\n return(1)\n\n print(\"starting scan ...\")\n try:\n con.sendline(\"scan on\")\n devfound = con.expect(device_address.upper(), timeout=5)\n if devfound == 0:\n try:\n con.sendline(\"scan off\")\n print (\"Found device. connecting to %s\" % (device_address.upper()))\n con.sendline(\"connect \" + device_address.upper())\n con.expect(\"Connection successful\", timeout=10)\n #sleep(10) #need extra time here to finish pairing\n except(pexpect.TIMEOUT):\n print(\"could not connect to %s\" % (device_address.upper()))\n return(1)\n try:\n #explicitly pair with the device\n con.sendline(\"pair \" + device_address.upper())\n con.expect(\"Pairing successful\", timeout=5)\n except(pexpect.TIMEOUT):\n print(\"pairing not successful\")\n try:\n con.sendline(\"info \" + device_address.upper()) \n con.expect(\"Paired: yes\", timeout=1)\n except(pexpect.TIMEOUT):\n print(\"could not pair with %s\" % (device_address.upper()))\n return(1)\n else:\n con.sendline(\"trust \" + device_address.upper())\n print(\"Connection and pairing successful!\")\n #try:\n #con.sendline(\"list-attributes\")\n #con.expect(\"6e400003-b5a3-f393-e0a9-e50e24dcca9e\", timeout=2)\n #print(con.before)\n #for line in con.before:\n # read_characteristics = line\n #print(read_characteristics)\n #except(pexpect.TIMEOUT):\n #print(\"could not list the attributes\")\n #return(1)\n try:\n print(\"disconnecting temporarily ...\")\n con.sendline(\"disconnect \" + device_address.upper())\n con.expect(\"Connected: no\", timeout=5)\n except(pexpect.TIMEOUT):\n print(\"could not disconnect.. \")\n con.sendline(\"quit\")\n return(1)\n else:\n print(\"successfully quiting bluetoothctl after forming bond\")\n con.sendline(\"quit\")\n return(0)\n except(pexpect.TIMEOUT):\n con.sendline(\"scan off\")\n print(\"unable to find device %s\" % (device_address))\n return(1)",
"async def pair(self, *args, **kwargs) -> bool:\n return await self._backend.pair(*args, **kwargs)",
"def enable_secure_simple_pairing(self):\n logging.info(\"Cert: Sending WRITE_SIMPLE_PAIRING_MODE [True]\")\n self._enqueue_hci_command(hci_packets.WriteSimplePairingModeBuilder(hci_packets.Enable.ENABLED), True)\n logging.info(\"Cert: Waiting for controller response\")\n assertThat(self._hci_event_stream).emits(lambda msg: b'\\x0e\\x04\\x01\\x56\\x0c' in msg.event)",
"def bt_is_connected(self):\n try:\n is_bluetooth_on = self.bt_enabled()\n\n # if bluetooth is OFF then throw Exception\n if not is_bluetooth_on:\n logger.error(\"The bluetooth is disabled on {}\".format(self.phone_info.bluetooth_name))\n\n self.bt_radio(\"on\")\n # return False\n # sys.exit(0)\n\n # displays all paired devices\n self.show_more_for_paired_devices()\n\n connected_devices = self.find_elements(\n self.driver.appium_driver,\n self.bluetooth_connected_indicator, 0)\n\n time.sleep(1)\n if len(connected_devices) > 0:\n logger.debug(\n \"phone {} is connected with some bluetooth device\".format(\n self.phone_info.bluetooth_name))\n return True\n except Exception as e:\n logger.warning(\n \"Need to attempt connect before checking connection status.\")\n\n logger.warning(repr(e))\n # raise\n return False",
"def bt_connect(self, bluetooth_device_name_to_connect, perform_unpair=True,\n no_of_attempt=1, enable_ga=False):\n try:\n is_already_connected = self.bt_is_connected_to(\n bluetooth_device_name_to_connect)\n if is_already_connected is True:\n return True\n\n if not perform_unpair:\n return self.connect_paired_device(\n bluetooth_device_name_to_connect)\n\n return self._connect_bluetooth_device(\n bluetooth_device_name_to_connect, no_of_attempt, enable_ga)\n\n except Exception as e:\n self.take_screenshot(self.driver.appium_driver, 'bt_connect')\n if '8.1' in self.phone_info.os_version or self.phone_info.os_version.startswith('9') or self.phone_info.os_version.startswith('10'):\n self.driver.appium_driver.back()\n logger.error(\"Connection failed {} with bluetooth device\".format(\n bluetooth_device_name_to_connect))\n logger.error(repr(e))\n return False",
"def __init_bluetooth_if_required(self):\n\n if self.__bluetooth_adapter and ((self.__bluetooth_adapter._running and not self.__bluetooth_adapter._running.is_set()) or not self.__bluetooth_adapter._running):\n try:\n self.__bluetooth_adapter.start()\n return self.__bluetooth_adapter._running.is_set()\n except Exception:\n self.on_error(self, None, 'Unable to initialize Bluetooth adapter', traceback.format_exc())\n return False\n if self.__bluetooth_adapter and self.__bluetooth_adapter._running:\n return self.__bluetooth_adapter._running.is_set()\n else:\n return False",
"async def connected(self) -> bool:\n args = ['-t', f\"DEVICE INFO,{self.conf['device_address']}\"]\n output = await self.run_vh(args)\n return \"IN USE BY: NO ONE\" not in output",
"def CheckIfConnecting(self):\n if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():\n return True\n else:\n return False",
"def CheckIfWiredConnecting(self):\n if self.wired.connecting_thread:\n return self.wired.connecting_thread.is_connecting\n else:\n return False"
]
| [
"0.8065884",
"0.7769401",
"0.77569914",
"0.7557578",
"0.74007684",
"0.6839915",
"0.6752682",
"0.6557243",
"0.62115335",
"0.6207546",
"0.6190262",
"0.61526656",
"0.61224526",
"0.6051847",
"0.6030277",
"0.59858686",
"0.5979463",
"0.5896",
"0.58188164",
"0.5776929",
"0.57743293",
"0.5728147",
"0.57266265",
"0.57024676",
"0.5648145",
"0.5642918",
"0.5628526",
"0.5626524",
"0.5622357",
"0.5562825"
]
| 0.8198503 | 0 |
Initialize trainable variables randomly or from the given checkpoint. | def _initialize_variables(self, finetune: str=None, **kwargs) -> None:
if finetune is None:
super()._initialize_variables(**kwargs) # default initialization
else:
self._saver = tf.train.Saver(max_to_keep=100000000)
logging.info('Restoring variables from `%s`', finetune)
self._saver.restore(self.session, finetune) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resnet_init_from_checkpoint_fn(checkpoint):\n logging.info('Initializing model weights from %s', checkpoint)\n assignment_map = {}\n resnet_scope = _get_resnet_scope()\n for var in contrib_framework.get_variables(\n scope=resnet_scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES):\n if 'dense' not in var.op.name:\n # Remove the parent scope prefix.\n name_in_ckpt = var.op.name.replace(resnet_scope, 'resnet_model/')\n assignment_map[name_in_ckpt] = var\n tf.train.init_from_checkpoint(checkpoint, assignment_map)",
"def initialize_model_state(jax_model: model.BaseTask,\n prng_key: PRNGKey) -> TrainState:\n logging.info('init_var prng_seed: %s', prng_key)\n initial_vars = jax_model.instantiate_variables(prng_key)\n logging.debug('initial_vars: %s', initial_vars)\n learnable_vars = tf.nest.map_structure(\n lambda v: not base_layer.var_not_trainable(v), jax_model.vars)\n tf.nest.assert_same_structure(initial_vars, learnable_vars)\n return jax_model.create_train_state(initial_vars, jax_model.vars)",
"def _init_checkpoint_and_variables(pretrain_checkpoint_path,\n pretrain_checkpoint_exclude_scopes):\n checkpoint_reader = tf.contrib.framework.load_checkpoint(\n pretrain_checkpoint_path)\n return get_variables_to_restore_from_pretrain_checkpoint(\n pretrain_checkpoint_exclude_scopes,\n checkpoint_reader.get_variable_to_shape_map())",
"def init(self,sess):\n if not os.path.isfile(\\\n \"./Models/\" + self.mod_name + \".ckpt.meta\"):\n sess.run(tf.global_variables_initializer())\n return 0\n else:\n if self.gen_only:\n sess.run(tf.global_variables_initializer())\n self.load(sess)\n return 1",
"def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None):\n sess.run(tf.group(\n tf.local_variables_initializer(),\n tf.global_variables_initializer()))\n if resume and not (logdir or checkpoint):\n raise ValueError('Need to specify logdir to resume a checkpoint.')\n if logdir:\n state = tf.train.get_checkpoint_state(logdir)\n if checkpoint:\n checkpoint = os.path.join(logdir, checkpoint)\n print('checkpoint {}'.format(checkpoint))\n if not checkpoint and state and state.model_checkpoint_path:\n checkpoint = state.model_checkpoint_path\n if checkpoint and resume is False:\n message = 'Found unexpected checkpoint when starting a new run.'\n raise RuntimeError(message)\n print('checkpoint {}'.format(checkpoint))\n if checkpoint:\n saver.restore(sess, checkpoint)",
"def init(self, train):\n return",
"def init(self, train):\n return",
"def init(self, train):\n return",
"def init(self, train):\n return",
"def init(self, train):\n return",
"def initialize_training_state(\n self,\n config: ConfigDict,\n key: KeyArray,\n model: ModuleDef,\n variables0: Optional[ModelVarDict] = None,\n ):\n # Create Flax training state\n state = self.create_train_state(\n key, config, model, self.ishape, self.lr_schedule, variables0\n )\n if self.checkpointing and variables0 is None:\n # Only restore if no initialization is provided\n if have_tf: # Flax checkpointing requires tensorflow\n state = checkpoint_restore(state, self.workdir)\n else:\n raise RuntimeError(\n \"Tensorflow not available and it is required for Flax checkpointing.\"\n )\n self.log(get_parameter_overview(state.params))\n self.log(get_parameter_overview(state.batch_stats))\n\n self.state = state",
"def initialize_variables(self):\n self.sess.run(self.init)",
"def init_game_setting(self):\n np.random.seed(1) \n self.s_prev = np.zeros((80, 80, 1))\n print('loading trained model from {}'.format(self.model_path))\n self.sess = tf.InteractiveSession(graph=self.model)\n self.saver.restore(self.sess, self.model_path)",
"def from_scratch(self, init_rng: Array) -> train_state_lib.TrainState:\n logging.info('Initializing parameters from scratch.')\n\n # If pretraining and no checkpoint imported, we jit the (sharded-) init\n # function to minimize fragmentation. We use the same partition\n # setup as the training step/loop to initialize everything \"in-place\" and\n # avoid communication or OOM.\n p_initialize_train_state_fn = self._partitioner.partition(\n self._initialize_train_state,\n in_axis_resources=None,\n out_axis_resources=self.train_state_axes)\n return p_initialize_train_state_fn(init_rng)",
"def initialize():\n new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED\n tf.get_default_session().run(tf.variables_initializer(new_variables))\n ALREADY_INITIALIZED.update(new_variables)",
"def initialize_variables():\n variables = tf.local_variables()\n uninitialized_variables = []\n for v in variables:\n if not hasattr(v, '_keras_initialized') or not v._keras_initialized:\n uninitialized_variables.append(v)\n v._keras_initialized = True\n if uninitialized_variables:\n sess = tf.keras.backend.get_session()\n sess.run(tf.variables_initializer(uninitialized_variables))",
"def initialize():\n new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED\n get_session().run(tf.variables_initializer(new_variables))\n\n ALREADY_INITIALIZED.update(new_variables)",
"def get_init_fn():\n\n variables_to_restore = []\n for var in slim.get_model_variables():\n variables_to_restore.append(var)\n\n checkpoint_path = tf.train.latest_checkpoint(\"./base_checkpoint\")\n\n tf.logging.info('Fine-tuning from %s' % checkpoint_path)\n\n return slim.assign_from_checkpoint_fn(\n checkpoint_path,\n variables_to_restore,\n ignore_missing_vars=False)",
"def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())",
"def initialize_model(session, model, train_dir, expect_exists):\n print \"Looking for model at %s...\" % train_dir\n ckpt = tf.train.get_checkpoint_state(train_dir)\n v2_path = ckpt.model_checkpoint_path + \".index\" if ckpt else \"\"\n if ckpt and (tf.gfile.Exists(ckpt.model_checkpoint_path) or tf.gfile.Exists(v2_path)):\n print \"Reading model parameters from %s\" % ckpt.model_checkpoint_path\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n if expect_exists:\n raise Exception(\"There is no saved checkpoint at %s\" % train_dir)\n else:\n print \"There is no saved checkpoint at %s. Creating model with fresh parameters.\" % train_dir\n session.run(tf.global_variables_initializer())\n print 'Num params: %d' % sum(v.get_shape().num_elements() for v in tf.trainable_variables())",
"def initialize(self):\n \n #initialize the variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n \n #initialize the data iterators\n self.session.run(self.data_iterator.initializer)",
"def _init_model(self, checkpoint_path: str) -> None:\n # load weights\n logger.info(f\"Load weights from the checkpoint {checkpoint_path}\")\n checkpoint = torch.load(checkpoint_path, map_location=torch.device(\"cpu\"))\n\n state_dict = checkpoint[\"state_dict\"]\n self.orig_acc = checkpoint[\"test_acc\"]\n\n is_pruned = (\n next((name for name in state_dict if \"mask\" in name), None) is not None\n )\n\n if is_pruned:\n logger.info(\"Dummy prunning to load pruned weights\")\n model_utils.dummy_pruning(self.params_all)\n\n model_utils.initialize_params(self.model, state_dict)\n logger.info(\"Initialized weights\")\n\n # check the trained model is pruned\n\n if is_pruned:\n logger.info(\n \"Get masks and remove prunning reparameterization for prepare_qat\"\n )\n self.mask = model_utils.get_masks(self.model)\n model_utils.remove_pruning_reparameterization(self.params_all)",
"def init(X1, Y1, X2, Y2):\n\n global X1_train\n global Y1_train\n global X2_train\n global Y2_train\n \n X1_train, Y1_train, X2_train, Y2_train = X1, Y1, X2, Y2",
"def initialize_if_not(self, training=False):\r\n if self._initialized:\r\n return\r\n\r\n # Build supporting operations\r\n with tf.variable_scope('savers'):\r\n self.checkpoint.build_savers() # Create savers\r\n if training:\r\n with tf.variable_scope('optimize'):\r\n self._build_optimizers()\r\n\r\n # Start pre-processing routines\r\n for _, datasource in self._train_data.items():\r\n datasource.create_and_start_threads()\r\n\r\n # Initialize all variables\r\n self._tensorflow_session.run(tf.global_variables_initializer())\r\n self._initialized = True",
"def _get_init_fn():\n exclusions = []\n if FLAGS.checkpoint_exclude_scopes:\n exclusions = [scope.strip()\n for scope in FLAGS.checkpoint_exclude_scopes.split(',')]\n\n variables_to_restore = []\n for var in slim.get_model_variables():\n excluded = False\n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n\n if tf.gfile.IsDirectory(FLAGS.checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n else:\n checkpoint_path = FLAGS.checkpoint_path\n\n tf.logging.info('Fine-tuning from {}'.format(checkpoint_path))\n\n return slim.assign_from_checkpoint_fn(checkpoint_path, variables_to_restore)",
"def init_model(session, model):\n # If there is a checkpoint, load it\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)\n if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(session, ckpt.model_checkpoint_path)\n\n # Else initialize the variables\n else:\n if FLAGS.decode:\n input(\"You sure you want to talk to an untrained chatbot? Press Ctrl-C to stop, Return to continue \")\n print(\"Fine.\")\n\n print(\"Creating model with fresh parameters.\")\n session.run(tf.global_variables_initializer())",
"def assign_from_checkpoint(model_path, var_list, ignore_missing_vars=False):\n # Normalize var_list into a dictionary mapping names in the\n # checkpoint to the list of variables to initialize from that\n # checkpoint variable. Sliced (including partitioned) variables will\n # end up under the same key.\n grouped_vars = {}\n if isinstance(var_list, (tuple, list)):\n for var in var_list:\n ckpt_name = get_variable_full_name(var)\n if ckpt_name not in grouped_vars:\n grouped_vars[ckpt_name] = []\n grouped_vars[ckpt_name].append(var)\n\n else:\n for ckpt_name, value in var_list.items():\n if isinstance(value, (tuple, list)):\n grouped_vars[ckpt_name] = value\n else:\n grouped_vars[ckpt_name] = [value]\n\n # Read each checkpoint entry. Create a placeholder variable and\n # add the (possibly sliced) data from the checkpoint to the feed_dict.\n reader = pywrap_tensorflow.NewCheckpointReader(model_path)\n feed_dict = {}\n assign_ops = []\n for ckpt_name in grouped_vars:\n if not reader.has_tensor(ckpt_name):\n log_str = 'Checkpoint is missing variable [%s]' % ckpt_name\n if ignore_missing_vars:\n logging.warning(log_str)\n continue\n else:\n raise ValueError(log_str)\n ckpt_value = reader.get_tensor(ckpt_name)\n\n for var in grouped_vars[ckpt_name]:\n placeholder_tensor = array_ops.placeholder(\n dtype=var.dtype.base_dtype,\n shape=var.get_shape(),\n name='placeholder/' + var.op.name)\n assign_ops.append(var.assign(placeholder_tensor))\n\n if not var._save_slice_info:\n if var.get_shape() != ckpt_value.shape:\n raise ValueError(\n 'Total size of new array must be unchanged for %s '\n 'lh_shape: [%s], rh_shape: [%s]'\n % (ckpt_name, str(ckpt_value.shape), str(var.get_shape())))\n\n feed_dict[placeholder_tensor] = ckpt_value.reshape(ckpt_value.shape)\n else:\n slice_dims = zip(var._save_slice_info.var_offset,\n var._save_slice_info.var_shape)\n slice_dims = [(start, start + size) for (start, size) in slice_dims]\n slice_dims = [slice(*x) for x in slice_dims]\n slice_value = ckpt_value[slice_dims]\n slice_value = slice_value.reshape(var._save_slice_info.var_shape)\n feed_dict[placeholder_tensor] = slice_value\n\n assign_op = control_flow_ops.group(*assign_ops)\n return assign_op, feed_dict",
"def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True",
"def _initial_setup(self, **train_kwargs):\n self._update(time_step=0., **train_kwargs)",
"def initialize_training(self, training_info):\n self.model.reset_weights()\n self.algo.initialize(self.settings, model=self.model, environment=self.environment, device=self.device)"
]
| [
"0.7199187",
"0.68767375",
"0.68545914",
"0.67681926",
"0.67573655",
"0.67527646",
"0.67527646",
"0.67527646",
"0.67527646",
"0.67527646",
"0.6746362",
"0.67407686",
"0.66633236",
"0.6610265",
"0.66059613",
"0.6599223",
"0.6592673",
"0.6522299",
"0.6428225",
"0.6428225",
"0.6412149",
"0.6326565",
"0.63064605",
"0.62611073",
"0.6258862",
"0.62546784",
"0.6246575",
"0.62373555",
"0.6225334",
"0.6164836"
]
| 0.6975668 | 1 |
Write Elasticluster configuration file with user and security information. | def _write_elasticluster_config(config, out_file):
orig_file = os.path.join(sys.prefix, "share", "bcbio-vm", "elasticluster", "config")
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
if os.path.exists(out_file):
bak_file = out_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
shutil.move(out_file, bak_file)
with open(orig_file) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
if line.startswith(tuple(config.keys())):
name, val = line.strip().split("=")
out_handle.write("%s=%s\n" % (name, config[name]))
else:
out_handle.write(line)
return out_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_config(self, filename=None):\n if filename is None:\n filename = f\"{self._nginx_path}/conf/nginx.conf\"\n self.dump_config(self._nodeconfig)\n cmd = f\"echo \\\"{self._nginx_config}\\\" | sudo tee {filename}\"\n exec_cmd_no_error(\n self._node, cmd, message=u\"Writing config file failed!\"\n )",
"def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())",
"def save_config(self):\n if not os.path.exists(USER_CONFIG_PATH):\n os.makedirs(USER_CONFIG_PATH)\n\n # obtener el config actual\n config = self.get_config()\n\n # obtener el cliente\n client = self._args.get('client')\n\n # ciertos parametros no se tienen que salvar\n args = self._args.copy()\n for item in ['doc', 'command', 'client']:\n if item in args:\n args.pop(item)\n\n # actualizar el cliente default\n config['client'] = client\n\n # actualizar el resto de los parametros para ese cliente\n for item in args:\n if client in config:\n config[client][item] = args.get(item)\n else:\n config[client] = {item: args.get(item)}\n\n with open(USER_CONFIG_FILE, 'w') as config_file:\n yaml.dump(config, config_file, default_flow_style=False,\n allow_unicode=True)",
"def write_config(self):\n logging.debug(\"Writing configuration file: %s\" % self.config_file)\n f = open(self.config_file, \"w\")\n self.config.write(f)\n f.close()",
"def __write_config(self):\n with open(self.config_file, 'w') as data_file:\n config = {\"ibooks_doc_root\":self.ibooks_doc_root,\n \"library_folder\":self.library_folder,\n \"annotation_folder\":self.annotation_folder,\n \"tmp_dir\":self.tmp_dir\n } \n data = json.dumps(config, ensure_ascii=False)\n data_file.write(data)",
"def write_config(self):\n\n #Before writing to file we must convert underscores to dashes, moreover _id must be written as id, and _type as type\n\n if not os.path.exists(NetworkManager_conf_dir):\n os.makedirs(NetworkManager_conf_dir, mode=0755)\n\n profile_path = os.path.join(\"%s\" % NetworkManager_conf_dir, self.connection._id)\n with open(profile_path, \"wb\") as configfile:\n self.cfg.write(configfile)",
"def create_user_file(self):\n d = self.user2dict()\n with open(User.get_path(self.username), 'w', encoding='utf8') as file:\n yaml.dump(d, file, default_flow_style=False)",
"def write_config(self):\n xshear_conf=XShearConfig(self['run'])\n xshear_conf.write()",
"def write_config(self, config_file):\n \n # write root paths\n \n # write reference data\n \n # write tool paths\n \n pass",
"def write_config_file():\n\tif not config_parser:\n\t\tprint \"Config module not loaded. I don't save anything.\"\n\t\treturn\n\n\tf = file(config_file, \"w\")\n\tconfig_parser.write(f)\n\tf.close()",
"def store_username_in_config(username):\n # Needs global variable or arg instead.\n with open(cfg.config_file, 'a') as f:\n f.write(\"\\nusername: %s\" % username)",
"def write_config(self, filename):\n self.config.filename = filename\n self.config.write()",
"def _createConfigFile(self):\n configFile = self._configFile()\n try:\n with open(configFile) as fh:\n pass\n except IOError:\n try:\n with open(configFile, 'w') as fh:\n fh.write(\"[settings]\\n\")\n fh.write(\"debug = false\\n\")\n fh.write(\"hidefilenames = false\\n\")\n except IOError:\n pass",
"def write(self, fn):\n with open(fn, 'w') as f:\n self.config.write(f)",
"def _write_ssh_configd(ssh_config=None,\n name=None,\n shortname=None,\n user='root',\n identity_file=None,\n public_ip=None,\n public_dns=None,\n verbose_level=1):\n\n if shortname is None:\n shortname = name\n if name is None:\n raise RuntimeError(\"[-] must specify 'name' for file name\")\n template_vars = dict()\n template_vars['identity_file'] = identity_file\n template_vars['port'] = 22\n template_vars['username'] = user\n template_vars['shortname'] = shortname\n template_vars['public_ip'] = public_ip\n template_vars['public_dns'] = public_dns\n for k, v in template_vars.items():\n if v is None or v == '':\n raise RuntimeError(f\"[-] variable '{k}' must be defined\")\n config_dir_path = ssh_config + '.d'\n if not os.path.exists(config_dir_path):\n raise RuntimeError(\n f\"[-] directory '{config_dir_path}' \"\n \"for update-dotdee does not exist\")\n config_file_path = os.path.join(config_dir_path, name)\n with open(config_file_path, 'w') as f:\n template = Template(SSH_CONFIG_TEMPLATE)\n output_text = template.render(dict(template_vars))\n f.writelines(output_text)\n if verbose_level > 2:\n print(output_text, file=sys.stderr, flush=True)",
"def write_config(self, config: dict):\n if self.config_in_use():\n raise BaseConfigInUseError()\n\n for conf_name, conf in create_config(\n self.config_dir, self.data_dir, config\n ).items():\n with open(abspath(join(self.config_dir, conf_name)), \"w\") as f:\n f.write(conf)",
"def write_kubeconfig(cluster_name, lb_ip, lb_port, ca_cert,\n client_cert, client_key):\n\n path = None\n master_uri = \"https://\" + lb_ip + \":\" + lb_port\n username = \"admin\"\n\n kubeconfig = get_kubeconfig_yaml(master_uri, ca_cert, username,\n client_cert, client_key)\n\n path = '-'.join((cluster_name, 'admin.conf'))\n LOGGER.success(\"You can use your config with:\")\n LOGGER.success(\"kubectl get nodes --kubeconfig=%s\" % path)\n with open(path, \"w\") as fh:\n fh.write(kubeconfig)\n\n return path",
"def write_config(self):\n cfg = {\n 'ALERT_API_KEY':self.api_key,\n 'APP_NAME':self.title,\n 'alertes':self.alertes\n }\n write_conf(self.CONF_FILE,cfg)",
"def write_pytan_user_config(self, **kwargs):\n puc_kwarg = kwargs.get('pytan_user_config', '')\n puc = puc_kwarg or self.puc\n puc = os.path.expanduser(puc)\n\n puc_dict = {}\n\n for k, v in vars(self).items():\n if k in ['mylog', 'methodlog', 'session', 'puc']:\n m = \"Skipping class variable {} from inclusion in: {}\".format\n self.mylog.debug(m(k, puc))\n continue\n\n m = \"Including class variable {} in: {}\".format\n self.mylog.debug(m(k, puc))\n puc_dict[k] = v\n\n # obfuscate the password\n puc_dict['password'] = pytan.utils.vig_encode(pytan.constants.PYTAN_KEY, self.password)\n\n try:\n with open(puc, 'w+') as fh:\n json.dump(puc_dict, fh, skipkeys=True, indent=2)\n except Exception as e:\n m = \"Failed to write PyTan User config: '{}', exception: {}\".format\n raise pytan.exceptions.HandlerError(m(puc, e))\n else:\n m = \"PyTan User config file successfully written: {} \".format\n self.mylog.info(m(puc))\n return puc",
"def write_config(self, username, port):\n user_path = 'users/' + username\n ip_path = user_path + '/.ipython'\n profile_path = ip_path + '/profile_nbserver/'\n \n # get local IP address\n hostname = urlparse('%s://%s' % (self.request.protocol, self.request.host)).hostname\n ip_address = socket.gethostbyname(hostname)\n\n # write configuration file\n conf_file = open(profile_path + 'ipython_notebook_config.py', 'w')\n\n conf_file.write('c = get_config()')\n conf_file.write('\\nc.NotebookApp.ip = \"' + ip_address + '\"')\n conf_file.write('\\nc.NotebookApp.port = ' + str(port))\n conf_file.write('\\nc.NotebookApp.port_retries = 0')\n conf_file.write('\\nc.NotebookApp.enable_mathjax = True')\n conf_file.write('\\nc.NotebookApp.open_browser = False')\n conf_file.write('\\nc.NotebookApp.ipython_dir = u\"' + ip_path + '\"')\n conf_file.write('\\nc.IPKernelApp.pylab = \"inline\"')\n conf_file.write('\\nc.NotebookManager.notebook_dir = u\"' + user_path + '/notebooks\"')\n conf_file.close()\n\n return profile_path",
"def write_config(self):\r\n obj = [\r\n [self.ip,\r\n self.gate,\r\n self.mask,\r\n self.name,\r\n self.time]\r\n ]\r\n with open('config.json', 'wt') as jsonfile:\r\n json.dump(obj, jsonfile)",
"def create_configfile():\n config = ConfigParser.ConfigParser()\n config.add_section('Common')\n config.set('Common', 'renewal days', 20)\n config.set('Common', 'delayed installation days', 5)\n config.set('Common', 'include chain', True)\n config.set('Common', 'account key', './config/key.pem')\n config.add_section('Load Balancer')\n config.set('Load Balancer', 'cluster', True)\n config.set('Load Balancer', 'Host 1', 'lb1.example.com')\n config.set('Load Balancer', 'Host 2', 'lb2.example.com')\n config.set('Load Balancer', 'username', 'admin')\n config.set('Load Balancer', 'password', 'password01')\n config.set('Load Balancer', 'datagroup', 'acme_responses_dg')\n config.set('Load Balancer', 'datagroup partition', 'Common')\n config.add_section('Certificate Authority')\n config.set('Certificate Authority', 'Directory URL',\n 'https://acme-v01.api.letsencrypt.org/directory')\n config.set('Certificate Authority', 'use proxy', False)\n config.set('Certificate Authority', 'proxy',\n 'http://proxy.example.com:8080')\n\n # As the config file contains password, we should be careful with permissions\n with os.fdopen(os.open(CONFIG_FILE, os.O_WRONLY | os.O_CREAT, 0o660), 'w') as config_file:\n config.write(config_file)",
"def write_auth_data(config_file, credentials):\n cred_list = []\n for cred in credentials:\n cred_dict = {\"username\": cred.username, \"password\": cred.password,\n \"protocol\": cred.protocol, \"mechanism\": cred.mechanism,\n \"token_endpoint\": cred.token_endpoint}\n if len(cred.hostname) > 0:\n cred_dict[\"hostname\"] = cred.hostname\n if cred.ssl_ca_location is not None:\n cred_dict[\"ssl_ca_location\"] = cred.ssl_ca_location\n\n cred_list.append(cred_dict)\n\n os.makedirs(os.path.dirname(config_file), exist_ok=True)\n fd = os.open(config_file, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, stat.S_IRUSR | stat.S_IWUSR)\n with open(fd, \"w\") as f:\n toml.dump({\"auth\": cred_list}, f)\n logger.info(f\"Wrote configuration to: {config_file}\")",
"def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)",
"def writeShREEKConfig(self, filename):\n self._ShREEKConfig.save(filename)\n return",
"def write_config():\n\n e = Element(\"Configuration\")\n r = SubElement(e, \"RepositoryList\")\n r = SubElement(r, \"Repository\", name = \"default\")\n SubElement(r, \"Module\").text = args.driver\n SubElement(r, \"TokenLabel\").text = args.token_label\n SubElement(r, \"PIN\").text = args.pin\n ElementTree(e).write(args.write_config)\n args.write_config.flush()",
"def write_config(self, data):\n logger.debug(\"[%s] Writing config\", self.name)\n self.config.write(data)",
"def write_config(self):\n cfg = {\n 'channel':self.channel,\n 'seuil_min':self.seuil_min,\n 'last_level':self.last_level,\n 'last_level_date':self.last_level_date\n }\n write_conf(self.CONF_FILE,cfg)",
"def write(self):\n cfgpath = os.path.join(self.config_dir, CONFIG_FILENAME)\n ofile = open(cfgpath, 'w')\n if ofile:\n log.debug( \"Write config: %s\" % cfgpath )\n cfg = yaml.dump(self.yaml, default_flow_style=False)\n log.debug( \"Config:\\n%s\" % cfg)\n ofile.write(cfg)\n ofile.close()",
"def write(filename):\n log.msg(\"Saving configuration information to \\\"\" + filename + \"\\\"\", lvl='i', ss='ss_configfile')\n\n f = open(filename, 'w')\n cp = ConfigParser.SafeConfigParser()\n #a little string hacking because our section names are un-normalized\n #this builds a list of all the sections names\n sectionslst = []\n sections = []\n for k in _loaded.keys():\n sectionslst.append(k.split('.')[0])\n #get unique entries\n sections = _uniquer(sectionslst)\n for sec in sections:\n log.msg(\"\\tCompiling section \\\"\" + sec + \"\\\"\",\n lvl='d3', ss='ss_configfile')\n #make the headers\n cp.add_section(sec)\n #for each item in my dictionary\n #it splits the key in two and uses that for the first and second \"set\" args\n #then it uses the item.value for the 3rd arg\n # from 'section.option:value'\n \n for k in _loaded.items():\n cp.set(str(k[0]).split('.')[0], str(k[0]).split('.')[1], str(k[1]))\n cp.write(f)\n f.close()"
]
| [
"0.62820524",
"0.62570614",
"0.62143034",
"0.62114924",
"0.6193832",
"0.6069943",
"0.599572",
"0.5991819",
"0.5979662",
"0.5961469",
"0.5899129",
"0.5884886",
"0.5874607",
"0.5871666",
"0.58647877",
"0.5844628",
"0.58071727",
"0.57967025",
"0.57941467",
"0.5790304",
"0.5785122",
"0.57635736",
"0.57509816",
"0.5747844",
"0.5724942",
"0.56764615",
"0.5665461",
"0.56509215",
"0.56347096",
"0.5625558"
]
| 0.66994685 | 0 |
Create a bcbio keypair and import to ec2. Gives us access to keypair locally and at AWS. | def create_keypair(econfig_file=None, region=None, keyname="bcbio"):
import boto
import boto.ec2
if econfig_file:
keypair_dir = os.path.dirname(econfig_file).replace("elasticluster", "aws_keypairs")
else:
keypair_dir = os.path.join(os.getcwd(), "aws_keypairs")
if not os.path.exists(keypair_dir):
os.makedirs(keypair_dir)
    private_key = os.path.join(keypair_dir, keyname)
new_key = not os.path.exists(private_key)
if new_key:
cmd = ["ssh-keygen", "-t", "rsa", "-N", "", "-f", private_key, "-C", "bcbio_aws_keypair"]
subprocess.check_call(cmd)
public_key = private_key + ".pub"
if region:
ec2 = boto.ec2.connect_to_region(region)
else:
ec2 = boto.connect_ec2()
key = ec2.get_key_pair(keyname)
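    # A newly generated local key can no longer match an existing AWS keypair of the same name,
    # so drop the stale AWS copy and re-import the fresh public key below.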
if key and new_key:
print("Non matching key %s found in AWS, removing." % keyname)
ec2.delete_key_pair(keyname)
key = None
if not key:
print("Key %s not found in AWS, importing created key" % keyname)
with open(public_key) as in_handle:
body = in_handle.read()
try:
ec2.import_key_pair(keyname, body)
except TypeError as e:
body = body.encode('utf-8')
ec2.import_key_pair(keyname, body)
return {"user_key_name": keyname, "user_key_private": private_key,
"user_key_public": public_key} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_keypair(ec2):\n # call the boto ec2 function to create a key pair\n key_pair = ec2.create_key_pair(KeyName=KEY_PAIR_NAME)\n print(\"\\n===Created a new key pair in AWS.\")\n\n # capture the key and store it in a file\n KeyPairOut = str(key_pair.key_material)\n\n # create a file to store the key locally\n print(\"Saving the keypair.\")\n key_pair_path = KEY_PAIR_NAME + \".pem\"\n with open(key_pair_path, \"w\") as f:\n f.write(KeyPairOut)\n os.chmod(key_pair_path, 0o600)\n print(\"===Changed access permission to read-only.\")",
"def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return",
"def create(self, name, public_key=None):\n data = {\n \"keypair\": {\n \"name\": name\n }\n }\n if public_key is not None:\n data['keypair']['public_key'] = public_key\n \n path = '/os-keypairs'\n res = self.client.call(path, 'POST', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Create/import openstack keypair: %s' % truncate(res))\n return res[0]['keypair']",
"def create_keypair(self):\n # NOTE: currently we rely on zmq for convenience, but we may use libnacl directly\n # if we want to isolate this module from zmq dependency.\n public_key, private_key = zmq.curve_keypair()\n return public_key, private_key",
"def create_and_fill_bucket(self):\n EmrProcessing.bucket = \\\n self.s3_handle.create_bucket(EmrProcessing.bucket_name)\n key = EmrProcessing.bucket.new_key('input/test.csv')\n input_file_path = '../data/test.csv'\n key.set_contents_from_filename(input_file_path)\n key.set_acl('public-read')\n\n key = EmrProcessing.bucket.new_key('mapper/mapper.py')\n input_file_path = '../src/mapper/mapper.py'\n key.set_contents_from_filename(input_file_path)\n key.set_acl('public-read')",
"def test_aws_service_api_keypair_generate_post(self):\n pass",
"def create_keypair(key_name):\n if os.path.isfile(SSH_FOLDER + key_name + \".pem\"):\n return # Key already created\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n key = ec2.create_key_pair(key_name)\n key.save(SSH_FOLDER)",
"def create(self):\n self.initialize()\n\n if not self.__keypair:\n logger.info('Creating keypair %s...' % self.keypair_settings.name)\n\n if self.keypair_settings.public_filepath and os.path.isfile(\n self.keypair_settings.public_filepath):\n logger.info(\"Uploading existing keypair\")\n self.__keypair = nova_utils.upload_keypair_file(\n self._nova, self.keypair_settings.name,\n self.keypair_settings.public_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = False\n else:\n logger.info(\"Creating new keypair\")\n keys = nova_utils.create_keys(self.keypair_settings.key_size)\n self.__keypair = nova_utils.upload_keypair(\n self._nova, self.keypair_settings.name,\n nova_utils.public_key_openssh(keys))\n file_utils.save_keys_to_files(\n keys, self.keypair_settings.public_filepath,\n self.keypair_settings.private_filepath)\n\n if self.keypair_settings.delete_on_clean is not None:\n delete_on_clean = self.keypair_settings.delete_on_clean\n self.__delete_keys_on_clean = delete_on_clean\n else:\n self.__delete_keys_on_clean = True\n elif self.__keypair and not os.path.isfile(\n self.keypair_settings.private_filepath):\n logger.warn(\"The public key already exist in OpenStack \\\n but the private key file is not found ..\")\n\n return self.__keypair",
"def create_key_pair(self) -> Keypair:\n res = self.context.post(\n \"/dsum/create_key_pair\", None, None, \"DSum: failed creating a Curve 25519 Keypair\")\n return Keypair(res['private_key_id'], res['public_key_id'])",
"def test_create_keypair_save_both(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path,\n private_filepath=priv_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)\n\n self.assertTrue(os.path.isfile(priv_file_path))",
"def getAwsKeypair(directory=None):\n if directory is None:\n directory = './'\n with open(directory + 'access.key', 'r+') as fp:\n access_key = fp.read()\n with open(directory + 'secret.key', 'r+') as fp:\n secret_key = fp.read()\n return (access_key, secret_key)",
"def test_create_keypair_from_file(self):\n keys = RSA.generate(1024)\n nova_utils.save_keys_to_files(keys=keys, pub_file_path=pub_file_path)\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)",
"def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws",
"def init_region ( aws, region_name, aws_account_type, init_params ) :\n ec2_conn = aws.ec2_conn( )\n keypair_savedir = os.environ[ 'PWD' ]\n print \"Creating new keypairs for region \" + region_name\n for keytype in init_params.get( 'keypairs', [] ) :\n keypair_name = get_keypair_name( aws_account_type, region_name, keytype )\n keypair = ec2_conn.get_key_pair( keypair_name )\n if keypair :\n print 'Keypair ' + keypair_name + ' already exists. Skipping.'\n else :\n keypair = ec2_conn.create_key_pair( keypair_name )\n keypair.save( keypair_savedir )\n keypair_filename = keypair_savedir + '/' + keypair_name + '.pem'\n print 'Created keypair ' + keypair_filename\n store_keypair( s3_infra_conn = aws.s3_infrastructure_conn( ),\n region_name = region_name,\n aws_account_type = aws_account_type,\n keypair_name = get_keypair_keypath( aws_account_type ) + keypair_name,\n keypair_filename = keypair_filename )\n print 'Stored keypair in S3 at: ' + get_keypair_keypath( aws_account_type )\n os.remove( keypair_filename )\n\n if init_params.get( 'init-deployment', 'YES' ) == 'YES' :\n print \"Creating Deployment security group.\"\n deploy_secgrp = ec2_conn.create_security_group( get_deployment_secgrp_name( ),\n \"Used by the deployment server.\" )\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list ) \n\n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = build_server_cidr ) \n deploy_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = build_server_cidr ) \n\n if init_params.get( 'init-ami-update', 'YES' ) == 'YES' :\n print \"Creating ami-update security group.\"\n amiupdate_secgrp = ec2_conn.create_security_group( get_amiupdate_secgrp_name( ),\n \"Used by the ami update instances.\" )\n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 22,\n to_port = 22,\n cidr_ip = hbo_cidr_list ) \n amiupdate_secgrp.authorize( ip_protocol = \"tcp\",\n from_port = 8080,\n to_port = 8080,\n cidr_ip = hbo_cidr_list )",
"def store_keypair ( s3_infra_conn, region_name, aws_account_type, keypair_name, keypair_filename ) :\n keypair_bucket = get_admin_bucket_name( region_name = region_name )\n store_s3_contents( s3_conn = s3_infra_conn,\n bucket_name = keypair_bucket,\n key_name = get_keypair_keypath( aws_account_type ) + get_keypair_keyname( keypair_name ),\n key_contents_filename = keypair_filename )",
"def test_aws_service_api_keypair_import_post(self):\n pass",
"def create_key_pair(self, keypair, **kwargs):\n\n if not isinstance(keypair, models.CreateKeyPairReq):\n raise HuaweiCloudSDKException(\n message=\"The datatype of parameter(keypair) \"\n \"is not CreateKeyPairReq\")\n body_params = keypair.serialize()\n\n header_params = {}\n header_params['Accept'] = util.select_header_accept(\n ['application/xml', 'application/json'])\n\n header_params['Content-Type'] = util.select_header_content_type(\n ['application/json', 'application/xml'])\n\n return_code, return_data, _ = self.api_client.handle_raw_request(\n 'compute', 'POST',\n '/os-keypairs',\n headers=header_params,\n body=body_params,\n timeout=kwargs.get('_request_timeout', None),\n _preload_content=kwargs.get('_preload_content', True))\n\n if return_data is not None:\n return_data = json.loads(return_data)\n else:\n return_data = {}\n if return_code not in [200, 201]:\n raise HuaweiCloudSDKException(\n return_code,\n \"Run create_key_pair failed, \"\n \"message=%s\" % return_data.get(\"message\"))\n return models.CreateKeyPairResp().deserialize(return_data)",
"def create_key ():",
"def test_create_keypair_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)",
"def download_keypair ( s3_infra_conn, aws_account_type, region_name, keypair_type ) :\n keypair_name = get_keypair_name( aws_account_type, region_name, keypair_type )\n keypair_bucket = get_admin_bucket_name( region_name = region_name )\n return retrieve_s3_contents( s3_conn = s3_infra_conn,\n bucket_name = keypair_bucket,\n key_name = get_keypair_keypath( aws_account_type ) + get_keypair_keyname( keypair_name ),\n stored_filename = keypair_name )",
"def test_create_keypair_save_pub_only(self):\n self.keypair_creator = create_keypairs.OpenStackKeypair(self.os_creds,\n create_keypairs.KeypairSettings(name=keypair_name,\n public_filepath=pub_file_path))\n self.keypair_creator.create()\n\n keypair = nova_utils.keypair_exists(self.keypair_creator.nova, self.keypair_creator.keypair)\n self.assertEquals(self.keypair_creator.keypair, keypair)\n\n file_key = open(os.path.expanduser(pub_file_path)).read()\n self.assertEquals(self.keypair_creator.keypair.public_key, file_key)",
"def _get_key():\n conn = boto.connect_s3()\n bucket = conn.create_bucket(settings.MESSY_BUCKET)\n key = Key(bucket)\n key.key = settings.MESSY_KEY\n return key",
"def setup(pk_outfile=const.ABE_PK_FILE, msk_outfile=const.ABE_MSK_FILE, pairing_group_curve=const.PAIRING_GROUP_CURVE,\n debug=0):\n\n # Instantiate a bilinear pairing map with the given curve\n pairing_group = PairingGroup(pairing_group_curve)\n\n # CP-ABE\n cpabe = CPabe_BSW07(pairing_group)\n\n # Create public and master secret keys\n (pk, msk) = cpabe.setup()\n\n if debug: # ONLY USE FOR DEBUG\n print('CP-ABE PUBLIC KEY =', pk)\n print('CP-ABE MASTER SECRET KEY =', msk)\n\n # Save keys on given output files\n with open(pk_outfile, 'w') as fout:\n fout.write(objectToBytes(pk, pairing_group).hex())\n\n with open(msk_outfile, 'w') as fout:\n fout.write(objectToBytes(msk, pairing_group).hex())",
"def gen_key_pair():\n sk = gen_secret_key(BITCOIN.gen.n)\n pk = PublicKey.from_sk(sk)\n return sk, pk",
"def test_aws_service_api_keypair_get(self):\n pass",
"def amazonEc2_create(amazonEc2):\n\treturn amazonEc2",
"def do_new(argv):\n\n global PRIVATE_KEY\n\n if not PRIVATE_KEY:\n PRIVATE_KEY = wallet.get_private_key()\n else:\n get_new = yes_or_no(\"Private key already exist, do you want generate new one ?\")\n if get_new:\n PRIVATE_KEY = wallet.get_private_key()\n print(\"Private Key: '\" + PRIVATE_KEY + \"'\")\n cmpr_pub_key = wallet.get_compressed_public_key(PRIVATE_KEY, 1)\n addr = wallet.public_key_to_address(cmpr_pub_key, 0)\n open(\"data/address\", \"w\").write(addr)\n print(\"Public key was saved to 'data/cmpr_pub_key'\")",
"def create_key_pair_msg(\n msg: CreateKeyPairMessage,\n node: DomainInterface,\n verify_key: VerifyKey,\n) -> SuccessResponseMessage:\n # Check if user has permissions to create new public/private key pair\n _allowed = node.users.can_manage_infrastructure(verify_key=verify_key)\n file_path = os.getenv(\"OBLV_KEY_PATH\", \"/app/content\")\n file_name = os.getenv(\"OBLV_KEY_NAME\", \"oblv_key\")\n if _allowed:\n result = subprocess.run( # nosec\n [\n \"/usr/local/bin/oblv\",\n \"keygen\",\n \"--key-name\",\n file_name,\n \"--output\",\n file_path,\n ],\n capture_output=True,\n )\n if result.stderr:\n raise subprocess.CalledProcessError( # nosec\n returncode=result.returncode, cmd=result.args, stderr=result.stderr\n )\n\n f_private = open(file_path + \"/\" + file_name + \"_private.der\", \"rb\")\n private_key = f_private.read()\n f_private.close()\n f_public = open(file_path + \"/\" + file_name + \"_public.der\", \"rb\")\n public_key = f_public.read()\n f_public.close()\n\n node.oblv_keys.remove()\n node.oblv_keys.add_keys(public_key=public_key, private_key=private_key)\n else:\n raise AuthorizationError(\"You're not allowed to create a new key pair!\")\n\n return SuccessResponseMessage(\n address=msg.reply_to,\n resp_msg=f\"Successfully created a new public/private key pair on the domain node: {node.name}\",\n )",
"def create_keypair(self, username):\n msg = \"create_keypair not implemented\"\n raise NotImplementedError(msg)",
"def create_ssh_key_file(username: str, ssh_key: bytes, ip_address: str):\n\n if not os.path.exists(\"./ansible/keys\"):\n os.mkdir(\"./ansible/keys\")\n\n with open(f\"./ansible/keys/admin_{ip_address}.pem\", \"w\") as ssh_key_file:\n ssh_key_file.write(ssh_key.decode())\n\n os.system(f\"chmod 400 ./ansible/keys/admin_{ip_address}.pem\")"
]
| [
"0.73303676",
"0.6833116",
"0.6639188",
"0.65754193",
"0.6572603",
"0.6434263",
"0.6376655",
"0.6371478",
"0.62773746",
"0.62645876",
"0.6253658",
"0.6243391",
"0.62319267",
"0.61779505",
"0.616518",
"0.61117303",
"0.60635716",
"0.602635",
"0.5972846",
"0.595534",
"0.5942674",
"0.59398645",
"0.5915675",
"0.587795",
"0.5857617",
"0.5844282",
"0.5814964",
"0.58144945",
"0.5795392",
"0.57748663"
]
| 0.7892392 | 0 |
Create a bcbio IAM user account with full access permissions. | def _bcbio_iam_user(conn, args):
import boto
name = "bcbio"
access_key_name = "full_admin_access"
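    # Reuse an existing bcbio IAM user where possible; only create the user (and attach the admin
    # policy) or issue new access keys when missing or when --recreate is requested.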
if args.nocreate:
need_creds = False
else:
try:
conn.get_user(name)
if args.recreate:
keys = conn.get_all_access_keys(name)
for access_key in tz.get_in(["list_access_keys_response", "list_access_keys_result",
"access_key_metadata"], keys, []):
conn.delete_access_key(access_key["access_key_id"], name)
need_creds = True
else:
need_creds = False
except boto.exception.BotoServerError:
conn.create_user(name)
conn.put_user_policy(name, access_key_name, IAM_POLICY)
need_creds = True
if need_creds:
creds = conn.create_access_key(name)
else:
creds = {}
if creds:
creds = tz.get_in(["create_access_key_response", "create_access_key_result", "access_key"], creds)
print("User credentials for %s:" % name)
for awsid in ["access_key_id", "secret_access_key"]:
print(" %s: %s" % (awsid, creds.get(awsid)))
return {"ec2_access_key": creds.get("access_key_id"),
"ec2_secret_key": creds.get("secret_access_key")}
else:
print("User %s already exists, no new credentials" % name)
print("Edit the configuration file to add existing user's access and secret keys")
return {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_account(self, user):\n tx = self.iroha.transaction(\n [\n self.iroha.command(\n \"CreateAccount\",\n account_name=user.gov_id,\n domain_id=\"afyamkononi\",\n public_key=user.public_key,\n )\n ]\n )\n IrohaCrypto.sign_transaction(tx, self.creator_account_details.private_key)\n return self.send_transaction_and_return_status(tx)",
"def create_user_profile(IamUserArn=None, SshUsername=None, SshPublicKey=None, AllowSelfManagement=None):\n pass",
"def create_user(BrokerId=None, ConsoleAccess=None, Groups=None, Password=None, Username=None):\n pass",
"def create_user(ctx, db_username, db_password, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n user = cmd.ensure_admin_user(\n client=ctx.obj, project_id=project.id, username=db_username,\n password=db_password)\n pprint(user)",
"def process_create_dropbox_client_account ( iam_conn, base_name, action_params ) :\n account_name = create_dropbox_client_name( base_name, action_params[ 'name' ] )\n if does_user_exist( iam_conn, account_name ) :\n print 'AWS account ' + account_name + ' already exists. Skipping.'\n return None\n\n print \"Creating AWS account \" + account_name\n iam_conn.create_user( account_name )\n\n if action_params.get( 'generate-access-key', 'NO' ) == 'YES' :\n print \"Generating access key\"\n response = iam_conn.create_access_key( account_name )\n access_key = response[ 'create_access_key_response' ][ 'create_access_key_result' ][ 'access_key' ][ 'access_key_id' ]\n access_key_secret = response[ 'create_access_key_response' ][ 'create_access_key_result' ][ 'access_key' ][ 'secret_access_key' ]\n\n ## FIX: Need to store these with the credentials service at some point.\n key_filename = account_name + '.accesskey.txt'\n print 'Saving access key to file ' + key_filename\n key_file = open( key_filename, 'w' )\n key_file.write( 'Access Key: ' + access_key + '\\n' )\n key_file.write( 'Access Key Secret: ' + access_key_secret )\n key_file.close( )\n print '** WARNING: The access key MUST be registered manually with the credential service before it can be used.'",
"def create_account():\n account = w3.eth.account.create()\n return account",
"def generate_user(self):\n user = self.iam_client.create_user(\n UserName=self.generate_username(),\n Tags=self.get_tags()\n )\n\n username = user['User']['UserName']\n accesskey = self.iam_client.create_access_key(UserName=username)\n\n print('UserName = {}\\nAccessKeyId = {}\\nSecretAccessKey = {}'\n .format(\n username,\n accesskey['AccessKey']['AccessKeyId'],\n accesskey['AccessKey']['SecretAccessKey']\n ))\n\n return username, user['User']['Arn']",
"def create_account(\n account_name,\n account_email,\n account_role,\n access_to_billing,\n organization_unit_id,\n scp):\n\n client = session.client('organizations')\n\n try:\n create_account_response = client.create_account(Email=account_email, AccountName=account_name,\n RoleName=account_role,\n IamUserAccessToBilling=access_to_billing)\n except botocore.exceptions.ClientError as e:\n print(e)\n sys.exit(1)\n\n time.sleep(10)\n\n account_status = 'IN_PROGRESS'\n while account_status == 'IN_PROGRESS':\n create_account_status_response = client.describe_create_account_status(\n CreateAccountRequestId=create_account_response.get('CreateAccountStatus').get('Id'))\n print(\"Create account status \"+str(create_account_status_response))\n account_status = create_account_status_response.get('CreateAccountStatus').get('State')\n if account_status == 'SUCCEEDED':\n accountid = create_account_status_response.get('CreateAccountStatus').get('AccountId')\n elif account_status == 'FAILED':\n print(\"Account creation failed: \" + create_account_status_response.get('CreateAccountStatus').get('FailureReason'))\n sys.exit(1)\n root_id = client.list_roots().get('Roots')[0].get('Id')\n\n # Move account to the org\n if organization_unit_id is not None:\n try:\n describe_organization_response = client.describe_organizational_unit(\n OrganizationalUnitId=organization_unit_id)\n move_account_response = client.move_account(AccountId=accountid, SourceParentId=root_id,\n DestinationParentId=organization_unit_id)\n except Exception as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r} \"\n message = template.format(type(ex).__name__, ex.args)\n # create_organizational_unit(organization_unit_id)\n print(message)\n\n # Attach policy to account if exists\n if scp is not None:\n attach_policy_response = client.attach_policy(PolicyId=scp, TargetId=accountid)\n print(\"Attach policy response \"+str(attach_policy_response))\n\n return accountid",
"def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)",
"def create_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"create_user\")",
"def create_user_as_ambassador(self, *args, **kwargs):\n profile = self.create_user(*args, **kwargs)\n profile.make_ambassador()\n return profile",
"def create_bucket(ACL=None, Bucket=None, CreateBucketConfiguration=None, GrantFullControl=None, GrantRead=None, GrantReadACP=None, GrantWrite=None, GrantWriteACP=None, ObjectLockEnabledForBucket=None):\n pass",
"def create_users(self):\n allow_all_policy = \"\"\"{\n \"Statement\": [\n {\n \"Action\": \"*\",\n \"Effect\": \"Allow\",\n \"Resource\": \"*\"\n }]\n }\"\"\"\n\n for i in xrange(self.args.account_number):\n account_name = self.args.account_prefix + str(i)\n group_name = self.args.group_prefix + str(i)\n password = self.args.password_prefix + str(i)\n self.tester.iam.create_account(account_name)\n self.tester.iam.create_group(group_name, \"/\", account_name)\n self.tester.iam.attach_policy_group(group_name, \"allow-all\", allow_all_policy, account_name)\n for k in xrange(self.args.user_number):\n user_name = self.args.user_prefix + str(k)\n self.tester.iam.create_user(user_name, \"/\", account_name)\n self.tester.iam.add_user_to_group(group_name, user_name, account_name)\n self.tester.iam.create_login_profile(user_name, password, account_name)",
"def execute_create_user(arg):\n blockchain = Blockchain()\n blockchain.read_blockchain()\n\n username = arg['username']\n\n if username is None:\n print('You have to provide an username!!!')\n return\n\n wallet = blockchain.create_user(username)\n print(f'User wallet address is: {wallet.address}')",
"def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')",
"def create_user(self, user_info):\n username = user_info.name\n password = user_info.password\n os_tenants = create_os_project(username=username, password=password, tenant_name=username)\n ob_client = OBClient()\n project = {\n 'name': username,\n 'description': 'the project for user %s' % username\n }\n project = ob_client.create_project(project)\n user = {\n 'username': username,\n 'password': password,\n 'enabled': True,\n 'email': \"{}@softfire.local\".format(username),\n 'roles': [\n {\n 'role': 'USER',\n 'project': project.get('name')\n }\n ]\n }\n logger.debug(\"Create openbaton project %s\" % project)\n ob_client = OBClient(project.get('name'))\n user = ob_client.create_user(user)\n logger.debug(\"Create openbaton user %s\" % user)\n\n user_info.ob_project_id = project.get('id')\n # user_info.testbed_tenants = {}\n\n testbed_tenants = {}\n if os_tenants:\n for testbed_name, v in os_tenants.items():\n tenant_id = v.get('tenant_id')\n vim_instance = v.get('vim_instance')\n try:\n vi = ob_client.create_vim_instance(vim_instance)\n logger.debug(\"created vim instance with id: %s\" % vi.get('id'))\n except NfvoException:\n logger.warning(\"Not able to upload vim %s\" % testbed_name)\n testbed_tenants[TESTBED_MAPPING[testbed_name]] = tenant_id\n\n for k, v in testbed_tenants.items():\n user_info.testbed_tenants[k] = v\n logger.debug(\"Updated user_info %s\" % user_info)\n\n return user_info",
"def create_access_key(self, user_name=None):\r\n params = {'UserName' : user_name}\r\n return self.get_response('CreateAccessKey', params)",
"def create_profile(self, user, *args, **kwargs):\n salt = hashlib.sha1(str(random.random())).hexdigest()[:5]\n activation_key = hashlib.sha1(salt + user.username).hexdigest()\n return self.create(user=user, activation_key=activation_key, **kwargs)",
"def create_temp_user(client, role_arn):\n try:\n response = client.assume_role(\n RoleArn=role_arn,\n RoleSessionName=\"Lambda-Start-Stop-functionality\"\n )\n ec2_user = boto3.client(\n 'ec2',\n aws_access_key_id=response['Credentials']['AccessKeyId'],\n aws_secret_access_key=response['Credentials']['SecretAccessKey'],\n aws_session_token=response['Credentials']['SessionToken']\n )\n return ec2_user\n except Exception as error:\n logger.info(\"Creating a temporary ec2 privileged user failed with the following error : {}\".format(error))",
"def create_profile(self, user):\r\n salt = sha.new(str(random.random())).hexdigest()[:5]\r\n activation_key = sha.new(salt+user.username).hexdigest()\r\n return self.create(user=user,\r\n activation_key=activation_key)",
"def create_user(self):\n # TODO-ROB: This is used ONLY when the user registers in flask\n # TODO-ROB: Create the cookiecutter.json file\n # extra_context overrides user and default configs\n cookiecutter(self.user_cookie, no_input=True, extra_context={\"user_name\": self.user}, output_dir=self.users)",
"def create_profile(self, user):\n salt = sha.new(str(random.random())).hexdigest()[:5]\n activation_key = sha.new(salt+user.username).hexdigest()\n return self.create(user=user,\n activation_key=activation_key)",
"def bdev_opal_new_user(client, bdev_name, admin_password, user_id, user_password):\n params = {\n 'bdev_name': bdev_name,\n 'admin_password': admin_password,\n 'user_id': user_id,\n 'user_password': user_password,\n }\n\n return client.call('bdev_opal_new_user', params)",
"def create_user(self) -> None:\n # update when the account was created\n self.account_created = datetime.now().date()\n self.insert_to_db()\n log(f\"An account for User:{self.id} has been created.\")",
"def create_user(self, conn, name, password, group):\n user = conn.user.allocate(name, password, \"\", [group])\n return user",
"def configure_admin_user(session, account_id, admin_role, in_use):\n sys.stderr.write(\"Creating IAM client...\" + \"\\n\")\n iam = session.client(\"iam\")\n sys.stderr.write(\n \"Creating managed policy for protecting organization assets...\" + \"\\n\")\n iam.create_policy(\n PolicyName=AWS_IAM_PROTECTION_POLICY_NAME,\n Description=(\n \"Provides default-deny control over the Organization roles and resources that \"\n \"cannot be controlled through organization SCPs.\"),\n PolicyDocument=\"\"\"{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": \"Stmt1500485872000\",\n \"Effect\": \"Deny\",\n \"Action\": [\n \"iam:*\"\n ],\n \"Resource\": [\n \"arn:aws:iam::%s:role/%s\",\n \"arn:aws:iam::%s:role/%s\"\n ]\n }\n ]\n }\n \"\"\" % (account_id, admin_role, account_id, AWS_CLOUDTRAIL_ROLE_NAME))\n\n sys.stderr.write(\"Creating user...\" + \"\\n\")\n iam.create_user(UserName=AWS_IAM_USER_NAME)\n sys.stderr.write(\"Attached AWS managed AdministratorAccess policy...\" +\n \"\\n\")\n iam.attach_user_policy(\n UserName=AWS_IAM_USER_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AdministratorAccess\")\n iam.attach_user_policy(\n UserName=AWS_IAM_USER_NAME,\n PolicyArn=\"arn:aws:iam::%s:policy/%s\" %\n (account_id, AWS_IAM_PROTECTION_POLICY_NAME))\n sys.stderr.write(\"IAM user created and policies attached.\" + \"\\n\")\n\n password = base64.b64encode(os.urandom(32))\n iam.create_login_profile(\n UserName=AWS_IAM_USER_NAME,\n Password=password,\n PasswordResetRequired=True)\n sys.stderr.write(\"IAM user (%s) password changed to: %s\" % (\n AWS_IAM_USER_NAME, password) + \"\\n\")\n return password",
"def create_or_modify_account(module, idrac, slot_uri, slot_id, empty_slot_id, empty_slot_uri, user_attr):\n generation, firmware_version = idrac.get_server_generation\n msg, response = \"Unable to retrieve the user details.\", {}\n if (slot_id and slot_uri) is None and (empty_slot_id and empty_slot_uri) is not None:\n msg = \"Successfully created user account.\"\n payload = get_payload(module, empty_slot_id, action=\"create\")\n if module.check_mode:\n module.exit_json(msg=\"Changes found to commit!\", changed=True)\n if generation >= 14:\n response = idrac.invoke_request(ATTRIBUTE_URI, \"PATCH\", data={\"Attributes\": payload})\n elif generation < 14:\n xml_payload, json_payload = convert_payload_xml(payload)\n time.sleep(10)\n response = idrac.import_scp(import_buffer=xml_payload, target=\"ALL\", job_wait=True)\n elif (slot_id and slot_uri) is not None:\n msg = \"Successfully updated user account.\"\n payload = get_payload(module, slot_id, action=\"update\")\n xml_payload, json_payload = convert_payload_xml(payload)\n value = compare_payload(json_payload, user_attr)\n if module.check_mode:\n if value:\n module.exit_json(msg=\"Changes found to commit!\", changed=True)\n module.exit_json(msg=\"No changes found to commit!\")\n if not value:\n module.exit_json(msg=\"Requested changes are already present in the user slot.\")\n if generation >= 14:\n response = idrac.invoke_request(ATTRIBUTE_URI, \"PATCH\", data={\"Attributes\": payload})\n elif generation < 14:\n time.sleep(10)\n response = idrac.import_scp(import_buffer=xml_payload, target=\"ALL\", job_wait=True)\n elif (slot_id and slot_uri and empty_slot_id and empty_slot_uri) is None:\n module.fail_json(msg=\"Maximum number of users reached. Delete a user account and retry the operation.\")\n return response, msg",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n # create new Stellar account\n stellar.api.create_account(user=instance)",
"def create():\n api_request = apireq.APIRequest(request, 'client_schema')\n if api_request.is_invalid():\n return api_request.error_text, 400\n return user_management.create_user(api_json['username'])",
"def grant_aws_open_data_access(user, project):\n url = settings.AWS_CLOUD_FORMATION\n # The payload has to be a string in an array\n payload = {'accountid': [\"{}\".format(user.cloud_information.aws_id)]}\n # Custom headers set as a key for a lambda function in AWS to grant access\n headers = {settings.AWS_HEADER_KEY: settings.AWS_HEADER_VALUE,\n settings.AWS_HEADER_KEY2: settings.AWS_HEADER_VALUE2}\n # Do a request to AWS and try to add the user ID to the bucket\n response = requests.post(url, data=json.dumps(payload), headers=headers)\n\n # Exit early if we received a response from AWS indicating an error.\n if response.status_code < 200 or response.status_code >= 300:\n LOGGER.info(\"Error sending adding the AWS ID to the Bucket Policy.\"\n \"The request payload is {0}\\nThe errror is the following: \"\n \"{1}\\n\".format(payload, response.content))\n return \"Access could not be granted.\", False\n\n # The following if block will:\n # (1) create a notification to send the user\n # (2) set a boolean to True/False, indicating if access was granted (True)\n aws_response = response.json()['message']\n if aws_response == \"No new accounts to add\":\n LOGGER.info(\"AWS response adding {0} to project {1}\\n{2}\".format(\n user.cloud_information.aws_id, project, aws_response))\n granted_access = True\n access_message = aws_response\n elif \"Accounts ['{}'] have been added\".format(user.cloud_information.aws_id) in aws_response:\n LOGGER.info(\"AWS response adding {0} to project {1}\\n{2}\".format(\n user.cloud_information.aws_id, project, aws_response))\n granted_access = True\n access_message = aws_response.split(',')[0]\n else:\n LOGGER.info('Unknown response from AWS - {0}\\nThe payload is {1}'.format(\n payload, response.content))\n granted_access = False\n access_message = \"Access could not be granted.\"\n\n return access_message, granted_access"
]
| [
"0.64758754",
"0.6371347",
"0.62785226",
"0.62672675",
"0.6244346",
"0.61853546",
"0.6175834",
"0.61129385",
"0.6081883",
"0.6014631",
"0.59924346",
"0.5924464",
"0.58846915",
"0.58055896",
"0.57954687",
"0.5793468",
"0.5780767",
"0.57705027",
"0.57041115",
"0.5681222",
"0.56748945",
"0.5671788",
"0.56518745",
"0.5642292",
"0.5639952",
"0.5639489",
"0.5632336",
"0.5623871",
"0.5600402",
"0.5591386"
]
| 0.75444907 | 0 |
Create an IAM instance profile with temporary S3 access to be applied to launched machines. | def bcbio_s3_instance_profile(conn, args):
import boto
if hasattr(args, "nocreate") and args.nocreate:
return {"instance_profile": ""}
base_name = args.cluster if hasattr(args, "cluster") and args.cluster else "bcbio"
name = "%s_full_s3_access" % (base_name)
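    # Fetch or create the instance profile and matching IAM role (attaching the S3 policy on role
    # creation), then make sure the role is attached to the profile.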
try:
ip = conn.get_instance_profile(name)
except boto.exception.BotoServerError:
print("Instance profile %s doesn't exist, creating" % name)
ip = conn.create_instance_profile(name)
try:
conn.get_role(name)
except boto.exception.BotoServerError:
print("Role %s doesn't exist, creating" % name)
conn.create_role(name)
conn.put_role_policy(name, name, S3_POLICY)
if not tz.get_in(["get_instance_profile_response", "get_instance_profile_result", "instance_profile", "roles"],
ip):
conn.add_role_to_instance_profile(name, name)
print("Instance profile: %s" % name)
return {"instance_profile": name} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(profile, name):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = name\n return client.create_instance_profile(**params)",
"def test_instance_profile_exists(self) -> None:\n self.assertTrue(self.validate_instance_profile('s3-access-role', is_prod=self.prod_env))",
"def _add_instance_profile(self, cleanup_policy_statements, instance_role=None):\n instance_profile_resource = iam.CfnInstanceProfile(\n self,\n \"InstanceProfile\",\n path=IAM_ROLE_PATH,\n roles=[instance_role.split(\"/\")[-1] if instance_role else Fn.ref(\"InstanceRole\")],\n instance_profile_name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),\n )\n\n if not self.custom_cleanup_lambda_role:\n self._add_resource_delete_policy(\n cleanup_policy_statements,\n [\"iam:DeleteInstanceProfile\"],\n [\n self.format_arn(\n service=\"iam\",\n region=\"\",\n resource=\"instance-profile\",\n resource_name=\"{0}/{1}\".format(\n IAM_ROLE_PATH.strip(\"/\"),\n self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX),\n ),\n )\n ],\n )\n\n return instance_profile_resource",
"def create_user_profile(IamUserArn=None, SshUsername=None, SshPublicKey=None, AllowSelfManagement=None):\n pass",
"def create(profile):\n client = boto3client.get(\"ec2\", profile)\n return client.create_internet_gateway()",
"async def get_or_create_temporary_s3_access(user_id: UserID):",
"def create_instance_profile(instance_role,\n iam_client=None,\n region_name=None,\n tag_prefix=None,\n dry_run=False):\n instance_profile_arn = _get_instance_profile(\n instance_role, iam_client=iam_client, region_name=region_name)\n if instance_profile_arn:\n LOGGER.info(\"%s found IAM instance profile '%s'\",\n tag_prefix, instance_profile_arn)\n else:\n if not dry_run:\n resp = iam_client.create_instance_profile(\n InstanceProfileName=instance_role)\n instance_profile_arn = resp['InstanceProfile']['Arn']\n LOGGER.info(\"%s%s created IAM instance profile '%s'\",\n \"(dryrun) \" if dry_run else \"\", tag_prefix, instance_profile_arn)\n if not dry_run:\n iam_client.add_role_to_instance_profile(\n InstanceProfileName=instance_role,\n RoleName=instance_role)\n LOGGER.info(\"%s%s added IAM instance profile %s to role %s\",\n \"(dryrun) \" if dry_run else \"\",\n tag_prefix, instance_profile_arn, instance_role)\n return instance_profile_arn",
"def create(profile, name):\n # Make sure it doesn't exist already.\n if exists(profile, name):\n msg = \"Instance profile '\" + str(name) + \"' already exists.\"\n raise ResourceAlreadyExists(msg)\n\n # Now we can create it.\n params = {}\n params[\"profile\"] = profile\n params[\"name\"] = name\n response = utils.do_request(instanceprofile, \"create\", params)\n\n # Check that it exists.\n instance_profile_data = polling_fetch(profile, name)\n if not instance_profile_data:\n msg = \"Instance profile '\" + str(name) + \"' not created.\"\n raise ResourceNotCreated(msg)\n\n # Send back the instance profile's info.\n return instance_profile_data",
"def make_profile_for_user(sender, instance, **kwargs):\n if kwargs['created']:\n new_profile = ImagerProfile(user=instance)\n new_profile.save()",
"def create_boto_session(account):\n aws_access_key_id = account['aws_access_key_id']\n aws_secret_access_key = account['aws_secret_access_key']\n region = account['region']\n #aws_profile = account['aws_profile']\n\n\n session = boto3.Session(\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key,\n region_name=region,\n #profile_name=aws_profile,\n )\n\n return session",
"def provision_create(ec2_conn, iam_conn, interana_account_id, s3_bucket_path, interana_user):\n try:\n user, all_policies = check_account_setup(iam_conn, interana_user)\n except Exception, e:\n print \"Warning could not verify user interana_user {} because {}\".format(interana_user, e)\n\n infile = 's3_bucket_list.policy.template'\n outfile = 's3_bucket_list.policy'\n\n bucket_name, bucket_prefix = get_bucket_name_prefix(s3_bucket_path)\n\n all_lines = ''\n with open(infile, 'r') as tmp_fh, open(outfile, 'w') as out_fh:\n for line in tmp_fh:\n re_proxy = re.compile('<INTERANA_ACCOUNT_ID>')\n translate = re_proxy.sub(interana_account_id, line)\n\n re_proxy = re.compile('<BUCKET_NAME>')\n translate = re_proxy.sub(bucket_name, translate)\n\n re_proxy = re.compile('<BUCKET_PREFIX>')\n translate = re_proxy.sub(bucket_prefix, translate)\n\n out_fh.write(translate)\n all_lines += translate.strip()\n\n if len(bucket_prefix) < 1:\n with open(outfile, 'r') as in_fh:\n policy = json.load(in_fh)\n del policy['Statement'][1]['Condition']\n all_lines = json.dumps(policy)\n print \"Download file to check GetObject Access {}\".format(outfile)\n with open(outfile, 'w') as out_fh:\n json.dump(policy, out_fh, indent=4)\n\n print \"****policy file {}***\".format(outfile)\n\n print json.dumps(json.loads(all_lines), indent=True)",
"def create_sam_bucket():\n local(f\"aws s3 mb s3://{env.bucket_name} --region {env.aws_region}\")",
"def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws",
"def create_temp_user(client, role_arn):\n try:\n response = client.assume_role(\n RoleArn=role_arn,\n RoleSessionName=\"Lambda-Start-Stop-functionality\"\n )\n ec2_user = boto3.client(\n 'ec2',\n aws_access_key_id=response['Credentials']['AccessKeyId'],\n aws_secret_access_key=response['Credentials']['SecretAccessKey'],\n aws_session_token=response['Credentials']['SessionToken']\n )\n return ec2_user\n except Exception as error:\n logger.info(\"Creating a temporary ec2 privileged user failed with the following error : {}\".format(error))",
"def create_profile(sender, **kw):\n user = kw['instance']\n if kw['created']:\n profile = UserProfile(user=user)\n profile.save()",
"def test_ec2_iaminstanceprofiles(mock_get_instances, neo4j_session):\n # Arrange\n boto3_session = MagicMock()\n create_test_account(neo4j_session, TEST_ACCOUNT_ID, TEST_UPDATE_TAG)\n data_iam = tests.data.aws.iam.INSTACE['Roles']\n sync_ec2_instances(\n neo4j_session,\n boto3_session,\n [TEST_REGION],\n TEST_ACCOUNT_ID,\n TEST_UPDATE_TAG,\n {'UPDATE_TAG': TEST_UPDATE_TAG, 'AWS_ID': TEST_ACCOUNT_ID},\n )\n cartography.intel.aws.iam.load_roles(\n neo4j_session, data_iam, TEST_ACCOUNT_ID, TEST_UPDATE_TAG,\n )\n common_job_parameters = {\n \"UPDATE_TAG\": TEST_UPDATE_TAG,\n }\n\n # Act\n run_analysis_job(\n 'aws_ec2_iaminstanceprofile.json',\n neo4j_session,\n common_job_parameters,\n )\n\n # Assert\n assert check_rels(\n neo4j_session,\n 'EC2Instance',\n 'id',\n 'AWSRole',\n 'arn',\n 'STS_ASSUMEROLE_ALLOW',\n rel_direction_right=True,\n ) == {\n ('i-02', 'arn:aws:iam::000000000000:role/SERVICE_NAME_2'),\n ('i-03', 'arn:aws:iam::000000000000:role/ANOTHER_SERVICE_NAME'),\n ('i-04', 'arn:aws:iam::000000000000:role/ANOTHER_SERVICE_NAME'),\n }",
"def create_user_profile(instance, created, **_):\n if created:\n Profile.objects.create(user=instance)",
"def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, profile_name=None):\n self.session = boto3.Session(profile_name=profile_name,\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key,\n aws_session_token=aws_session_token)\n self.aws_access_key_id = aws_access_key_id\n self.aws_secret_access_key = aws_secret_access_key\n self.aws_session_token = aws_session_token\n self.profile_name = profile_name\n self.s3_resource = self.session.resource('s3')\n self.s3_client = self.session.client('s3')\n self.connected = self.setup()",
"def create_profile(sender, instance, created, **kwargs):\n if created:\n profile, created = UserProfile.objects.get_or_create(user=instance)",
"def create_profile(sender, **kwargs):\n user = kwargs[\"instance\"]\n if kwargs[\"created\"]:\n user_profile = Profile(user=user)\n user_profile.save()",
"def fetch_config_from_s3(client):\n try:\n response = client.assume_role(\n RoleArn=\"arn:aws:iam::548760365095:role/Ec2StartStopLambdaActionRole\",\n RoleSessionName=\"Ec2-Start-Stop-Lambda-Session-Role\"\n )\n fetcher = boto3.client(\n 's3',\n aws_access_key_id=response['Credentials']['AccessKeyId'],\n aws_secret_access_key=response['Credentials']['SecretAccessKey'],\n aws_session_token=response['Credentials']['SessionToken']\n )\n return json_to_dict(fetcher)\n except Exception as error:\n logger.info(\"Creating a temporary S3 privileged user failed with the following error : {}\".format(error))",
"def s3_create_bucket(self):\n self.conn.create_bucket(DEFAULT_BUCKET_NAME)",
"def store(bucket, path_bucket, content, filename=\"\", file_type=None, user=None):\n b = boto_init_s3(bucket)\n if b:\n k = b.get_key(path_bucket)\n if not k:\n k = b.new_key(path_bucket)\n k.set_contents_from_string(content)\n if file_type == \"profile\":\n try:\n old_pix = ProfilePicture.objects.get(is_current=True, user_id=user)\n old_pix.is_current = False\n old_pix.save()\n except:\n pass\n ProfilePicture.objects.create(name=filename, path=path_bucket, user_id=user, is_current=True)\n else:\n if file_type == \"channel\":\n k.set_contents_from_string(content)\n return",
"def _s3_create(context, metadata):\n\n # Parse the metadata into bucket and manifest path\n parsed_url = parse.urlparse(metadata['image_location'])\n if parsed_url.hostname is not None:\n # Handle s3://<BUCKET_NAME>/<KEY_PATH> case\n bucket_name = parsed_url.hostname\n manifest_path = parsed_url.path[1:]\n else:\n # Handle <BUCKET_NAME>/<KEY_PATH> case\n bucket_name = parsed_url.path.split('/')[0]\n manifest_path = '/'.join(parsed_url.path.split('/')[1:])\n\n # Continue with S3 import\n s3_client = _s3_conn(context)\n image_location = '/'.join([bucket_name, manifest_path])\n key = s3_client.get_object(Bucket=bucket_name, Key=manifest_path)\n body = key['Body']\n if isinstance(body, str):\n manifest = body\n else:\n # TODO(andrey-mp): check big objects\n manifest = body.read()\n\n (image_metadata, image_parts,\n encrypted_key, encrypted_iv) = _s3_parse_manifest(context, manifest)\n metadata.update(image_metadata)\n metadata.update({'image_state': 'pending',\n 'visibility': 'private'})\n\n # TODO(bcwaldon): right now, this removes user-defined ids\n # We need to re-enable this.\n metadata.pop('id', None)\n\n glance = clients.glance(context)\n image = glance.images.create(**metadata)\n\n def _update_image_state(image_state):\n glance.images.update(image.id, image_state=image_state)\n\n def delayed_create():\n \"\"\"This handles the fetching and decrypting of the part files.\"\"\"\n context.update_store()\n try:\n image_path = tempfile.mkdtemp(dir=CONF.image_decryption_dir)\n log_vars = {'image_location': image_location,\n 'image_path': image_path}\n\n _update_image_state('downloading')\n try:\n parts = []\n for part_name in image_parts:\n part = _s3_download_file(s3_client, bucket_name,\n part_name, image_path)\n parts.append(part)\n\n # NOTE(vish): this may be suboptimal, should we use cat?\n enc_filename = os.path.join(image_path, 'image.encrypted')\n with open(enc_filename, 'wb') as combined:\n for filename in parts:\n with open(filename, \"rb\") as part:\n combined.write(part.read())\n\n except Exception:\n LOG.exception('Failed to download %(image_location)s '\n 'to %(image_path)s', log_vars)\n _update_image_state('failed_download')\n return\n\n _update_image_state('decrypting')\n try:\n dec_filename = os.path.join(image_path, 'image.tar.gz')\n _s3_decrypt_image(context, enc_filename, encrypted_key,\n encrypted_iv, dec_filename)\n except Exception:\n LOG.exception('Failed to decrypt %(image_location)s '\n 'to %(image_path)s', log_vars)\n _update_image_state('failed_decrypt')\n return\n\n _update_image_state('untarring')\n try:\n unz_filename = _s3_untarzip_image(image_path, dec_filename)\n except Exception:\n LOG.exception('Failed to untar %(image_location)s '\n 'to %(image_path)s', log_vars)\n _update_image_state('failed_untar')\n return\n\n _update_image_state('uploading')\n try:\n with open(unz_filename, \"rb\") as image_file:\n glance.images.upload(image.id, image_file)\n except Exception:\n LOG.exception('Failed to upload %(image_location)s '\n 'to %(image_path)s', log_vars)\n _update_image_state('failed_upload')\n return\n\n _update_image_state('available')\n\n shutil.rmtree(image_path)\n except glance_exception.HTTPNotFound:\n LOG.info('Image %swas deleted underneath us', image.id)\n except Exception:\n LOG.exception('Failed to complete image %s creation', image.id)\n\n eventlet.spawn_n(delayed_create)\n\n return image",
"def create(*args, **kwargs):\n\n factory = V2ProfileFactory()\n output = factory.create(export_json=True)\n click.echo(output)",
"def get(profile):\n client = boto3client.get(\"iam\", profile)\n return client.list_instance_profiles()",
"def create(profile, name, application, cname=None, version=None,\n tier=\"web\", key_pair=None, instance_type=\"t1.micro\",\n instance_profile=None, service_role=None,\n healthcheck_url=None, security_groups=None,\n max_instances=1, min_instances=1, tags=None,\n vpc_id=None, subnets=None, db_subnets=None,\n elb_subnets=None, elb_scheme=None,\n public_ip=None, root_volume_size=None):\n client = boto3client.get(\"elasticbeanstalk\", profile)\n params = {}\n params[\"ApplicationName\"] = application\n params[\"EnvironmentName\"] = name\n if cname:\n params[\"CNAMEPrefix\"] = cname\n if version:\n params[\"VersionLabel\"] = version\n stack = utils.get_multicontainer_docker_solution_stack(profile)\n params[\"SolutionStackName\"] = stack \n if tier == \"web\":\n tier_definition = {\n \"Name\": \"WebServer\",\n \"Type\": \"Standard\",\n \"Version\": \"1.0\",\n }\n elif tier == \"worker\":\n tier_definition = {\n \"Name\": \"Worker\",\n \"Type\": \"SQS/HTTP\",\n \"Version\": \"1.0\",\n }\n else:\n raise Exception(\"tier must be 'web' or 'worker'\")\n params[\"Tier\"] = tier_definition\n if tags:\n params[\"Tags\"] = tags\n options = []\n if key_pair:\n key_pair_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"EC2KeyName\",\n \"Value\": key_pair,\n }\n options.append(key_pair_option)\n if instance_type:\n instance_type_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"InstanceType\",\n \"Value\": instance_type,\n }\n options.append(instance_type_option)\n if instance_profile:\n profile_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"IamInstanceProfile\",\n \"Value\": instance_profile,\n }\n options.append(profile_option)\n if service_role:\n role_option = {\n \"Namespace\": \"aws:elasticbeanstalk:environment\",\n \"OptionName\": \"ServiceRole\",\n \"Value\": service_role,\n }\n options.append(role_option)\n if healthcheck_url:\n healthcheck_url_option = {\n \"Namespace\": \"aws:elasticbeanstalk:application\",\n \"OptionName\": \"Application Healthcheck URL\",\n \"Value\": healthcheck_url,\n }\n options.append(healthcheck_url_option)\n if security_groups:\n security_groups_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"SecurityGroups\",\n \"Value\": \",\".join(security_groups),\n }\n options.append(security_groups_option)\n if min_instances:\n min_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MinSize\",\n \"Value\": str(min_instances),\n }\n options.append(min_instances_option)\n if max_instances:\n max_instances_option = {\n \"Namespace\": \"aws:autoscaling:asg\",\n \"OptionName\": \"MaxSize\",\n \"Value\": str(max_instances),\n }\n options.append(max_instances_option)\n if vpc_id:\n vpc_id_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"VPCId\",\n \"Value\": vpc_id,\n }\n options.append(vpc_id_option)\n if subnets:\n subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"Subnets\",\n \"Value\": \",\".join(subnets),\n }\n options.append(subnets_option)\n if db_subnets:\n db_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"DBSubnets\",\n \"Value\": \",\".join(db_subnets),\n }\n options.append(db_subnets_option)\n if elb_subnets:\n elb_subnets_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBSubnets\",\n \"Value\": \",\".join(elb_subnets),\n }\n options.append(elb_subnets_option)\n if elb_scheme:\n elb_scheme_option = {\n 
\"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"ELBScheme\",\n \"Value\": elb_scheme,\n }\n options.append(elb_scheme_option)\n if public_ip:\n public_ip_option = {\n \"Namespace\": \"aws:ec2:vpc\",\n \"OptionName\": \"AssociatePublicIpAddress\",\n \"Value\": str(public_ip),\n }\n options.append(public_ip_option)\n if root_volume_size:\n root_volume_size_option = {\n \"Namespace\": \"aws:autoscaling:launchconfiguration\",\n \"OptionName\": \"RootVolumeSize\",\n \"Value\": str(root_volume_size),\n }\n options.append(root_volume_size_option)\n if options:\n params[\"OptionSettings\"] = options\n return client.create_environment(**params)",
"def create_ami_from_instance ( aws_account_type,\n ec2_conn,\n instance,\n ami_name,\n ami_description = None,\n wait_for_available = True ) :\n ami_id = instance.create_image( ami_name, ami_description )\n ami = aws_wait( ec2_conn.get_all_images, ami_id, [ ami_id ] )\n if not ami :\n print \"AMI is not available after a long time! \" + ami.name\n return None\n\n if wait_for_available :\n ami_available = wait_on_object_state( ami, 'available' ,max_wait=3600)\n if not ami_available :\n print \"AMI is not available after a long time! \" + ami.name\n return None\n\n # Allow other AWS accounts the ability to see this AMI.\n if aws_account_type == 'esp-nonprod' :\n priv_account_id = esp_prod[ 'accountid' ]\n else :\n priv_account_id = esp_nonprod[ 'accountid' ]\n\n ami.set_launch_permissions( user_ids = [ priv_account_id ] )\n\n return ami",
"def details(profile, instance_profile):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = instance_profile\n return client.get_instance_profile(**params)",
"def save_current_ami ( s3_infra_conn, region_name, env_type, app_name, ami_name ) :\n ami_bucket = get_admin_bucket_name( region_name = region_name )\n store_s3_contents( s3_conn = s3_infra_conn,\n bucket_name = ami_bucket,\n key_name = get_ami_keypath( env_type ) + get_ami_keyname( app_name ),\n key_contents = ami_name )"
]
| [
"0.7128096",
"0.60106343",
"0.59777516",
"0.5973521",
"0.594296",
"0.5929832",
"0.57960683",
"0.5742524",
"0.57358336",
"0.57317406",
"0.5709242",
"0.5685809",
"0.5625889",
"0.5614665",
"0.5586729",
"0.55047476",
"0.54935837",
"0.54734963",
"0.54473054",
"0.54275876",
"0.54045767",
"0.53811026",
"0.5380098",
"0.5333718",
"0.5330781",
"0.5326973",
"0.5315243",
"0.5311756",
"0.5296836",
"0.5280491"
]
| 0.72730076 | 0 |
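The record above is written against the legacy `boto` IAM connection (`conn.create_instance_profile`, `conn.put_role_policy`, and so on). Purely as a hedged point of comparison — this is not the record's own code — a roughly equivalent idempotent flow with boto3 might look like the sketch below. The policy documents are made-up placeholders (the record's S3_POLICY constant is not shown here) and the error handling is deliberately simplified.

import json

import boto3
from botocore.exceptions import ClientError

# Placeholder policies; in the record, S3_POLICY is defined elsewhere.
S3_POLICY = json.dumps({
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Action": "s3:*", "Resource": "*"}],
})
ASSUME_ROLE_POLICY = json.dumps({
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": "ec2.amazonaws.com"},
        "Action": "sts:AssumeRole",
    }],
})

def ensure_instance_profile(name):
    """Create the role and instance profile if missing, then link them."""
    iam = boto3.client("iam")
    try:
        iam.get_role(RoleName=name)
    except ClientError:
        print("Role %s doesn't exist, creating" % name)
        iam.create_role(RoleName=name, AssumeRolePolicyDocument=ASSUME_ROLE_POLICY)
    iam.put_role_policy(RoleName=name, PolicyName=name, PolicyDocument=S3_POLICY)
    try:
        profile = iam.get_instance_profile(InstanceProfileName=name)
    except ClientError:
        profile = iam.create_instance_profile(InstanceProfileName=name)
    if not profile["InstanceProfile"].get("Roles"):
        iam.add_role_to_instance_profile(InstanceProfileName=name, RoleName=name)
    return {"instance_profile": name}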
Parse the parsers in the given list one after another, then expect the end of the input. | def whole(parsers):
if len(parsers) == 0:
return finished >> (lambda x: [])
if len(parsers) == 1:
return parsers[0] + finished >> (lambda x: x[:-1])
return reduce(add, parsers) + skip(finished) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def at_end():\n def run(chunk, last):\n if chunk:\n return ParserResult.from_done(False, chunk, last)\n elif last:\n return ParserResult.from_done(True, chunk, last)\n else:\n return ParserResult.from_partial(Parser(run))\n return Parser(run)",
"def test_multiple_parsers():\n rules = []\n first_parser = BlockParser(rules)\n assert len(first_parser.rules) == 0\n\n rules.append((lambda x: True, 1.0))\n second_parser = BlockParser(rules)\n assert len(second_parser.rules) == 1\n\n assert len(first_parser.rules) == 0, \"Non-local mutation of a parser's rules\"",
"def parser(sent_list): #input: list of sentences",
"def parse_order(line, *line_parsers):\r\n for parser in line_parsers:\r\n try:\r\n return parser.parse(line)\r\n except ValueError:\r\n continue",
"def parse_tokens(self, tokens):\n for token in tokens:\n self.parse_token(token)",
"def _advance(self, idlist=None):\n if self.token.id == \"END\":\n return\n if idlist and self.token.id in idlist:\n self.token = next(self.token_gen)\n elif not idlist:\n self.token = next(self.token_gen)\n else:\n raise ParseError(\n \"\"\"Expected one of %s found %r instead. (line: %i)\"\"\"\n % (\" \".join(idlist), self.token.id, self.line)\n )",
"def _build_main_parser(self, redirector, terminators, multilineCommands, legalChars,\n commentInProgress, case_insensitive, blankLinesAllowed, prefixParser):\n\n # Build several parsing components that are eventually compiled into overall parser\n output_destination_parser = (pyparsing.Literal(redirector * 2) |\n (pyparsing.WordStart() + redirector) |\n pyparsing.Regex('[^=]' + redirector))('output')\n\n terminator_parser = pyparsing.Or(\n [(hasattr(t, 'parseString') and t) or pyparsing.Literal(t) for t in terminators])('terminator')\n string_end = pyparsing.stringEnd ^ '\\nEOF'\n multilineCommand = pyparsing.Or(\n [pyparsing.Keyword(c, caseless=case_insensitive) for c in multilineCommands])('multilineCommand')\n oneline_command = (~multilineCommand + pyparsing.Word(legalChars))('command')\n pipe = pyparsing.Keyword('|', identChars='|')\n do_not_parse = self.commentGrammars | commentInProgress | pyparsing.quotedString\n after_elements = \\\n pyparsing.Optional(pipe + pyparsing.SkipTo(output_destination_parser ^ string_end,\n ignore=do_not_parse)('pipeTo')) + \\\n pyparsing.Optional(output_destination_parser +\n pyparsing.SkipTo(string_end,\n ignore=do_not_parse).setParseAction(lambda x: x[0].strip())('outputTo'))\n if case_insensitive:\n multilineCommand.setParseAction(lambda x: x[0].lower())\n oneline_command.setParseAction(lambda x: x[0].lower())\n else:\n multilineCommand.setParseAction(lambda x: x[0])\n oneline_command.setParseAction(lambda x: x[0])\n\n if blankLinesAllowed:\n blankLineTerminationParser = pyparsing.NoMatch\n else:\n blankLineTerminator = (pyparsing.lineEnd + pyparsing.lineEnd)('terminator')\n blankLineTerminator.setResultsName('terminator')\n blankLineTerminationParser = ((multilineCommand ^ oneline_command) +\n pyparsing.SkipTo(blankLineTerminator, ignore=do_not_parse).setParseAction(\n lambda x: x[0].strip())('args') + blankLineTerminator)('statement')\n\n multilineParser = (((multilineCommand ^ oneline_command) +\n pyparsing.SkipTo(terminator_parser,\n ignore=do_not_parse).setParseAction(lambda x: x[0].strip())('args') +\n terminator_parser)('statement') +\n pyparsing.SkipTo(output_destination_parser ^ pipe ^ string_end,\n ignore=do_not_parse).setParseAction(lambda x: x[0].strip())('suffix') +\n after_elements)\n multilineParser.ignore(commentInProgress)\n\n singleLineParser = ((oneline_command +\n pyparsing.SkipTo(terminator_parser ^ string_end ^ pipe ^ output_destination_parser,\n ignore=do_not_parse).setParseAction(\n lambda x: x[0].strip())('args'))('statement') +\n pyparsing.Optional(terminator_parser) + after_elements)\n\n blankLineTerminationParser = blankLineTerminationParser.setResultsName('statement')\n\n parser = prefixParser + (\n string_end |\n multilineParser |\n singleLineParser |\n blankLineTerminationParser |\n multilineCommand + pyparsing.SkipTo(string_end, ignore=do_not_parse)\n )\n parser.ignore(self.commentGrammars)\n return parser",
"def test_multiple_series(self):\n assert parse_command('test{{A,B}}{{1,2}}') == [\n ('testA1', {}), ('testA2', {}), ('testB1', {}), ('testB2', {})]",
"def _parse(self, args):\r\n\r\n ordered = []\r\n opt_full = dict()\r\n opt_abbrev = dict()\r\n\r\n args = args + [''] # Avoid out of range\r\n i = 0\r\n\r\n while i < len(args) - 1:\r\n arg = args[i]\r\n arg_next = args[i+1]\r\n if arg.startswith('--'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_full[arg[2:]] = arg_next\r\n i += 2\r\n elif arg.startswith('-'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_abbrev[arg[1:]] = arg_next\r\n i += 2\r\n else:\r\n ordered.append(arg)\r\n i += 1\r\n \r\n return ordered, opt_full, opt_abbrev",
"def input(self, *input):\n for i in input:\n self._parser.feed(i)",
"def test_parse_valid(self):\n mock_scraper = MockCtdScraper()\n scrape_gen = mock_scraper.scrape(TEST_CHUNKSIZE)\n self.parser.parse(next(scrape_gen))",
"def _try_parse(self, *parse_funcs: ParseFunc) -> Optional[node.NodeType]:\n for parse_func in parse_funcs:\n try:\n with self.tokens:\n return parse_func()\n except ParserException:\n pass\n return None",
"def _process_parse(parse, coreflist):\n sentence = parse.get('sentences')\n if sentence:\n ptree = Tree.parse(tag_ptree(sentence[0]['parsetree'], coreflist))\n words = [(w[0], w[1]) for w in sentence[0]['words']]\n depends = [(d[0], d[1], d[2]) for d in sentence[0]['dependencies']]\n text = sentence[0]['text']\n\n return ptree, words, depends, text\n else:\n return None",
"def finish_parse(self) -> None:\n pass",
"def on_parse(\n self,\n ) -> AsyncIteratorOrIterator[None]: # pragma: no cover # pyright: ignore\n yield None",
"async def process(self, tokens):\n return await self.parser.process(tokens)",
"def parse(self, input):\n pass",
"def end_of_input():\n return at_end.bind(lambda end:\n Parser(lambda chunk, last: ParserResult.from_error(\"Not end of input\"))\n if not end else Parser.unit(None))",
"def parse(self, inp):\n\n tokens = self.tokenizer.tokenize(inp)\n tokens_left = len(tokens)\n\n # print(tokens)\n\n while tokens_left:\n\n for rule in self.grammar:\n tokens = tokens[rule.match(tokens):]\n\n if len(tokens) < tokens_left:\n tokens_left = len(tokens)\n else:\n # nothing is matching any more - stop\n break\n\n return len(tokens) == 0, tokens",
"def list_parse(self, parser_name, table):\n initial_datetime = datetime.strptime(\n self.initial_date, '%Y-%m-%d').date()\n until_datetime = datetime.strptime(self.until_date, '%Y-%m-%d').date()\n\n while initial_datetime != (until_datetime + timedelta(days=1)):\n current_date = initial_datetime.strftime('%Y-%m-%d')\n\n log.debug('Current date: {}'.format(current_date))\n print(current_date)\n\n glob_string = '{0}/data/raw/{1}/form-html/*.html'.format(\n PROJECT_DIR, current_date)\n\n for filepath in sorted(glob.glob(glob_string)):\n list_output = getattr(parse, parser_name)(filepath).form_list()\n\n # Because output might have multiple rows:\n for output in list_output:\n self.commit_to_database(table, output)\n\n initial_datetime += timedelta(days=1)",
"def test_multiple_identical_series(self):\n assert parse_command('test{{A,B}}{{A,B}}') == [\n ('testAA', {}), ('testBB', {})]",
"def parse(self, parser):\n with self.reading:\n chunks = [self.read_buffer.dequeue() or (yield self.base.read(self.bufsize))]\n try:\n while True:\n tupe, result = parser.__parser__()(chunks[-1], False)\n if tupe & ParserResult.DONE:\n value, chunk, _ = result\n del chunks[:]\n self.read_buffer.enqueue(chunk)\n do_return(value)\n elif tupe & ParserResult.PARTIAL:\n parser = result\n chunks.append((yield self.base.read(self.bufsize)))\n else:\n raise ParserError(result)\n except BrokenPipeError:\n # try to terminate parser with last chunk\n tupe, result = parser.__parser__()(b'', True)\n if tupe & ParserResult.DONE:\n value, chunk, _ = result\n del chunks[:]\n self.read_buffer.enqueue(chunk)\n do_return(value)\n raise\n finally:\n for chunk in chunks:\n self.read_buffer.enqueue(chunk)",
"def ParseMultiple(\r\n cls,\r\n statements: List[\"Statement.ItemType\"],\r\n normalized_iter: NormalizedIterator,\r\n observer: \"Statement.Observer\",\r\n ignore_whitespace=False,\r\n\r\n # True to ensure that results are sorted to find the best possible match\r\n # (regardless of statement order). False will return the first statement\r\n # matched.\r\n sort_results=True,\r\n\r\n # True to execute all statements within a single thread\r\n single_threaded=False,\r\n ) -> Optional[\"Statement.ParseResult\"]:\r\n\r\n original_statements = statements\r\n if isinstance(original_statements, Statement.NamedItem):\r\n statements = original_statements.Item\r\n\r\n use_futures = not single_threaded and len(statements) != 1\r\n\r\n # ----------------------------------------------------------------------\r\n def Impl(statement):\r\n parser = cls._Parser(\r\n statement,\r\n normalized_iter.Clone(),\r\n observer,\r\n ignore_whitespace=ignore_whitespace,\r\n single_threaded=single_threaded,\r\n )\r\n\r\n success = parser.ParseItem(statement)\r\n if success is None:\r\n return None\r\n\r\n return Statement.ParseResult(success, parser.results, parser.normalized_iter)\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n if use_futures:\r\n futures = observer.Enqueue(\r\n [\r\n lambda statement=statement: Impl(statement)\r\n for statement in statements\r\n ],\r\n )\r\n\r\n results = []\r\n\r\n for future in futures:\r\n result = future.result()\r\n if result is None:\r\n return None\r\n\r\n results.append(result)\r\n\r\n else:\r\n results = []\r\n\r\n for statement in statements:\r\n result = Impl(statement)\r\n if result is None:\r\n return None\r\n\r\n results.append(result)\r\n\r\n if sort_results:\r\n # Stable sort according to the criteria:\r\n # - Success\r\n # - Longest matched content\r\n\r\n sort_data = [\r\n (\r\n index,\r\n 1 if result.Success else 0,\r\n result.Iter.Offset,\r\n )\r\n for index, result in enumerate(results)\r\n ]\r\n\r\n sort_data.sort(\r\n key=lambda value: value[1:],\r\n reverse=True,\r\n )\r\n\r\n result = results[sort_data[0][0]]\r\n\r\n else:\r\n result = None\r\n\r\n for potential_result in results:\r\n if potential_result.Success:\r\n result = potential_result\r\n\r\n break\r\n\r\n if result is None:\r\n result = results[0]\r\n\r\n if result.Success:\r\n return Statement.ParseResult(\r\n True,\r\n [\r\n Statement.StatementParseResultItem(\r\n original_statements,\r\n result.Results,\r\n ),\r\n ],\r\n result.Iter,\r\n )\r\n\r\n return_results: Statement.ParseResultItemsType = []\r\n max_iter: Optional[NormalizedIterator] = None\r\n\r\n for result in results:\r\n return_results += result.Results\r\n\r\n if max_iter is None or result.Iter.Offset > max_iter.Offset:\r\n max_iter = result.Iter\r\n\r\n return Statement.ParseResult(\r\n False,\r\n [\r\n Statement.StatementParseResultItem(\r\n original_statements,\r\n return_results,\r\n ),\r\n ],\r\n cast(NormalizedIterator, max_iter),\r\n )",
"def _postprocess(\n self,\n result: List[str],\n eojeols: List[str],\n poses: List[str],\n ):\n token_indices = []\n temp_group = []\n for i, res in enumerate(result):\n if (\"<\" in res) or (\">\" in res):\n continue\n if not temp_group:\n temp_group.append(i)\n else:\n if i == (temp_group[-1] + 1):\n temp_group.append(i)\n else:\n token_indices.append(temp_group)\n temp_group = [i]\n token_indices.append(temp_group)\n\n lucrative = 0\n for i, li_index in enumerate(token_indices):\n if poses:\n eojeol = eojeols[i].split(\"+\")\n pos = poses[i].split(\"+\")\n tagged = []\n for e, p in zip(eojeol, pos):\n tagged.append(f\"{e}/{p}\")\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [\"+\".join(tagged)]\n else:\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [eojeols[i]]\n lucrative += len(li_index) - 1\n\n return result",
"def tests() -> None:\n assert input_parser(\"123\") == '123'\n assert input_parser(\"(add 12 12)\") == '24'\n assert input_parser(\"(add 0 (add 3 4))\") == '7'\n assert input_parser(\"(add 3 (add (add 3 3) 3))\") == '12'\n assert input_parser(\"(multiply 3 (multiply (multiply 3 3) 3))\") == '81'\n assert input_parser(\"(multiply 2 (multiply 3 4))\") == '24'\n assert input_parser(\"(multiply 0 (multiply 3 4))\") == '0'\n\n assert input_parser(\"(add 4 1)\") == '5'\n assert input_parser(\"(multiply 4 1)\") == '4'\n \n assert input_parser(\"(add 4 (add 1 8))\") == '13'\n assert input_parser(\"(add (add 1 8) 4)\") == '13'\n assert input_parser(\"(multiply (multiply 1 2) 12)\") == '24'\n assert input_parser(\"(multiply 4 (multiply 8 12))\") == '384'\n\n assert input_parser(\"(add (multiply 4 5) (multiply 10 10))\") == '120'\n assert input_parser(\"(add (multiply (add 4 (add 3 (add 3 (add 3 (add 1 (multiply 4 5)))))) 5) (multiply 10 10))\") == '270'\n \n assert input_parser(\"(add (multiply 4 5) (multiply 10 10) (add 1 2 3 4 5 6 7 (add 4 4) 9) (multiply 4 5))\") == '185'\n\n assert input_parser('(subtract 2 1)') == '1'\n assert input_parser(\"(divide 55 5)\") == '11'",
"def multi_parse(templates, text):\n for template in templates:\n parsed = parse(template, text)\n if parsed:\n return parsed\n\n raise ValueError(\n f\"'{text}' does not match any template: {templates}\")",
"def _parse_sons(self, d_parsers, verbose=False):\n\n treated = set(d_parsers.keys())\n imports = set(self.imports.keys())\n imports = imports.difference(treated)\n if not imports:\n return d_parsers\n\n for source in imports:\n if verbose:\n print ('>>> treating :: {}'.format(source))\n\n # get the absolute path corresponding to source\n\n filename = get_filename_from_import(source,self._output_folder)\n\n q = Parser(filename)\n q.parse(d_parsers=d_parsers)\n d_parsers[source] = q\n\n # link self to its sons\n\n imports = list(self.imports.keys())\n for source in imports:\n d_parsers[source].append_parent(self)\n self.append_son(d_parsers[source])\n\n return d_parsers",
"def _init_parser(self):\n # outputParser = (pyparsing.Literal('>>') | (pyparsing.WordStart() + '>') | pyparsing.Regex('[^=]>'))('output')\n outputParser = (pyparsing.Literal(self.redirector * 2) |\n (pyparsing.WordStart() + self.redirector) |\n pyparsing.Regex('[^=]' + self.redirector))('output')\n\n terminatorParser = pyparsing.Or(\n [(hasattr(t, 'parseString') and t) or pyparsing.Literal(t) for t in self.terminators])('terminator')\n stringEnd = pyparsing.stringEnd ^ '\\nEOF'\n self.multilineCommand = pyparsing.Or(\n [pyparsing.Keyword(c, caseless=self.case_insensitive) for c in self.multilineCommands])('multilineCommand')\n oneLineCommand = (~self.multilineCommand + pyparsing.Word(self.legalChars))('command')\n pipe = pyparsing.Keyword('|', identChars='|')\n self.commentGrammars.ignore(pyparsing.quotedString).setParseAction(lambda x: '')\n doNotParse = self.commentGrammars | self.commentInProgress | pyparsing.quotedString\n afterElements = \\\n pyparsing.Optional(pipe + pyparsing.SkipTo(outputParser ^ stringEnd, ignore=doNotParse)('pipeTo')) + \\\n pyparsing.Optional(\n outputParser + pyparsing.SkipTo(stringEnd, ignore=doNotParse).setParseAction(lambda x: x[0].strip())(\n 'outputTo'))\n if self.case_insensitive:\n self.multilineCommand.setParseAction(lambda x: x[0].lower())\n oneLineCommand.setParseAction(lambda x: x[0].lower())\n if self.blankLinesAllowed:\n self.blankLineTerminationParser = pyparsing.NoMatch\n else:\n self.blankLineTerminator = (pyparsing.lineEnd + pyparsing.lineEnd)('terminator')\n self.blankLineTerminator.setResultsName('terminator')\n self.blankLineTerminationParser = ((self.multilineCommand ^ oneLineCommand) +\n pyparsing.SkipTo(self.blankLineTerminator, ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args') + self.blankLineTerminator)('statement')\n self.multilineParser = (((self.multilineCommand ^ oneLineCommand) + pyparsing.SkipTo(terminatorParser,\n ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args') + terminatorParser)('statement') +\n pyparsing.SkipTo(outputParser ^ pipe ^ stringEnd, ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('suffix') + afterElements)\n self.multilineParser.ignore(self.commentInProgress)\n self.singleLineParser = ((oneLineCommand + pyparsing.SkipTo(terminatorParser ^ stringEnd ^ pipe ^ outputParser,\n ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args'))('statement') +\n pyparsing.Optional(terminatorParser) + afterElements)\n # self.multilineParser = self.multilineParser.setResultsName('multilineParser')\n # self.singleLineParser = self.singleLineParser.setResultsName('singleLineParser')\n self.blankLineTerminationParser = self.blankLineTerminationParser.setResultsName('statement')\n self.parser = self.prefixParser + (\n stringEnd |\n self.multilineParser |\n self.singleLineParser |\n self.blankLineTerminationParser |\n self.multilineCommand + pyparsing.SkipTo(stringEnd, ignore=doNotParse)\n )\n self.parser.ignore(self.commentGrammars)\n\n inputMark = pyparsing.Literal('<')\n inputMark.setParseAction(lambda x: '')\n fileName = pyparsing.Word(self.legalChars + '/\\\\')\n inputFrom = fileName('inputFrom')\n inputFrom.setParseAction(replace_with_file_contents)\n # a not-entirely-satisfactory way of distinguishing < as in \"import from\" from <\n # as in \"lesser than\"\n self.inputParser = inputMark + pyparsing.Optional(inputFrom) + pyparsing.Optional('>') + \\\n pyparsing.Optional(fileName) + (pyparsing.stringEnd | '|')\n self.inputParser.ignore(self.commentInProgress)",
"def test__parser__grammar_sequence_nested(seg_list, caplog):\n bs = StringParser(\"bar\", KeywordSegment)\n fs = StringParser(\"foo\", KeywordSegment)\n bas = StringParser(\"baar\", KeywordSegment)\n g = Sequence(Sequence(bs, fs), bas)\n ctx = ParseContext(dialect=None)\n with caplog.at_level(logging.DEBUG, logger=\"sqlfluff.parser\"):\n # Matching the start of the list shouldn't work\n logging.info(\"#### TEST 1\")\n assert not g.match(seg_list[:2], parse_context=ctx)\n # Matching the whole list should, and the result should be flat\n logging.info(\"#### TEST 2\")\n assert g.match(seg_list, parse_context=ctx).matched_segments == (\n KeywordSegment(\"bar\", seg_list[0].pos_marker),\n seg_list[1], # This will be the whitespace segment\n KeywordSegment(\"foo\", seg_list[2].pos_marker),\n KeywordSegment(\"baar\", seg_list[3].pos_marker)\n # NB: No whitespace at the end, this shouldn't be consumed.\n )",
"def twoOrMore(parserElement):\n return parserElement + pp.OneOrMore(parserElement)"
]
| [
"0.5791424",
"0.5646095",
"0.5645046",
"0.5396733",
"0.5381657",
"0.53701615",
"0.5365384",
"0.53398365",
"0.52898455",
"0.5230443",
"0.5213715",
"0.5188441",
"0.51719886",
"0.5169891",
"0.51469034",
"0.514417",
"0.5139918",
"0.51322347",
"0.5127973",
"0.5121833",
"0.5117573",
"0.51011133",
"0.50987375",
"0.5083001",
"0.50575376",
"0.5051312",
"0.5039566",
"0.5037734",
"0.5021676",
"0.5013397"
]
| 0.70667577 | 0 |
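The `whole` helper in the record above chains funcparserlib parsers and then requires the end of the input. A minimal usage sketch, assuming funcparserlib is installed (the token values "x" and "y" are arbitrary examples):

from functools import reduce
from operator import add

from funcparserlib.parser import a, finished, skip, NoParseError

def whole(parsers):
    if len(parsers) == 0:
        return finished >> (lambda x: [])
    if len(parsers) == 1:
        return parsers[0] + finished >> (lambda x: x[:-1])
    return reduce(add, parsers) + skip(finished)

p = whole([a("x"), a("y")])
print(p.parse(["x", "y"]))       # ('x', 'y') -- both tokens matched, input exhausted
try:
    p.parse(["x", "y", "z"])     # trailing token: the final `finished` check fails
except NoParseError as err:
    print("rejected:", err)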
Parse any object other than a HyExpression beginning with a HySymbol equal to one of the disallowed_heads. | def notpexpr(*disallowed_heads):
return some(lambda x: not (
isinstance(x, HyExpression) and
x and
isinstance(x[0], HySymbol) and
x[0] in disallowed_heads)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_othersymbol(line):\n return None",
"def test_missing_single_token(self):\n self.helper_test_evaluate_raises(\n 'A or (B and (C and not D))',\n expected_exc_type=MissingSymbolError,\n A=0,\n B=1,\n D=1)",
"def test_single_extra_token(self):\n self.helper_test_evaluate_raises(\n 'A and not B',\n expected_exc_type=ExtraSymbolError,\n A=1,\n B=1,\n C=0)",
"def test_missing_multiple_tokens(self):\n self.helper_test_evaluate_raises(\n 'A or (B and (C and not D))',\n expected_exc_type=MissingSymbolError,\n A=0,\n D=1)",
"def parse(expr, whitelist):\n # remove all whitespace\n expr = re.sub(r'\\s+', '', expr)\n\n seq = []\n parsed = []\n for ch in expr:\n if ch in valid_chars:\n seq.append(ch)\n elif ch in operators or ch.isdigit():\n if seq:\n sym = process_sequence(seq, whitelist)\n parsed.append(sym)\n seq = []\n\n # power operator\n if ch == '^':\n ch = '**'\n\n parsed.append(ch)\n else:\n raise ValueError('Illegal character: \"{}\"'.format(ch))\n\n if seq:\n parsed.append(process_sequence(seq, whitelist))\n return ''.join(parsed)",
"def test_all_extra_tokens(self):\n self.helper_test_evaluate_raises(\n '1 or 0',\n expected_exc_type=ExtraSymbolError,\n A=1,\n B=1,\n C=1)",
"def _want_sym(sym):\n if sym is None or len(sym) < 2:\n return False\n if sym['name'] in extract_ignore_names:\n return False\n bad_types = ['t', 'b', 'r', 'd', 'w']\n return (sym['type'] not in bad_types\n and sym['name'] not in ['__bss_start', '_end', '_edata'])",
"def _checkMarketSyntax(self, market):\n if self.app.getExchange() == 'coinbasepro' and market != '':\n p = re.compile(r\"^[1-9A-Z]{2,5}\\-[1-9A-Z]{2,5}$\")\n if not p.match(market):\n raise TypeError('Coinbase Pro market is invalid.')\n elif self.app.getExchange() == 'binance':\n p = re.compile(r\"^[A-Z]{6,12}$\")\n if not p.match(market):\n raise TypeError('Binance market is invalid.')",
"def test_several_extra_tokens(self):\n self.helper_test_evaluate_raises(\n 'A or B or C',\n expected_exc_type=ExtraSymbolError,\n A=0,\n B=0,\n C=0,\n D=0,\n E=0)",
"def test_missing_all_tokens(self):\n self.helper_test_evaluate_raises(\n '(A nand B) and not D',\n expected_exc_type=MissingSymbolError)",
"def no_or_clauses (self,phrase):\r\n \r\n for x in phrase:\r\n if isinstance(x,list) and x[0] == '@':\r\n return False\r\n return True",
"def handle_pure_literals(cnf):\n pure = dict()\n for clause in cnf:\n for lit in clause:\n match = pure.get(lit.name, None)\n if type(match) == type(lit) and not match.equals(lit):\n pure[lit.name] = \"invalid\"\n if not match:\n pure[lit.name] = lit\n\n for lit in pure.values():\n if lit == \"invalid\":\n continue\n cnf = simplify(cnf, lit) \n return cnf",
"def allow_token(self, token):\n if 'data' in token:\n # Loop through all the attributes and drop the ones that are not\n # allowed, are unsafe or break other rules. Additionally, fix\n # attribute values that need fixing.\n #\n # At the end of this loop, we have the final set of attributes\n # we're keeping.\n attrs = {}\n for namespaced_name, val in token['data'].items():\n namespace, name = namespaced_name\n\n # Drop attributes that are not explicitly allowed\n #\n # NOTE(willkg): We pass in the attribute name--not a namespaced\n # name.\n if not self.attr_filter(token['name'], name, val):\n continue\n\n # Look at attributes that have uri values\n if namespaced_name in self.attr_val_is_uri:\n val_unescaped = re.sub(\n \"[`\\000-\\040\\177-\\240\\s]+\",\n '',\n unescape(val)).lower()\n\n # Remove replacement characters from unescaped characters.\n val_unescaped = val_unescaped.replace(\"\\ufffd\", \"\")\n\n # Drop attributes with uri values that have protocols that\n # aren't allowed\n if (re.match(r'^[a-z0-9][-+.a-z0-9]*:', val_unescaped) and\n (val_unescaped.split(':')[0] not in self.allowed_protocols)):\n continue\n\n # Drop values in svg attrs with non-local IRIs\n if namespaced_name in self.svg_attr_val_allows_ref:\n new_val = re.sub(r'url\\s*\\(\\s*[^#\\s][^)]+?\\)',\n ' ',\n unescape(val))\n new_val = new_val.strip()\n if not new_val:\n continue\n\n else:\n # Replace the val with the unescaped version because\n # it's a iri\n val = new_val\n\n # Drop href and xlink:href attr for svg elements with non-local IRIs\n if (None, token['name']) in self.svg_allow_local_href:\n if namespaced_name in [(None, 'href'), (namespaces['xlink'], 'href')]:\n if re.search(r'^\\s*[^#\\s]', val):\n continue\n\n # If it's a style attribute, sanitize it\n if namespaced_name == (None, u'style'):\n val = self.sanitize_css(val)\n\n # At this point, we want to keep the attribute, so add it in\n attrs[namespaced_name] = val\n\n token['data'] = alphabetize_attributes(attrs)\n\n return token",
"def tokenize(raw):\n if valid_expression(raw) != True:\n return valid_expression(raw)\n\n SYMBOLS = set('+-*/() ')\n\n mark = 0\n tokens = []\n n = len(raw)\n for j in range(n):\n if raw[j] in SYMBOLS:\n if mark != j:\n tokens.append(raw[mark:j]) # complete preceding token\n if raw[j] != ' ':\n tokens.append(raw[j]) # include this token\n mark = j + 1 # update mark to being at next index\n if mark != n:\n tokens.append(raw[mark:n]) # complete preceding token\n return tokens",
"def all_simple (phrase):\r\n\r\n\r\n for x in phrase:\r\n if (x not in self.operations and not (isinstance(x,(int,type(ListType()),float,bool) or (isinstance(x,str) and quoted(x)))) or self.current_register.contains(x)):\r\n return False\r\n return True",
"def _nonkey_extended():\n p = ((Pattern._nonkey() + \n c.paren((next_word('or') + Pattern._nonkey()).treat(lib.snd).plus()).possibly()) +\n c.paren(Pattern._nonkey().plus()).many())\n def f(item):\n item1 = p.process(item)\n ((a,bs),cs) = item1.acc\n vals = [a.value]+ [i.value for i in bs]\n c.synonym_add(vals)\n return c.update((a,cs),item1)\n return Parse(f)",
"def test_incompatible_rules():\n\n grammar = \"\"\"\n A: B | C;\n B: 'enumeration';\n C: value=INT;\n \"\"\"\n with pytest.raises(TextXSyntaxError):\n metamodel_from_str(grammar)",
"def unary_rule(spec):\n \n name = spec[\"name\"]\n form = spec[\"form\"]\n sem = spec[\"sem\"]\n hooks = spec[\"hooks\"]\n analyses = spec[\"analyses\"]\n \n ret = {\"name\": name,\n \"mother\": {\n \"syn\": {\"cat\": \"Verb\",\n \"form\": form},\n \"sem\": sem,\n \"hooks\": hooks},\n \"dtrs\": [\n {\"dtr\": \"Head\",\n \"analyses\": analyses}],\n \"head\": {\n \"dtr\": \"Head\"}}\n\n return ret",
"def no_operators(expression):\n OPERATORS = set('+-*/')\n for i in expression:\n if i in OPERATORS:\n return True\n raise NotValidExpression('Not a valid expression, no operators')",
"def test_disable_pyparsing_arity_trimming_works():\n for func in [lambda a: None, lambda a, b: None, lambda a, b, c, d: None]:\n element = Literal('test').setParseAction(func)\n with raises(TypeError):\n element.parseString('test')",
"def is_operator(obj):\n return isinstance(obj, Token) and obj[0] not in '/01234567890+-.<[('",
"def parse_l1_logical_express(express):\n if express.find(\" AND \")!=-1 or express.find(\" NOT\")!=-1 or express.find(\"NOT \")!=-1 or express == \"L1GlobalDecision\":\n return []\n\n return express.split(\" OR \")",
"def Load(json_text, start_symbol, ignore='_reserved'):\n g = Grammar(json_text, start_symbol, ignore=ignore)\n g.canonicalize()\n g.compute_first()\n g.compute_follow()\n return g",
"def test_dotexpr_lhs():\n a = Var(ArrayLiteral([1]).find(lambda v: v == 1))\n b = Var(Let(lambda b=[1, 2]: b).find(lambda v: v == 1))\n c = Var(String(\"hello\").concat(String(\" world\")))\n ignore(b)\n ignore(c)\n return a",
"def _processSpec(self, spec):\n if isinstance(spec, list):\n for k in spec:\n if isinstance(k, Specifier):\n self._spec.append(k)\n else:\n raise NotAValidSpecifierError(str(type(k)))\n elif isinstance(spec, Specifier):\n self._spec.append(spec)\n else:\n # This point we need to go to the symboltable\n # and look for structs and unions.\n raise NotAValidSpecifierError(str(type(spec)))",
"def test_missing_space_before_symbol():\n token = Token(\"5\", TokenInfo(\"<stdin>\", 0, 1, \"5+\"))\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"5\"\n assert token.info.lineno == 0\n assert token.symbol == Literal.VALUE\n assert token.info.line == \"5+\"\n\n with pytest.raises(LythSyntaxError) as err:\n token += \"+\"\n\n assert token.lexeme == \"5\"\n assert err.value.msg is LythError.MISSING_SPACE_BEFORE_OPERATOR\n assert err.value.filename == \"<stdin>\"\n assert err.value.lineno == 0\n assert err.value.offset == 1\n assert err.value.line == \"5+\"",
"def test_sqpp_paren_expr1_not_expr2_or_quoted_string_not_expr3_or_expr4WORDS(self):\n self.assertEqual(self.parser.parse_query('(expr1) not expr2 | \"expressions not in and quotes | (are) not - parsed \" - (expr3) or expr4'),\n ['+', 'expr1', '-', 'expr2', '|', '\"expressions not in and quotes | (are) not - parsed \"', '-', 'expr3', '|', 'expr4'])\n #['+', '+ \"expressions not in and quotes | (are) not - parsed \" | expr1 | expr4',\n # '+', '- expr3 | expr1 | expr4',\n # '+', '+ \"expressions not in and quotes | (are) not - parsed \" - expr2 | expr4',\n # '+', '- expr3 - expr2 | expr4'])",
"def extract_hydrogens_from_instructions(instruction):\n if \"-\" in instruction[1] or \"-\" in instruction[2]:\n try:\n heavy_core = instruction[1].split(\"-\")[0]\n hydrogen_core = instruction[1].split(\"-\")[1]\n heavy_fragment = instruction[2].split(\"-\")[0]\n hydrogen_fragment = instruction[2].split(\"-\")[1]\n return heavy_core, hydrogen_core, heavy_fragment, hydrogen_fragment\n except IndexError:\n raise IndexError(f\"Wrong growing direction in {instruction}. Both, fragment and core atoms must include one or two atoms.\" \n \"Ex: frag.pdb C1 C2 |or| frag.pdb C1-H1 C2-H2\")\n else:\n return False",
"def should_be_ignored(line: str) -> bool:\n\n starting_chars = [\"!\", \"@@\", \"/\", \"[\", \".\", \"-\", \"_\", \"?\", \"&\"]\n\n return any(line.startswith(x) for x in starting_chars)",
"def parse(name: unicode, ignoreLeaderParens: bool) -> List[unicode]:\n ..."
]
| [
"0.5053372",
"0.5052113",
"0.50418425",
"0.49850217",
"0.49680007",
"0.48462263",
"0.4817602",
"0.48168248",
"0.48086628",
"0.46729738",
"0.4664141",
"0.46620685",
"0.46346858",
"0.46272382",
"0.46187416",
"0.46145827",
"0.46143615",
"0.460878",
"0.46011806",
"0.45874092",
"0.4580879",
"0.4572397",
"0.45097733",
"0.45005518",
"0.44971442",
"0.44945174",
"0.4482925",
"0.4479059",
"0.44667977",
"0.44661874"
]
| 0.6480621 | 0 |
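The `notpexpr` helper in the record above rejects any expression whose head symbol is one of the disallowed heads. The sketch below substitutes trivial stand-in classes for Hy's real `HyExpression`/`HySymbol` models — an assumption made purely so the example runs without Hy installed — to show what gets accepted and what gets rejected:

from funcparserlib.parser import some, NoParseError

class HySymbol(str):        # stand-in for Hy's symbol model
    pass

class HyExpression(list):   # stand-in for Hy's expression model
    pass

def notpexpr(*disallowed_heads):
    return some(lambda x: not (
        isinstance(x, HyExpression) and
        x and
        isinstance(x[0], HySymbol) and
        x[0] in disallowed_heads))

p = notpexpr("else", "unless")
print(p.parse([HySymbol("foo")]))                    # a bare symbol passes
print(p.parse([HyExpression([HySymbol("do"), 1])]))  # (do ...) has an allowed head
try:
    p.parse([HyExpression([HySymbol("else"), 1])])   # (else ...) is rejected
except NoParseError as err:
    print("rejected:", err)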
Parse `parser` several times (`lo` to `hi`) in a row. `hi` can be float('inf'). The result is a list no matter the number of instances. | def times(lo, hi, parser):
@Parser
def f(tokens, s):
result = []
for _ in range(lo):
(v, s) = parser.run(tokens, s)
result.append(v)
end = s.max
try:
for _ in (repeat(1) if isinf(hi) else range(hi - lo)):
(v, s) = parser.run(tokens, s)
result.append(v)
except NoParseError as e:
end = e.state.max
return result, State(s.pos, end)
return f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_multi(choice, end=None):\n end = end or str(g.model.size)\n pattern = r'(?<![-\\d])(\\d+-\\d+|-\\d+|\\d+-|\\d+)(?![-\\d])'\n items = re.findall(pattern, choice)\n alltracks = []\n\n for x in items:\n\n if x.startswith(\"-\"):\n x = \"1\" + x\n\n elif x.endswith(\"-\"):\n x = x + str(end)\n\n if \"-\" in x:\n nrange = x.split(\"-\")\n startend = map(int, nrange)\n alltracks += _bi_range(*startend)\n\n else:\n alltracks.append(int(x))\n\n return alltracks",
"def _format_epilog(parser):\n for line in statemachine.string2lines(\n parser.epilog, tab_width=4, convert_whitespace=True):\n yield line",
"def parse(self) -> List[List[Union[str,int]]]:\n return self.__create_list(cp(self.tokens))",
"def parse(self):\n if len(self._content) == 0:\n return []\n\n groups = self._content.split(\",\")\n arr = set()\n\n def func(acc, cpu):\n if ListFormatParser._is_range(cpu):\n acc.update(ListFormatParser._range_to_list(cpu))\n else:\n acc.add(int(cpu))\n return acc\n\n return list(functools.reduce(func, groups, arr))",
"def _parse_tour(self):\n\n tour = []\n\n while True:\n try:\n s = int(self._lines.current)\n if s == -1:\n return tour\n tour.append(s)\n except ValueError:\n break\n\n try:\n next(self._lines)\n except StopIteration:\n break\n\n return tour",
"def args(self, parser):\n\n args = []\n\n if not parser.peek_and_check(self.stop):\n while True:\n args.append(parser.expression())\n if not parser.peek_and_check(self.sep):\n break\n parser.advance(self.sep)\n\n return args",
"def parser(in_file,verbose):\n\n # perform high-level parsing into sections\n res_file_lines = [row for row in in_file]\n tokenized_lines = tools.split_and_prune_lines(res_file_lines)\n sections = tools.extracted_sections(tokenized_lines)\n\n # split out common sections and subsequent groups of results sections\n def is_results_sentinel_section(section):\n \"\"\" Identify mesh point separator \"pseudo-section\" header.\n\n (Helper function for res_parser_spncci.)\n \"\"\"\n (section_name,_) = section\n return (section_name == \"RESULTS\")\n\n grouped_sections = tools.split_when(is_results_sentinel_section,sections)\n common_sections = list(next(grouped_sections))\n grouped_results_sections = [list(section_group) for section_group in grouped_sections]\n\n if (verbose):\n print(\"Section counts\")\n print(\" Common sections:\",len(common_sections))\n for results_section_group in grouped_results_sections:\n print(\" Results sections (by group):\",len(results_section_group))\n\n # generate results objects by mesh point\n mesh_data = []\n if (grouped_results_sections):\n # there are results sections: actual mesh, not counting run\n for results_section_group in grouped_results_sections:\n full_section_group = common_sections + results_section_group\n results = spncci_results_data.SpNCCIResultsData()\n parse_mesh_point(results,full_section_group,section_handlers)\n mesh_data.append(results)\n else:\n # no results sections: counting run\n results = spncci_results_data.SpNCCIResultsData()\n parse_mesh_point(results,common_sections,section_handlers)\n mesh_data.append(results)\n\n return mesh_data",
"def parse_seq(seq, mini=2, maxi=2):\n if seq is None:\n return seq\n output = seq.split(',')\n assert mini <= len(output) and len(output) <= maxi\n return [int(i) for i in output]",
"def whole(parsers):\n if len(parsers) == 0:\n return finished >> (lambda x: [])\n if len(parsers) == 1:\n return parsers[0] + finished >> (lambda x: x[:-1])\n return reduce(add, parsers) + skip(finished)",
"def parse(cls, selector, pad_len=0):\n\n if re.search('^\\s*$', selector):\n result = [[]]\n else:\n result = cls.parser.parse(selector, lexer=cls.lexer)\n return cls.pad_parsed(result, pad_len)",
"def parseNumbers(equation):\r\n queue = createQueue()\r\n stack = None\r\n parts = equation.split(' ')\r\n for part in parts:\r\n enqueue(queue, part)\r\n stack = push(stack, part)\r\n return stack, queue",
"def parse(self):\n result = list()\n for i, line in enumerate([x.strip() for x in self._input_file], 1):\n if not line:\n continue\n # There should be only 2 entries. Example:\n # kernel`0xffffffff8074d27e;kernel`_sx_xlock 1\n try:\n frames, value = line.split()\n frames = [trim_offset(n) for n in frames.split(';')]\n except ValueError:\n raise StackCollapserException('Unable to parse line {}'.format(i))\n result.append((frames, int(value)))\n return result",
"def parse_row(input_row, parsers):\n\n return [parser(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]",
"def parse_jobs(self, response: scrapy.http.Response):\n hits = response.xpath('//div[@class=\"jobHit\"]')\n for hit in hits:\n job = self.default_job()\n job['queries'] = response.meta['queries']\n for i in MTADialogSpider.parse_job(hit, job):\n yield i",
"def parse(x):\n if isinstance(x, container_abcs.Iterable):\n return x\n return tuple(repeat(x, n))",
"def parse_grid(self, data):\n return [list(row) for row in data.strip().split(\"\\n\")]",
"def read_problem_instances(ifile):\n N = int(ifile.readline().strip())\n\n for i in range(N):\n w_nrows, w_ncols = [int(x.strip()) for x in ifile.readline().strip().split(' ')]\n nrows, ncols = [int(x.strip()) for x in ifile.readline().strip().split(' ')]\n m = []\n for r in range(nrows):\n line = ifile.readline()\n line = line.strip()\n m.append([int(x.strip()) for x in line.split(' ')])\n yield m, w_nrows, w_ncols",
"def change_parser(parser):\r\n prev = base.current_executor()\r\n try:\r\n base.use_executor(lambda request, _: prev(request, parser))\r\n yield\r\n finally:\r\n base.use_executor(prev)",
"def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert",
"def match(parser):\n def run(parser, chunks, chunk, last):\n chunks = (chunk, chunks)\n result = parser.__parser__()(chunk, last)\n tupe, value = result\n if tupe & ParserResult.DONE:\n value, chunk, last = value\n match = _chunks_merge(chunks)[:-len(chunk)] if chunk else _chunks_merge(chunks)\n return ParserResult.from_done((match, value), chunk, last)\n elif tupe & ParserResult.PARTIAL:\n return ParserResult.from_partial(Parser(run, value, chunks))\n else:\n return result\n return Parser(run, parser, tuple())",
"def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row,parsers)",
"def str2list(parser: Callable[[str], Any]) -> Callable[[str], List[Any]]:\n\n def _parse(string: str) -> List[Any]:\n return [parser(entry) for entry in string.split()]\n\n return _parse",
"def parseRange_(self, aRange):\n result = []\n \n common.logger.debug(5,\"parseRange_ \"+str(aRange))\n if aRange=='all' or aRange==None or aRange=='':\n result=range(1,common.jobDB.nJobs()+1)\n return result\n elif aRange=='0':\n return result\n\n subRanges = string.split(aRange, ',')\n for subRange in subRanges:\n result = result+self.parseSimpleRange_(subRange)\n\n if self.checkUniqueness_(result):\n return result\n else:\n common.logger.message(\"Error \"+result)\n return []",
"def _gen_data(fhs, columns, sep):\n for fh in fhs:\n for line in fh:\n if line[0] == \"#\": continue\n toks = line.split(sep)\n yield toks[columns[0]], int(toks[columns[1]]), float(toks[columns[2]])",
"def __init__(self):\n self.hi = []\n self.lo = []",
"def batch_parse(inputs, grammar, trace=0):\n\n # put imports here to avoid circult dependencies\n from nltk.grammar import FeatureGrammar\n from nltk.parse import FeatureChartParser, load_parser\n\n if isinstance(grammar, FeatureGrammar):\n cp = FeatureChartParser(grammar)\n else:\n cp = load_parser(grammar, trace=trace)\n parses = []\n for sent in inputs:\n tokens = sent.split() # use a tokenizer?\n syntrees = cp.nbest_parse(tokens)\n parses.append(syntrees)\n return parses",
"def parse_pairs(self):\n pass",
"def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row, parsers)",
"def parse_rows_with(reader, parsers):\n for row in reader:\n yield parse_row(row, parsers)",
"def parse(handle, known_handle) -> ty.Iterator[data.Entry]:\n entries = parser.parse(CONTEXT, handle, known_handle)\n return map(op.itemgetter(0), entries)"
]
| [
"0.4880365",
"0.47404096",
"0.47179648",
"0.470378",
"0.46360415",
"0.46103963",
"0.4609697",
"0.45963877",
"0.45378098",
"0.45185974",
"0.45155594",
"0.4511633",
"0.45087922",
"0.4464779",
"0.4458345",
"0.44525602",
"0.44323313",
"0.4415435",
"0.44136307",
"0.43991777",
"0.43778798",
"0.43760422",
"0.436744",
"0.43655157",
"0.43602112",
"0.43545717",
"0.43511227",
"0.4324508",
"0.4324508",
"0.43084997"
]
| 0.72359776 | 0 |
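The `times` combinator in the record above runs a parser between `lo` and `hi` times and always yields a list. A self-contained sketch that repeats the definition from the record (it builds on funcparserlib's `Parser`, `State`, and `NoParseError`) together with a few example parses; the token "x" is an arbitrary example:

from itertools import repeat
from math import isinf

from funcparserlib.parser import Parser, State, NoParseError, a

def times(lo, hi, parser):
    @Parser
    def f(tokens, s):
        result = []
        for _ in range(lo):
            (v, s) = parser.run(tokens, s)
            result.append(v)
        end = s.max
        try:
            for _ in (repeat(1) if isinf(hi) else range(hi - lo)):
                (v, s) = parser.run(tokens, s)
                result.append(v)
        except NoParseError as e:
            end = e.state.max
        return result, State(s.pos, end)
    return f

print(times(2, 3, a("x")).parse(["x", "x", "x"]))       # ['x', 'x', 'x']
print(times(2, float("inf"), a("x")).parse(["x"] * 5))  # ['x', 'x', 'x', 'x', 'x']
try:
    times(2, 3, a("x")).parse(["x"])                    # fewer than lo repetitions
except NoParseError as err:
    print("rejected:", err)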
Matches the given parser and produces a named tuple `(Tag tag value)` with `tag` set to the given tag name and `value` set to the parser's value. | def tag(tag_name, parser):
return parser >> (lambda x: Tag(tag_name, x)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_reference(self, name_tag):\n top_level_parse_re = re.compile(\"^(.+):(.+)$\")\n\n results = top_level_parse_re.match(name_tag)\n if results:\n return(results.group(1), results.group(2))\n else:\n return (None,None)",
"def parse_value(named_reg_value):\n name, value, value_type = named_reg_value\n value_class = REG_VALUE_TYPE_MAP[value_type]\n return name, value_class(value)",
"def parse_tag_key_value(key_value: str, value_required=True) -> Tuple[str, Any]:\n if not key_value:\n raise ValueError(\"key must be specified.\")\n\n if \"=\" not in key_value:\n if value_required:\n raise ValueError(f\"key=value pair expected: '{key_value}'\")\n return (key_value, ANY_VALUE)\n\n key, value = key_value.split(\"=\", 1)\n if not key:\n raise ValueError(f\"key must be specified: '{key_value}'\")\n return (key, parse_tag_value(value))",
"def parseTag(self) :\n pos = self.position\n tagtype = self.tags[ord(self._data[pos])]\n if tagtype == 'end-of-attributes-tag':\n return 0\n pos += 1\n posend = pos2 = pos + 2\n namelength = unpack(\">H\", self._data[pos:pos2])[0]\n if not namelength :\n name = self._curname\n else :\n posend += namelength\n self._curname = name = self._data[pos2:posend]\n pos2 = posend + 2\n valuelength = unpack(\">H\", self._data[posend:pos2])[0]\n posend = pos2 + valuelength\n value = self._data[pos2:posend]\n if tagtype in (\"integer\", \"enum\") :\n value = unpack(\">I\", value)[0]\n elif tagtype == \"boolean\" :\n value = ord(value)\n try :\n (oldname, oldval) = self._curattributes[-1][-1]\n if oldname == name :\n oldval.append((tagtype, value))\n else :\n raise IndexError\n except IndexError :\n self._curattributes[-1].append((name, [(tagtype, value)]))\n self.logDebug(\"%s(%s) : %s\" % (name, tagtype, value))\n return posend - self.position",
"def split_name_and_attrs(cls, tag_name):\n # transform `key=\"value\"` to `(key, value)`\n def to_pair(s):\n removed_quote = ''.join(ch for ch in s if ch not in ('\"', \"'\"))\n return removed_quote.split('=')\n\n name_parts = tag_name.split()\n name = name_parts[0]\n raw_attrs = [w for w in name_parts if \"=\" in w]\n tag_attrs = dict(to_pair(w) for w in raw_attrs)\n return name, tag_attrs",
"def parse(cls, value: str) -> Tuple[str, Dict[str, str]]:\n raw_value = read_value_from_path(value)\n args: Dict[str, str] = {}\n\n if \"@\" in raw_value:\n args[\"region\"], raw_value = raw_value.split(\"@\", 1)\n\n # now find any other arguments that can be filters\n matches = re.findall(r\"([0-9a-zA-z_-]+:[^\\s$]+)\", raw_value)\n for match in matches:\n k, v = match.split(\":\", 1)\n args[k] = v\n\n return args.pop(\"name_regex\"), args",
"def parse(self, parser, tokens):\n self.parser = parser\n self.bits = tokens.split_contents()\n self.tagname = self.bits.pop(0)\n self.kwargs = {}\n self.blocks = {}\n self.arguments = self.options.get_arguments()\n self.current_argument = None\n self.todo = list(self.bits)\n for bit in self.bits:\n self.handle_bit(bit)\n self.finish()\n self.parse_blocks()\n return self.kwargs, self.blocks",
"def process_start_tag(tag_text):\n tokens = tokenise(tag_text.strip(\"<>\"))\n tag = tokens[0]\n attributes = {}\n if len(tokens) > 1:\n for token in tokens[1:]:\n try:\n i = token.index(\"=\")\n key = token[0:i]\n value = token[i + 1 :].strip(' \"')\n except ValueError:\n key = token\n value = \"\"\n attributes[key] = value\n return (tag, attributes)",
"def tags_handler(ctx, param, value):\n retval = from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n retval = dict(p.split('=') for p in value)\n except:\n raise click.BadParameter(\n \"'%s' contains a malformed tag.\" % value,\n param=param, param_hint='transform')\n return retval",
"def parse_value(value: str) -> Tuple[str, str, str]:\n value_pattern = r'^(usb|pci)\\(([^:]{4}):([^:]{4})\\)$'\n matches = re.match(value_pattern, value)\n assert matches, value\n ilk, vendor, device = matches.group(1), matches.group(2), matches.group(3)\n return ilk, vendor, device",
"def parse_tuple(value):\n match = re.match(r'(\\w+)=(\\w+)\\((.*?)\\)', value)\n assert match, \"could not parse '%s'\" % value\n return match.group(1), eval(match.group(2))(match.group(3))",
"def getattr(parser, token):\n # This version uses a regular expression to parse tag contents.\n try:\n # Splitting by None == splitting by spaces.\n tag_name, arg = token.contents.split(None, 1)\n except ValueError:\n raise template.TemplateSyntaxError, \"%r needs arguments\" % token.contents.split()[0]\n m = re.search(r'(\\S+) (\\S+) as (.+)', arg)\n if not m:\n m = re.search(r'(\\S+) (\\S+)', arg.strip())\n if not m: \n raise template.TemplateSyntaxError,\\\n \"%r tag had invalid arguments\" % tag_name\n var_name, var_attr = m.groups()\n return GetAttrNode(var_name, var_attr,) \n var_name, var_attr, var_new = m.groups()\n return GetAttrNode(var_name, var_attr, var_new)",
"def _parse_tags(self):\n tokens = self.tags_str[1:].split(\";\")\n self._tags = {\n k.strip(): v\n for token in tokens\n for k, v in [token.split(\"=\")]\n }",
"def get_tag(file, tag):\r\n import re\r\n\r\n # make sure the necessary globals are initialised\r\n global filenames # set of processed files\r\n if 'filenames' not in globals():\r\n filenames = set()\r\n global tags # dictionary of cached tag values\r\n if 'tags' not in globals() : # the collection has not yet been initialized\r\n tags = {}\r\n\r\n\r\n if file not in filenames:\r\n # file has not been processed yet\r\n\ttry:\r\n\t f = open(file, \"rt\")\r\n\texcept IOError:\r\n\t logger.warning(\"File '%s' not found.\", file)\r\n\t return \"*** ERROR *** File %s Not found***\\n\" % file\r\n\r\n\t# matches up to 5 chars at start of line followed the \"{{{\" \r\n\t# followed by tag name followed by up to five chars \r\n\t# with optional trailing white space.\r\n\tstarttag = '^(\\s*).{0,5}\\{{3}(\\S+).{0,5}\\s*$'\r\n\tstartre = re.compile(starttag)\r\n\t# matches up to 5 chars followed by \"}}}\" followed by up to 5 chars and\r\n\t# optional trailing white space.\r\n\tendtag = \"^\\s*.{0,5}\\}{3}.{0,5}\\s*$\"\r\n\tendre = re.compile(endtag)\r\n\tcapturing = False # are we capturing?\r\n\tcurtagname = \"\"\r\n\ttagvalue = \"\"\r\n\ttrim = 0\r\n\r\n\twhile True:\r\n\t l = f.readline()\r\n\t if not l: break\r\n\t if capturing:\r\n\t if endre.match(l):\r\n\t\t capturing = False\r\n\t\t tags[(file, curtagname)] = tagvalue\r\n\t\t tagvalue = ''\r\n\t\telse:\r\n\t\t tagvalue += l[trim:]\r\n\t\t tagvalue += '\\n'\r\n\r\n\r\n\t else:\r\n\t m = startre.match(l)\r\n\t\tif m: # we have a start tag\r\n trim = len(m.group(1))\r\n\t\t curtagname = m.group(2)\r\n\t\t capturing = True\r\n\r\n\tf.close()\r\n filenames.add(file)\r\n\r\n\r\n try:\r\n return tags[(file,tag)]\r\n except KeyError:\r\n\tlogger.warning(\"Tag '%(tag)s' not found in %(file)s\", \r\n\t\t\t{'file':file, 'tag':tag})\r\n\r\n\treturn \"*** ERROR *** Tag %(tag)s not found in file %(file)s ***\\n\" % \\\r\n\t\t\t\t{'file':file, 'tag':tag}",
"def _parse_tags(tags: str):\n return dict(item.split(\":\") for item in shlex.split(tags)) # type: ignore",
"def parse_value(cls, value):\n choice, value = value.split('=')\n value = cls.VALUES_MAP[value]\n\n return choice, value",
"def parse_tag(self, tag):\n \n mytag = \"latest\"\n mydigest = None\n\n regex = \"([\\w\\d\\.\\-]+)@?([\\w\\d\\.\\-]*)$\"\n\n regex_matched = re.match(regex, tag)\n mytag = regex_matched.group(1)\n mydigest = regex_matched.group(2)\n \n if regex_matched is None:\n mytag = \"latest\"\n\n return (mytag, mydigest)",
"def getPair(self, args):\r\n return self.name, self.getValue(args)",
"def get_tag_value(\n service: str,\n tags: List[Any],\n tag_key: str,\n) -> str:\n capitalize = capitalize_tag_kv(service)\n matches = [\n t[f\"{'V' if capitalize else 'v'}alue\"]\n for t in tags\n if t[f\"{'K' if capitalize else 'k'}ey\"] == tag_key\n ]\n if len(matches) != 1:\n log_error(\n f\"Oops it looks like we're unable to find a match for tag {tag_key}.\"\n \"Please open an issue to help us get this fixed!\",\n )\n raise Abort()\n\n return matches[0]",
"def tokenize_key_value_pair(kv_pair):\n key, value = kv_pair.strip().split('\\t')\n key = tuple(key.strip().split())\n value = tuple(value.strip().split())\n return (key, value)",
"def get_tags(html):\n\ttitle = re.findall('\"title\":\"(.*?)\",', html)[0]\n\ttitle = codecs.getdecoder(\"unicode_escape\")(title)[0]\n\n\tartist = re.findall('\"username\":\"(.*?)\",', html)[0]\n\tartist = codecs.getdecoder(\"unicode_escape\")(artist)[0]\n\n\tgenre = re.findall('\"genre\":\"(.*?)\",', html)[0]\n\tgenre = codecs.getdecoder(\"unicode_escape\")(genre)[0]\n\n\treturn title, artist, genre",
"def var(parser, token):\n # This version uses a regular expression to parse tag contents.\n try:\n # Splitting by None == splitting by spaces.\n tag_name, arg = token.contents.split(None, 1)\n except ValueError:\n raise template.TemplateSyntaxError, \"%r needs arguments\" % token.contents.split()[0]\n m = re.search(r'(\\S+) is (.+)', arg)\n if not m:\n raise template.TemplateSyntaxError, \"%r tag had invalid arguments\" % tag_name\n var_name, var_value = m.groups()\n return VarNode(var_name, var_value)",
"def get_parser(self, tag_name):\n return self.mock_parsers.setdefault(tag_name, MockParser(tag_name))",
"def get_asg_tag(tags, tag_name):\n result = {}\n for tag in tags:\n for key, val in tag.items():\n if val == tag_name:\n result = tag\n return result",
"def tags(self):\n return tuple([x.strip() for x in self._dict.get('tags').split(',')])",
"def parse(arg: Tuple[str, str, str, str, str]) -> Tuple[str, str, str]:\n return (arg[2], arg[3], arg[4])",
"def _parse_tag(self, f, tag):\n # Check that we got a compiled file\n if not isinstance(f, mumpy.MUMPSFile):\n raise TypeError(\"Please specify a valid MUMPS routine.\")\n\n # Get the tag body\n lines = f.tag_body(tag)\n for line in lines:\n self.output = True\n\n if self.debug:\n self.rou['lex'].test(line)\n\n self.rou['lex'].reset()\n\n try:\n p = self.rou['parser'].parse(line, lexer=self.rou['lex'].lexer)\n p.execute()\n except mumpy.MUMPSReturn as ret:\n return lambda v=ret: v.value()\n except mumpy.MUMPSGotoLine as goto:\n return lambda cmd=self._parse_tag, fn=goto.func: cmd(fn.rou,\n fn.tag)\n except mumpy.MUMPSCommandEnd:\n continue\n\n # Reset the Lexer and Parser to the correct state\n self.rou['lex'].reset()\n\n # Return any resulting expression to the caller\n return lambda: None",
"def match(parser):\n def run(parser, chunks, chunk, last):\n chunks = (chunk, chunks)\n result = parser.__parser__()(chunk, last)\n tupe, value = result\n if tupe & ParserResult.DONE:\n value, chunk, last = value\n match = _chunks_merge(chunks)[:-len(chunk)] if chunk else _chunks_merge(chunks)\n return ParserResult.from_done((match, value), chunk, last)\n elif tupe & ParserResult.PARTIAL:\n return ParserResult.from_partial(Parser(run, value, chunks))\n else:\n return result\n return Parser(run, parser, tuple())",
"def try_parse_field(field_name, value, parser_dict):\n parser = parser_dict.get(field_name) # None if no such entry\n if parser is not None:\n return try_or_none(parser)(value)\n else:\n return value",
"def tp_key_value(str_tag):\n rgx_split = re.compile(r'[\\@\\(\\)\\{\\}]')\n str_key, str_value = '', ''\n\n # count the pieces\n lst_parts = rgx_split.split(str_tag)\n lng_parts = len(lst_parts)\n\n # and winnow the noise\n if lng_parts > 1:\n str_key = lst_parts[1]\n if lng_parts > 2:\n for str_value in lst_parts[2:]:\n if str_value != '':\n break\n\n return (str_key, str_value)"
]
| [
"0.5538646",
"0.553688",
"0.55304265",
"0.5503507",
"0.5379562",
"0.52458453",
"0.5207461",
"0.5187187",
"0.5185535",
"0.5160518",
"0.5132922",
"0.5025835",
"0.5006027",
"0.5005376",
"0.49659863",
"0.49604243",
"0.49449548",
"0.49418393",
"0.4910231",
"0.48585212",
"0.48282945",
"0.48170835",
"0.48158097",
"0.47868204",
"0.47274643",
"0.46781304",
"0.46698174",
"0.46676975",
"0.46530825",
"0.4650396"
]
| 0.6260766 | 0 |
Read last build timestamp from the log file | def last_build_processed_timestamp(log_file):
# Get last build processed timestamp
last_timestamp = 0
with open(log_file, "r") as process_file:
if os.path.getsize(process_file.name) > 0:
last_timestamp = process_file.readline().strip()
return last_timestamp | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_last_build_processed_timestamp(log_file):\n with open(log_file, \"w\") as process_file:\n process_file.write(str(time.time()))",
"def GetBuildDate(build_filename):\n try:\n with open(build_filename) as f:\n return float(f.readline())\n except (IOError, ValueError):\n return 0.0",
"def read_last_line_in_data_log():\n timestamp = datetime.datetime.utcnow().strftime(\"%Y%m%d\")\n log_file_path = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs'\n log_file_path += os.sep + timestamp\n file_name = log_file_path + os.sep + timestamp + \"_data.txt\"\n # file_name = r'C:/Users/kimdu/Documents/ph549/Telemetry_logs/test.txt' # test generated data\n try:\n with open(file_name, 'rb') as f:\n f.seek(-2, os.SEEK_END)\n while f.read(1) != b'\\n':\n f.seek(-2, os.SEEK_CUR)\n content = f.readline().decode()\n except:\n with open(file_name, 'rb') as f:\n content = f.readlines()[-1].decode()\n return content",
"def get_image_provision_timestamp(log_path: str) -> Optional[str]:\n with open(log_path) as f:\n for line in f:\n if 'Build image provisioning date and time' in line:\n # The next line is the timestamp.\n try:\n return next(f).strip()\n except StopIteration:\n return None",
"def test_get_build_timestamp(self):\n pass",
"def get_log():\n set_ctime()\n f = open(log_path, 'r')\n o = get_offset()\n f.seek(int(o))\n return f",
"def getchrony():\n \n filename = \"/var/log/chrony/tracking.log\"\n fileNotOK = True\n try:\n if os.path.isfile(filename):\n fileNotOK = False\n except:\n fileNotOK = True\n # if file is not OK, return default\n if fileNotOK:\n return( \"2020-02-20T02:02:02.000\", 0., 0.)\n \n #get the very last line in the filea\n line = subprocess.check_output(['tail', '-1', filename])\n parts = line.split()\n nparts = len(parts)\n\n if nparts < 10:\n return( \"\", 0., 0.)\n \n date = parts[0]\n time = parts[1]\n ip = parts[2]\n #print(\"Offset: %s\" % (parts[9]))\n offset = float(parts[6])\n offsetrms = float(parts[9])\n datestr = \"%sT%s\" % (date, time)\n return( datestr, offset, offsetrms)",
"def get_build_timestamp(jenkins_url, job_name, build_nr):\n timestamp = execute_command(\n f\"wget -qO- {jenkins_url}/{job_name}/{build_nr}\"\n )\n return datetime.fromtimestamp(timestamp/1000)",
"def getLastFinishedBuild():",
"def GetLastBuildRevision(self):\n last_build_revision = None\n if os.path.exists(self.last_change_file):\n last_build_revision = int(open(self.last_change_file).read())\n\n if os.path.exists(self.revisions_path):\n fp = open(self.revisions_path)\n try:\n line = fp.readline()\n\n # TODO(markhuang): remove this block after all builders are updated\n line = line.replace('\\'', '\"')\n\n revisions_dict = simplejson.loads(line)\n if revisions_dict:\n self.last_chromium_revision = revisions_dict['chromium_revision']\n self.last_webkit_revision = revisions_dict['webkit_revision']\n self.last_v8_revision = revisions_dict['v8_revision']\n except (IOError, KeyError, ValueError), e:\n self.last_chromium_revision = None\n self.last_webkit_revision = None\n self.last_v8_revision = None\n print e\n fp.close()\n return last_build_revision",
"def __last_commit_date(self):\n return utils.run('git', ['log', '--all', '-1', '--format=%cI'],\n self.__project.location).rstrip()",
"def last_log(self) -> List:\n logs_list: List = os.listdir(LOGS_BASE_PATH)\n full_list = [os.path.join(LOGS_BASE_PATH, i) for i in logs_list]\n time_sorted_list: List = sorted(full_list, key=os.path.getmtime)\n return time_sorted_list[-1]",
"def get_git_timestamp(path):\n return int(_run_command(path, 'git log -1 --format=%ct'))",
"def load_last_run_time():\n # path = \"/Users/szou/Downloads/bu/happydogs/analytics_happydogs/last_time_run\"\n if os.path.isfile(\"last_time_run\"): #\n # If the file exists\n f = open(\"last_time_run\", \"r\")\n last_run_time = datetime.datetime.strptime(f.read(), \"%Y-%m-%d %H:%M:%S\")\n f.close()\n return last_run_time\n save_current_run_time()\n # If file doesn't exist (possible if it's the first run), return current time\n return datetime.datetime.now()",
"def __read_last_line(self) -> str:\n with open(LOGFILE_OPENINGS, \"r\", encoding=\"utf-8\") as f:\n last_line = f.readlines()[-1]\n return repr(LogLine.from_line(last_line))",
"def build_time(self):\n return self.nodes[0].get('infos').get('system_info').get('build_time')",
"def get_last_processed_log(parser_info_path, job_to_retry):\n with open(parser_info_path, \"r\") as processed_file:\n processed_object = json.load(processed_file)\n try:\n last_processed_log = processed_object[\"parserInfo\"][\"lastRevision\"][\n job_to_retry\n ]\n except KeyError:\n # If last processed log not defined, all logs will be parsed\n last_processed_log = 1\n processed_object[\"parserInfo\"][\"lastRevision\"][\n job_to_retry\n ] = last_processed_log\n\n return last_processed_log, processed_object",
"def jenkins_last_build_sha():\n job_url = os.getenv('JOB_URL')\n job_json_url = \"{0}/api/json\".format(job_url)\n response = urllib.urlopen(job_json_url)\n job_data = json.loads(response.read())\n\n last_completed_build_url = job_data['lastCompletedBuild']['url']\n last_complete_build_json_url = \"{0}/api/json\".format(last_completed_build_url)\n\n response = urllib.urlopen(last_complete_build_json_url)\n last_completed_build = json.loads(response.read())\n\n return last_completed_build[1]['lastBuiltRevision']['SHA1'] # needs testing",
"def read_last_ts_written(self):\n try:\n logging.info(\"Reading last timestamp written from previous run.\")\n with open(self.state_file_path, \"r\") as file:\n self.last_ts_written = int(file.read())\n logging.info(\n \"Last timestamp from previous run is {}.\".format(\n self.last_ts_written\n )\n )\n return True\n except FileNotFoundError:\n self.last_ts_written = 0\n logging.warning(\n \"No state file found at {}, setting last timestamp written to 0.\".format(\n self.state_file_path\n )\n )\n return False",
"def last_commit_date():\n return subprocess.check_output(['git', 'log', '-1', '--pretty=%ad',\n '--date=format:%d %b %H:%M', 'py/calendon']).decode().strip()",
"def read_buildstamp(subdir):\n if os.path.exists(subdir):\n try:\n with open(subdir + '/scripts/build-stamp.txt', 'r') as f:\n freesurfer_version = f.readlines()[0]\n # except a FileNotFound error\n except OSError as e:\n freesurfer_version = input(\n \"\"\"\n Could not find a build timestamp in the supplied subject directory.\n The used freesurfer version can not be extracted. Please enter the\n version of freesurfer you are using, if available: \"\"\"\n or \"\")\n return freesurfer_version",
"def get_release_date ():\n fname = os.path.join(\"doc\", \"changelog.txt\")\n release_date = \"unknown\"\n with open(fname) as fd:\n # the release date is on the first line\n line = fd.readline()\n mo = release_ro.search(line)\n if mo:\n release_date = mo.groups(1)\n return release_date",
"def getLast():\n try:\n open(os.path.join(basepath, 'last'))\n except IOError:\n try:\n arguments.project\n except NameError:\n print(\"No current project. Start one with -p\")\n exit()\n else:\n f = open(os.path.join(basepath, 'last'), 'w')\n f.write(arguments.project[0])\n f.close()\n store = open(os.path.join(basepath, 'last'), 'r')\n last = store.readline().rstrip('\\n')\n last = [last, 's']\n store.close()\n path = getPath(last[0])\n with open(path, 'r') as log:\n reader = csv.reader(log)\n for row in reader:\n if row[1] == 'a' or row[1] == 's':\n line = row\n try:\n line\n except NameError:\n last[1] = 's'\n else:\n last[1] = line[1]\n return last",
"def get_last_update(self):\n last_update = os.path.getmtime(self.parent_filepath)\n return last_update",
"def get_last_fetch_time():\n if os.path.exists(LAST_FETCH_TIME_FILE):\n with open(LAST_FETCH_TIME_FILE, 'r') as f:\n last_fetch_time = f.read()\n\n return last_fetch_time\n return ''",
"def get_last_log_file(self):\n try:\n last_log = str(self.last_log)\n except Exception as e:\n last_log = None\n logger.error(f\"untracked exception: {e}\")\n\n if not last_log:\n return\n try:\n with open(last_log, \"r\") as file:\n return file\n except Exception as e:\n logger.error(f\"Untracked exception: {e}\")",
"def most_recent_read(self):\n self.read_pos = (self.write_pos - 1) % self.log_len\n return",
"def find_latest_log(logdir):\n newtime = 0\n newfile = None\n for tfile in os.listdir(os.path.expanduser(logdir)):\n rp = os.path.realpath(os.path.expanduser(logdir)+'/'+tfile)\n ttime = os.stat(rp).st_mtime\n if ttime > newtime:\n newtime = ttime\n newfile = rp\n\n return newfile",
"def _last_roll_revision(self):\n if not self._cached_last_roll_revision:\n revinfo = subprocess2.check_output(['gclient', 'revinfo'],\n cwd=self._path_to_chrome)\n project_path = 'src/' + self._path_to_project\n for line in revinfo.splitlines():\n dep_path, source = line.split(': ', 1)\n if dep_path == project_path:\n self._cached_last_roll_revision = source.split('@')[-1]\n break\n assert len(self._cached_last_roll_revision) == 40\n return self._cached_last_roll_revision",
"def get_changefile_timestamp(changefile_type, file_sequence_number):\n url = get_url(changefile_type) + \"/\"\n url = url + (\"%03i/%03i/%03i\" % (file_sequence_number / 1000000,\n file_sequence_number / 1000 % 1000,\n file_sequence_number % 1000))\n url = url + \".state.txt\"\n changefile_timestamp = None\n for result in urllib.urlopen(url):\n # get timestamp\n timestamp_p = result.find(\"timestamp=\")\n if timestamp_p != -1:\n # found timestamp line\n timestamp_p += 10 # jump over text\n result = result[timestamp_p:].replace(\"\\\\\", \"\").strip()\n changefile_timestamp = strtodatetime(result)\n\n if not changefile_timestamp:\n logging.info(\"(no timestamp)\")\n if file_sequence_number == 0:\n changefile_timestamp = datetime(1900, 1, 1)\n else:\n AssertionError(\"no timestamp for %s changefile %i.\" %\n (changefile_type, file_sequence_number))\n else:\n logging.info(\"%s, id: %i, timestamp: %s\" %\n (changefile_type, file_sequence_number,\n changefile_timestamp.isoformat()))\n return changefile_timestamp"
]
| [
"0.71376187",
"0.6779207",
"0.6755526",
"0.6730641",
"0.6637661",
"0.6348121",
"0.6277634",
"0.6233516",
"0.6186471",
"0.61769265",
"0.6168673",
"0.61549896",
"0.6153312",
"0.6153262",
"0.61097366",
"0.6045366",
"0.60451233",
"0.6036591",
"0.5966002",
"0.5964683",
"0.5945673",
"0.5941876",
"0.5930383",
"0.59298384",
"0.59006864",
"0.5892885",
"0.5887283",
"0.58758765",
"0.585298",
"0.5833754"
]
| 0.8103804 | 0 |
Generates plots for all videos in a directory | def generate_plots(path):
videos = glob(path + '/*.mkv')
print(path, len(videos), videos)
if len(videos) == 0:
return
else:
videos = videos[0]
metadata_list = glob(path + '/metadata.txt')
#print(path, len(metadata_list), metadata_list)
if len(metadata_list) == 0:
return
P = Preprocessor()
P.import_video(str(videos))
P.read_metadata(path)
P.preprocess()
Im = P.frames_processed
if len(Im) == 0:
print(len(Im))
return
z_start = P.z_start
z_end = P.z_end
mean, cov = analyze_image(Im)
window_size = 10
mean_smoothed = smoothing.mean_moving_average(mean, window_size)
cov_smoothed = smoothing.cov_moving_average(cov, window_size)
c = CubicFitRotated()
c.fit(mean=mean_smoothed, cov=cov_smoothed, z_start=z_start, z_end=z_end)
try:
os.mkdir(path + '/analysis')
path += '/analysis'
except OSError:
pass
plots.plot_mean(mean, z_start, z_end).savefig(path + '/beam_center.png')
plots.plot_beta(cov, z_start, z_end).savefig(path + '/sigma_squared.png')
export.export_mean(mean = mean, filename = path + '/center.csv', z_start = z_start, z_end = z_end)
export.export_cov(cov = cov, filename = path + '/cov.csv', z_start = z_start, z_end = z_end)
plt.close('all') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def video_files():\n p = parse_cmdline(get_parser=get_parser_files)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.INFO)\n vis.show_video_abi_glm(\n files=p.files,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir)\n print(\"Files written to:\", p.outdir)",
"def get_video_as_images():\n experiments = ['me1.mp4']\n try:\n if (os.path.isdir(\"dump\")):\n shutil.rmtree('dump')\n except OSError:\n print (\"Deletion of the directory failed\")\n exit()\n os.system('mkdir dump')\n for experiment in experiments:\n exp_no_ext = experiment.split('.')[0]\n subdir_cmd = \"dump/{0}\".format(exp_no_ext)\n os.mkdir(subdir_cmd)\n os.system('ffmpeg -i videos/%s dump/%s/%s%%03d.jpg' % (experiment, exp_no_ext, exp_no_ext))\n run_all(exp_no_ext)",
"def create_video(all_obj_locs, fps=30):\n i = 0\n print(len(all_obj_locs[::STEP]))\n for f in all_obj_locs[::STEP]:\n plt.figure(figsize=(SIZE * 2, SIZE), dpi=80)\n plt.ylim([-LANE_LENGTH / 4 + 25, LANE_LENGTH / 4 + 75])\n plt.xlim([-50, LANE_LENGTH + 50])\n x_s = [p[1] for p in f]\n y_s = [p[0] for p in f]\n s = 10\n plt.plot([0, 0], [0 - MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.plot([LANE_LENGTH + MARGIN, LANE_LENGTH + MARGIN], [0 - MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.plot([0, LANE_LENGTH + MARGIN], [0 - MARGIN, 0 - MARGIN], color=\"red\")\n plt.plot([0, LANE_LENGTH + MARGIN], [LANE_WIDTH + MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.scatter(x_s, y_s, s=s)\n x_s_pins = init_pins()[:, 0]\n y_s_pins = init_pins()[:, 1]\n plt.scatter(y_s_pins, x_s_pins, s=3, color=\"black\")\n plt.savefig(\"data/frame\" + str(i) + \".png\")\n plt.close()\n # plt.show()\n i += 1\n create_video_from_frames(len(all_obj_locs[::STEP]), fps / STEP / DT)",
"def with_path(path):\n seen = set()\n seen_add = seen.add\n videos = []\n all_videos = list(mythVideo.searchVideos(filename = path))\n\n for video in all_videos:\n if video.season > 0:\n video.label = video.title + \" - Season \" + str(video.season)\n\n if video.label not in seen and not seen_add(video.label):\n video.url = \"/videos/\" + video.title + \"/season/\" + str(video.season)\n videos.append(video)\n\n else:\n video.label = video.title + \" - \" + video.subtitle\n video.url = \"/videos/\" + video.title + \"/\" + video.hash\n videos.append(video)\n\n if len(videos) == 1:\n videos[0].pic = url_for('.video_image', title = title, hash = videos[0].hash)\n videos[0].feplay = url_for('.video_feplay', title = title, hash = hash)\n return render_template('recording.html', item = videos[0])\n\n videos = sorted(videos, key = lambda video: video.season)\n return render_template('list.html', items = videos, page_title = title)",
"def make_all_plots(dirname='plots'):\n for worker_type in ['ordinary', 'normal', 'master', None]:\n name = 'rajpal'\n if worker_type is not None:\n name += '-' + worker_type\n data = Data.from_rajpal_icml15(worker_type=worker_type)\n data.make_plots(name)\n data.make_data('{}.csv'.format(name))\n\n data = Data.from_bragg_hcomp13(positive_only=False)\n data.make_plots(os.path.join(dirname, 'bragg'))\n data.make_data(os.path.join(dirname, 'bragg.csv'))\n\n data = Data.from_bragg_hcomp13(positive_only=True)\n data.make_plots(os.path.join(dirname, 'bragg-pos'))\n data.make_data(os.path.join(dirname, 'bragg-pos.csv'))\n\n data = Data.from_lin_aaai12(workflow='tag')\n data.make_plots(os.path.join(dirname, 'lin-tag'))\n data.make_data(os.path.join(dirname, 'lin-tag.csv'))\n\n data = Data.from_lin_aaai12(workflow='wiki')\n data.make_plots(os.path.join('lin-wiki'))\n data.make_data(os.path.join('lin-wiki.csv'))\n\n make_bragg_teach_plots(dirname=dirname)",
"def make_video(pattern, plotdir, moviedir, movienametag):\n images_list = glob('%s/%s'%(plotdir, pattern))\n images_list.sort()\n # save all required files into tmp_moviedir, with simple filenames: %.4d.png\n tmp_moviedir = '%s/tmp_movie_%s'%(plotdir, movienametag)\n os.system('mkdir -p %s'%tmp_moviedir)\n for i in range(len(images_list)):\n fname = images_list[i].split('%s/'%plotdir)[-1].split('.png')[0]\n os.system('cp %s/%s.png %s/%.4d.png'%(plotdir, fname, tmp_moviedir, i))\n\n os.system('avconv -i %s'%tmp_moviedir +'/%04d.png ' \\\n +' -y -c:v libx264 -pix_fmt yuv420p %s/%s.mp4'%(moviedir, movienametag))",
"def test_plenty_of_video_files():\n # make sure that there is one sequence per video file\n pipe = VideoPipe(\n batch_size=BATCH_SIZE, data=PLENTY_VIDEO_FILES, step=1000000, sequence_length=1)\n pipe.build()\n iters = math.ceil(len(os.listdir(PLENTY_VIDEO_DIRECTORY)) / BATCH_SIZE)\n for i in range(iters):\n print(\"Iter \" + str(i))\n pipe.run()",
"def outputs(folderName):\n for i in itertools.count(1):\n yield io.open('%s/Video_%s.h264' %\n (folderName,\n datetime.now().strftime('%Y_%m_%d_%H_%M_%S')),\n 'wb')",
"def create_original_videos(frames, video_path, interval):\n ncols = int(math.sqrt(len(frames)))\n fig, ax = plt.subplots(\n ncols=ncols,\n nrows=ncols,\n figsize=(5 * ncols, 5 * ncols),\n tight_layout=True,\n )\n max_len = max([len(f) for f in frames])\n\n def init():\n ims = []\n k = 0\n for k in range(ncols):\n for j in range(ncols):\n ims.append(ax[j][k].imshow(unnorm(frames[k * ncols + j][0])))\n ax[j][k].grid(False)\n ax[j][k].set_xticks([])\n ax[j][k].set_yticks([])\n return ims\n\n ims = init()\n\n def update(i):\n print(\"{}/{}\".format(i, max_len))\n for k in range(ncols):\n for j in range(ncols):\n idx = (\n i\n if i < len(frames[k * ncols + j])\n else len(frames[k * ncols + j]) - 1\n )\n ims[k * ncols + j].set_data(unnorm(frames[k * ncols + j][idx]))\n plt.tight_layout()\n return ims\n\n anim = FuncAnimation(\n fig, update, frames=np.arange(max_len), interval=interval, blit=False,\n )\n anim.save(video_path, dpi=80)",
"def plot_folder(path):\r\n plt.figure(figsize=(20, 10))\r\n for filename in glob.glob(path + '/*.pspec'):\r\n x, y= np.loadtxt(fname=filename, delimiter='\\t',dtype=int, usecols = (1,2),\r\n skiprows=100, unpack = True)\r\n plt.plot(x, y)\r\n return plt.show()",
"def index():\n seen = set()\n seen_add = seen.add\n videos = []\n all_videos = mythVideo.searchVideos(insertedafter = '1900-01-01 00:00:00')\n\n for video in all_videos:\n path = video.filename.split('/')[0]\n if path not in seen and not seen_add(path):\n video.url = url_for('.with_path', path=path)\n video.label = path\n videos.append(video)\n\n videos = sorted(videos, key = lambda video: video.label.lowercase())\n return render_template('list.html', items = videos, page_title = 'Videos')",
"def get_videos_of_folder(folder):\n\n Settings.dev_print(\"getting videos of folder: {}\".format(folder.get_title()))\n if not folder: return []\n videos = []\n files = []\n valid_videos = [\".mp4\",\".mov\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_videos:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"video path: {}\".format(os.path.join(folder.get_path(),f)))\n return files",
"def process_group(pattern, params):\n # check subdirectory according to filter options\n subdir = params['label']\n # and the parameters label\n print 'Processing:', subdir\n\n # search for videos matching the pattern\n search = os.path.join(ROOT_RAWDATA_DIR, pattern)\n print 'Search pattern:', search\n flist = sorted(glob.glob(search))\n\n # for each matching video\n for f in flist:\n # video structures (copied from LEGOS FTP) is yyyymmdd/HH/MM.mp4\n # and we want to store frames as yyyymmdd/yyyymmdd_HH/yyyymmdd_HHMM/yyyymmdd_HHMM_<index>.<format>\n # so: recursively split to extract basename, hour and date\n p, fname = os.path.split(f)\n p, hour = os.path.split(p)\n p, date = os.path.split(p)\n minute, _ = os.path.splitext(fname)\n # compute output dir, and prefix for frames\n outdir = os.path.join(ROOT_PREPROC_DIR,\n subdir, # according to parameters\n date,\n '{}_{}'.format(date, hour),\n '{}_{}{}'.format(date, hour, minute),\n )\n prefix = '{}_{}{}_'.format(date, hour, minute)\n # create output directory if neeeded\n if not os.path.exists(outdir):\n print 'Creating output directory', outdir\n os.makedirs(outdir, 0755)\n # call decoder\n command = ['python', '-u', 'decoder.py',\n f,\n '-o', outdir,\n '-p', prefix,\n '-l', params['label'],\n '-f', str(params['image_format']),\n '-m', str(params['median_length']),\n '-r', str(params['resolution']),\n '-O', str(params['origin'][0]), str(params['origin'][1]),\n '-d', str(params['dimensions'][0]), str(params['dimensions'][1]),\n '-a', str(params['rotation']),\n ]\n subprocess.call(command)",
"def generate_video_metadata(absolute_paths):\n\n vids = []\n\n bad_fn = \"/share/pi/cleemess/file-conversion-pipeline/bad_mp4s.txt\"\n good_fn = \"/share/pi/cleemess/file-conversion-pipeline/good_mp4s.txt\"\n # if os.path.exists(bad_fn):\n # os.remove(bad_fn)\n\n if os.path.exists(bad_fn):\n with open(bad_fn) as f:\n bad_paths = set([line.strip() for line in f.readlines()])\n else:\n bad_paths = set()\n\n if os.path.exists(good_fn):\n with open(good_fn) as f:\n good_paths = set([line.strip() for line in f.readlines()])\n else:\n good_paths = set()\n \n with tqdm(list(absolute_paths)) as pbar:\n for absolute_path in pbar:\n if absolute_path in bad_paths or absolute_path in good_paths:\n continue\n\n cmd = \"ffprobe -v quiet -print_format json -show_streams %s\" % absolute_path\n try:\n subprocess.check_output(shlex.split(cmd)).decode(\"utf-8\")\n with open(good_fn, \"a\") as f:\n f.write(absolute_path + \"\\n\")\n good_paths.add(absolute_path)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n with open(bad_fn, \"a\") as f:\n f.write(absolute_path + \"\\n\")\n bad_paths.add(absolute_path)\n # print(e)\n # print(cmd)\n # raise\n\n pbar.set_description(f\"{len(good_paths)}, {len(bad_paths)}\")\n return vids",
"def get_all_videos(dir, extension='mp4'):\n\n list_video_fn = []\n for dirpath, dirnames, filenames in os.walk(dir):\n for filename in [f for f in filenames if f.endswith(extension)]:\n fn = os.path.join(dirpath, filename)\n list_video_fn.append(fn)\n\n return list_video_fn",
"def get_all_videos_in_directory(directory: str):\n\n all_files_and_folders = os.listdir(directory)\n\n only_videos = []\n for file in all_files_and_folders:\n if is_video(file):\n only_videos.append(file)\n ...\n\n return only_videos",
"def plot_dir(main):\n try:\n wd = str(main.lineEdit_8.text())\n if wd == '':\n main.msg(\"Error \"+errorPath+\"plot_dir: Must choose directory first\")\n return\n for fi in os.listdir(wd):\n dataPath = os.path.join(wd, fi)\n main.msg(\"Plotting \"+str(fi))\n img = mpimg.imread(str(dataPath))\n imgObj = Img.Img(img, title=str(fi), filePath=str(dataPath))\n main.imgObjList.append(imgObj)\n func.update(main)\n slider.slider_update(main)\n except:\n main.msg(\"Error \"+errorPath+\"plot_dir: Make sure all files are images (tiff, jpeg, etc.)\")",
"def create_video_of_histograms(frame_mat, ion_dict, rims_object):\r\n print('\\n-------------------------------------------------------\\n')\r\n print('Generating video...')\r\n frame_folder = r'simulation outputs/00_Video outputs'\r\n if not os.path.exists(frame_folder):\r\n os.makedirs(frame_folder)\r\n\r\n '''Clearing out any old frames'''\r\n os.chdir(frame_folder)\r\n for file in os.listdir('.'):\r\n if file.startswith(\"cycle\"):\r\n os.remove(file)\r\n os.chdir(r'../..')\r\n\r\n '''Creating histogram for each cycle'''\r\n for cycle in range(load_one_setting(settings_filename,'MAX_CYCLES')):\r\n plt.figure(cycle)\r\n max_x = np.max(frame_mat) * pow(10, 4)\r\n min_x = np.min(frame_mat) * pow(10, 4)\r\n plt.ylim(0, 0.025)\r\n plt.xlim(min_x, max_x)\r\n '''Adding all ions to the cycle histogram'''\r\n for i_ion, ion in enumerate(ion_dict.items()):\r\n percentage_progress(cycle * len(ion_dict.items()) + i_ion,\r\n load_one_setting(settings_filename,'MAX_CYCLES') * len(ion_dict.items()), rims_object)\r\n x_results = frame_mat[i_ion][cycle]\r\n x_results_um = [dx * pow(10, 4) for dx in x_results]\r\n weights = np.ones_like(x_results_um) / float(len(x_results_um))\r\n plt.hist(x_results_um, weights=weights, bins=load_one_setting(settings_filename,'RESOLUTION'), label=str(ion[0]))\r\n '''Plot attributes and labels'''\r\n plt.ylabel('Density')\r\n plt.xlabel(r'X [$\\mu $m]')\r\n plt.title(r\"RIMS: Video of distribution x axis: $\\rho $(x,t)\", fontsize=12, fontweight='bold')\r\n plt.suptitle('ratchet cycle = ' + str(cycle), fontsize=10)\r\n plt.legend(loc='upper left')\r\n '''Documenting histogram'''\r\n file_name = 'cycle_' + str(cycle)\r\n plt.savefig(frame_folder+'/'+file_name+'.jpeg')\r\n plt.close(cycle)\r\n video_name = str(create_unique_id()) + ' Distribution video.avi'\r\n generate_video_from_frames(frame_folder + r'/', video_name)\r\n print(\"\\nVideo saved to \" + frame_folder + \" as \" + video_name)",
"def create_video_unique(all_obj_locs, fps=30):\n i = 0\n print(len(all_obj_locs[::STEP]))\n for i in range(len(all_obj_locs[::STEP])):\n plt.figure(figsize=(SIZE * 2, SIZE), dpi=80)\n plt.ylim([-LANE_LENGTH / 4 + 25, LANE_LENGTH / 4 + 75])\n plt.xlim([-50, LANE_LENGTH + 50])\n x_s = [p[0][1] for p in all_obj_locs[::STEP][:i + 1]]\n y_s = [p[0][0] for p in all_obj_locs[::STEP][:i + 1]]\n s = 10\n plt.plot([0, 0], [0 - MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.plot([LANE_LENGTH + MARGIN, LANE_LENGTH + MARGIN], [0 - MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.plot([0, LANE_LENGTH + MARGIN], [0 - MARGIN, 0 - MARGIN], color=\"red\")\n plt.plot([0, LANE_LENGTH + MARGIN], [LANE_WIDTH + MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.scatter(x_s, y_s, s=s)\n x_s_pins = init_pins()[:, 0]\n y_s_pins = init_pins()[:, 1]\n plt.scatter(y_s_pins, x_s_pins, s=3, color=\"black\")\n plt.savefig(\"data/frame\" + str(i) + \".png\")\n plt.close()\n # plt.show()\n i += 1\n create_video_from_frames(len(all_obj_locs[::STEP]), fps / STEP / DT)",
"def _choose_video(self) -> None:\n self.folderpath = []\n foldername = filedialog.askopenfilename(initialdir=\"/home/mateusz\", title=\"Select video\",\n filetypes=[(\"Movies\", '.avi')])\n self.folderpath.append(foldername)\n for label in self.filespaths_labels:\n label.destroy()\n for filepath in self.folderpath:\n label = tk.Label(self.import_frame, text=filepath, fg=\"#C4CBCC\", bg=\"#2A3538\")\n label.pack()\n self.filespaths_labels.append(label)",
"def gather_videos(files):\r\n # Because we are using a set, no duplicates will be present\r\n videos = set()\r\n for item in files:\r\n # Crawl subfolders\r\n if os.path.isdir(item):\r\n for root, _, filenames in os.walk(item):\r\n for filename in filenames:\r\n filepath = os.path.join(root, filename)\r\n # Check if its a video\r\n if YoutubeService.valid_video_file(filepath):\r\n videos.add(filepath)\r\n # If it exists it is a single file, check if its a video\r\n elif os.path.exists(item) and YoutubeService.valid_video_file(item):\r\n videos.add(item)\r\n return videos",
"def make_plots(self):\n n_rounds = self.run.n_rounds\n\n log.info('Making %d frames', n_rounds)\n args = [self._get_for_parallel(i) for i in range(n_rounds)]\n self.lbv.map(_plot_helper, args)",
"def in_show_video(name, vext='.mp4', ext='.png', loop=True, autoplay=True, controls=True, embed=False, figpath=figpath, **kwargs):\n import os\n from IPython.core.display import display, Image, HTML\n from base64 import b64encode\n\n opts = 'playsinline '\n if loop: opts += 'loop '\n if autoplay: opts += 'autoplay '\n if controls: opts += 'controls '\n if embed:\n try:\n with open(os.path.join(figpath, name + ext), \"rb\") as image_file:\n im1 = b64encode(image_file.read()).decode(\"utf-8\")\n with open(os.path.join(figpath, name + '_cube' + ext), \"rb\") as image_file:\n im2 = b64encode(image_file.read()).decode(\"utf-8\")\n with open(os.path.join(figpath, name + vext), \"rb\") as video_file:\n im3 = b64encode(video_file.read()).decode(\"utf-8\")\n\n s = \"\"\"\n <center><table border=none width=100% height=100%>\n <tr>\n <td width=33%%><center><img src=\"data:image/png;base64,{0}\" width=100%/></td>\n <td rowspan=2 colspan=2><center><video src=\"data:video/webm;base64,{1}\" {2} type=\"video/{3}\" width=100%/></td>\n </tr>\n <tr>\n <td><center><img src=\"data:image/png;base64,{4}\" width=100%/></td>\n </tr>\n </table></center>\"\"\".format(im1, im3, opts, vext[1:], im2)\n # display(HTML(s))\n except:\n video = open(os.path.join(figpath, name + vext), \"rb\").read()\n video_encoded = b64encode(video).decode(\"utf-8\")\n s = \"\"\"\n <center><table border=none width=100% height=100%>\n <tr> <td width=100%><center><video {0} src=\"data:video/{1};base64,{2}\" width=100%\\>\n </td></tr></table></center>\"\"\".format(opts, vext[1:], video_encoded)\n # display(HTML(s))\n else:\n\n if os.path.isfile(os.path.join(figpath, name + ext)) and os.path.isfile(os.path.join(figpath, name + '_cube' + ext)):\n if os.path.isfile(os.path.join(figpath, name + vext)):\n s = f\"\"\"\n <center><table border=none width=100% height=100%>\n <tr>\n <td width=33%%><center><img src=\"{os.path.join(figpath, name + ext)}\" width=100%/></td>\n <td rowspan=2 colspan=2><center>\n <video width=100% {opts}>\n <source src=\"{os.path.join(figpath, name + vext)}\" type=\"video/{vext[1:]}\">\n Your browser does not support the video tag.\n </video>\n </td>\n </tr>\n <tr>\n <td><center><img src=\"{os.path.join(figpath, name + '_cube' + ext)}\" width=100%/></td>\n </tr>\n </table></center>\"\"\"\n else:\n s = \"\"\"\n <center><table border=none width=100% height=100%>\n <tr>\n <td width=50%%><center><img src=\"{0}\" width=100%/></td>\n <td><center><img src=\"{1}\" width=100%/></td>\n </tr>\n </table></center>\"\"\".format(os.path.join(figpath, name + ext),\n os.path.join(figpath, name + '_cube' + ext))\n else:\n s = \"\"\"\n <center><table border=none width=100% height=100%>\n <tr> <td width=100%><center><video {0} src=\"{2}\" type=\"video/{1}\" width=100%\\>\n </td></tr></table></center>\"\"\".format(opts, vext[1:], os.path.join(figpath, name + vext))\n html = HTML(s)\n html.reload()\n display(html)",
"def loop_dir(dir_name: str, graph_ext: str) -> None:\n directory = fsencode(dir_name)\n for file in listdir(directory):\n filename = fsdecode(file)\n if filename.endswith(graph_ext):\n draw_graph(filename)",
"def all(folder, mt=False):\n handles = []\n experiments = get_experiment_series(folder, mT=mt)\n for ex in experiments:\n if mt:\n handles.append(\n plt.plot(\n ex.distance,\n ex.weight,\n label='{}mm {}mT'.format(ex.height, ex.magnet))[0])\n else:\n handles.append(\n plt.plot(\n ex.distance,\n ex.weight,\n label='{}mm'.format(ex.height))[0])\n plt.legend()\n plt.show()",
"def prepare_video(path_to_video: str, number_of_images=87) -> None:\n\n temp_video = path.join(path_to_video, 'temp_outpy.mp4')\n video = path.join(path_to_video, 'outpy.h264')\n\n # create mp4 video for metadata and compute video duration\n subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])\n result = subprocess.run([\"ffprobe\", \"-v\", \"error\", \"-show_entries\",\n \"format=duration\", \"-of\",\n \"default=noprint_wrappers=1:nokey=1\", temp_video],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n video_duration = float(result.stdout)\n\n # create images folder\n path_to_images = path.join(path_to_video, 'images')\n if path.exists(path_to_images) and path.isdir(path_to_images):\n shutil.rmtree(path_to_images)\n makedirs(path_to_images)\n\n # split the given video into images\n subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',\n path.join(path_to_images, 'image%d.jpg')])\n\n # remove extra files\n remove_extra_images(path_to_images, number_of_images)\n remove(temp_video)",
"def generate_figures():\r\n # create results directory if necessary\r\n try:\r\n makedirs(\"results\")\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n \r\n for b in benchmarks:\r\n generate_figure(model[b], b)",
"def create_plots(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n self.sse_plot()\n self.avg_sse_plot()",
"def run(self):\n\n for file_cnt, file_path in enumerate(self.files_found):\n video_timer = SimbaTimer()\n video_timer.start_timer()\n _, self.video_name, _ = get_fn_ext(file_path)\n self.video_info, self.px_per_mm, self.fps = self.read_video_info(\n video_name=self.video_name\n )\n self.width, self.height = int(\n self.video_info[\"Resolution_width\"].values[0]\n ), int(self.video_info[\"Resolution_height\"].values[0])\n if self.video_setting:\n self.fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)\n self.video_save_path = os.path.join(\n self.heatmap_clf_location_dir, self.video_name + \".mp4\"\n )\n self.writer = cv2.VideoWriter(\n self.video_save_path,\n self.fourcc,\n self.fps,\n (self.width, self.height),\n )\n if self.frame_setting:\n self.save_video_folder = os.path.join(\n self.heatmap_clf_location_dir, self.video_name\n )\n if not os.path.exists(self.save_video_folder):\n os.makedirs(self.save_video_folder)\n self.data_df = read_df(file_path=file_path, file_type=self.file_type)\n clf_array, aspect_ratio = self.__calculate_bin_attr(\n data_df=self.data_df,\n clf_name=self.clf_name,\n bp_lst=self.bp_lst,\n px_per_mm=self.px_per_mm,\n img_width=self.width,\n img_height=self.height,\n bin_size=self.bin_size,\n fps=self.fps,\n )\n\n if self.max_scale == \"auto\":\n self.max_scale = self.__calculate_max_scale(clf_array=clf_array)\n if self.max_scale == 0:\n self.max_scale = 1\n\n if self.final_img_setting:\n self.make_clf_heatmap_plot(\n frm_data=clf_array[-1, :, :],\n max_scale=self.max_scale,\n palette=self.palette,\n aspect_ratio=aspect_ratio,\n file_name=os.path.join(\n self.heatmap_clf_location_dir,\n self.video_name + \"_final_frm.png\",\n ),\n shading=self.shading,\n clf_name=self.clf_name,\n img_size=(self.width, self.height),\n final_img=True,\n )\n\n if self.video_setting or self.frame_setting:\n for frm_cnt, cumulative_frm_idx in enumerate(range(clf_array.shape[0])):\n frm_data = clf_array[cumulative_frm_idx, :, :]\n cum_df = pd.DataFrame(frm_data).reset_index()\n cum_df = cum_df.melt(\n id_vars=\"index\",\n value_vars=None,\n var_name=None,\n value_name=\"seconds\",\n col_level=None,\n ).rename(\n columns={\"index\": \"vertical_idx\", \"variable\": \"horizontal_idx\"}\n )\n cum_df[\"color\"] = (\n (cum_df[\"seconds\"].astype(float) / float(self.max_scale))\n .round(2)\n .clip(upper=100)\n )\n color_array = np.zeros(\n (\n len(cum_df[\"vertical_idx\"].unique()),\n len(cum_df[\"horizontal_idx\"].unique()),\n )\n )\n for i in range(color_array.shape[0]):\n for j in range(color_array.shape[1]):\n value = cum_df[\"color\"][\n (cum_df[\"horizontal_idx\"] == j)\n & (cum_df[\"vertical_idx\"] == i)\n ].values[0]\n color_array[i, j] = value\n\n fig = plt.figure()\n im_ratio = color_array.shape[0] / color_array.shape[1]\n plt.pcolormesh(\n color_array,\n shading=self.shading,\n cmap=self.palette,\n rasterized=True,\n alpha=1,\n vmin=0.0,\n vmax=float(self.max_scale),\n )\n plt.gca().invert_yaxis()\n plt.xticks([])\n plt.yticks([])\n plt.axis(\"off\")\n plt.tick_params(axis=\"both\", which=\"both\", length=0)\n cb = plt.colorbar(pad=0.0, fraction=0.023 * im_ratio)\n cb.ax.tick_params(size=0)\n cb.outline.set_visible(False)\n cb.set_label(\n \"{} (seconds)\".format(self.clf_name), rotation=270, labelpad=10\n )\n plt.tight_layout()\n plt.gca().set_aspect(aspect_ratio)\n canvas = FigureCanvas(fig)\n canvas.draw()\n mat = np.array(canvas.renderer._renderer)\n image = cv2.cvtColor(mat, cv2.COLOR_RGB2BGR)\n image = cv2.resize(image, (self.width, self.height))\n image = 
np.uint8(image)\n plt.close()\n\n if self.video_setting:\n self.writer.write(image)\n if self.frame_setting:\n frame_save_path = os.path.join(\n self.save_video_folder, str(frm_cnt) + \".png\"\n )\n cv2.imwrite(frame_save_path, image)\n print(\n \"Created heatmap frame: {} / {}. Video: {} ({}/{})\".format(\n str(frm_cnt + 1),\n str(len(self.data_df)),\n self.video_name,\n str(file_cnt + 1),\n len(self.files_found),\n )\n )\n\n if self.video_setting:\n self.writer.release()\n\n video_timer.stop_timer()\n print(\n \"Heatmap plot for video {} saved (elapsed time: {}s) ... \".format(\n self.video_name, video_timer.elapsed_time_str\n )\n )\n\n self.timer.stop_timer()\n stdout_success(\n msg=\"All heatmap visualizations created in project_folder/frames/output/heatmaps_classifier_locations directory\",\n elapsed_time=\"self.timer.elapsed_time_str\",\n )",
"def subplottPNG(self):\n os.chdir(self.mainDir)\n folder = os.listdir(u'.')\n folders = [f for f in folder if f[0] == 'S']\n\n for subject in folders:\n\n try: # go to the 'results' directory\n resultsDir = os.path.join(os.path.join(self.mainDir, subject),'results')\n os.chdir(resultsDir)\n\n # find all files with .png extension\n pngfiles = glob.glob('*.png')\n pngfiles.sort(key = lambda x:x[0])\n pngfiles.sort(key = lambda x:x[1])\n\n fig = plt.figure()\n\n for ii, filename in enumerate(pngfiles):\n f = plt.subplot(4,4,ii+1)\n f.set_axis_off()\n f.set_xlabel('ses:'+str(ii+1))# f.set_figheight(15)\n fig.set_figwidth(30)\n fig.set_figheight(30)\n fig.tight_layout()\n img = matplotlib.image.imread(filename)\n plt.imshow(img)\n\n figname = subject + '_subplot'+ '.png'\n matplotlib.pyplot.savefig(figname)\n\n except Exception as errMessage:\n print(errMessage)"
]
| [
"0.6731108",
"0.65034777",
"0.6334065",
"0.6266662",
"0.6246572",
"0.6235564",
"0.62117976",
"0.61683494",
"0.6158867",
"0.6102118",
"0.6092265",
"0.6078301",
"0.606834",
"0.5971214",
"0.5945981",
"0.59456533",
"0.5926945",
"0.5920736",
"0.5918344",
"0.58989173",
"0.5894912",
"0.5870374",
"0.5835195",
"0.5828622",
"0.5825802",
"0.5807936",
"0.58065224",
"0.5796297",
"0.5749138",
"0.5749013"
]
| 0.74448717 | 0 |
Create `nb_existing` + `nb_new` lines in the transmittal. | def create_lines(self, nb_existing=1, nb_new=1, **kwargs):
doc = DocumentFactory(
metadata_factory_class=ContractorDeliverableFactory,
revision_factory_class=ContractorDeliverableRevisionFactory,
category=self.category)
rev = doc.get_latest_revision()
metadata = doc.metadata
arguments = {
'transmittal': self.transmittal,
'document': doc,
'document_key': doc.document_key,
'title': doc.title,
'is_new_revision': False,
'category': self.category,
'revision': rev.revision,
}
arguments.update(kwargs)
# Existing revisions
for i in range(nb_existing):
rev = ContractorDeliverableRevisionFactory(
metadata=metadata)
arguments.update({'revision': rev.revision})
TrsRevisionFactory(**arguments)
metadata.latest_revision = rev
metadata.save()
arguments.update({'is_new_revision': True})
# New revisions
for i in range(nb_new):
arguments.update({'revision': rev.revision + i + 1})
TrsRevisionFactory(**arguments)
return doc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def newLine(self) :\n if not self.hpgl2 :\n dic = self.pages.get(self.pagecount, None)\n if dic is None :\n self.setPageDict(\"linescount\", 1) \n dic = self.pages.get(self.pagecount)\n nblines = dic[\"linescount\"] \n self.setPageDict(\"linescount\", nblines + 1) \n if (self.linesperpage is not None) \\\n and (dic[\"linescount\"] > self.linesperpage) :\n self.pagecount += 1",
"def _dig_line_count_changed(self, text):\n self._setup_table_digital()",
"def test_line_counts(self):\n diff = (\n b'+ This is some line before the change\\n'\n b'- And another line\\n'\n b'Index: foo\\n'\n b'- One last.\\n'\n b'--- README 123\\n'\n b'+++ README (new)\\n'\n b'@@ -1,1 +1,1 @@\\n'\n b'-blah blah\\n'\n b'-blah\\n'\n b'+blah!\\n'\n b'-blah...\\n'\n b'+blah?\\n'\n b'-blah!\\n'\n b'+blah?!\\n')\n files = DiffParser(diff).parse()\n\n self.assertEqual(len(files), 1)\n self.assertEqual(files[0].insert_count, 3)\n self.assertEqual(files[0].delete_count, 4)",
"def send_new_images(self, n_new):\r\n if n_new == 1:\r\n if self.next_data_has_pump:\r\n #self.pump_probe_data -= self.background\r\n self.new_pump_probe.emit(self.wavelen_arr,\r\n self.pump_probe_data - self.background)\r\n self.next_data_has_pump = False\r\n else:\r\n self.new_probe_only.emit(self.wavelen_arr,\r\n self.probe_only_data - self.background)\r\n self.next_data_has_pump = True\r\n else: # n_new == 2\r\n self.new_probe_only.emit(self.wavelen_arr,\r\n self.probe_only_data - self.background)\r\n self.new_pump_probe.emit(self.wavelen_arr,\r\n self.pump_probe_data - self.background)",
"def add_to_memory(count):\n\t\tSimulation.past_attendance.append(count)\n\t\tPerson.add_to_memory(count)\n\t\tSimulation.simulation_progression.write(str(Person.recent_memory))\n\t\tSimulation.simulation_progression.write(\"\\n\")",
"def appendsize(self, numents):\n pass",
"def nblines(self, val):\n data = val & self.NBLINES_MASK\n self._ftdi.spi_write(self.NBLINES_ADDR, [data], burst='fixed')",
"def cb_line_numbers(data, item, window):\n bar_height = weechat.window_get_integer(window, \"win_chat_height\")\n content = \"\"\n for i in range(1, bar_height + 1):\n content += \"%s \\n\" % i\n return content",
"def set_numcells(self, N):\n\t\tself.create_cells(N)\n\t\tself.connect_cells()\n\t\tself.connect_stim()",
"def NewItems(self) -> _n_1_t_7:",
"def no_of_lines():\n return render_template(\"no_of_lines.html\", no_of_lines=no_of_lines())",
"def test_generate_nb(self):\n pass",
"def fullIntroduceNew (self, networkSize, numNodes, time) :\n\t\tnodeList = self.createNodes(networkSize)\n\t\tself.addAppRecordDiff(nodeList)\n\t\tsessionInfo = self.sessionsFull(nodeList)\n\t\ttotal = 0\n\t\twhile self.endConditionData(nodeList) :\n\t\t\tif total == time :\n\t\t\t\tfor i in range(networkSize, networkSize + numNodes):\n\t\t\t\t\tnode = Node(str(i))\n\t\t\t\t\tnodeList.append(node)\n\t\t\t\t\tnode.addAppData(\"record\"+str(i),\"data\" + str(i), Node.ALL, Node.ALL )\n \t\t\tnode.serialize((Node.ALL, Node.ALL))\n\t\t\t\t\tsessionInfo = self.sessionsFull(nodeList)\n\t\t\tindex = random.randint(0, len(sessionInfo)-1)\n\t\t\tclient = sessionInfo[index][0]\n\t\t\tserver = sessionInfo[index][1]\n\t\t\tself.fullDBReplication(nodeList[client], sessionInfo[index][2])\n\t\t\ttotal = total + 1\n\t\treturn total",
"def count():\r\n c = eNine.get()\r\n eNine.delete(0, END)\r\n count = int(c)\r\n count += 1\r\n eNine.insert(0, count)",
"def number_idx(self, filename):\n with open(filename) as fh:\n firstline = fh.readline()\n parts = firstline.split('\\t')\n # only add if there are 4 parts\n if len(parts) != 4:\n return\n\n count = 1\n def writeline(fho, line, count):\n fho.write(line.rstrip() + '\\t' + str(count) + '\\n')\n\n with open(filename + '.tmp', 'w+b') as fho:\n writeline(fho, firstline, count)\n count += 1\n for line in fh:\n writeline(fho, line, count)\n count += 1\n\n shutil.move(filename + '.tmp', filename)",
"def set_numcells(self, N = []):\n self.set_gids(N)\n self.create_cells()\n\n #self.syn_output() # generate synaptic \"output\" in neuron\n #self.connect_cells()",
"def _getNewCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"-\"):\n nb_lines += 1\n return nb_lines",
"def _mint_new_ott_ids(self, how_many=1):\n first_minted_id = self._next_ott_id\n self._next_ott_id = first_minted_id + how_many\n content = u'{\"next_ott_id\": %d}\\n' % self._next_ott_id\n # The content is JSON, but we hand-rolled the string above\n # so that we can use it as a commit_msg\n self._write_master_branch_resource(content,\n self._id_minting_file,\n commit_msg=content,\n is_json=False)\n last_minted_id = self._next_ott_id - 1\n return first_minted_id, last_minted_id",
"def add_count(self):\n self.count += 1",
"def newtail(f, n, offset=0):\n for i, line in enumerate(f):\n print(\"newtail stats\", i, n, line, )\n if i == n:\n return line",
"def insert_new_lines(compiled, process): # pylint: disable=too-many-locals\n codes = {}\n codes_offsets = {}\n codes_lines = {}\n\n original_line_offsets(compiled, codes, codes_offsets, codes_lines)\n\n for code, inst in code_dis_sorted_line(compiled, recurse=True):\n lines = codes_lines[id(code)]\n offsets = codes_offsets[id(code)]\n process(inst, code, offsets, lines)\n\n # Create fake line numbers: lines should be unique\n new_codes_lines = {}\n size = max(Counter(lines).most_common(1)[0][1]\n for lines in codes_lines.values())\n\n for code_id, lines in codes_lines.items():\n new_lines = []\n last = -1\n for line in lines:\n if line != last:\n offset = 0\n new_lines.append(line * size + offset)\n offset += 1\n last = line\n new_codes_lines[code_id] = new_lines\n\n return size, recreate_code(compiled, codes_offsets, new_codes_lines)",
"def test_generate_nb_testing(self):\n pass",
"def balance_check_existing_oldBF_new_view(request):\n # Check connected\n if not Utils.has_permission(request, request.registry.settings['affaire_numero_edition']):\n raise exc.HTTPForbidden()\n \n affaire_id = request.params['affaire_id']\n oldBF = json.loads(request.params['oldBF'])\n\n # Control existance of each oldBF\n numero_obj = []\n for bf in oldBF:\n bf_cadastre_id, bf_numero = bf.split(\"_\")\n numero = request.dbsession.query(Numero).filter(and_(\n Numero.cadastre_id == bf_cadastre_id,\n Numero.numero == bf_numero\n )).first()\n\n # Create number if it doesn't exist\n if not numero:\n numero = Numero(\n cadastre_id = bf_cadastre_id,\n type_id = request.registry.settings['numero_bf_id'],\n numero = bf_numero,\n etat_id = request.registry.settings['numero_vigueur_id']\n )\n \n request.dbsession.flush()\n\n # Add numero to array of Numeros created\n numero_obj.append(numero)\n \n # Add numero_affaire link\n affNum = AffaireNumero()\n affNum(\n affaire_id = affaire_id,\n numero_id = numero.id,\n type_id = request.registry.settings['numero_bf_id'],\n actif = True,\n )\n\n # Add numero_etat_histo link\n numEtatHisto = NumeroEtatHisto()\n numEtatHisto(\n numero_id = numero.id,\n numero_etat_id = request.registry.settings['numero_vigueur_id'],\n date = datetime.now().date()\n )\n\n return Utils.serialize_many(numero_obj)",
"def insertnln(n=1):\r\n\tidx = 0\r\n\twhile idx < n:\r\n\t\tCONSOLE.insertln()\r\n\t\tidx = idx + 1",
"def _add_NR(self, w2, row):\n row['NR'] = None\n return True",
"def new(self):\n self.last_artistid += 1\n self.add_artist_line(self.last_artistid)\n vbar = self.scrl.verticalScrollBar()\n vbar.setMaximum(vbar.maximum() + 34)\n vbar.setValue(vbar.maximum())",
"def insert_new_label(self, label, index, nvals):\n if label in self.labels: return\n self.labels.append(label)\n self.parents.append(self.find_parent_label(label))\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0",
"def add_newlines(self: logging.Logger, num_newlines=1) -> None:\n self.removeHandler(self.base_handler)\n self.addHandler(self.newline_handler)\n\n # Main code comes here\n for _ in range(num_newlines):\n self.info('')\n\n self.removeHandler(self.newline_handler)\n self.addHandler(self.base_handler)",
"def _on_new_batch(self, data):\n data[self.pid_cols] = self.pid.digitize(data[self.pid_cols])\n #set counts back to 0\n for label in self.labels:\n self.lab_counts[label] = 0 \n for col in self.cat_cols:\n for label in self.labels:\n for val in self.categories[col]:\n self.cat_counts[col][label][val] = 0\n \n #add each row to the counts\n for index, row in data.iterrows():\n label = row[self.target_col_name]\n self.lab_counts[label] += 1\n \n for col in self.cat_cols:\n #skip nans\n if self.isnan(row[col]):\n continue\n val = row[col]\n self.cat_counts[col][label][val] += 1\n \n self._calculate_probs_and_entropies()",
"def save_fibers(oldhdr, oldfib, fname, indices):\n hdrnew = oldhdr.copy()\n outstreams = []\n for i in indices:\n outstreams.append(oldfib[i])\n n_fib_out = len(outstreams)\n hdrnew[\"n_count\"] = n_fib_out\n iflogger.info(\"Writing final non-orphan fibers as %s\", fname)\n nb.trackvis.write(fname, outstreams, hdrnew)\n return n_fib_out"
]
| [
"0.5686097",
"0.53973794",
"0.5373536",
"0.52844876",
"0.5228212",
"0.51969445",
"0.5139273",
"0.5136653",
"0.51357573",
"0.51017237",
"0.50866765",
"0.5070022",
"0.5069148",
"0.50635684",
"0.50523114",
"0.500983",
"0.49911618",
"0.49664697",
"0.4954358",
"0.4953977",
"0.4944995",
"0.49270537",
"0.49208948",
"0.4918291",
"0.4914896",
"0.49138218",
"0.49067765",
"0.49014235",
"0.49010357",
"0.49003887"
]
| 0.55814284 | 1 |
Non-contractor cannot ack receipt of transmittals. | def test_non_contractor_acks_receipt(self):
res = self.client.post(self.url)
self.assertEqual(res.status_code, 403) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _send_ack(self):\n ack_packet = packet.Packet.from_data(\n 0,\n self.dest_addr,\n self.own_addr,\n ack=self._next_expected_seqnum\n )\n self._schedule_send_out_of_order(ack_packet)",
"def ack_required(self):\n v = self[22]\n v = v >> 1\n return (v & 0b1) != 0",
"def no_ack(self):\n\n return self._block.tx_policies[self._lvap.addr].no_ack",
"def on_ack(self, data):\n logger.debug('on_ack: %r', data)\n raise NotAccepted('Server should not be ACKed.')",
"def _check_acknowledgement(self, response):\n\n if response == self.NAK + self.CR + self.LF:\n message = 'Serial communication returned negative acknowledge (NAK). ' \\\n 'Check AGC100 documentation for more details.'\n raise IOError(message)\n\n elif response != self.ACK + self.CR + self.LF:\n message = 'Serial communication returned unknown response:\\n{}' \\\n ''.format(repr(response))\n raise AssertionError(message)",
"def no_ack(self, no_ack):\n\n self._block.tx_policies[self._lvap.addr].no_ack = no_ack",
"def test_transaction_is_affordable_there_is_no_wealth(self):\n currency_endowment = {\"FET\": 0}\n good_endowment = {\"good_id\": 0}\n self.ownership_state.init(\n amount_by_currency_id=currency_endowment,\n quantities_by_good_id=good_endowment,\n )\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=\"transaction0\",\n tx_sender_addr=\"agent_1\",\n tx_counterparty_addr=\"pk\",\n tx_amount_by_currency_id={\"FET\": 0},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 0},\n info={\"some_info_key\": \"some_info_value\"},\n ledger_id=\"fetchai\",\n tx_nonce=\"transaction nonce\",\n )\n\n assert not self.ownership_state.is_affordable_transaction(\n tx_message=tx_message\n ), \"We must reject the transaction.\"",
"def __is_ack(self, ack) -> bool:\n return ack == ['void']",
"def acknowledged(self):\n ...",
"def tests_transaction_is_affordable_else_statement(self):\n currency_endowment = {\"FET\": 0}\n good_endowment = {\"good_id\": 0}\n self.ownership_state.init(\n amount_by_currency_id=currency_endowment,\n quantities_by_good_id=good_endowment,\n )\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=\"transaction0\",\n tx_sender_addr=\"agent_1\",\n tx_counterparty_addr=\"pk\",\n tx_amount_by_currency_id={\"FET\": 10},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 50},\n info={\"some_info_key\": \"some_info_value\"},\n ledger_id=\"fetchai\",\n tx_nonce=\"transaction nonce\",\n )\n\n assert not self.ownership_state.is_affordable_transaction(\n tx_message=tx_message\n ), \"We must reject the transaction.\"",
"def test_decision_maker_tx_message_is_not_acceptable_for_settlement(self):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=self.tx_id,\n tx_sender_addr=self.tx_sender_addr,\n tx_counterparty_addr=self.tx_counterparty_addr,\n tx_amount_by_currency_id={\"FET\": -2},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n ledger_id=self.ledger_id,\n info=self.info,\n tx_nonce=\"Transaction nonce\",\n )\n\n with mock.patch.object(\n self.decision_maker, \"_is_acceptable_for_settlement\", return_value=True\n ):\n with mock.patch.object(\n self.decision_maker, \"_settle_tx\", return_value=None\n ):\n self.decision_maker.handle(tx_message)\n assert not self.decision_maker.message_out_queue.empty()",
"def consume_ack(self, event):\n pass",
"async def test_prevent_out_of_order_txs(self):\n\n tx1 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10)\n dtx1 = decode_transaction(tx1)\n stx1 = sign_transaction(tx1, FAUCET_PRIVATE_KEY)\n tx2 = await self.get_tx_skel(FAUCET_PRIVATE_KEY, TEST_ADDRESS, 10 ** 10, dtx1.nonce + 1)\n stx2 = sign_transaction(tx2, FAUCET_PRIVATE_KEY)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 400, resp.body)\n\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx1})\n self.assertEqual(resp.code, 200, resp.body)\n resp = await self.fetch(\"/tx\", method=\"POST\", body={\"tx\": stx2})\n self.assertEqual(resp.code, 200, resp.body)",
"def test_reject_agreement(self):\n pass",
"def test_solicitation_no_reply_resend(self):\n waittime = self.autoconflayer._solicitation_timeout * 4.0\n self.autoconflayer.start_process()\n interest = Interest(Name('/foo/bar'))\n self.queue_from_higher.put([None, interest])\n\n # Catch all data the autoconfig layer sends downwards for 3 seconds\n deadline = datetime.utcnow() + timedelta(seconds=waittime)\n tolower = []\n while datetime.utcnow() < deadline:\n try:\n data = self.queue_to_lower.get(timeout=waittime/10)\n tolower.append(data)\n except queue.Empty:\n pass\n # Make sure the broadcast face was actually created and get its face id\n bcfid = self.faceidtable.get_or_create_faceid(AddressInfo(('127.255.255.255', 4242), 0))\n self.assertIsNotNone(bcfid)\n # Make sure the forwarder solicitation was sent more than once\n solictiation = Interest(Name('/autoconfig/forwarders'))\n solictiation_count = len([1 for data in tolower if data == [bcfid, solictiation]])\n self.assertGreater(solictiation_count, 1)",
"def ack_transmit(self, data, max_freq_retry=5, max_tx_retry=10):\n\t\ttransmit_success = False\n\t\tfor freq_retry in range(max_freq_retry):\n\t\t\tfor tx_retry in range(max_tx_retry):\n\t\t\t\tself.tb.transmit(data)\n\t\t\t\ttime.sleep(0.15)\n\t\t\t\tif self.ack_processor.get_last_ack() != self.seqnum:\n\t\t\t\t\tself.log(\"Warning: no ACK received, retrying\", hue.bad)\n\t\t\t\telse:\n\t\t\t\t\tself.log(\"ACK received\", hue.good)\n\t\t\t\t\ttransmit_success = True\n\t\t\t\t\treturn True\n\t\t\tself.log(\"Warning: switching frequency\", hue.bad)\n\t\t\tself.tb.frequency_switch()\n\t\treturn False",
"def ack(self):\n return (self.status == self.STATUS_ACK)",
"def would_retransmit(self):\n return not self.my_pending_requests.is_empty()",
"def can_escalate(self, depended=False):\n if not self.tt_system or not self.tt_system_id:\n return False\n return self.can_notify(depended)",
"def on_enq(self, data):\n logger.debug('on_enq: %r', data)\n if not self.in_transfer_state:\n self.in_transfer_state = True\n return ACK\n else:\n logger.error('ENQ is not expected')\n return NAK",
"def test_acknowledge_orders(self):\n pass",
"def test_ticket_not_consumed(self):\n st = ServiceTicketFactory()\n self.assertFalse(st.is_consumed())",
"def test_process_packet_ack(self):\n pkt = {'type': 'ack',\n 'ackId': 140,\n 'endpoint': '',\n 'args': []}\n self.ns.process_packet(pkt)\n assert not self.environ['socketio'].error.called",
"def check_for_no_ban_on_rejected_tx(self, tx, reject_reason):\n self.nodes[0].p2p.send_txs_and_test(\n [tx], self.nodes[0], success=False, reject_reason=reject_reason)",
"def test_transaction_is_not_affordable(self):\n tx_message = TransactionMessage(\n performative=TransactionMessage.Performative.PROPOSE_FOR_SETTLEMENT,\n skill_callback_ids=[PublicId(\"author\", \"a_skill\", \"0.1.0\")],\n tx_id=\"transaction0\",\n tx_sender_addr=\"agent_1\",\n tx_counterparty_addr=\"pk\",\n tx_amount_by_currency_id={\"FET\": -20},\n tx_sender_fee=0,\n tx_counterparty_fee=0,\n tx_quantities_by_good_id={\"good_id\": 10},\n ledger_id=\"off_chain\",\n info={\"some_info_key\": \"some_info_value\"},\n tx_nonce=\"Transaction nonce\",\n )\n\n with mock.patch.object(\n self.ledger_state_proxy.ledger_apis, \"token_balance\", return_value=0\n ):\n result = self.ledger_state_proxy.is_affordable_transaction(\n tx_message=tx_message\n )\n assert not result",
"def test_ecomm_cancel_package_failure():\n data = {'waybill': global_awb_number}\n cancel_package = CancelShipment(TEST_CREDS)\n response = cancel_package.send_request(data)\n assert response['success'] == True",
"def test_ComputerPartition_error_AccountingResource_DeliveredState(self):\n sequence_list = SequenceList()\n sequence_string = self.prepare_computer_partition_accounting_resource_sequence_string + '\\\n LoginDefaultUser \\\n StartSalePackingList \\\n CleanTic \\\n StopSalePackingList \\\n CleanTic \\\n DeliverSalePackingList \\\n Tic \\\n Logout \\\n SlapLoginCurrentComputer \\\n CheckSoftwareReleaseErrorCall \\\n Tic \\\n SlapLogout \\\n LoginDefaultUser \\\n CheckSalePackingListNoErrorText \\\n CheckDeliveredSalePackingList \\\n SelectCurrentlyUsedSalePackingListUid \\\n CheckSalePackingListErrorText \\\n Logout \\\n LoginERP5TypeTestCase \\\n CheckSiteConsistency \\\n Logout \\\n '\n sequence_list.addSequenceString(sequence_string)\n sequence_list.play(self)",
"def _transmit_without_retry(self, envelopes):\n # Contains logic from transport._transmit\n # TODO: Remove this function from exporter and consolidate with\n # transport._transmit to cover all exporter use cases. Uses cases\n # pertain to properly handling failures and implementing a retry\n # policy for this exporter.\n # TODO: implement retry policy\n try:\n response = requests.post(\n url=self.options.endpoint,\n data=json.dumps(envelopes),\n headers={\n 'Accept': 'application/json',\n 'Content-Type': 'application/json; charset=utf-8',\n },\n timeout=self.options.timeout,\n )\n except Exception as ex:\n # No retry policy, log output\n logger.warning('Transient client side error %s.', ex)\n return\n\n text = 'N/A'\n data = None\n # Handle the possible results from the response\n if response is None:\n logger.warning('Error: cannot read response.')\n return\n try:\n status_code = response.status_code\n except Exception as ex:\n logger.warning('Error while reading response status code %s.', ex)\n return\n try:\n text = response.text\n except Exception as ex:\n logger.warning('Error while reading response body %s.', ex)\n return\n try:\n data = json.loads(text)\n except Exception as ex:\n logger.warning('Error while loading ' +\n 'json from response body %s.', ex)\n return\n if status_code == 200:\n logger.info('Transmission succeeded: %s.', text)\n return\n # Check for retryable partial content\n if status_code == 206:\n if data:\n try:\n retryable_envelopes = []\n for error in data['errors']:\n if error['statusCode'] in (\n 429, # Too Many Requests\n 500, # Internal Server Error\n 503, # Service Unavailable\n ):\n retryable_envelopes.append(\n envelopes[error['index']])\n else:\n logger.error(\n 'Data drop %s: %s %s.',\n error['statusCode'],\n error['message'],\n envelopes[error['index']],\n )\n # show the envelopes that can be retried manually for\n # visibility\n if retryable_envelopes:\n logger.warning(\n 'Error while processing data. Data dropped. ' +\n 'Consider manually retrying for envelopes: %s.',\n retryable_envelopes\n )\n return\n except Exception:\n logger.exception(\n 'Error while processing %s: %s.',\n status_code,\n text\n )\n return\n # Check for non-retryable result\n if status_code in (\n 206, # Partial Content\n 429, # Too Many Requests\n 500, # Internal Server Error\n 503, # Service Unavailable\n ):\n # server side error (retryable)\n logger.warning(\n 'Transient server side error %s: %s. ' +\n 'Consider manually trying.',\n status_code,\n text,\n )\n else:\n # server side error (non-retryable)\n logger.error(\n 'Non-retryable server side error %s: %s.',\n status_code,\n text,\n )",
"def no_more_acks() -> bool:\n return not any(not op.is_set() for op in self._pending_operations.values())",
"def test_not_accept(mocker, client, application, decision, should_send_email):\n order = create_test_order(application, 123, fulfilled=False)\n\n data = {\"req_reference_number\": make_reference_id(order), \"decision\": decision}\n mocker.patch(\n \"ecommerce.views.IsSignedByCyberSource.has_permission\", return_value=True\n )\n send_email = mocker.patch(\"ecommerce.api.MailgunClient.send_individual_email\")\n resp = client.post(reverse(\"order-fulfillment\"), data=data)\n assert resp.status_code == statuses.HTTP_200_OK\n assert len(resp.content) == 0\n order.refresh_from_db()\n assert Order.objects.count() == 1\n assert order.status == Order.FAILED\n\n if should_send_email:\n assert send_email.call_count == 1\n assert send_email.call_args[0] == (\n \"Order fulfillment failed, decision={decision}\".format(\n decision=\"something else\"\n ),\n \"Order fulfillment failed for order {order}\".format(order=order),\n \"[email protected]\",\n )\n else:\n assert send_email.call_count == 0"
]
| [
"0.6460112",
"0.64305913",
"0.64181185",
"0.6250114",
"0.6188823",
"0.608136",
"0.59823006",
"0.5949967",
"0.5943985",
"0.5902159",
"0.5897936",
"0.58878154",
"0.5869167",
"0.5841636",
"0.58208567",
"0.5818153",
"0.5790359",
"0.5729279",
"0.5706158",
"0.56860346",
"0.5656956",
"0.561294",
"0.56128997",
"0.5599173",
"0.5576129",
"0.5567944",
"0.555802",
"0.5528098",
"0.55260676",
"0.55188847"
]
| 0.69539994 | 0 |
Make a prediction with the given input. The prediction process consists of transferring the input tensor to the model device, a forward pass of the nn_module in eval mode, and applying the prediction_transform to the raw prediction output. | def predict(self, input):
self._check_predict_ready()
with torch.no_grad():
self.eval()
input = deep_to(input, self.device)
prediction = self.nn_module(input)
prediction = self.prediction_transform(prediction)
return prediction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self, input):\n\n with torch.no_grad():\n input_tensor = self.preprocess_input(input)\n p_labels, p_probs = self.predict_on_batch(input_tensor)\n predictions = np.stack((p_labels, p_probs), axis=1)\n\n return predictions",
"def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)",
"def predict(cls, input):\n clf = cls.get_model()\n return clf.predict(input)",
"def predict(self, input_sequence):\n return self.session.run(self.prediction, feed_dict={self.input_placeholder: input_sequence})",
"def predict(self, input):\n input = input.reshape((input.shape[0], 1))\n return self.feedforward(input)",
"def predict_step(self, x):\n\n input_x = self.session.graph.get_operation_by_name(\"input_x\").outputs[0]\n predictions_op = self.session.graph.get_operation_by_name(\"output/predictions\").outputs[0] \n\n d_ = {\n input_x: x\n }\n\n self.init_dataset(d_)\n\n return self.session.run([predictions_op])",
"def predict(self, input_):\n return torch.cat(self.forward(input_), dim=-1)",
"def predict(self, x):\n if self.training:\n self.eval()\n\n with torch.no_grad():\n x = self.forward(x)\n if self.activation:\n x = self.activation(x)\n\n return x",
"def predict(self, x):\n if self.training:\n self.eval()\n\n with torch.no_grad():\n x = self.forward(x)\n if self.activation:\n x = self.activation(x)\n\n return x",
"def make_prediction(*, input_image):\n \n pipeline = Pipeline(model)\n resized_image = pipeline.resize_image(input_image)\n prediction = argmax(pipeline.make_prediction(resized_image))\n \n return prediction",
"def target_predict(self, inp):\n return self.target_model.predict(inp)",
"def predict_fn(input_object, model):\n if torch.cuda.is_available():\n input_object = input_object.cuda()\n input_object = torch.unsqueeze(input_object, 0)\n\n with torch.no_grad():\n prediction = model(input_object)\n return prediction",
"def predict(self, src):\n\n src = torch.as_tensor(src).float()\n\n self.eval()\n\n return self.forward(src)",
"def predict(self, source_input):\n self.next_source = source_input\n translation = self.estimator.predict(\n input_fn=self.input_fn(self._create_generator))\n return translation",
"def predict_sync(\n input: predictor.Input = Body(..., example=predictor.factory.mock_input()),\n ):\n return predictor.run(input)",
"def make_prediction(*, input_data) -> dict:\n\n data = pd.read_json(input_data)\n validated_data = validation.validate_inputs(input_data=data)\n prediction = _model_pipeline.predict(validated_data[configs.FEATURES])\n validated_data['target'] = prediction\n \n # model monitoring\n build_reports(validated_data)\n\n output = np.exp(prediction)\n response = {\"predictions\": output}\n\n return response",
"def predict(self, x: torch.Tensor) -> torch.Tensor:\n # Set model to evaluation mode to deactivate dropouts\n self.eval()\n # Forward pass in the network\n return self.forward(x)",
"def predict_on_batch(self, x):\n # TODO: Understand how pytorch models could return multiple outputs\n import torch\n from torch.autograd import Variable\n\n if isinstance(x, np.ndarray):\n # convert to a pytorch tensor and then to a pytorch variable\n input = self._torch_var(torch.from_numpy(self.correct_neg_stride(x)))\n pred = self.model(input)\n\n elif isinstance(x, dict):\n # convert all entries in the dict to pytorch variables\n input_dict = {k: self._torch_var(torch.from_numpy(self.correct_neg_stride(x[k]))) for k in x}\n pred = self.model(**input_dict)\n\n elif isinstance(x, list):\n # convert all entries in the list to pytorch variables\n input_list = [self._torch_var(torch.from_numpy(self.correct_neg_stride(el))) for el in x]\n pred = self.model(*input_list)\n\n else:\n raise Exception(\"Input not supported!\")\n\n # convert results back to numpy arrays\n if isinstance(pred, Variable):\n pred_np = self._torch_var_to_numpy(pred)\n\n elif isinstance(pred, dict):\n pred_np = {k: self._torch_var_to_numpy(pred[k]) for k in pred}\n\n elif isinstance(pred, list) or isinstance(pred, tuple):\n pred_np = [self._torch_var_to_numpy(el) for el in pred]\n\n else:\n raise Exception(\"Model output format not supported!\")\n\n return pred_np",
"def predict(self, inputs):\n return self.model.predict(inputs)",
"def predict(self, X_pred):\n \n with tf.Session() as sess:\n self.saver.restore(sess, self.log_dir + '/model')\n\n y_pred = sess.run(self.output_class, feed_dict={self.X_tf: X_pred, self.keep_prob: 1.0})\n return y_pred",
"def compute_pred_network_output(self, model_idx, input_data):\n\n feed_dict = {}\n feed_dict[self.X_Minibatch] = input_data\n return self.session.run(self.pred_output[model_idx], feed_dict=feed_dict)",
"def predict(self, inputs):\n\n return self.model.predict(inputs)",
"def predict_on_batch(self, input_batch):\n from deeplift.util import run_function_in_batches\n from deeplift.util import compile_func\n x_standardized = self.model._batch_to_list(input_batch)\n if self.fwd_predict_fn is None:\n # TODO: Once DeepLIFT layer annotation works integrate it here too:\n \"\"\"\n # identify model output layers:\n self.output_layers_idxs = []\n for output_name in self.model.model.output_names:\n for i, l in enumerate(self.model.model.layers):\n if l.name == output_name:\n self.output_layers_idxs.append(i)\n \"\"\"\n inputs = [self.deeplift_model.get_layers()[i].get_activation_vars()\n for i in self.input_layer_idxs]\n outputs = [self.deeplift_model.get_layers()[i].get_activation_vars()\n for i in self.output_layers_idxs]\n self.fwd_predict_fn = compile_func(inputs, outputs)\n\n preds = run_function_in_batches(\n input_data_list=x_standardized,\n func=self.fwd_predict_fn,\n batch_size=self.batch_size,\n progress_update=None)\n\n preds = np.array(preds)\n if len(self.output_layers_idxs) == 1:\n preds = preds[0, ...]\n\n return preds",
"def predict(self, x):\n return self.model.predict(x, batch_size=1, verbose=0)",
"def prediction(self, x):\n if len(x.shape)==1:\n x = np.reshape(x, (1, x.shape[0]))\n predict = self.model.predict(x)\n return predict",
"def prediction(self, x):\n t = self.model.predict(x)\n return t",
"def _make_predict(self):\n with context.context(training=False):\n prediction = self(*self.inputs)\n return theano.function(self.inputs, prediction)",
"def predict(self, x):\n self.eval()\n self.forward(x)\n predicted = np.argmax(self.inputs[-1], axis=1)\n return predicted",
"def prediction(self, x):\n if len(x.shape)==1:\n x = np.reshape(x, (1, x.shape[0]))\n predict = self.model.predict(x)\n\n return predict",
"def prediction(self, x):\n t = self.model.predict(x.reshape(1, -1))\n return t"
]
| [
"0.72369206",
"0.70280635",
"0.69705504",
"0.6882179",
"0.6841684",
"0.6813821",
"0.678869",
"0.65115666",
"0.65115666",
"0.65031374",
"0.6496032",
"0.6488619",
"0.6483028",
"0.6481794",
"0.6461958",
"0.64542216",
"0.64486",
"0.63823557",
"0.6380704",
"0.63725114",
"0.6371604",
"0.63679355",
"0.63411",
"0.6338089",
"0.62855023",
"0.6275981",
"0.6264944",
"0.6259316",
"0.624919",
"0.6224582"
]
| 0.8179348 | 0 |
Set the nn_module into train mode. | def train(self, mode: bool = True):
if self.nn_module.training != mode:
self.nn_module.train(mode) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_mode_train(self):\n self._set_mode('train')\n return self",
"def train(self):\n self.mode = \"train\"\n self.online_net.train()",
"def train(self):\n self.mode = \"train\"\n self.online_net.train()",
"def set_train_mode(training, mnet, hnet, hhnet, dis):\n for net in [mnet, hnet, hhnet, dis]:\n if net is not None:\n if training:\n net.train()\n else:\n net.eval()",
"def set_train(self):\n self.model.train()",
"def train(self):\n self.training = True",
"def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()",
"def train(self, mode=True, mc_dropout=False):\n self.training = mode\n for module_name, module in self.named_modules():\n module.training = mode\n if mc_dropout and not mode:\n if isinstance(module, nn.Dropout2d):\n # print(\"WARNING - nn.Module.train - {}\".format(module_name))\n module.training = True\n\n return self",
"def set_train(self):\n self.train()\n self.volatile = False",
"def train(self, mode: bool = True) -> None:\n super().train(mode=mode)\n if mode:\n self.mean_module = None\n self.covar_module = None\n self.likelihood = None\n self.task_covar_module = None",
"def train(self, mode=True):\n nn.Module.train(self, mode)\n\n if mode:\n # Set all bn layers in backbone to eval mode\n def set_bn_eval(m):\n classname = m.__class__.__name__\n if classname.find(\"BatchNorm\") != -1:\n m.eval()\n\n self.backbone.apply(set_bn_eval)",
"def set_module_trainable(module: nn.Module, mode: bool) -> None:\n for param in module.parameters():\n param.requires_grad = mode",
"def start_training(self):\n self.training = True",
"def _set_learning_phase(self, train: bool = False):\n if train:\n self.net_q.train()\n self.net_ps.train()\n self.net_k.train()\n else:\n self.net_q.eval()\n self.net_ps.eval()\n self.net_k.eval()",
"def set_train(self):\n self._phase = 'train'\n self.add_flags_recursive(training=True)\n return self",
"def model_switch_to_training(self):\n pass",
"def set_trainable(model, train):\r\n model.trainable = train\r\n for l in model.layers:\r\n l.trainable = train",
"def set_mode(self, mode):\n if mode == self._model_modes.INFERENCE:\n self.network.eval()\n else:\n self.network.train()\n return self",
"def training(self):\n self.training = True",
"def set_train(self):\n self.train()\n self.volatile = False\n self.scheduled_sampling = self.sample_prob != 0",
"def set_train(self):\n for m in self.models.values():\n m.train()",
"def set_mode(self, mode):\n if mode == 'train':\n self.net.train()\n elif mode == 'eval':\n self.net.eval()\n else:\n raise ValueError(\n \"Got invalid mode '{}'. Valid options are 'train' and 'eval'.\".format(mode))",
"def train(self, mode=True, freeze_bn=False):\n super(NetFeat, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False",
"def set_mode(self, mode):\n if mode == 'train':\n self.hidden = self._make_hidden(self.batch_size)\n elif mode == 'generate':\n self.hidden = self._make_hidden(1)",
"def set_mode(self, mode):\n if mode == 'train':\n self.hidden = self._make_hidden(self.batch_size)\n elif mode == 'generate':\n self.hidden = self._make_hidden(1)",
"def train(self, mode=True):\n super(SwinTransformer, self).train(mode)\n self._freeze_stages()",
"def train(self, mode: bool = True):\n T = super().train(mode=mode)\n if mode:\n self.graph_construction()\n return T",
"def set_trainable(model, toset):\n for layer in model.layers:\n layer.trainable = toset\n model.trainable = toset",
"def train(self, mode=True):\n super(TSN, self).train(mode)\n count = 0\n if self._enable_pbn:\n print(\"Freezing BatchNorm2D except the first one.\")\n for m in self.base_model.modules():\n # print('the type train model : {}'.format(type(m)))\n if isinstance(m, torch.nn.BatchNorm2d) or \\\n isinstance(m, linklink.nn.syncbn_layer.SyncBatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n # print('the freeze module: {} of {}th'.format(type(m), count))\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False",
"def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)"
]
| [
"0.8144728",
"0.7675277",
"0.7675277",
"0.7567097",
"0.75528985",
"0.74082536",
"0.7332675",
"0.7263821",
"0.7259717",
"0.72031873",
"0.7111367",
"0.70592844",
"0.7042016",
"0.7032377",
"0.7019192",
"0.6991392",
"0.69666255",
"0.6925678",
"0.6881183",
"0.681172",
"0.6764086",
"0.6661279",
"0.6614778",
"0.6605452",
"0.6605452",
"0.6495469",
"0.646692",
"0.64466393",
"0.6426861",
"0.642219"
]
| 0.8258922 | 0 |
Generate an empty configuration file with only a single empty Site policy | def create_empty_config_file():
config = {
"config": [
{
"site": {
"username": "",
"name": "",
"ip_address": "",
"password": "",
"local": "",
"use_https": ""
}
}
]
}
return config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_config_file_before(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\",\n \"provides\": [\n {\n \"contract_name\": \"contract-1\",\n },\n {\n \"contract_name\": \"contract-2\",\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def create_config_file_after(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\",\n \"provides\": [\n {\n \"contract_name\": \"contract-1\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def create_config_file(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite-local\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def create_config_file(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite-local\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out1\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n },\n {\n \"l3out\": {\n \"name\": \"l3out2\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def generate_settings():\r\n conf_file = os.path.join(os.path.dirname(base_settings.__file__),\r\n 'example', 'conf.py')\r\n conf_template = open(conf_file).read()\r\n default_url = 'http://salmon.example.com'\r\n site_url = raw_input(\"What will be the URL for Salmon? [{0}]\".format(\r\n default_url))\r\n site_url = site_url or default_url\r\n secret_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n api_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n output = conf_template.format(api_key=api_key, secret_key=secret_key,\r\n site_url=site_url)\r\n return output",
"def create_export_policy():\n config = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n return config",
"def generate_config(args):\n default_config = resource_string('webrpg', 'scripts/templates/default_config.txt').decode('utf-8')\n if args.sqla_connection_string:\n default_config = default_config.replace('%(sqlalchemy_url)s', args.sqla_connection_string)\n else:\n default_config = default_config.replace('%(sqlalchemy_url)s', get_user_parameter('SQL Alchemy Connection String', 'sqlite:///%(here)s/pyire_test.db'))\n\n with open(args.filename, 'w') as out_f:\n out_f.write(default_config)",
"def generate_config_template():\n lines = ['# Lines starting with # will be skipped.']\n lines.append('# Only one argument on each line.')\n lines.append('#-s This option is always assumed to be true.')\n lines.append('#-p')\n lines.append('#-m')\n lines.append('#-o')\n lines.append('#-c')\n lines.append('-l')\n lines.append('#-a')\n lines.append('#-d')\n\n with open('export_config.txt', 'wb') as f_new:\n f_new.write('\\r\\n'.join(lines))\n print 'Template generated. Edit this file as you please and call this script '\\\n 'with the -f option enabled.'",
"def generate_config_mixed_first(self):\n\n for model_name in self.profile_models[-1:]:\n del self.config['profile_models'][model_name]\n with open('config-mixed-first.yml', 'w+') as f:\n yaml.dump(self.config, f)",
"def create_site_config():\n config = {\n \"config\": [\n {\n \"site\": {\n \"username\": \"%s\" % SITE1_LOGIN,\n \"name\": \"Site1\",\n \"ip_address\": \"%s\" % SITE1_IPADDR,\n \"password\": \"%s\" % SITE1_PASSWORD,\n \"local\": \"True\",\n \"use_https\": \"False\"\n }\n },\n {\n \"site\": {\n \"username\": \"%s\" % SITE2_LOGIN,\n \"name\": \"Site2\",\n \"ip_address\": \"%s\" % SITE2_IPADDR,\n \"password\": \"%s\" % SITE2_PASSWORD,\n \"local\": \"False\",\n \"use_https\": \"False\"\n }\n }\n ]\n }\n return config",
"def create_settings_file():\n with open('./cfg/settings.cfg'.replace(\"/\", os.path.sep), 'w') as cfg:\n cfg.write('[report]\\nlogo = ./cfg/logo.png\\ncompany =\\nrecord =\\nunit =\\nexaminer =\\nnotes =\\n\\n[auth]\\ngmail = [email protected]\\npassw = yourpassword\\ndevid = 1234567887654321\\ncelnumbr = BackupPhoneNunmber\\n\\n[app]\\npkg = com.whatsapp\\nsig = 38a0f7d505fe18fec64fbf343ecaaaf310dbd799\\n\\n[client]\\npkg = com.google.android.gms\\nsig = 38918a453d07199354f8b19af05ec6562ced5788\\nver = 9877000'.replace(\"/\", os.path.sep))",
"def _createConfigFile(self):\n configFile = self._configFile()\n try:\n with open(configFile) as fh:\n pass\n except IOError:\n try:\n with open(configFile, 'w') as fh:\n fh.write(\"[settings]\\n\")\n fh.write(\"debug = false\\n\")\n fh.write(\"hidefilenames = false\\n\")\n except IOError:\n pass",
"def generateDefaultConfig(self):\n\n\t\t# Open config.ini in write mode\n\t\tf = open(self.fileName, \"w\")\n\n\t\t# Set keys to config object\n\t\tself.config.add_section(\"db\")\n\t\tself.config.set(\"db\", \"host\", \"localhost\")\n\t\tself.config.set(\"db\", \"username\", \"root\")\n\t\tself.config.set(\"db\", \"password\", \"\")\n\t\tself.config.set(\"db\", \"database\", \"ripple\")\n\t\tself.config.set(\"db\", \"pingtime\", \"600\")\n\n\t\tself.config.add_section(\"server\")\n\t\tself.config.set(\"server\", \"server\", \"tornado\")\n\t\tself.config.set(\"server\", \"host\", \"0.0.0.0\")\n\t\tself.config.set(\"server\", \"port\", \"5001\")\n\t\tself.config.set(\"server\", \"localizeusers\", \"1\")\n\t\tself.config.set(\"server\", \"outputpackets\", \"0\")\n\t\tself.config.set(\"server\", \"outputrequesttime\", \"0\")\n\t\tself.config.set(\"server\", \"timeoutlooptime\", \"100\")\n\t\tself.config.set(\"server\", \"timeouttime\", \"100\")\n\n\t\tself.config.add_section(\"flask\")\n\t\tself.config.set(\"flask\", \"threaded\", \"1\")\n\t\tself.config.set(\"flask\", \"debug\", \"0\")\n\t\tself.config.set(\"flask\", \"logger\", \"0\")\n\n\t\tself.config.add_section(\"ci\")\n\t\tself.config.set(\"ci\", \"key\", \"changeme\")\n\n\t\t# Write ini to file and close\n\t\tself.config.write(f)\n\t\tf.close()",
"def _write_default_config(self, force=False):\n\t\t\n\t\tif self.configfilepath is not None:\n\t\t\tlogger.debug(\"You use the existing config file %s, I don't have to write one.\" % \\\n (self._get_config_filepath()))\n\t\t\treturn\n\t\t\n\t\tif force or not os.path.exists(self._get_config_filepath()):\t\n\t\t\tp = subprocess.Popen([self.sexpath, \"-dd\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\t\tout, err = p.communicate()\n\t\t\tif err != \"\":\n\t\t\t\tlogger.warning(\"Ouch, SExtractor complains :\")\n\t\t\t\tlogger.warning(err)\n\t\t\tf = open(self._get_config_filepath(), 'w')\n\t\t\tf.write(out.decode(encoding='UTF-8'))\n\t\t\tf.close()\n\t\t\tlogger.debug(\"Wrote %s\" % (self._get_config_filepath()))\n\t\telse:\n\t\t\tlogger.debug(\"Default config file already exists, I don't overwrite it.\")",
"def create_diff_epg_config_file(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg2\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def write_default_config():\n # TODO: BROKEN!\n config_path = pathlib.Path(xdg.BaseDirectory.xdg_config_home) / \"awiesm_bc\"\n config_file = config_path / DEFAULT_CONFIG_FILENAME\n if not os.path.isdir(config_path):\n os.makedirs(config_path)\n\n if not os.path.isfile(config_file):\n # TODO: write file\n pass",
"def _CreateCfgFile():\n default_cfg = \"\"\"\nproject: \"fake_project\"\nzone: \"fake_zone\"\nstorage_bucket_name: \"fake_bucket\"\nclient_id: \"fake_client_id\"\nclient_secret: \"fake_client_secret\"\n\"\"\"\n return default_cfg",
"def create_default_config(self, parser):\n parser.add_section('irc')\n parser.set('irc', 'channels', '')\n \n # create the full path, and the file\n try:\n os.makedirs(self.config_dir_path, mode=0700)\n except OSError:\n pass\n file_resource = open(self.config_file_path, 'w')\n parser.write(file_resource)",
"def not_authorized(context):\n context.config_file = 'NON_EXISTENT_FILE.cfg'",
"def __build_empty_config(self):\n\n self.__config.add_section('IN_OUT')\n self.__config['IN_OUT']['source'] = 'Set Source Directory'\n self.__config['IN_OUT']['destination'] = 'Set Destination Directory'\n self.__save_config()\n\n self.__is_dirty = False\n self.__default = True",
"def _create_initial_configure_file():\n if not _are_components_configured():\n touch(INITIAL_CONFIGURE_FILE)",
"def create_license_policy_config_file(\n directory,\n name,\n description,\n allow_unknown_licenses,\n package_query_string,\n spdx_identifiers,\n on_violation_quarantine,\n):\n\n data = {\n \"name\": name,\n \"description\": description,\n \"spdx_identifiers\": list(spdx_identifiers),\n \"allow_unknown_licenses\": allow_unknown_licenses,\n \"package_query_string\": package_query_string,\n \"on_violation_quarantine\": on_violation_quarantine,\n }\n\n file_path = directory / \"LICENSE-POLICY-CONFIG.json\"\n file_path.write_text(str(json.dumps(data)))\n return file_path",
"def _update_site_configuration(self):\n self.site.configuration.site_values = {'THIRD_PARTY_AUTH_ONLY_DOMAIN': self.email_domain_name}\n self.site.configuration.save()",
"def test_basic_remove_policy(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n time.sleep(4)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg2'))\n\n config = self.create_site_config()\n self.write_config_file(config, args)\n collector.reload_config()\n\n time.sleep(4)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg2'))",
"def test_partly_default_configuration(self):\n self.write_configuration_file(\n '[spider]\\n'\n 'max_depth: 10\\n'\n 'crawl_interval: 2\\n'\n 'crawl_timeout: 10\\n'\n 'target_url: .*\\.(com|cn|net)$\\n'\n )\n configuration = mini_spider.parse_configuration(self.configuration_file_path)\n self.assertEqual(configuration.get('spider', 'url_list_file'), './urls')\n self.assertEqual(configuration.get('spider', 'output_directory'), './output')\n self.assertEqual(configuration.getint('spider', 'max_depth'), 10)\n self.assertEqual(configuration.getint('spider', 'crawl_interval'), 2)\n self.assertEqual(configuration.getint('spider', 'crawl_timeout'), 10)\n self.assertEqual(configuration.getint('spider', 'thread_count'), 8)\n self.assertEqual(configuration.get('spider', 'target_url'), '.*\\.(com|cn|net)$')",
"def check_create_settings_file(self):\n path = self.paths[\"settings_file\"]\n from django_swagger_utils.core.utils.check_path_exists import check_path_exists\n settings_file = check_path_exists(path)\n if not settings_file:\n settings_file_contents = \"# '%s' settings\" % self.app_name\n from django_swagger_utils.core.utils.write_to_file import write_to_file\n write_to_file(settings_file_contents, path)",
"def test_no_config_keyword(self):\n args = self.get_args()\n config = {\n \"site\": {\n \"username\": \"\",\n \"name\": \"\",\n \"ip_address\": \"\",\n \"password\": \"\",\n \"local\": \"\",\n \"use_https\": \"\"\n }\n }\n temp = sys.stdout\n fake_out = FakeStdio()\n sys.stdout = fake_out\n\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()\n\n execute_tool(args, test_mode=True)\n sys.stdout = temp\n self.assertTrue(fake_out.verify_output(['%% Invalid configuration file', '\\n']))",
"def __create_default_config(self):\n if not os.path.exists(self.__configfile):\n path=os.path.dirname(self.__configfile)\n try:\n os.makedirs(path)\n except:\n pass\n if os.path.exists(path):\n self.save(defaults=True)",
"def create_configfile():\n config = ConfigParser.ConfigParser()\n config.add_section('Common')\n config.set('Common', 'renewal days', 20)\n config.set('Common', 'delayed installation days', 5)\n config.set('Common', 'include chain', True)\n config.set('Common', 'account key', './config/key.pem')\n config.add_section('Load Balancer')\n config.set('Load Balancer', 'cluster', True)\n config.set('Load Balancer', 'Host 1', 'lb1.example.com')\n config.set('Load Balancer', 'Host 2', 'lb2.example.com')\n config.set('Load Balancer', 'username', 'admin')\n config.set('Load Balancer', 'password', 'password01')\n config.set('Load Balancer', 'datagroup', 'acme_responses_dg')\n config.set('Load Balancer', 'datagroup partition', 'Common')\n config.add_section('Certificate Authority')\n config.set('Certificate Authority', 'Directory URL',\n 'https://acme-v01.api.letsencrypt.org/directory')\n config.set('Certificate Authority', 'use proxy', False)\n config.set('Certificate Authority', 'proxy',\n 'http://proxy.example.com:8080')\n\n # As the config file contains password, we should be careful with permissions\n with os.fdopen(os.open(CONFIG_FILE, os.O_WRONLY | os.O_CREAT, 0o660), 'w') as config_file:\n config.write(config_file)",
"async def generate_default_config_file(self, server_id, owner_id):\n parser = configparser.ConfigParser()\n\n # Create each section that we need by default; future cogs\n # may need to handle writing code to modify the config to add sections\n\n parser.add_section('ServerSettings')\n parser.add_section('BotAdmins')\n parser.add_section('ConfigSettings')\n parser.add_section('RoleAssignment')\n parser.add_section('JoinPart')\n parser.add_section('BettingGame')\n parser.add_section('ApiCommands')\n\n parser.set('ServerSettings', 'owner_id', 'NOT_SET')\n parser.set('ServerSettings', 'server_id', 'NOT_SET')\n\n parser.set('BotAdmins', 'bot_admin_users', 'NOT_SET')\n parser.set('BotAdmins', 'bot_admin_roles', 'NOT_SET')\n\n parser.set('ConfigSettings', 'not_accepted_channel_id', 'NOT_SET')\n\n parser.set('RoleAssignment', 'enabled', 'false')\n parser.set('RoleAssignment', 'role_list', 'NOT_SET')\n parser.set('RoleAssignment', 'assignment_channel_id', 'NOT_SET')\n\n parser.set('JoinPart', 'member_join_enabled', 'false')\n parser.set('JoinPart', 'member_part_enabled', 'false')\n parser.set('JoinPart', 'welcome_channel_id', 'NOT_SET')\n parser.set('JoinPart', 'leave_channel_id', 'false')\n parser.set('JoinPart', 'welcome_message', 'Welcome to {server}\\'s Discord, {user}! Relax and have some fun!')\n parser.set('JoinPart', 'part_message', '{name} ({display_name}) has left the server.')\n parser.set('JoinPart', 'assign_role_enabled', 'false')\n parser.ser('JoinPart', 'role_assignment_id', 'NOT_SET')\n\n parser.set('BettingGame', 'minimum_bet', '10')\n parser.set('BettingGame', 'enabled', 'false')\n parser.set('BettingGame', 'bet_channel_id', 'NOT_SET')\n parser.set('BettingGame', 'helpme_cooldown', '86400')\n parser.set('BettingGame', 'helpme_minimum', '500')\n parser.set('BettingGame', 'force_multiple', '100')\n parser.set('BettingGame', 'helpme_start_min', '500')\n parser.set('BettingGame', 'helpme_bonus', '100')\n\n parser.set('ApiCommands', 'enabled', 'false')\n parser.set('ApiCommands', 'api_channel_id', 'NOT_SET')\n\n\n with open(\n '%s.ini' % (\n os.path.join(\n self.server_settings_path,\n str(server_id))), 'w'\n ) as configfile:\n parser.write(configfile)\n return await self.bot.say(\n \"Configuration file generated. You will need to \" \\\n \"configure the file to your desired settings.\"\n )"
]
| [
"0.69123816",
"0.6818789",
"0.6747462",
"0.66409326",
"0.59324014",
"0.580082",
"0.5746218",
"0.56806177",
"0.5652345",
"0.55335194",
"0.5495179",
"0.54804164",
"0.5454108",
"0.54512924",
"0.54390144",
"0.5415683",
"0.5353648",
"0.5330443",
"0.5325028",
"0.52674586",
"0.5241321",
"0.52397645",
"0.5210892",
"0.5187452",
"0.51699036",
"0.51506823",
"0.51451397",
"0.5118251",
"0.51142603",
"0.5108312"
]
| 0.68373185 | 1 |
Set up the remote site. Meant to be overridden by inheriting classes | def setup_remote_site(self):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_local_site(self):\n raise NotImplementedError",
"def setUp(self):\n self.setup_remote_site()\n self.setup_local_site()",
"def __init__(self):\n self.site = pywikibot.Site(u'commons', u'commons')\n self.generator = self.getGenerator()",
"def setup(self):\n\n setup_data = get_setup_data()\n\n self.base_url = setup_data['base_url']\n browser = setup_data['browser'].lower()\n if browser == 'chrome':\n self.driver = webdriver.Chrome()\n elif browser == 'firefox':\n self.driver = webdriver.Firefox()\n elif browser == 'phantomjs':\n self.driver = webdriver.PhantomJS()\n else:\n error = (\n '{0:s} is not a supported browser. Supported browsers: '\n 'Chrome, Firefox, PhantomJS'.format(browser)\n )\n\n self.log.error(error)\n raise ValueError(error)\n self.log.info(\n 'Setup data:\\nURL: {0}\\nBrowser: {1}'.format(\n self.base_url, browser\n )\n )\n self.log.info('Load main page')\n self.driver.get(self.base_url)",
"async def setup(self):\n load_base_templates()\n uris = URI.gather()\n for uri, resource in uris.items():\n methods = resource.methods\n if \"get\" not in methods:\n methods[\"get\"] = None\n\n for method in methods.keys():\n self.app.add_routes([\n getattr(aioweb, method)(uri, resource.process)\n ])\n self.app.add_routes([aioweb.get(\"/hello\", hello)])\n\n # TMP code\n max_age = 3600 * 24 * 365 # 1 year\n setup(self.app, PonyStorage(max_age=max_age))\n self.preparing_task = asyncio.create_task(self.prepare_web())",
"def setup_remote_site(self):\n # Create tenant, L3out with contract on site 2\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-remote')\n l3out = OutsideL3('l3out', tenant)\n epg = OutsideEPG('intersite-testsuite-app-epg', l3out)\n other_epg = OutsideEPG('other', l3out)\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)",
"def setup_browser_for_site(self, site):\n\t\t\n\t\t# for self.site property\n\t\tself.__site = site\n\t\t\n\t\t# clear \n\t\tself.last_visited = None\n\t\tself.cookies = CookieJar()\n\t\t\n\t\t# Create a connection object for plain HTTP and secure connections. HTTPlib does not open a connection\n\t\t# at this point, so we lose little if we never use one or other of these objects.\n\t\tself.reset() # makes the self.http and self.https",
"def setup_remote_site(self):\n # Create tenant, L3out with contract on site 2\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n l3out = OutsideL3('l3out', tenant)\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)",
"def setup_remote_site(self):\n # Create tenant, L3out with contract on site 2\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n l3out = OutsideL3('l3out', tenant)\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)",
"def setup_remote_site(self):\n # Create tenant, L3out with contract on site 2\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n l3out = OutsideL3('l3out', tenant)\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)",
"def setup_remote_site(self):\n # Create tenant, L3out with contract on site 2\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n l3out = OutsideL3('l3out', tenant)\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)",
"def setup_remote_site(self):\n # Create tenant, L3out with contract on site 2\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n l3out = OutsideL3('l3out', tenant)\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)",
"def setup_remote_site(self):\n # Create tenant, L3out with contract on site 2\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n l3out = OutsideL3('l3out', tenant)\n l3out2 = OutsideL3('l3out2', tenant)\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)",
"def setup_remote_site(self):\n # Create tenant, L3out with contract on site 2\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n l3out = OutsideL3('l3out', tenant)\n\n contract = Contract('contract-1', tenant)\n contract = Contract('contract-2', tenant)\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)",
"def setup_remote_site(self):\n # Create tenant, L3out with contract on site 2\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n l3out = OutsideL3('l3out', tenant)\n\n contract = Contract('contract-1', tenant)\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)",
"def setup_remote_site(self):\n # Create tenant, L3out with contract on site 2\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n l3out1 = OutsideL3('l3out1', tenant)\n l3out2 = OutsideL3('l3out2', tenant)\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)",
"def setUp(self):\n self.brow = webdriver.Firefox()\n staging_server = os.environ.get('STAGING_SERVER')\n if staging_server:\n self.live_server_url = \"http://\" + staging_server",
"def setup_remote_site(self):\n # Create tenant, L3out with contract on site 2\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-remote')\n l3out1 = OutsideL3('l3out1', tenant)\n l3out2 = OutsideL3('l3out2', tenant)\n epg1 = OutsideEPG('intersite-testsuite-app-epg', l3out1)\n other_epg = OutsideEPG('other', l3out1)\n epg2 = OutsideEPG('intersite-testsuite-app-epg', l3out2)\n\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)",
"def __init__(self, repo, website, host='0.0.0.0', port='5252',\r\n home=os.getcwd(), new_website=False, create_admin=False, **kwargs):\r\n if repo and website:\r\n super().__init__(repo=repo, home=home, **kwargs)\r\n\r\n # Website Deployment Information:\r\n self.website = website\r\n self.web_host = host\r\n self.web_port = port\r\n # Path to Flask's Web-Server Files\r\n self.website_path = self.flask / Path(self.website)\r\n\r\n self.Kitchen = Oven(repo=self.repo, user=self.user,\r\n website=self.website, output_dir=self.flask)\r\n logmsg = 'The Website Management class variables have been set.'\r\n self.managementlog.info(logmsg)\r\n\r\n if new_website is True:\r\n logmsg = 'The website cookie is being prepared for the Oven.'\r\n self.managementlog.info(logmsg)\r\n self.Kitchen.bake_the_website(host=self.web_host,\r\n port=self.web_port,\r\n website_path=self.website_path)",
"def __init__( self, site, debug=False, encoding=None, guess_encoding=False, requests_before_reconnect=0, proxy_must_match=None, print_requests=True):\n\t\tobject.__init__(self)\n\t\tself.debug = debug\n\t\tself.encoding = encoding\n\t\tself.guess_encoding = guess_encoding\n\t\tself.proxy_must_match = proxy_must_match # regular expression\n\t\tself.__proxy = None\n\t\t\n\t\tself.add_referer = False\n\t\tself.redirect_automatically = True\n\t\t\n\t\tself.print_requests = print_requests\n\t\t\n\t\tif requests_before_reconnect > 0:\n\t\t\tself.requests_before_reconnect = requests_before_reconnect\n\t\t\tself.requests_count = 1\n\t\telse:\n\t\t\tself.requests_before_reconnect = -1\n\t\t\n\t\tself.headers = {\n\t\t\t\"User-Agent\" : \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)\",\n\t\t}\n\t\t\n\t\tself.https = None\n\t\tself.http = None\n\t\t\n\t\t# pick_a_new_proxy_ip needs to access self.site to create HTTPConnect object\n\t\t# then setup_browser_for_site needs to set up properly\n\t\tself.__site = site\n\t\tself.pick_a_new_proxy_ip()\n\t\tself.setup_browser_for_site(site)",
"def _init_remote():\r\n require('path', provided_by = [staging])\r\n\r\n create_project_dir()\r\n deploy_nosyncdb()\r\n create_virtualenv()\r\n install_requirements()\r\n create_db()\r\n create_secret_settings()\r\n syncdb()\r\n createsuperuser()\r\n install_site()\r\n reload()",
"def setUp(self):\n self.driver = webdriver.PhantomJS()\n self.driver.get(self.get_server_url())",
"def server_link_setup(self):\n pass",
"def main(self):\n base_url = self.env.get(\"base_url\", BASE_URL)\n self.env[\"url\"] = self.get_opera_url(base_url)\n self.output(\"Found URL %s\" % self.env[\"url\"])",
"def setup(self, url, browser_config):\n\n # navigate to the front page\n browser.open_url(url)",
"def setup(self):\n\t\tpass",
"def setup( self ):",
"def setUp(self):\n self.browser = webdriver.Firefox()\n self.url = 'http://127.0.0.1:8000/'",
"def setup(self):\r\n pass",
"def __init__(self, site):\n self._site = site\n super(SingleResultsSite, self).__init__(self._site.URL, self._site.WebRetrieveDelay, self._site.Proxy,\n self._site.TargetType, self._site.ReportStringForResult,\n self._site.Target, self._site.UserAgent, self._site.FriendlyName,\n self._site.RegEx, self._site.FullURL, self._site.BotOutputRequested,\n self._site.ImportantPropertyString, self._site.Params,\n self._site.Headers, self._site.Method, self._site.PostData,\n site._verbose)\n self.postMessage(self.UserMessage + \" \" + self.FullURL)\n websitecontent = self.getContentList(self.getWebScrape())\n if websitecontent:\n self.addResults(websitecontent)"
]
| [
"0.7351865",
"0.71371996",
"0.6471106",
"0.6460886",
"0.6439247",
"0.64348316",
"0.64113367",
"0.635672",
"0.635672",
"0.635672",
"0.635672",
"0.635672",
"0.6330865",
"0.63245213",
"0.6315102",
"0.62966764",
"0.62934047",
"0.6286767",
"0.61604464",
"0.6138893",
"0.60801387",
"0.6069204",
"0.602028",
"0.601086",
"0.6003768",
"0.59863627",
"0.59569603",
"0.592489",
"0.5880228",
"0.58780575"
]
| 0.8395448 | 0 |
Set up the local site. Meant to be overridden by inheriting classes | def setup_local_site(self):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setUp(self):\n self.setup_remote_site()\n self.setup_local_site()",
"def setup_remote_site(self):\n raise NotImplementedError",
"def setup(self):\n\n setup_data = get_setup_data()\n\n self.base_url = setup_data['base_url']\n browser = setup_data['browser'].lower()\n if browser == 'chrome':\n self.driver = webdriver.Chrome()\n elif browser == 'firefox':\n self.driver = webdriver.Firefox()\n elif browser == 'phantomjs':\n self.driver = webdriver.PhantomJS()\n else:\n error = (\n '{0:s} is not a supported browser. Supported browsers: '\n 'Chrome, Firefox, PhantomJS'.format(browser)\n )\n\n self.log.error(error)\n raise ValueError(error)\n self.log.info(\n 'Setup data:\\nURL: {0}\\nBrowser: {1}'.format(\n self.base_url, browser\n )\n )\n self.log.info('Load main page')\n self.driver.get(self.base_url)",
"def setup(self):\n\t\tpass",
"async def setup(self):\n load_base_templates()\n uris = URI.gather()\n for uri, resource in uris.items():\n methods = resource.methods\n if \"get\" not in methods:\n methods[\"get\"] = None\n\n for method in methods.keys():\n self.app.add_routes([\n getattr(aioweb, method)(uri, resource.process)\n ])\n self.app.add_routes([aioweb.get(\"/hello\", hello)])\n\n # TMP code\n max_age = 3600 * 24 * 365 # 1 year\n setup(self.app, PonyStorage(max_age=max_age))\n self.preparing_task = asyncio.create_task(self.prepare_web())",
"def boot():\n\t\tcreate_project_url_dir(Spider.project_name)\n\t\tcreate_url_data(Spider.project_name, Spider.base_url)\n\t\tSpider.queue = file_to_set(Spider.queue_file)\n\t\tSpider.crawled = file_to_set(Spider.crawled_file)",
"def __init__(self):\n self.site = pywikibot.Site(u'commons', u'commons')\n self.generator = self.getGenerator()",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n context = Context('vrf', tenant)\n bd = BridgeDomain('bd', tenant)\n app = AppProfile('app', tenant)\n epg = EPG('epg1', app)\n epg2 = EPG('epg2', app)\n bd.add_context(context)\n epg.add_bd(bd)\n epg2.add_bd(bd)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup(self):\r\n pass",
"def setup( self ):",
"def setup_local_site(self):\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-local')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup_local_site(self):\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-local')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def setup(self):\n pass",
"def webinit():\n\troot = Root()\n\troot.player = Player()\n\troot.songs = Songs()\n\troot.albums = Albums()\n\troot.artists = Artists()\n\t\n\tapp = cherrypy.tree.mount(root, '/', 'data/cherrypy.config')\n\treturn app",
"def startUp(self):\n pass"
]
| [
"0.7307209",
"0.71960807",
"0.6618908",
"0.6584287",
"0.6563329",
"0.6558109",
"0.6512437",
"0.6484648",
"0.6484648",
"0.6484648",
"0.6484648",
"0.64676434",
"0.6448358",
"0.64482987",
"0.6445225",
"0.6445225",
"0.6424497",
"0.6424497",
"0.6424497",
"0.6424497",
"0.6424497",
"0.6424497",
"0.6424497",
"0.6424497",
"0.6424497",
"0.6424497",
"0.6424497",
"0.6424497",
"0.63729626",
"0.6331862"
]
| 0.82535917 | 0 |
Set up the test case. Set up the remote and local site. | def setUp(self):
self.setup_remote_site()
self.setup_local_site() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup(self):\n # Have to wait for a server connection before we\n # can run the test\n self.wait_for_server_connections(10)",
"def setUp(self):\n test_env_setup()",
"def setup_remote_site(self):\n raise NotImplementedError",
"def setUp(self):\n self.brow = webdriver.Firefox()\n staging_server = os.environ.get('STAGING_SERVER')\n if staging_server:\n self.live_server_url = \"http://\" + staging_server",
"def setUp(self):\n logging.debug('setting up')",
"def setUp(self):\n logging.debug('setting up')",
"def setup():\n require('hosts', 'project_path', provided_by=envs.ENVS)\n\n if not exists(env.project_path):\n abort(red('Project path ({project_path}) does not exist. '\n 'Create it on the server before continuing.'.format(**env)))\n\n with cd(env.project_path):\n run('mkdir -p api renderer conf markup_renderer')\n run('mkdir -p api/static api/uploads')\n\n make_release_folders('api')\n make_release_folders('renderer')",
"def _set_up():\n repl._setUp = self.setUp",
"def setUp(self):\n self.server_address = \"http://localhost:3030/$/\"\n self.request_address = \"http://localhost:3030/ds\"\n self.api = \"http://localhost:4032/\"\n self.version = \"0.2\"",
"def setUp(self):\n pyauto.PyUITest.setUp(self)\n\n webapp = self.InstallExtension(self.GetWebappPath())\n self.host.LaunchApp(webapp)\n self.account = self.GetPrivateInfo()['test_chromoting_account']",
"def setup(cls):\n super(TestNonVendorProject, cls).setup()\n cls.change_directory(Path(\"..\"))\n cls.agent_name = \"generic_buyer\"\n cls.run_cli_command(\n \"fetch\", \"fetchai/generic_buyer:0.30.5\", \"--alias\", cls.agent_name\n )\n cls.agents.add(cls.agent_name)\n cls.set_agent_context(cls.agent_name)",
"def setUp(self):\n self.setup_beets()",
"def setUp(self):\n self.hello_url = \"http://localhost:7000\"\n self.store_url = self.hello_url + \"/store\"\n self.session = requests.session()",
"def setup(self):\n\n setup_data = get_setup_data()\n\n self.base_url = setup_data['base_url']\n browser = setup_data['browser'].lower()\n if browser == 'chrome':\n self.driver = webdriver.Chrome()\n elif browser == 'firefox':\n self.driver = webdriver.Firefox()\n elif browser == 'phantomjs':\n self.driver = webdriver.PhantomJS()\n else:\n error = (\n '{0:s} is not a supported browser. Supported browsers: '\n 'Chrome, Firefox, PhantomJS'.format(browser)\n )\n\n self.log.error(error)\n raise ValueError(error)\n self.log.info(\n 'Setup data:\\nURL: {0}\\nBrowser: {1}'.format(\n self.base_url, browser\n )\n )\n self.log.info('Load main page')\n self.driver.get(self.base_url)",
"def _init_remote():\r\n require('path', provided_by = [staging])\r\n\r\n create_project_dir()\r\n deploy_nosyncdb()\r\n create_virtualenv()\r\n install_requirements()\r\n create_db()\r\n create_secret_settings()\r\n syncdb()\r\n createsuperuser()\r\n install_site()\r\n reload()",
"def setUp(self):\n\n self.driver = WebDriver(\n \"http://{0}:{1}/wd/hub\".format(swt.config.ADDRESS, swt.config.SELENIUM_SERVER_PORT),\n self._browser_capabilities,\n proxy=self.proxy.selenium_proxy()\n )\n swt.active_driver = self.driver",
"def setup(cls):\n cls.runner = CliRunner()\n cls.agent_name = \"myagent\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n # copy the 'packages' directory in the parent of the agent folder.\n shutil.copytree(Path(CUR_PATH, \"..\", \"packages\"), Path(cls.t, \"packages\"))\n\n os.chdir(cls.t)\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"init\", \"--author\", AUTHOR],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"create\", \"--local\", cls.agent_name],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n os.chdir(cls.agent_name)\n # add connection first time",
"def setup_package():\n\n global TEST_WORKSPACE\n TEST_WORKSPACE = env.get_workspace('authentication')\n\n os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE\n\n test_config = {}\n\n # Setup environment variables for the test cases.\n host_port_cfg = {'viewer_host': 'localhost',\n 'viewer_port': env.get_free_port(),\n 'viewer_product': 'authentication'}\n\n test_env = env.test_env(TEST_WORKSPACE)\n\n codechecker_cfg = {\n 'check_env': test_env,\n 'workspace': TEST_WORKSPACE,\n 'checkers': []\n }\n\n codechecker_cfg.update(host_port_cfg)\n\n codechecker_cfg['run_names'] = []\n\n test_config['codechecker_cfg'] = codechecker_cfg\n\n # Export configuration for the tests.\n env.export_test_cfg(TEST_WORKSPACE, test_config)\n\n # Enable authentication and start the CodeChecker server.\n env.enable_auth(TEST_WORKSPACE)\n print(\"Starting server to get results\")\n _start_server(codechecker_cfg, test_config, False)",
"def setup(cls):\n super(TestUpgradeProject, cls).setup()\n cls.change_directory(Path(\"..\"))\n cls.agent_name = \"generic_buyer\"\n cls.latest_agent_name = \"generic_buyer_latest\"\n cls.run_cli_command(\n \"--skip-consistency-check\",\n \"fetch\",\n \"fetchai/generic_buyer:0.30.5\",\n \"--alias\",\n cls.agent_name,\n )\n cls.run_cli_command(\n \"--skip-consistency-check\",\n \"fetch\",\n \"fetchai/generic_buyer:latest\",\n \"--alias\",\n cls.latest_agent_name,\n )\n cls.agents.add(cls.agent_name)\n cls.set_agent_context(cls.agent_name)",
"def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'remote',\n 'manifest.ini')\n TestRun.run_tests(self)",
"def setUp(self):\n\n self.host = 'http://www.weather.com.cn'\n self.ep_path = '/data/cityinfo'\n self.client = HttpClient()",
"def setup():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n setup_directories()\n setup_virtualenv()\n clone_repo()\n checkout_latest()\n install_requirements()\n install_apache_conf()\n deploy_to_s3()",
"def setup_method(self):\n self.project_dir = os.path.join(DIR, 'test-project')\n self.e2e = E2EEnv(self.project_dir)",
"def setUp(self):\n self.driver = webdriver.PhantomJS()\n self.driver.get(self.get_server_url())",
"def localhost_setup(request, integration_test_setup):\n git_command = request.param[0]\n configholder = request.param[1]\n target = request.param[2]\n get_localhost_repos(git_command, configholder, target)",
"def setUp(self):\n self.hass = get_test_home_assistant()\n self.assertTrue(setup_component(self.hass, remote.DOMAIN, {'remote': {\n 'platform': 'demo',\n }}))",
"def setUp(self):\n self.c = Client(host=\"localhost\")",
"def setUp(self):\n MainTests.setUp(self)",
"def test_setup(self):\n engine = Engine(self.config_file, self.api_token)\n engine.setup()",
"def setUp(self):\n\n PyFunceble.load_config(generate_directory_structure=False)\n\n self.domains = [\n \"google.com\",\n \"twitter.com\",\n \"github.com\",\n \"facebook.com\",\n \"hello.world\",\n \"world.hello\",\n ]"
]
| [
"0.7332431",
"0.7082246",
"0.70760673",
"0.70298827",
"0.6978908",
"0.6978908",
"0.69545174",
"0.69033223",
"0.6877691",
"0.6832449",
"0.68183964",
"0.6790898",
"0.6786601",
"0.677809",
"0.6768166",
"0.67653364",
"0.67524713",
"0.6714655",
"0.6707392",
"0.67031753",
"0.6702698",
"0.6699395",
"0.66993576",
"0.6695565",
"0.66861945",
"0.6679232",
"0.6675681",
"0.66720974",
"0.6663755",
"0.6658037"
]
| 0.8576229 | 0 |
Tear down the test case. Tear down the remote and local site. | def tearDown(self):
self.teardown_local_site()
self.teardown_remote_site()
time.sleep(2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def teardown_test(self):\n self.log.info('Tearing down the test case')\n self.iperf_server.stop()\n self.access_point.bridge.teardown(self.brconfigs)\n self.access_point.close()\n wputils.reset_host_interface(self.pkt_sender.interface)\n self.mon.usb('on')",
"def tearDown(self):\n self.log.debug(\"Started TearDown\")\n self.driver.quit()\n self.log.debug(\"Completed TearDown\")",
"def teardown(self):\r\n self.driver.quit()",
"def tear_down(self):\n self.driver.close()\n self.driver.quit()",
"def tearDown(self):\n test_env_teardown()",
"def teardown(self):\n self.log.info('Close browser')\n self.driver.quit()",
"def teardown(self):\n self.logger.info('Tearing down file server vm')\n self.local_env.execute('uninstall', task_retries=40,\n task_retry_interval=30)",
"def tearDownClass(cls):\n\n cls.httpd.shutdown()",
"def __exit__(self, exc_type, exc_val, exc_tb):\n self.server.stop()\n self.driver.quit()",
"def tearDownClass(cls):\n cls.driver.close()\n cls.driver.quit()",
"def tearDownClass(cls):\n cls.driver.close()\n cls.driver.quit()",
"def tearDown(self):\n self.testbed.deactivate()",
"def tearDown(self):\n self.testbed.deactivate()",
"def tearDown(self):\n\n self._checkJSErrors()\n swt.active_driver = None\n self.driver.quit()",
"def tearDown(self):\r\n self.driver.close()",
"def tearDown(self):\n self.driver.quit()",
"def tearDown(self):\n self.driver.quit()",
"def tearDown(self):\n self.driver.quit()",
"def tear_down_all(self):\n self.dut.send_expect(\"quit\", \"# \")\n time.sleep(2)\n self.dut.kill_all()",
"def tearDown(self):\n tests.utils.cleanup_environment()",
"def tearDown(self):\n tests.utils.cleanup_environment()",
"def _tearDown(self):\r\n\r\n if core.FW_conf['connection'].isLeader() and core.FW_conf['settings'].TestRun.BLTEnabledInFollower:\r\n executeInFollower(\"core.FW_conf['blt_ue'].stopCurrentMeasuring()\")\r\n\r\n # stop current measurement if battery is available\r\n if core.FW_conf['connection'].battery is not None and core.FW_conf['connection'].battery.isEnabled():\r\n core.FW_conf['connection'].battery.stopCurrentMeasuring()\r\n\r\n # skip tearDown if systemExit exception has occurred or\r\n # we are stopping execution or teardown skipping is wanted\r\n if not self._raiseSystemExit and not core.FW_conf['should_stop']:\r\n debug.out(\"MarbleTestCase tearDown\")\r\n\r\n self.logApply(core.FW_conf['connection']._tearDown, self)\r\n\r\n for remote in core.FW_conf['remote_connection']:\r\n self.logApply(remote._tearDown, self)",
"def tearDown(self):\r\n self.app.application_close(self.util.client, self.app_name)\r\n\r\n self.common.generate_report(self.util.client, False)\r\n # Releases the client so that other clients can approach the agent in the near future.\r\n self.common.release_client(self.util.client)\r\n self.logger.info(\"==============Results=================\")\r\n self.logger.info(\"Number of Strings verified: \" + str(len(Config.results_list)/2))\r\n for i in range(0, len(Config.results_list), 2):\r\n self.logger.info(str(Config.results_list[i]) + \"{:>36}\".format('=====> ')\r\n + str(Config.results_list[i+1]))\r\n self.logger.info(\"Testcase tear-down: COMPLETED\")",
"def shutdown(self):\n # TODO: Build a certificate chain so we can verify our localhost and remove the verify=False workaround.\n requests.get('{local_server_address}/shutdown'.format(local_server_address=self.local_server_address),\n verify=False)",
"def tearDown(self):\n logging.debug('tearing down')",
"def tearDown(self):\n logging.debug('tearing down')",
"def tear_down(self):\n self.destroy_env()\n self.dut.kill_all()",
"def tearDown(self):\n\n self.ssx.close()\n\tself.ether_linux.close()",
"def tearDown(self):\n self.hass.stop()",
"def tearDown(self):\n self.hass.stop()"
]
| [
"0.7534872",
"0.73962086",
"0.7370009",
"0.73310035",
"0.7247043",
"0.7140761",
"0.71196264",
"0.7116654",
"0.7015063",
"0.696482",
"0.696482",
"0.69535244",
"0.69535244",
"0.6951393",
"0.6949726",
"0.689157",
"0.689157",
"0.689157",
"0.688593",
"0.6884048",
"0.6884048",
"0.68794614",
"0.68651915",
"0.6816115",
"0.68147385",
"0.68147385",
"0.6813405",
"0.68064916",
"0.68051463",
"0.68051463"
]
| 0.81366926 | 0 |
Generate a basic configuration containing the local and remote site policies. Actual site credentials are set in global variables imported from multisite_test_credentials | def create_site_config():
config = {
"config": [
{
"site": {
"username": "%s" % SITE1_LOGIN,
"name": "Site1",
"ip_address": "%s" % SITE1_IPADDR,
"password": "%s" % SITE1_PASSWORD,
"local": "True",
"use_https": "False"
}
},
{
"site": {
"username": "%s" % SITE2_LOGIN,
"name": "Site2",
"ip_address": "%s" % SITE2_IPADDR,
"password": "%s" % SITE2_PASSWORD,
"local": "False",
"use_https": "False"
}
}
]
}
return config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_config_file(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite-local\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def create_config_file_before(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\",\n \"provides\": [\n {\n \"contract_name\": \"contract-1\",\n },\n {\n \"contract_name\": \"contract-2\",\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def create_config_file(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite-local\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out1\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n },\n {\n \"l3out\": {\n \"name\": \"l3out2\",\n \"tenant\": \"intersite-testsuite-remote\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def generate_settings():\r\n conf_file = os.path.join(os.path.dirname(base_settings.__file__),\r\n 'example', 'conf.py')\r\n conf_template = open(conf_file).read()\r\n default_url = 'http://salmon.example.com'\r\n site_url = raw_input(\"What will be the URL for Salmon? [{0}]\".format(\r\n default_url))\r\n site_url = site_url or default_url\r\n secret_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n api_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n output = conf_template.format(api_key=api_key, secret_key=secret_key,\r\n site_url=site_url)\r\n return output",
"def create_config_file_after(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\",\n \"provides\": [\n {\n \"contract_name\": \"contract-1\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def get_client_settings_env(**_):\r\n username = os.environ.get('SL_USERNAME')\r\n api_key = os.environ.get('SL_API_KEY')\r\n proxy = os.environ.get('https_proxy')\r\n\r\n config = {'proxy': proxy}\r\n if username and api_key:\r\n config['auth'] = BasicAuthentication(username, api_key)\r\n return config",
"def create_empty_config_file():\n config = {\n \"config\": [\n {\n \"site\": {\n \"username\": \"\",\n \"name\": \"\",\n \"ip_address\": \"\",\n \"password\": \"\",\n \"local\": \"\",\n \"use_https\": \"\"\n }\n }\n ]\n }\n return config",
"def create_local_settings():\n\n require('environment', provided_by=env.environments)\n _load_passwords(['database_password'])\n template = os.path.join(env.templates_dir, 'local_settings.py')\n destination = os.path.join(env.project_root, 'local_settings.py')\n _upload_template(template, destination, context=env, user=env.deploy_user)",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n tenant.mark_as_deleted()\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)\n resp = tenant.push_to_apic(site2)\n self.assertTrue(resp.ok)\n\n time.sleep(2)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg1 = EPG('epg', app)\n epg2 = EPG('epg2', app)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)\n time.sleep(2)",
"def get_generated_config(self, auth_provider: KeyProvider, secret_key):\n\n generated_config = {\n 'jupyterhub': {\n 'proxy': {\n 'https': {\n 'hosts': [self.spec['domain']]\n }\n },\n 'ingress': {\n 'hosts': [self.spec['domain']],\n 'tls': [\n {\n 'secretName': 'https-auto-tls',\n 'hosts': [self.spec['domain']]\n }\n ]\n\n },\n 'singleuser': {\n # If image_repo isn't set, just have an empty image dict\n 'image': {'name': self.cluster.spec['image_repo']} if 'image_repo' in self.cluster.spec else {},\n },\n 'hub': {\n 'config': {},\n 'initContainers': [\n {\n 'name': 'templates-clone',\n 'image': 'alpine/git',\n 'args': [\n 'clone',\n '--',\n 'https://github.com/2i2c-org/pilot-homepage',\n '/srv/repo',\n ],\n 'securityContext': {\n 'runAsUser': 1000,\n 'allowPrivilegeEscalation': False,\n 'readOnlyRootFilesystem': True,\n },\n 'volumeMounts': [\n {\n 'name': 'custom-templates',\n 'mountPath': '/srv/repo'\n }\n ]\n }\n ],\n 'extraContainers': [\n {\n 'name': 'templates-sync',\n 'image': 'alpine/git',\n 'workingDir': '/srv/repo',\n 'command': ['/bin/sh'],\n 'args': [\n '-c',\n dedent(\n f'''\\\n while true; do git fetch origin;\n if [[ $(git ls-remote --heads origin {self.spec[\"name\"]} | wc -c) -ne 0 ]]; then\n git reset --hard origin/{self.spec[\"name\"]};\n else\n git reset --hard origin/master;\n fi\n sleep 5m; done\n '''\n )\n ],\n 'securityContext': {\n 'runAsUser': 1000,\n 'allowPrivilegeEscalation': False,\n 'readOnlyRootFilesystem': True,\n },\n 'volumeMounts': [\n {\n 'name': 'custom-templates',\n 'mountPath': '/srv/repo'\n }\n ]\n }\n ],\n 'extraVolumes': [\n {\n 'name': 'custom-templates',\n 'emptyDir': {}\n }\n ],\n 'extraVolumeMounts':[\n {\n 'mountPath': '/usr/local/share/jupyterhub/custom_templates',\n 'name': 'custom-templates',\n 'subPath': 'templates'\n },\n {\n 'mountPath': '/usr/local/share/jupyterhub/static/extra-assets',\n 'name': 'custom-templates',\n 'subPath': 'extra-assets'\n }\n ]\n }\n },\n }\n #\n # Allow explicilty ignoring auth0 setup\n if self.spec['auth0'].get('enabled', True):\n # Auth0 sends users back to this URL after they authenticate\n callback_url = f\"https://{self.spec['domain']}/hub/oauth_callback\"\n # Users are redirected to this URL after they log out\n logout_url = f\"https://{self.spec['domain']}\"\n client = auth_provider.ensure_client(\n name=self.spec['auth0'].get('application_name', f\"{self.cluster.spec['name']}-{self.spec['name']}\"),\n callback_url=callback_url,\n logout_url=logout_url,\n connection_name=self.spec['auth0']['connection'],\n connection_config=self.spec['auth0'].get(self.spec['auth0']['connection'], {}),\n )\n # FIXME: We're hardcoding Auth0OAuthenticator here\n # We should *not*. We need dictionary merging in code, so\n # these can all exist fine.\n generated_config['jupyterhub']['hub']['config']['Auth0OAuthenticator'] = auth_provider.get_client_creds(client, self.spec['auth0']['connection'])\n\n return self.apply_hub_template_fixes(generated_config, secret_key)",
"def generate_config(context):\n\n\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n\n network = context.properties.get('networkURL', generate_network_uri(\n project_id,\n context.properties.get('network','')\n ))\n target_vpn_gateway = context.env['name'] + '-tvpng'\n esp_rule = context.env['name'] + '-esp-rule'\n udp_500_rule = context.env['name'] + '-udp-500-rule'\n udp_4500_rule = context.env['name'] + '-udp-4500-rule'\n vpn_tunnel = context.env['name'] + '-vpn'\n router_vpn_binding = context.env['name'] + '-router-vpn-binding'\n resources = []\n if 'ipAddress' in context.properties:\n ip_address = context.properties['ipAddress']\n static_ip = ''\n else:\n static_ip = context.env['name'] + '-ip'\n resources.append({\n # The reserved address resource.\n 'name': static_ip,\n # https://cloud.google.com/compute/docs/reference/rest/v1/addresses\n 'type': 'gcp-types/compute-v1:addresses',\n 'properties': {\n 'name': properties.get('name', static_ip),\n 'project': project_id,\n 'region': context.properties['region']\n }\n })\n ip_address = '$(ref.' + static_ip + '.address)'\n\n resources.extend([\n {\n # The target VPN gateway resource.\n 'name': target_vpn_gateway,\n # https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways\n 'type': 'gcp-types/compute-v1:targetVpnGateways',\n 'properties':\n {\n 'name': properties.get('name', target_vpn_gateway),\n 'project': project_id,\n 'network': network,\n 'region': context.properties['region'],\n }\n },\n {\n # The forwarding rule resource for the ESP traffic.\n 'name': esp_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-esp'.format(properties.get('name')) if 'name' in properties else esp_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'ESP',\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 4500.\n 'name': udp_4500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-4500'.format(properties.get('name')) if 'name' in properties else udp_4500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 4500,\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 500\n 'name': udp_500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-500'.format(properties.get('name')) if 'name' in properties else udp_500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 500,\n 'region': context.properties['region'],\n 'target': '$(ref.' 
+ target_vpn_gateway + '.selfLink)',\n }\n },\n\n ])\n router_url_tag = 'routerURL'\n router_name_tag = 'router'\n\n if router_name_tag in context.properties:\n router_url = context.properties.get(router_url_tag, generate_router_uri(\n context.env['project'],\n context.properties['region'],\n context.properties[router_name_tag]))\n # Create dynamic routing VPN\n resources.extend([\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n # https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties':\n {\n 'name': properties.get('name', vpn_tunnel),\n 'project': project_id,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'router': router_url,\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)'\n },\n 'metadata': {\n 'dependsOn': [esp_rule,\n udp_500_rule,\n udp_4500_rule]\n }\n }])\n else:\n # Create static routing VPN\n resources.append(\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties': {\n 'name': vpn_tunnel,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)',\n 'localTrafficSelector':\n context.properties['localTrafficSelector'],\n 'remoteTrafficSelector':\n context.properties['remoteTrafficSelector'],\n\n },\n 'metadata': {\n 'dependsOn': [esp_rule, udp_500_rule, udp_4500_rule]\n }\n },\n )\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'targetVpnGateway',\n 'value': target_vpn_gateway\n },\n {\n 'name': 'staticIp',\n 'value': static_ip\n },\n {\n 'name': 'espRule',\n 'value': esp_rule\n },\n {\n 'name': 'udp500Rule',\n 'value': udp_500_rule\n },\n {\n 'name': 'udp4500Rule',\n 'value': udp_4500_rule\n },\n {\n 'name': 'vpnTunnel',\n 'value': vpn_tunnel\n },\n {\n 'name': 'vpnTunnelUri',\n 'value': '$(ref.'+vpn_tunnel+'.selfLink)'\n }\n ]\n }",
"def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app1 = AppProfile('app1', tenant)\n epg1 = EPG('epg1', app1)\n app2 = AppProfile('app2', tenant)\n epg2 = EPG('epg2', app2)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def GenerateConfig(context):\n\n resources = [\n {\n 'name': 'auth_cloud_sql_client_to_cloud_sql_proxy_sa',\n 'type': 'gcp-types/cloudresourcemanager-v1:virtual.projects.iamMemberBinding',\n 'properties': {\n 'resource': context.env['project'],\n 'role': 'roles/cloudsql.client',\n 'member': 'serviceAccount:$(ref.cloud-sql-proxy-service-acc.email)'\n },\n }\n ]\n return {'resources': resources}",
"def main(global_config, **settings):\n\n auth_policy = AuthenticationStackPolicy()\n policy_array = []\n\n main_policy = AuthTktAuthenticationPolicy(settings['auth.main.secret'], timeout=1800 * 60,\n cookie_name=settings['auth.main.cookie'])\n auth_policy.add_policy('main', main_policy)\n policy_array.append({'name': 'main', 'policy': main_policy})\n\n assistant_policy = AuthTktAuthenticationPolicy(settings['auth.assistant.secret'], timeout=1800 * 60,\n cookie_name=settings['auth.assistant.cookie'])\n auth_policy.add_policy('assistant', assistant_policy)\n policy_array.append({'name': 'assistant', 'policy': assistant_policy})\n\n # authn_policy = AuthTktAuthenticationPolicy(settings['auth.secret'], cookie_name='formshare_auth_tkt')\n authz_policy = ACLAuthorizationPolicy()\n config = Configurator(settings=settings, authentication_policy=auth_policy,\n authorization_policy=authz_policy)\n\n apppath = os.path.dirname(os.path.abspath(__file__))\n\n config.include('.models')\n # Load and configure the host application\n load_environment(settings, config, apppath, policy_array)\n return config.make_wsgi_app()",
"def _create_config(self, enterprise=False, with_local_site=False,\n run_manually=False):\n choice = ReviewRequestRepositoriesChoice()\n\n condition_set = ConditionSet(conditions=[\n Condition(choice=choice,\n operator=choice.get_operator('any'))\n ])\n\n if with_local_site:\n local_site = self.get_local_site(name=self.local_site_name)\n else:\n local_site = None\n\n config = self.integration.create_config(name='Config 1',\n enabled=True,\n local_site=local_site)\n config.set('conditions', condition_set.serialize())\n config.set('travis_yml', 'script:\\n python ./tests/runtests.py')\n config.set('branch_name', 'review-requests')\n config.set('run_manually', run_manually)\n\n if enterprise:\n config.set('travis_endpoint', TravisAPI.ENTERPRISE_ENDPOINT)\n config.set('travis_custom_endpoint', 'https://travis.example.com/')\n else:\n config.set('travis_endpoint', TravisAPI.OPEN_SOURCE_ENDPOINT)\n\n config.save()\n\n return config",
"def setup_local_site(self):\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-local')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup_local_site(self):\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite-local')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def create_export_policy():\n config = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n return config",
"def non_sensitive_config_dict(self):\n config = {\n \"BASE\": \"https://example.com\",\n \"COOKIE_STATE_NAME\": \"TEST_STATE\",\n \"BACKEND_MODULES\": [],\n \"FRONTEND_MODULES\": [],\n \"INTERNAL_ATTRIBUTES\": {\"attributes\": {}}\n }\n return config",
"def qa():\n env.config_file = 'config_production.py'\n env.hosts = ['[email protected]:34165']\n env.host_type = 'qa'\n env.user = 'ombu'\n env.host_webserver_user = 'www-data'\n env.host_site_path = '/mnt/main/qa/qa2/public'",
"def credentials():\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = (os.environ.get('OS_TENANT_NAME') or\n os.environ.get('OS_PROJECT_NAME'))\n auth_url = os.environ.get('OS_AUTH_URL')\n\n config = configparser.RawConfigParser()\n if config.read(_CREDS_FILE):\n username = username or config.get('admin', 'user')\n password = password or config.get('admin', 'pass')\n tenant_name = tenant_name or config.get('admin', 'tenant')\n auth_url = auth_url or config.get('auth', 'uri')\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'uri': auth_url\n }",
"def setup_remote_site(self):\n raise NotImplementedError",
"def get_site_config(sites_path=None, site_path=None, force=False):\n\tconfig = {}\n\n\tsites_path = sites_path or getattr(local, \"sites_path\", None)\n\tsite_path = site_path or getattr(local, \"site_path\", None)\n\n\tif sites_path:\n\t\tcommon_site_config = os.path.join(sites_path, \"common_site_config.json\")\n\t\tif os.path.exists(common_site_config):\n\t\t\tconfig.update(get_file_json(common_site_config))\n\n\tif site_path:\n\t\tsite_config = os.path.join(site_path, \"site_config.json\")\n\t\tif os.path.exists(site_config):\n\t\t\tconfig.update(get_file_json(site_config))\n\t\telif local.site and not local.flags.new_site:\n\t\t\tprint(f\"{local.site} does not exist\")\n\t\t\tsys.exit(1)\n\treturn get_slotted_dict(config, extra_slots=[\n\t\t'admin_password',\n\t\t'developer_mode',\n\t])",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def setup_local_site(self):\n # create Tenant, App, EPG on site 1\n site1 = Session(SITE1_URL, SITE1_LOGIN, SITE1_PASSWORD)\n resp = site1.login()\n self.assertTrue(resp.ok)\n\n tenant = Tenant('intersite-testsuite')\n app = AppProfile('app', tenant)\n epg = EPG('epg', app)\n\n resp = tenant.push_to_apic(site1)\n self.assertTrue(resp.ok)",
"def get_config(site='self'):\n path='/sites/%s/configuration' % (site)\n return _api_request('GET', path)",
"def get_config():\n\n return {\n 'ADMIN_USERNAME': env.get('ECSTEST_ADMIN_USERNAME', 'username'),\n 'ADMIN_PASSWORD': env.get('ECSTEST_ADMIN_PASSWORD', 'password'),\n 'TOKEN': env.get('ECSTEST_TOKEN', None),\n 'CONTROL_ENDPOINT': env.get(\n 'ECSTEST_CONTROL_ENDPOINT', 'https://127.0.0.1:4443'\n ),\n 'TOKEN_ENDPOINT': env.get(\n 'ECSTEST_CONTROL_TOKEN_ENDPOINT', 'https://127.0.0.1:4443/login'\n ),\n 'ALT_CONTROL_ENDPOINT': env.get(\n 'ECSTEST_ALT_CONTROL_ENDPOINT',\n env.get('ECSTEST_CONTROL_ENDPOINT',\n 'https://127.0.0.1:4443')),\n 'ALT_TOKEN_ENDPOINT': env.get(\n 'ECSTEST_ALT_CONTROL_TOKEN_ENDPOINT',\n env.get('ECSTEST_CONTROL_TOKEN_ENDPOINT',\n 'https://127.0.0.1:4443/login'),\n ),\n 'VERIFY_SSL': _env_to_bool('ECSTEST_VERIFY_SSL', 0),\n 'REQUEST_TIMEOUT': float(env.get('ECSTEST_REQUEST_TIMEOUT', 15.0)),\n 'TOKEN_FILENAME': env.get(\n 'ECSTEST_TOKEN_FILENAME', '/tmp/ecstest.token'\n ),\n 'CACHE_TOKEN': _env_to_bool('ECSTEST_CACHE_TOKEN', 1),\n 'AUTH_TOKEN_MIN_LENGTH': env.get('ECSTEST_AUTH_TOKEN_MIN_LENGTH', 1),\n 'AUTH_TOKEN_MAX_LENGTH': env.get('ECSTEST_AUTH_TOKEN_MAX_LENGTH', 512),\n 'NAMESPACE': env.get('ECSTEST_NAMESPACE', 'namespace1'),\n 'MAX_LOGIN_TIME': env.get('ECSTEST_MAX_LOGIN_TIME', 3),\n 'ACCESS_SSL': _env_to_bool('ECSTEST_ACCESS_SSL', 0),\n 'ACCESS_SERVER': env.get('ECSTEST_ACCESS_SERVER', 'localhost'),\n 'ALT_ACCESS_SERVER': env.get(\n 'ECSTEST_ALT_ACCESS_SERVER',\n env.get('ECSTEST_ACCESS_SERVER', 'localhost')\n ),\n 'ACCESS_PORT': int(env.get('ECSTEST_ACCESS_PORT', 3128)),\n 'ACCESS_KEY': env.get('ECSTEST_ACCESS_KEY', 'mykey'),\n 'ACCESS_SECRET': env.get('ECSTEST_ACCESS_SECRET', 'mysecret'),\n 'ALT_ACCESS_KEY': env.get(\n 'ECSTEST_ALT_ACCESS_KEY',\n env.get('ECSTEST_ACCESS_KEY', 'mykey')\n ),\n 'ALT_ACCESS_SECRET': env.get(\n 'ECSTEST_ALT_ACCESS_SECRET',\n env.get('ECSTEST_ACCESS_SECRET', 'mysecret')\n ),\n 'VERBOSE_OUTPUT': _env_to_bool('ECSTEST_VERBOSE_OUTPUT', 0),\n 'TEST_TARGET': env.get('ECSTEST_TEST_TARGET', constants.TARGET_AWSS3),\n 'TEST_TYPE': env.get(\n 'ECSTEST_TEST_TYPE', constants.TYPE_COMPATIBILITY\n ),\n 'DNS_BUCKET_NAMING_CONVENTION': _env_to_bool(\n 'ECSTEST_DNS_BUCKET_NAMING_CONVENTION', 0\n ),\n 'NODES_PER_SITE': int(env.get('ECSTEST_NODES_PER_SITE', 1)),\n 'RUN_DISABLED': _env_to_bool('ECSTEST_RUN_DISABLED'),\n 'REUSE_BUCKET_NAME': env.get('ECSTEST_REUSE_BUCKET_NAME'),\n }"
]
| [
"0.653249",
"0.6490301",
"0.63971806",
"0.63197035",
"0.63190347",
"0.5756652",
"0.5688375",
"0.5669199",
"0.5642637",
"0.56357676",
"0.5627176",
"0.5614128",
"0.5578653",
"0.556191",
"0.556073",
"0.5556995",
"0.55450904",
"0.55450904",
"0.5543221",
"0.5520929",
"0.551141",
"0.5489489",
"0.54878974",
"0.54721606",
"0.5444359",
"0.5444359",
"0.5444359",
"0.5444359",
"0.5439604",
"0.5413305"
]
| 0.6967031 | 0 |
Verify that the remote site has the entry | def verify_remote_site_has_entry(self, mac, ip, tenant_name, l3out_name, remote_epg_name):
site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
resp = site2.login()
self.assertTrue(resp.ok)
query = ('/api/mo/uni/tn-%s/out-%s/instP-%s.json?query-target=children' % (tenant_name,
l3out_name,
remote_epg_name))
resp = site2.get(query)
self.assertTrue(resp.ok)
found = False
for item in resp.json()['imdata']:
if 'l3extSubnet' in item:
if item['l3extSubnet']['attributes']['ip'] == ip + '/32':
found = True
break
if not found:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify(self):\n if self.geturl():\n return True\n return False",
"def check_post(self, url, info):\r\n \r\n test = requests.get(url, headers = self.headers).json()['results']\r\n if info == test:\r\n return True\r\n return False",
"def _urlcheck(self):\n if (self['.managerhost'] and self['.settingurl'] and self['.guid']):\n return True\n else:\n return False",
"def check_if_exist(self,url):\r\n\t\t\"\"\" verefier si un lien existe \"\"\"\r\n\t\trequest = mechanize.Request(url)\r\n\t\tBAD_REQ = [400,401,404]\r\n\t\ttry :\r\n\t\t\tresponse = mechanize.urlopen(request)\r\n\t\t\tif response.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True\r\n\t\texcept urllib2.HTTPError, error:\r\n\t\t\tif error.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True",
"def verify_remote_site_has_entry_after(self, mac, ip):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out.json?query-target=subtree')\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n # Look for l3extInstP\n found = False\n for item in resp.json()['imdata']:\n if 'l3extInstP' in item:\n if item['l3extInstP']['attributes']['name'] == 'intersite-testsuite-app-epg':\n found = True\n break\n if not found:\n return False\n\n # Verify that the l3extInstP is providing the contract\n found_contract1 = False\n found_contract2 = False\n for item in resp.json()['imdata']:\n if 'fvRsProv' in item:\n if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-1':\n found_contract1 = True\n if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-2':\n found_contract2 = True\n if not found_contract1 or found_contract2:\n return False\n\n # Look for l3extSubnet\n query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out'\n '/instP-intersite-testsuite-app-epg.json?query-target=subtree')\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n # Look for l3extSubnet\n found = False\n for item in resp.json()['imdata']:\n if 'l3extSubnet' in item:\n if item['l3extSubnet']['attributes']['ip'] == ip + '/32':\n found = True\n break\n if not found:\n return False\n return True",
"def verify_remote_site_has_entry_before(self, mac, ip):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out.json?query-target=subtree')\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n # Look for l3extInstP\n found = False\n for item in resp.json()['imdata']:\n if 'l3extInstP' in item:\n if item['l3extInstP']['attributes']['name'] == 'intersite-testsuite-app-epg':\n found = True\n break\n if not found:\n return False\n\n # Verify that the l3extInstP is providing the contracts\n found_contract1 = False\n found_contract2 = False\n for item in resp.json()['imdata']:\n if 'fvRsProv' in item:\n if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-1':\n found_contract1 = True\n if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-2':\n found_contract2 = True\n if not found_contract1 or not found_contract2:\n return False\n\n # Look for l3extSubnet\n query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out'\n '/instP-intersite-testsuite-app-epg.json?query-target=subtree')\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n # Look for l3extSubnet\n found = False\n for item in resp.json()['imdata']:\n if 'l3extSubnet' in item:\n if item['l3extSubnet']['attributes']['name'] == ip:\n found = True\n break\n if not found:\n return False\n return True",
"def check_url(url):\n return get_svninfo(url) != {}",
"def testRemote(self):\n try:\n remoteLocator = self.__httpsFileUrl\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n ok = self.__fileU.exists(remoteLocator)\n self.assertTrue(ok)\n size = self.__fileU.size(remoteLocator)\n self.assertGreaterEqual(size, 1000)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def _compare_remote_url(self, remote, url):\n\n if url != self._remote_get_url(remote):\n actual_url = self._remote_get_url(remote)\n message = fmt.remote_already_exists_error(remote, url, actual_url)\n self._print(message)\n self._exit(message)",
"def checkForURL(self, data):\n \n moduleCoordinator.ModuleCoordinator().addEvent(moduleCoordinator.URL_EVENT, data, self.hash)",
"def degruyterCheckSite(url):\n dgtestPhrase = 'Licensed Access'\n dgtestPhrase2 = 'viewbooktoc'\n\n # urltoCheck = input(\"\\n what is the URL? \\n\")\n\n urltoCheck = url\n\n r = requests.get(urltoCheck)\n rResult = r.text\n\n dgoutcome = 0\n if (dgtestPhrase in rResult) and (dgtestPhrase2 in rResult):\n dgoutcome = 1\n\n return dgoutcome",
"def verify_remote_site_has_policy(self, tenant_name, l3out_name, instp_name):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n query = ('/api/mo/uni/tn-%s/out-%s/instP-%s.json' % (tenant_name, l3out_name, instp_name))\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n found = False\n for item in resp.json()['imdata']:\n if 'l3extInstP' in item:\n found = True\n break\n if not found:\n return False\n return True",
"def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']",
"def _check_field_site_url(self, doi: Doi):\n logger.debug(\"doi,site_url: %s,%s\", doi.doi, doi.site_url)\n\n if doi.site_url:\n try:\n response = requests.get(doi.site_url, timeout=10)\n status_code = response.status_code\n logger.debug(\"from_request status_code,site_url: %s,%s\", status_code, doi.site_url)\n\n # Handle cases when a connection can be made to the server but\n # the status is greater than or equal to 400.\n if status_code >= 400:\n # Need to check its an 404, 503, 500, 403 etc.\n raise requests.HTTPError(f\"status_code,site_url {status_code,doi.site_url}\")\n else:\n logger.info(\"Landing page URL %s is reachable\", doi.site_url)\n except (requests.exceptions.ConnectionError, Exception):\n raise SiteURLNotExistException(\n f\"Landing page URL {doi.site_url} is not reachable. Request \"\n f\"should have a valid URL assigned prior to release.\\n\"\n f\"To bypass this check, rerun the command with the --force \"\n f\"flag provided.\"\n )",
"def _verify_page(self):",
"def check_conn():\n try:\n urllib2.urlopen(\"http://www.google.com\", timeout=5)\n return True\n except urllib2.URLError:\n pass\n return False",
"def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)",
"def check_if_same_host(host, url):\n # print '\\nchecking same origin:', host, get_host_name(url)\n\n if host == get_host_name(url):\n return True\n return False",
"def _check_grib(self, url):\n head = requests.head(url)\n check_exists = head.ok\n if check_exists:\n check_content = int(head.raw.info()['Content-Length']) > 1_000_000\n return check_exists and check_content\n else:\n return False",
"def url_exist(url:str) -> bool:\r\n with closing(requests.head(url, allow_redirects=True)) as r:\r\n return r.ok",
"def verify_remote_site_has_entry_with_provided_contract(self, mac, ip, tenant_name, l3out_name, remote_epg_name, contract_name):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n query = '/api/mo/uni/tn-%s/out-%s.json?query-target=subtree' % (tenant_name, l3out_name)\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n # Look for l3extInstP\n found = False\n for item in resp.json()['imdata']:\n if 'l3extInstP' in item:\n if item['l3extInstP']['attributes']['name'] == remote_epg_name:\n found = True\n break\n if not found:\n return False\n\n # Verify that the l3extInstP is providing the contract\n found = False\n for item in resp.json()['imdata']:\n if 'fvRsProv' in item:\n if item['fvRsProv']['attributes']['tnVzBrCPName'] == contract_name:\n found = True\n break\n if not found:\n return False\n\n return self.verify_remote_site_has_entry(mac, ip, tenant_name, l3out_name, remote_epg_name)",
"def test_link(link):\n r = requests.get(link)\n if (r.status_code != 200):\n return False\n else:\n return True",
"def checkmatomo_url(self):\n try:\n http_code = requests.get(self.matomo_url, verify=self.ssl_verify).status_code\n if http_code == 200:\n return True\n return False\n except ConnectionError:\n return False",
"def req_CHECKPRESENT(self, key):\n # TODO: so we need to maintain mapping from urls to keys. Then\n # we could even store the filename within archive\n # Otherwise it is unrealistic to even require to recompute key if we\n # knew the backend etc\n lgr.debug(\"VERIFYING key %s\" % key)\n akey, afile = self._get_akey_afile(key)\n if self.get_contentlocation(akey):\n self.send(\"CHECKPRESENT-SUCCESS\", key)\n else:\n # TODO: proxy the same to annex itself to verify check for archive.\n # If archive is no longer available -- then CHECKPRESENT-FAILURE\n self.send(\"CHECKPRESENT-UNKNOWN\", key)",
"def is_alive(self):\n conn = HTTPConnection(self.browser.host, self.browser.port)\n conn.request(\"HEAD\", \"/invalid\")\n res = conn.getresponse()\n return res.status == 404",
"def test_url_existence(self):\n self.assertEquals(self.response.status_code, 200)",
"def check_update():\n try:\n raw_version = urllib.urlopen(VERSIONFILE)\n except IOError as e:\n print UPDATE_FAIL + \"can't fetch version file: \" + str(e)\n else:\n if raw_version.getcode() == 200:\n remote_version = raw_version.read().rstrip()\n if remote_version != VERSION:\n print(UPDATE_WARN + \"version \" + remote_version + \" is available, you have version \"\n + VERSION + \"\\n\\t\" + \"to update run: \" + UPDATECOMMAND)\n else:\n print UPDATE_FAIL + \"can't fetch version file\"",
"def url_exists(url):\n\n try:\n connection = urlopen(url)\n return connection.getcode() < 400\n except Exception as e:\n return False",
"def net_check():\n resp = None\n host = \"https://gitlab.manjaro.org\"\n # noinspection PyBroadException\n try:\n resp = urllib.request.urlopen(host, timeout=2)\n except Exception:\n pass\n return bool(resp)",
"def is_alive(self, site):\n try:\n return requests.get(site).status_code == 200\n except Exception:\n pass"
]
| [
"0.7095956",
"0.64719343",
"0.64107704",
"0.6186604",
"0.6123676",
"0.6075496",
"0.6052845",
"0.6010299",
"0.59730864",
"0.5961953",
"0.5954525",
"0.59486157",
"0.59384876",
"0.5913",
"0.5904583",
"0.5883021",
"0.58655274",
"0.5852213",
"0.5851459",
"0.584828",
"0.5848112",
"0.58260876",
"0.58019984",
"0.5799052",
"0.5798859",
"0.57916784",
"0.5766648",
"0.5750907",
"0.5745175",
"0.5744299"
]
| 0.652173 | 1 |
Verify that the remote site has the entry and provides the specified contract | def verify_remote_site_has_entry_with_provided_contract(self, mac, ip, tenant_name, l3out_name, remote_epg_name, contract_name):
site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
resp = site2.login()
self.assertTrue(resp.ok)
query = '/api/mo/uni/tn-%s/out-%s.json?query-target=subtree' % (tenant_name, l3out_name)
resp = site2.get(query)
self.assertTrue(resp.ok)
# Look for l3extInstP
found = False
for item in resp.json()['imdata']:
if 'l3extInstP' in item:
if item['l3extInstP']['attributes']['name'] == remote_epg_name:
found = True
break
if not found:
return False
# Verify that the l3extInstP is providing the contract
found = False
for item in resp.json()['imdata']:
if 'fvRsProv' in item:
if item['fvRsProv']['attributes']['tnVzBrCPName'] == contract_name:
found = True
break
if not found:
return False
return self.verify_remote_site_has_entry(mac, ip, tenant_name, l3out_name, remote_epg_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_remote_site_has_entry(self, mac, ip, tenant_name, l3out_name, remote_epg_name):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n query = ('/api/mo/uni/tn-%s/out-%s/instP-%s.json?query-target=children' % (tenant_name,\n l3out_name,\n remote_epg_name))\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n found = False\n for item in resp.json()['imdata']:\n if 'l3extSubnet' in item:\n if item['l3extSubnet']['attributes']['ip'] == ip + '/32':\n found = True\n break\n if not found:\n return False\n return True",
"def verify_remote_site_has_entry_before(self, mac, ip):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out.json?query-target=subtree')\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n # Look for l3extInstP\n found = False\n for item in resp.json()['imdata']:\n if 'l3extInstP' in item:\n if item['l3extInstP']['attributes']['name'] == 'intersite-testsuite-app-epg':\n found = True\n break\n if not found:\n return False\n\n # Verify that the l3extInstP is providing the contracts\n found_contract1 = False\n found_contract2 = False\n for item in resp.json()['imdata']:\n if 'fvRsProv' in item:\n if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-1':\n found_contract1 = True\n if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-2':\n found_contract2 = True\n if not found_contract1 or not found_contract2:\n return False\n\n # Look for l3extSubnet\n query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out'\n '/instP-intersite-testsuite-app-epg.json?query-target=subtree')\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n # Look for l3extSubnet\n found = False\n for item in resp.json()['imdata']:\n if 'l3extSubnet' in item:\n if item['l3extSubnet']['attributes']['name'] == ip:\n found = True\n break\n if not found:\n return False\n return True",
"def verify(self):\n if self.geturl():\n return True\n return False",
"def verify_remote_site_has_entry_after(self, mac, ip):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out.json?query-target=subtree')\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n # Look for l3extInstP\n found = False\n for item in resp.json()['imdata']:\n if 'l3extInstP' in item:\n if item['l3extInstP']['attributes']['name'] == 'intersite-testsuite-app-epg':\n found = True\n break\n if not found:\n return False\n\n # Verify that the l3extInstP is providing the contract\n found_contract1 = False\n found_contract2 = False\n for item in resp.json()['imdata']:\n if 'fvRsProv' in item:\n if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-1':\n found_contract1 = True\n if item['fvRsProv']['attributes']['tnVzBrCPName'] == 'contract-2':\n found_contract2 = True\n if not found_contract1 or found_contract2:\n return False\n\n # Look for l3extSubnet\n query = ('/api/mo/uni/tn-intersite-testsuite/out-l3out'\n '/instP-intersite-testsuite-app-epg.json?query-target=subtree')\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n # Look for l3extSubnet\n found = False\n for item in resp.json()['imdata']:\n if 'l3extSubnet' in item:\n if item['l3extSubnet']['attributes']['ip'] == ip + '/32':\n found = True\n break\n if not found:\n return False\n return True",
"def test_client_verification_retrieve(self):\n pass",
"def test_one_contract(self):\n correct_contract = factories.ProjectContract(\n projects=self.projects, status=ProjectContract.STATUS_CURRENT)\n response = self._get()\n self.assertEqual(response.status_code, 200)\n contracts = response.context['contracts']\n self.assertEqual(len(contracts), 1)\n self.assertTrue(correct_contract in contracts)",
"def verify(self, response):",
"def ForgetPeerContract(self,name,url):\n if (self.postedContractDatabase.has_key(name)):\n if ('none' != url.lower()):\n r = self.postedContractDatabase[name].UnpostToContractServer(\n url)\n if ('OK' != string.strip(r)):\n raise UnpostContractFailed, (name,url,r)\n del self.postedContractDatabase[name]\n else:\n dibs_logger.Logger.PrintAndLog('No contract named ' + `name` +\n ' exists.',dibs_logger.LOG_WARNING)",
"def RequiredContract(self) -> _n_0_t_1:",
"def test_kyc_get_legal_share_holder_natural(self):\n pass",
"def verify_remote_site_has_policy(self, tenant_name, l3out_name, instp_name):\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n\n query = ('/api/mo/uni/tn-%s/out-%s/instP-%s.json' % (tenant_name, l3out_name, instp_name))\n resp = site2.get(query)\n self.assertTrue(resp.ok)\n\n found = False\n for item in resp.json()['imdata']:\n if 'l3extInstP' in item:\n found = True\n break\n if not found:\n return False\n return True",
"def req_CHECKPRESENT(self, key):\n # TODO: so we need to maintain mapping from urls to keys. Then\n # we could even store the filename within archive\n # Otherwise it is unrealistic to even require to recompute key if we\n # knew the backend etc\n lgr.debug(\"VERIFYING key %s\" % key)\n akey, afile = self._get_akey_afile(key)\n if self.get_contentlocation(akey):\n self.send(\"CHECKPRESENT-SUCCESS\", key)\n else:\n # TODO: proxy the same to annex itself to verify check for archive.\n # If archive is no longer available -- then CHECKPRESENT-FAILURE\n self.send(\"CHECKPRESENT-UNKNOWN\", key)",
"def test_kyc_post_legal_share_holder(self):\n pass",
"def verify_request(self, request, client_address):\n\t\treturn True",
"def test_contracts_updated(self):\n assert self.agent_config.contracts == {self.new_contract_id}",
"def verify():",
"def validate(self,value):\n if self.contract:\n try:\n self.contract.check(value)\n except ContractNotRespected as cnr:\n raise Exception(\"Invalid value for \"+self.name+\": \"+cnr.error)",
"def test_kyc_get_legal_share_holders(self):\n pass",
"def test_issuance(logger, dbsession, web3, private_key_hex):\n\n # Creating transactions\n txs = deploy_token_contracts(logger, dbsession, \"testing\", web3,\n ethereum_abi_file=None,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n name=\"Moo Corp\",\n symbol=\"MOO\",\n amount=9999,\n transfer_restriction=\"unrestricted\")\n assert len(txs) == 5\n\n # Send transactions to emphmereal test chain\n txs = broadcast(logger,\n dbsession,\n \"testing\",\n web3,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n )\n assert len(txs) == 5\n\n # Check they got mined\n txs = update_status(logger,\n dbsession,\n \"testing\",\n web3,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n )\n assert len(txs) == 5\n for tx in txs: # type: PreparedTransaction\n assert tx.result_transaction_success\n\n token_address = txs[0].contract_address\n\n # Check that we can view the token status\n status = contract_status(logger,\n dbsession,\n \"testing\",\n web3,\n ethereum_abi_file=None,\n ethereum_private_key=private_key_hex,\n ethereum_gas_limit=None,\n ethereum_gas_price=None,\n token_contract=token_address,\n )\n\n assert status[\"name\"] == \"Moo Corp\"\n assert status[\"totalSupply\"] == 9999 * 10**18\n assert status[\"totalSupply\"] == status[\"broadcastBalance\"]",
"def _verify(self):\n pass",
"def verify(self):\r\n pass",
"def part_reject_contract(cid):\r\n take_passwd = request.values.get('take_passwd', '')\r\n with engine.with_session() as ss:\r\n cur_contract = ss.query(LxContract).get(cid)\r\n if not sha256_crypt.verify(take_passwd, cur_contract.take_passwd):\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 'NO_AUTH_CUR_CONTRACT']})\r\n if cur_contract.stage != constants.CONTRACT_STAGE['OWNER_CONFIRM']:\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 'CONTRACT_STAGE_ERROR']})\r\n # TODO reject contract send message to the owner and set stage to new\r\n # contract\r\n cur_contract.update({'stage': constants.CONTRACT_STAGE['NEW_CONTRACT']})\r\n\r\n return jsonify({'success': True, 'data': 1})",
"def verify(self):\n pass",
"def verify(self):\n pass",
"def verify_vn_in_api_server(self):\n self.api_verification_flag = True\n self.api_s_vn_obj = self.api_s_inspect.get_cs_vn(\n domain=self.domain_name, project=self.project_name,\n vn=self.vn_name, refresh=True)\n if not self.api_s_vn_obj:\n self.logger.debug(\"VN %s is not found in API-Server\" %\n (self.vn_name))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n if self.api_s_vn_obj['virtual-network']['uuid'] != self.uuid:\n self.logger.warn(\n \"VN Object ID %s in API-Server is not what was created\" % (self.uuid))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n\n subnets = list()\n for ipam in self.api_s_vn_obj['virtual-network']['network_ipam_refs']:\n subnets.extend(ipam['attr']['ipam_subnets'])\n for vn_subnet in self.vn_subnets:\n subnet_found = False\n vn_subnet_cidr = str(IPNetwork(vn_subnet['cidr']).ip)\n for subnet in subnets:\n if subnet['subnet']['ip_prefix'] == vn_subnet_cidr:\n subnet_found = True\n if not subnet_found:\n self.logger.warn(\n \"VN Subnet IP %s not found in API-Server for VN %s\" %\n (vn_subnet_cidr, self.vn_name))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n # end for\n self.api_s_route_targets = self.api_s_inspect.get_cs_route_targets(\n vn_id=self.uuid)\n if not self.api_s_route_targets:\n errmsg = \"Route targets not yet found in API-Server for VN %s\" % self.vn_name\n self.logger.error(errmsg)\n self.api_verification_flag = self.api_verification_flag and False\n return False\n self.rt_names = self.api_s_inspect.get_cs_rt_names(\n self.api_s_route_targets)\n\n if not self.rt_names:\n self.logger.debug(\n 'RT names not yet present for VN %s', self.vn_name)\n return False\n\n if self.rt_number:\n if not any(item.endswith(self.rt_number) for item in self.rt_names):\n self.logger.debug('RT %s is not found in API Server RT list %s ' %(\n self.rt_number, self.rt_names))\n self.api_verification_flag = self.api_verification_flag and False\n return False\n\n self.api_s_routing_instance = self.api_s_inspect.get_cs_routing_instances(\n vn_id=self.uuid)\n if not self.api_s_routing_instance:\n msg = \"Routing Instances not found in API-Server for VN %s\" % self.vn_name\n self.logger.warn(msg)\n self.api_verification_flag = self.api_verification_flag and False\n return False\n self.ri_ref = self.api_s_routing_instance['routing_instances'][0]['routing-instance']\n if not self.verify_network_id():\n return False\n self.api_verification_flag = self.api_verification_flag and True\n self.logger.info(\"Verifications in API Server for VN %s passed\" %\n (self.vn_name))\n return True",
"def test_search_contract(self):\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n # asserts that there aren't any contracts in changelist\n self.assertNotIn('table', content)\n self.assertIn(\n '<a href=\"/admin/contracts/contract/add/\" class=\"addlink\">',\n content)\n\n # creates two contracts\n payload = self.contract_one_data\n payload['tenant'] = payload['tenant'].id\n payload['property'] = payload['property'].id\n response = self.client.post(\n '/admin/contracts/contract/add/', payload, follow=True)\n self.assertEqual(response.status_code, 200)\n payload = self.contract_two_data\n payload['tenant'] = payload['tenant'].id\n payload['property'] = payload['property'].id\n response = self.client.post(\n '/admin/contracts/contract/add/', payload, follow=True)\n self.assertEqual(response.status_code, 200)\n\n # checks both of them show up in listing\n response = self.client.get('/admin/contracts/contract/')\n content = response.content\n self.assertIn('table', content)\n self.assertIn('Sept. 25, 2017', content)\n self.assertIn('Sept. 25, 2018', content)\n self.assertIn(str(self.contract_one_data['rent']), content)\n self.assertIn('Oct. 22, 2017', content)\n self.assertIn('Sept. 22, 2018', content)\n self.assertIn(str(self.contract_two_data['rent']), content)\n\n # searches for contract\n contract = Contract.objects.get(\n property=self.contract_one_data['property'])\n\n contract_two = Contract.objects.get(\n property=self.contract_two_data['property'])\n response = self.client.get(\n '/admin/contracts/contract/?q={}'.format(\n contract.property.city))\n content = response.content\n self.assertIn('table', content)\n self.assertIn(contract.tenant.get_full_name(), content)\n self.assertIn(contract.property.__unicode__(), content)\n self.assertNotIn(contract_two.tenant.get_full_name(), content)\n self.assertNotIn(contract_two.property.__unicode__(), content)",
"def test_client_nationlity_retrieve(self):\n pass",
"def test_get_and_has__name_and_address(network, example_config):\n addresses = ContractHandler.get_contracts_addresses(\n network, example_config.address_file\n )\n target_address = addresses[\"DTFactory\"]\n\n contract = ContractHandler.get(\"DTFactory\", target_address)\n assert \"createToken\" in str(contract.abi)\n assert contract.address == addresses[\"DTFactory\"]\n\n assert ContractHandler.has(\"DTFactory\", target_address)\n assert not ContractHandler.has(\"foo name\", \"foo address\")\n assert not ContractHandler.has(\"foo name\", contract.address)\n assert not ContractHandler.has(\"DTFactory\", \"foo address\")",
"def verify(self):",
"def test_get(self):\n response = self.client.get(\n reverse(\n 'projectroles:api_remote_get',\n kwargs={'secret': REMOTE_SITE_SECRET},\n )\n )\n self.assertEqual(response.status_code, 200)\n expected = self.remote_api.get_source_data(self.target_site)\n response_dict = json.loads(response.content.decode('utf-8'))\n self.assertEqual(response_dict, expected)"
]
| [
"0.56794",
"0.5660135",
"0.56169105",
"0.56155354",
"0.5591733",
"0.5438814",
"0.54283285",
"0.5421029",
"0.5370657",
"0.532726",
"0.53145826",
"0.5304276",
"0.53038454",
"0.52644545",
"0.52625453",
"0.5232082",
"0.5208109",
"0.52033883",
"0.52013546",
"0.5180896",
"0.51710624",
"0.51629454",
"0.5157466",
"0.5157466",
"0.5149785",
"0.5133139",
"0.5118787",
"0.5104792",
"0.5075038",
"0.50631833"
]
| 0.6888689 | 0 |
Verify that the remote site has the policy | def verify_remote_site_has_policy(self, tenant_name, l3out_name, instp_name):
site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)
resp = site2.login()
self.assertTrue(resp.ok)
query = ('/api/mo/uni/tn-%s/out-%s/instP-%s.json' % (tenant_name, l3out_name, instp_name))
resp = site2.get(query)
self.assertTrue(resp.ok)
found = False
for item in resp.json()['imdata']:
if 'l3extInstP' in item:
found = True
break
if not found:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify(self):\n if self.geturl():\n return True\n return False",
"def test_basic_remove_policy(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n time.sleep(4)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg2'))\n\n config = self.create_site_config()\n self.write_config_file(config, args)\n collector.reload_config()\n\n time.sleep(4)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg2'))",
"def test_allow(self) -> None:\n response = self.request(\"/\", method=\"HEAD\")\n self.assert_allowed(response, (\"GET\", \"POST\"))",
"def mwa_available():\n try:\n urllib2.urlopen(pref('ServerURL'), timeout=1)\n return True\n except urllib2.HTTPError, e:\n if str(e.code) == \"401\":\n return True\n else:\n return False\n except urllib2.URLError as err: \n return False",
"def test_1_privacy(self):\n response = self.client.get(reverse('privacy-policy'), follow=True)\n self.assertEqual(response.status_code, 200)",
"def check_vulnerability(self):\n\t\tpass",
"def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text",
"def check_for_webgate():\n\n http_conf_ORIG='/opt/WebSphere/HTTPServer/conf/httpd.conf.ORIG'\n\n if not os.path.exists(http_conf_ORIG):\n return False\n else:\n return True",
"def check_if_can_fetch(self, url, useragent=\"*\"):\n logger.debug(\"Checking if can fetch %s\" % url)\n return self.rp.can_fetch(useragent=useragent, url=url)",
"def check(self,):\n self.is_valid_according_policy()",
"def test_is_revoked_target(self):\n self.site.mode = SITE_MODE_SOURCE\n self.site.save()\n self.assertEqual(self.project.is_revoked(), False)\n self.remote_project.level = SODAR_CONSTANTS['REMOTE_LEVEL_REVOKED']\n self.remote_project.save()\n self.assertEqual(self.project.is_revoked(), True)",
"def proxy_check(self, proxy):",
"def check_url(url: str) -> bool:\n try:\n potential_error = driver.find_element_by_xpath(\"/html/body/div[5]/div/div/div[1]/div/div/div/section/div[2]/div\").text\n if '403' in potential_error:\n return True\n except:\n return False",
"def acceptPolicy(self):\n if not self.__loaded:\n self.__load()\n \n return self.__acceptCookies",
"def _can_ping_url(self, url, headers):\n try:\n self.http_request(url, \"GET\", \"\", headers, timeout=.75)\n return True\n except:\n return False",
"def test_viewPrivacyPolicyPage(self):\r\n print('========================================================================')\r\n print('Test for check redirect on PrivacyPolicy page after link PrivacyPolicy click')\r\n #Load Registrtion page\r\n self.reg_page.open_registration_page()\r\n driver = self.reg_page.driver\r\n\r\n #cheks if right title\r\n assert self.reg_page.is_title_matches(), \"Registration title page doesn't match\"\r\n\r\n self.reg_page.click_privacyPolicy_lnk()\r\n ppolicy_page = page_PrivacyPolicy.Page_PrivacyPolicy(driver)\r\n\r\n driver.get(ppolicy_page.PPOLICY_URL)\r\n wait = WebDriverWait(driver, 20)\r\n element = wait.until(EC.title_contains('Privacy Policy'))\r\n assert ppolicy_page.get_ppolicy_title().find(\"Privacy Policy\") != -1, \"Privacy Policy title page doesn't match\"\r\n\r\n print('--------- SUCCESS test_viewPrivacyPolicyPage-----------')\r\n driver.quit()",
"def download_allowed(self, url, scheme, netloc):\n robot = urllib.robotparser.RobotFileParser('%s://%s/%s' % (scheme, netloc, config.ROBOTS))\n try:\n robot.read()\n except ValueError:\n raise urllib.error.URLError('<urlopen error no protocol given>')\n\n return robot.can_fetch(config.USER_AGENT, url)",
"def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']",
"def policy(agent):",
"def _verify_page(self):",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def test_has_permission(self):\n self.assertStatusCode(self.url, 200)",
"def is_cookie_policy_accepted(self, request):\n cookie_policy = request.COOKIES.get(self.cookie_name, '')\n try:\n cookie_policy = json.loads(cookie_policy)\n return isinstance(cookie_policy, dict) and cookie_policy.get('usage') is True\n except ValueError:\n return False",
"def passes(self, request: PolicyRequest) -> PolicyResult:\n raise PolicyException()",
"def check_mitm_status_page(self, check_url):\n response = requests.get(check_url)\n if response.status_code == 200:\n return response\n else:\n sys.exit(2)",
"def test_59_help_policy(self):\r\n url = \"/help/cookies-policy\"\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"There should be a TOS page\"\r\n assert \"uses cookies\" in res.data, err_msg",
"def check_site(site):\n return _READY(site)",
"def verify(self, response):"
]
| [
"0.6503222",
"0.62608546",
"0.61887795",
"0.6128206",
"0.6093805",
"0.6025671",
"0.6003396",
"0.5899015",
"0.5826807",
"0.57733697",
"0.5753594",
"0.57472557",
"0.5745977",
"0.5728767",
"0.5715093",
"0.57131964",
"0.5710037",
"0.5691957",
"0.56792384",
"0.567788",
"0.5664685",
"0.5664685",
"0.5664685",
"0.5664685",
"0.5620961",
"0.5604168",
"0.55681765",
"0.5548358",
"0.5487239",
"0.5469218"
]
| 0.6775196 | 0 |
Get a mock of the command line arguments | def get_args():
args = mock.Mock()
args.debug = None
args.generateconfig = None
args.config = 'doesntmatter'
return args | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mock_get_args():\n\n return MagicMock()",
"def test_arg_parse(test):\n print(test)\n testargs = ['']\n with mock.patch.object(sys, 'argv', testargs):\n assert gc.arg_parse().interactive",
"def test_parsing_args(event_loop) -> None:\n called = False\n\n async def mock_func(hass, provider, args2):\n \"\"\"Mock function to be called.\"\"\"\n nonlocal called\n called = True\n assert provider.hass.config.config_dir == \"/somewhere/config\"\n assert args2 is args\n\n args = Mock(config=\"/somewhere/config\", func=mock_func)\n\n with patch(\"argparse.ArgumentParser.parse_args\", return_value=args):\n script_auth.run(None)\n\n assert called, \"Mock function did not get called\"",
"def test_prelim_opts_handles_empty(application):\n irrelevant_args = ['myexe', '/path/to/foo']\n with mock.patch.object(sys, 'argv', irrelevant_args):\n opts, args = application.parse_preliminary_options([])\n\n assert args == []",
"def test_main_optional_args(self):\n args = [\"in_toto_keygen.py\"]\n password = \"123456\"\n with patch.object(sys, 'argv', args + [\"-p\", \"bob\"]), \\\n patch(\"getpass.getpass\", return_value=password), self.assertRaises(\n SystemExit):\n in_toto_keygen_main()\n with patch.object(sys, 'argv', args + [\"-p\", \"bob\", \"3072\"]), \\\n patch(\"getpass.getpass\", return_value=password), self.assertRaises(\n SystemExit):\n in_toto_keygen_main()",
"def test_main_arguments():\n args = argparse.Namespace(url=RANDOM_URL,\n username=RANDOM_USERNAME,\n password=RANDOM_PASSWORD,\n tenantcode=RANDOM_TENANTCODE)\n result = Config(\"wso_args.json\").main(args)\n\n assert result is True",
"def test_parse_arguments_default(self):\n self.assertEqual(self.args.python, sys.executable)\n self.assertEqual(self.args.pip_packages, [])\n self.assertEqual(self.args.number_of_threads, multiprocessing.cpu_count())\n self.assertEqual(self.args.cmake_installer,\n CMAKE_INSTALLER_URL_BY_HOST_PLATFORM.get(sys.platform))\n self.assertEqual(self.args.cmake_source_project_root,\n os.path.join(os.getcwd(), 'git', 'falken'))\n self.assertEqual(self.args.cmake_copybara_variable, 'FALKEN_DIR')\n self.assertIsNone(self.args.cmake_configure_args)\n self.assertEqual(self.args.cmake_generator,\n cmake_runner.CMakeRunner.default_generator())\n self.assertEqual(\n self.args.cmake_target_architecture,\n cmake_runner.CMakeRunner.default_architecture(\n cmake_runner.CMakeRunner.default_generator()))\n self.assertEqual(self.args.cmake_build_dir,\n os.path.join(os.getcwd(), 'build'))\n self.assertEqual(self.args.cmake_build_configs, CMAKE_DEFAULT_BUILD_CONFIGS)\n self.assertIsNone(self.args.cmake_package_configs)\n self.assertEqual(self.args.cmake_package_generator, 'ZIP')\n self.assertIsNone(self.args.cmake_test_regex)\n self.assertEqual(self.args.output_dir, 'output')\n self.assertIsNone(self.args.copy_artifacts)\n self.assertIsNone(self.args.zip_artifacts)",
"def test_args(self):\n parser = argparse.ArgumentParser(\n prog=\"sysbottle\", description=\"sysbottle is parsed\"\n )\n subparsers = parser.add_subparsers()\n sysbottle.build(subparsers)\n args = parser.parse_args(\n [\n \"sysbottle\",\n \"abc.txt\",\n \"-c\",\n \"90\",\n \"-q\",\n \"1\",\n \"-d\",\n \"sda\",\n \"-i\",\n \"5\",\n \"-t\",\n \"3\",\n ]\n )\n self.assertTrue(hasattr(args, \"file\"))\n self.assertTrue(hasattr(args, \"cpu\"))\n self.assertTrue(hasattr(args, \"diskQ\"))\n self.assertTrue(hasattr(args, \"disks\"))\n self.assertTrue(hasattr(args, \"iowait\"))\n self.assertTrue(hasattr(args, \"throughput\"))",
"def test_with_command_line_arguments(self, arguments):\n fixed_arguments = self.get_argument_string(arguments)\n result = self.run(\n arguments=fixed_arguments,\n timeout=self.full_timeout,\n use_fresh_profile=True)\n return self._handle_test_result(result)",
"def test_main_named_args():\n with mock.patch('uflash.flash') as mock_flash:\n uflash.main(argv=['-r', 'baz.hex'])\n mock_flash.assert_called_once_with(path_to_python=None,\n paths_to_microbits=[],\n path_to_runtime='baz.hex',\n minify=False,\n keepname=False)",
"def test_main_with_explicitly_passed_argument_as_string(mocker):\n mocker.patch.object(demisto, 'args',\n return_value={'entry_id': 'err_entry_id_1, err_entry_id_2, std_entry_id_1'})\n mocker.patch.object(demisto, 'executeCommand', side_effect=ERROR_ENTRIES)\n demisto_args = mocker.spy(demisto, 'args')\n demisto_results = mocker.spy(demisto, 'results')\n\n main()\n\n demisto_args.assert_called_once()\n expected_error_msgs = ['This is the error message 1', 'This is the error message 2']\n expected_results = CommandResults(\n readable_output='\\n'.join(expected_error_msgs),\n outputs_prefix='ErrorEntries',\n outputs=expected_error_msgs,\n raw_response=expected_error_msgs,\n ).to_context()\n demisto_results.assert_called_once_with(expected_results)",
"def test_main(cmd_map, argv, action, debug, alias):\n cmd_map.return_value = mock.Mock()\n app.main(argv)\n cmd_map.assert_called_once_with(argparse.Namespace(alias=alias, cmd_args=[], color=False, action=action, debug=debug))",
"def test_arguments(self):\n args = []\n def main(reactor, x, y, z):\n args.extend((x, y, z))\n return defer.succeed(None)\n r = _FakeReactor()\n exitError = self.assertRaises(\n SystemExit, task.react, main, [1, 2, 3], _reactor=r)\n self.assertEqual(0, exitError.code)\n self.assertEqual(args, [1, 2, 3])",
"def get_cli_arguments(self):\n pass",
"def test_build_option_parser(self):\n usage = \"Something\"\n epilog = \"Something\"\n argparse.ArgumentParser = mock.Mock()\n parser = cmd_utils.build_option_parser(usage=usage, epilog=epilog,)\n argparse.ArgumentParser.assert_called_with(\n usage=usage, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog,\n )\n assert parser.add_argument.call_count == 5\n\n args = [call[1] for call in parser.add_argument.mock_calls]\n expected = [\n (\"--version\",),\n (\"-v\", \"--verbose\"),\n (\"--server\",),\n (\"--cluster_name\",),\n (\"-s\", \"--save\"),\n ]\n assert args == expected\n\n defaults = [call[2].get(\"default\") for call in parser.add_argument.mock_calls]\n assert defaults == [None, None, None, None, None]",
"def test_cli_args():\n expected = dict(\n paths=[\"path1\", \"path2\"],\n exclude=[\"file*.py\", \"dir/\"],\n ignore_decorators=[\"deco1\", \"deco2\"],\n ignore_names=[\"name1\", \"name2\"],\n make_whitelist=True,\n min_confidence=10,\n sort_by_size=True,\n verbose=True,\n )\n result = _parse_args(\n [\n \"--exclude=file*.py,dir/\",\n \"--ignore-decorators=deco1,deco2\",\n \"--ignore-names=name1,name2\",\n \"--make-whitelist\",\n \"--min-confidence=10\",\n \"--sort-by-size\",\n \"--verbose\",\n \"path1\",\n \"path2\",\n ]\n )\n assert isinstance(result, dict)\n assert result == expected",
"def test_cli_with_test_args(\n config,\n):\n testargs = [\n \"yessssms\",\n \"--test\",\n \"-l\",\n \"06641234567\",\n \"-p\",\n \"passw0rd\",\n \"-t\",\n \"+43676564736\",\n ]\n with mock.patch.object(sys, \"argv\", testargs):\n with requests_mock.Mocker() as m:\n m.register_uri(\n \"POST\",\n _LOGIN_URL,\n status_code=302,\n # pylint: disable=protected-access\n headers={\"location\": _KONTOMANAGER_URL},\n )\n m.register_uri(\"GET\", _KONTOMANAGER_URL, status_code=200)\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n _SMS_FORM_URL,\n status_code=200,\n text=TEST_FORM_TOKEN_SAMPLE,\n )\n m.register_uri(\n \"POST\",\n _SEND_SMS_URL,\n status_code=200,\n text=\"<h1>Ihre SMS wurde erfolgreich \" + \"verschickt!</h1>\",\n )\n m.register_uri(\"GET\", _LOGOUT_URL, status_code=200)\n val = CLI().exit_status\n assert val == 0",
"def test_cli_with_version_arg(config, capsys):\n testargs = [\"yessssms\", \"--version\"]\n with mock.patch.object(sys, \"argv\", testargs):\n CLI()\n captured = capsys.readouterr()\n assert captured.out == \"yessssms \" + VERSION + \"\\n\"",
"def test_main_required_args(self):\n args = [\"in_toto_keygen.py\"]\n\n with patch.object(sys, 'argv', args + [\"bob\"]), \\\n self.assertRaises(SystemExit):\n in_toto_keygen_main()",
"def test_no_options(self):\n args = mock.Mock()\n args.debug = None\n args.generateconfig = None\n args.config = None\n with mock.patch('sys.stdout', new=StringIO()) as fake_out:\n execute_tool(args)\n self.assertEqual(fake_out.getvalue(), '%% No configuration file given.\\n')",
"def test_cli_with_no_arg(config, capsys):\n testargs = [\"yessssms\"]\n with mock.patch.object(sys, \"argv\", testargs):\n CLI()\n captured = capsys.readouterr()\n assert \"usage: yessssms \" in captured.out",
"def test_parse_arguments2():\n args = ['--file', 'data.csv', '--debug']\n parsed_args = parse_arguments.parse_arguments(args)\n assert parsed_args.file == 'data.csv'\n assert parsed_args.logging_level == logging.DEBUG",
"def test_parse_custom_arguments(self):\n arguments = build_cmake_project.parse_arguments([\n '--cmake_package_generator=7Z', '--cmake_configure_args=Xcode',\n '--number_of_threads=7'\n ])\n self.assertEqual(arguments.python, sys.executable)\n self.assertEqual(arguments.pip_packages, [])\n self.assertEqual(arguments.number_of_threads, 7)\n self.assertEqual(arguments.cmake_installer,\n CMAKE_INSTALLER_URL_BY_HOST_PLATFORM.get(sys.platform))\n self.assertEqual(arguments.cmake_source_project_root,\n os.path.join(os.getcwd(), 'git', 'falken'))\n self.assertEqual(arguments.cmake_copybara_variable, 'FALKEN_DIR')\n self.assertEqual(arguments.cmake_configure_args, ['Xcode'])\n self.assertEqual(arguments.cmake_generator,\n cmake_runner.CMakeRunner.default_generator())\n self.assertEqual(\n arguments.cmake_target_architecture,\n cmake_runner.CMakeRunner.default_architecture(\n cmake_runner.CMakeRunner.default_generator()))\n self.assertEqual(arguments.cmake_build_dir,\n os.path.join(os.getcwd(), 'build'))\n self.assertEqual(arguments.cmake_build_configs, CMAKE_DEFAULT_BUILD_CONFIGS)\n self.assertIsNone(arguments.cmake_package_configs)\n self.assertEqual(arguments.cmake_package_generator, '7Z')\n self.assertIsNone(arguments.cmake_test_regex)\n self.assertEqual(arguments.output_dir, 'output')\n self.assertIsNone(arguments.copy_artifacts)\n self.assertIsNone(arguments.zip_artifacts)",
"def get_arguments():\n parser = argparse.ArgumentParser(\n description=\"pilight2mqtt: Translate pilight events to MQTT.\")\n parser.add_argument('--version', action='version', version=__version__)\n parser.add_argument(\n '--mqtt-server',\n default='localhost',\n help='Address of the MQTT server to talk to.')\n parser.add_argument(\n '--mqtt-port',\n default=1883,\n type=int,\n help='Port of the MQTT server to talk to.')\n parser.add_argument(\n '--mqtt-topic',\n default='PILIGHT',\n help='MQTT topic to use.')\n parser.add_argument(\n '--mqtt-username',\n default=None,\n help='MQTT username for authentication.')\n parser.add_argument(\n '--mqtt-password',\n default=None,\n help='MQTT password for authentication.')\n parser.add_argument(\n '--pilight-server',\n default=None,\n help=textwrap.dedent('''\\\n Set the address of the pilight server to use.\n If not specified will try to auto discover'''))\n parser.add_argument(\n '--pilight-port',\n default=5001,\n type=int,\n help=textwrap.dedent('''\\\n Port of the pilight server.\n Only used when pilight-server is also specified'''))\n parser.add_argument(\n '--debug',\n action='store_true',\n help='Start pilight2mqtt in debug mode')\n parser.add_argument(\n '--verbose',\n action='store_true',\n help='Start pilight2mqtt in verbose mode')\n parser.add_argument(\n '--pid-file',\n metavar='path_to_pid_file',\n default=None,\n help='Path to PID file useful for running as daemon')\n if os.name == \"posix\":\n parser.add_argument(\n '--daemon',\n action='store_true',\n help='Run pilight2mqtt as daemon')\n\n arguments = parser.parse_args()\n if os.name != \"posix\" or arguments.debug:\n arguments.daemon = False\n\n return arguments",
"def test_parse_arguments3():\n args = ['--file', 'data.csv', '--confidential']\n parsed_args = parse_arguments.parse_arguments(args)\n assert parsed_args.confidential",
"def test_process_args_fancy(self):\n mocked_args = Mock(spec=Namespace)\n mocked_args.colorize = False\n mocked_args.fancy = False\n mocked_args.reverse = True\n settings = Settings(0)\n settings |= Settings.REVERSE\n output = process_settings_from_args(mocked_args)\n self.assertEqual(settings, output)",
"def test_0_args(library_db):\n import argparse\n\n namespace = app.main([])\n assert namespace.__class__ == argparse.Namespace",
"def test_remote_inner_argv(pytester: pytest.Pytester) -> None:\n pytester.makepyfile(\n \"\"\"\n import sys\n\n def test_argv():\n assert sys.argv == [\"-c\"]\n \"\"\"\n )\n result = pytester.runpytest(\"-n1\")\n assert result.ret == 0",
"def test_with_explicit_sample_args(self):\n test_dict = CliArgs('sample', ['-a', '26', '-s', 'somefile', '-n', '-u', 'foo', '-v']).__dict__\n self.assertEqual('foo', test_dict['user'])\n self.assertEqual(1, test_dict['verbosity'])\n self.assertEqual('26', test_dict['analyzer_profile'])",
"def test_cmdlineproc_test2():\n\n parameters = {\n \"debug\": False,\n \"disconnect\": False,\n \"executable\": \"\",\n \"executableargs\": \"\",\n \"hosts\": \"\",\n \"job\": \"\",\n \"jobname\": \"\",\n \"log\": \"\",\n \"recover\": \"\",\n \"resource\": \"\",\n \"replicates\": \"\",\n \"verbose\": False\n }\n\n commandlineargs = [\"-about\"]\n\n longbowargs = _commandlineproc(ALLLONGBOWARGS, commandlineargs, parameters)\n\n assert parameters[\"executable\"] == \"\"\n assert parameters[\"executableargs\"] == \"\"\n assert longbowargs == [\"-about\"]"
]
| [
"0.78285116",
"0.73862106",
"0.693358",
"0.6824385",
"0.67919064",
"0.6790265",
"0.67393297",
"0.66914344",
"0.6677997",
"0.6621044",
"0.6559906",
"0.6548342",
"0.65294415",
"0.6523241",
"0.65163857",
"0.6500896",
"0.64581484",
"0.64440006",
"0.64237297",
"0.6401593",
"0.6316826",
"0.63160604",
"0.6312048",
"0.6292821",
"0.62824154",
"0.62456",
"0.62405986",
"0.6237155",
"0.6232878",
"0.6224431"
]
| 0.8090608 | 0 |
Test basic MAC move | def test_basic_mac_move(self):
args = self.get_args()
self.write_config_file(self.create_config_file(), args)
execute_tool(args, test_mode=True)
ip = '3.4.3.4'
mac = '00:11:22:33:33:33'
self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',
'intersite-testsuite-app-epg'))
time.sleep(2)
self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')
time.sleep(2)
self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
'l3out', 'intersite-testsuite-app-epg'))
mac = '00:11:22:33:44:44'
self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')
self.remove_endpoint('00:11:22:33:33:33', ip, 'intersite-testsuite', 'app', 'epg')
time.sleep(2)
self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
'l3out', 'intersite-testsuite-app-epg')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _mac_test(mac):\n\n\t\tif re.search(r'([0-9A-F]{2}[:]){5}([0-9A-F]){2}', mac.upper()) is not None:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def test_prepare_mac_header(self):\n self.assertEqual(prepare_mac_header(**self.mac_plain), self.auth_plain)\n self.assertEqual(prepare_mac_header(**self.mac_body), self.auth_body)\n self.assertEqual(prepare_mac_header(**self.mac_both), self.auth_both)",
"def isMACCommand(self):\n return self.payload.fport == 0",
"def update_mac(self, key, board):\n macpos_y, macpos_x = self.macpos\n\n offset_y, offset_x = self.arrows.get(key, (50, 0))\n if (macpos_y + offset_y, macpos_x + offset_x) in board:\n self.macpos = (macpos_y + offset_y, macpos_x + offset_x)",
"def setMAC( self, intf, mac ):\n result = self.cmd( 'ifconfig', intf, 'down' )\n result += self.cmd( 'ifconfig', intf, 'hw', 'ether', mac )\n result += self.cmd( 'ifconfig', intf, 'up' )\n return result",
"def change_mac(interface, mac):\r\n print(\"Changing MAC-address for \" + interface + \" to \" + mac)\r\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"down\"])\r\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"hw\", \"ether\", mac])\r\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"up\"])",
"def test_oldmac(args):\n print('============= Testing for Correctness (Old Mac) =============')\n return _replace_test(args, '\\r')",
"def test_cross_language_mac(self):\n alice_private = [\n 0x77, 0x07, 0x6D, 0x0A, 0x73, 0x18, 0xA5, 0x7D,\n 0x3C, 0x16, 0xC1, 0x72, 0x51, 0xB2, 0x66, 0x45,\n 0xDF, 0x4C, 0x2F, 0x87, 0xEB, 0xC0, 0x99, 0x2A,\n 0xB1, 0x77, 0xFB, 0xA5, 0x1D, 0xB9, 0x2C, 0x2A\n ]\n\n bob_key = \"3p7bfXt9wbTTW2HC7OQ1Nz+DQ8hbeGdNrfx+FG+IK08\"\n message = \"Hello world!\"\n extra_info = \"MAC\"\n expected_mac = \"2nSMTXM+TStTU3RUVTNSVVZUTlNWVlpVVGxOV1ZscFY\"\n\n sas_alice = Sas()\n sas_alice._create_sas(bytes(alice_private), 32)\n sas_alice.set_their_pubkey(bob_key)\n\n alice_mac = sas_alice.calculate_mac(message, extra_info)\n\n assert alice_mac == expected_mac",
"def random_mac():\n return '\"02:%02x:%02x:%02x:%02x:%02x\"' % (random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255),\n random.randint(0,255))",
"def test_add_macaddress(self):\n mac = '00:00:00:00:00:00'\n info = self.api.add_macaddress(mac, tags=['asd'])\n self.assertEqual(info['value'], mac)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_human_move_char(self):\n self.ri.return_value = 'c'\n assert False == self.T.human_move()",
"def test_r1t6(capsys):\n helper(\n capsys=capsys,\n terminal_input=['transfer', 'login', 'atm', 'logout', 'No'],\n intput_valid_accounts=['1234568'],\n expected_tail_of_terminal_output=['Thank you for using Quinterac, have a nice day!'],\n expected_output_transactions=['EOS 0000000 000 0000000 ***']\n )",
"def test_maze_move_1(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.UP, a2.NO_CHANGE), False)",
"def check(interface, mac):\n\tifconfig = sp.check_output(['sudo','ifconfig',interface]).decode()\n\tregexMax = re.compile(r'(\\w\\w:){5}\\w\\w')\n\tresult = regexMax.search(ifconfig)\n\tif not result == None and result.group() == mac:\n\t\tprint('Mac changed')\n\t\tprint('[+] '+interface+' --> '+mac)\n\telse:\n\t\tprint('[[[[!]]]] Faliour',result.group())",
"def test_set_self_address(self):\n print('### Testing set up address ###')\n node_id = \"001\" # node_id of the form of 3 chr string already verified in Nanomodem.py\n \n command = b'$A' + node_id.encode()\n self.serport.write(command)\n\n received_bytes = self.serport.readline()\n index = received_bytes.find(b'#A')\n #print(\"SET_ADDRESS len is \"+ str(len(received_bytes)) +\" and index is \"+str(index))\n\n if (index != -1) and (len(received_bytes) - index == 5 and received_bytes.decode()[1] == 'A'): \n # received_bytes[1] == b'A' as condition doesn't work because x = b'A' still stay b'A' and x[0] give 65 (the byte for A)\n #print(\"SET_ADDRESS A was spot on\")\n if received_bytes[1:4] == command[1:4]:\n node_id = received_bytes.decode()[2:5]\n print(\"SET_ADDRESS node is :\"+ node_id)\n print(\"set self address SUCCESS\")\n return True\n else: \n print(\"set self address FAILURE\")\n return False",
"def test_maze_move_6(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)",
"def test_machine_get_tape(self):\n self.machine.add_state('0 ,R, ,R, ,R, a,N,!')\n self.machine.init_tape(' aba caba_caba caba ')\n assert self.machine.get_tape() == 'aba caba caba caba'",
"def cna(mac):\n return mock.Mock(spec=pvm_net.CNA, mac=mac, vswitch_uri='fake_href')",
"def get_mac(self) -> str:\n self.sendline(\"iw {} info\".format(self.iface_dut))\n # We are looking for MAC definition of STA\n # wdev 0x1\n # addr 96:4e:c9:cc:7a:2c\n # type managed\n self.expect(\"addr (?P<mac>..:..:..:..:..:..)\\r\\n\\t(type|ssid)\")\n return self.match.group('mac')",
"def test_maze_move_2(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.RIGHT), False)",
"def test_two_presses(self):\n out = \"\"\n def cb():\n nonlocal out\n out += \"+\"\n run(MockSerial([b'FE', b'FE', b'FF', b'FE', b'FE', b'FF']), 10, [cb])\n self.assertEqual(out, \"++\")",
"def test_configured_mac(self, mock_discover, *args):\n mock_discover.return_value = [self.mock_insight_2, self.mock_insight]\n discover_event = Event()\n blk = EventWeMoDiscovery(discover_event)\n # Looking for the 2nd insight in the list only\n self.configure_block(blk, {'device_mac': 'mac'})\n blk.start()\n self.assertTrue(discover_event.wait(1))\n self.assertEqual(blk.device, self.mock_insight)\n blk.stop()\n\n # if the specified MAC isn't found discovery continues\n discover_event.clear()\n blk = EventWeMoDiscovery(discover_event)\n self.configure_block(blk, {'device_mac': 'other'})\n blk.start()\n # Wait some time but don't expect discover to actually finish\n self.assertFalse(discover_event.wait(0.2))\n self.assertIsNone(blk.device)\n self.assertTrue(blk._discovering)\n blk.stop()",
"def get_random_mac():\n\t\n\t# use the Dlink range\n\tmac = \"00:05:5D\"\n\t\n\tfor i in range(0,3):\n\t\tmac += \":%s\" % hex(random.randrange(0,256))[2:]\n\t\t\n\t\t\n\treturn mac",
"def test_unit_mac_address_decode(self):\n octet0 = 0xFF\n octet1 = 0xFE\n octet2 = 0xFB\n octet3 = 0xFA\n octet4 = 0xF7\n octet5 = 0xF6\n decode = MidniteClassicModbusRegisters.UNIT_MAC_ADDRESS['decode']\n registers = []\n registers.append((octet1 << 8) | octet0)\n registers.append((octet3 << 8) | octet2)\n registers.append((octet5 << 8) | octet4)\n expected = {\n 'mac_address': [hex(octet5),\n hex(octet4),\n hex(octet3),\n hex(octet2),\n hex(octet1),\n hex(octet0)]\n }\n self.assertDictEqual(expected, decode(registers))\n registers = ['A', 'B', 'C']\n self.assertRaises(TypeError, decode, registers)\n registers = []\n self.assertRaises(IndexError, decode, registers)",
"def mac_ntoa(mac):\n return '%.2x:%.2x:%.2x:%.2x:%.2x:%.2x' % tuple(map(ord, list(mac)))",
"def generate_mac():\n rand_str = generate_name(choices=\"0123456789abcdef\", length=12)\n return \":\".join(re.findall(\"..\", rand_str))",
"def generate_mac():\n rand_str = generate_name(choices=\"0123456789abcdef\", length=12)\n return \":\".join(re.findall(\"..\", rand_str))",
"def isMAC(s):\n\n s = s.replace(':', '')\n if len(s) != 12: return 0\n for char in s:\n if re.compile('[a-zA-Z0-9]+').match(char) == None: return 0\n return 1",
"def send_mac(self) -> None:\n self.crypto.mac_gen(base64.b64decode(self.encrypted_data))\n\n iv = '' if self.crypto.iv is None else base64.b64encode(self.crypto.iv).decode()\n tag = '' if self.crypto.tag is None else base64.b64encode(self.crypto.tag).decode()\n nonce = '' if self.crypto.nonce is None else base64.b64encode(self.crypto.nonce).decode()\n\n message = {'type': 'MAC', 'data': base64.b64encode(self.crypto.mac).decode(), 'iv': iv, 'tag': tag, 'nonce': nonce}\n self._send(message)\n self.encrypted_data = ''",
"def test_computer_move():\n # Test if the computer will pick the move that will stop human player from\n # winning the game\n board = Board(*TEST_AGRU1)\n comp = Computer(board, COMP_DISK, HUMAN_DISK)\n comp.b.board = [\n [0, COMP_DISK, COMP_DISK, 0, 0],\n [HUMAN_DISK, HUMAN_DISK, HUMAN_DISK, 0, 0],\n ]\n comp.b.columns_list = [\n [COMP_DISK, HUMAN_DISK],\n [COMP_DISK, HUMAN_DISK],\n [HUMAN_DISK],\n [],\n [],\n ]\n comp.b.new_disk = (1, 0)\n assert comp.computer_move() == (MOVE1)\n\n # Test if the computer will pick the move that will make it win the game\n board = Board(*TEST_AGRU1)\n comp = Computer(board, COMP_DISK, HUMAN_DISK)\n comp.b.board = [\n [HUMAN_DISK, 0, HUMAN_DISK, HUMAN_DISK, 0],\n [HUMAN_DISK, 0, COMP_DISK, COMP_DISK, COMP_DISK],\n ]\n comp.b.columns_list = [\n [HUMAN_DISK, HUMAN_DISK],\n [],\n [HUMAN_DISK, COMP_DISK],\n [HUMAN_DISK, COMP_DISK],\n [],\n ]\n comp.b.new_disk = (MOVE2)\n assert comp.computer_move() == (1, 1)"
]
| [
"0.6172225",
"0.6131883",
"0.5994028",
"0.59358835",
"0.59154856",
"0.59146136",
"0.58304495",
"0.58213615",
"0.5789043",
"0.57643783",
"0.57119405",
"0.56152964",
"0.5574822",
"0.55708",
"0.555919",
"0.555294",
"0.55398077",
"0.54959714",
"0.54862624",
"0.54616684",
"0.54557806",
"0.54495794",
"0.54161423",
"0.541008",
"0.54066193",
"0.536457",
"0.536457",
"0.53557676",
"0.53549683",
"0.53462225"
]
| 0.7466434 | 0 |
Test remove one of multiple endpoints | def test_basic_remove_one_of_multiple_endpoint(self):
args = self.get_args()
config = self.create_config_file()
self.write_config_file(config, args)
execute_tool(args, test_mode=True)
time.sleep(2)
mac1 = '00:11:22:33:33:34'
ip1 = '3.4.3.5'
self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1')
mac2 = '00:11:22:33:33:35'
ip2 = '3.4.3.6'
self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app2', 'epg2')
time.sleep(2)
self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',
'intersite-testsuite-app1-epg1'))
self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',
'intersite-testsuite-app2-epg2'))
self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1')
self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',
'intersite-testsuite-app1-epg1'))
self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',
'intersite-testsuite-app2-epg2')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_basic_remove_one_of_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n self.assertFalse(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))",
"def test_basic_remove_one_of_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def test_basic_remove_one_of_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def test_basic_remove_endpoint(self):\n mac, ip = self.setup_with_endpoint()\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))",
"def _remove_endpoint(self, endpoint):\n logger.debug('')\n with self._endpoint_lock:\n count = len(self._endpoints)\n self._endpoints = [e for e in self._endpoints if e != endpoint]\n return (count != len(self._endpoints))",
"def test_admin_api_endpoints_removed(self) -> None:\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/registration_tokens\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/registration_tokens/new\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/registration_tokens/abcd\")\n self.expect_unrecognized(\"PUT\", \"/_synapse/admin/v1/registration_tokens/abcd\")\n self.expect_unrecognized(\n \"DELETE\", \"/_synapse/admin/v1/registration_tokens/abcd\"\n )\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/reset_password/foo\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/users/foo/login\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/register\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/register\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/users/foo/admin\")\n self.expect_unrecognized(\"PUT\", \"/_synapse/admin/v1/users/foo/admin\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/account_validity/validity\")",
"def test_delete():\n\n start_ln = len(routes.routes['DELETE'])\n\n @delete('/s/foo')\n def foo_route(request):\n return 200, ''\n\n for path, fn in routes.routes['DELETE']:\n if fn == foo_route:\n found = (path, fn)\n assert found\n routes.routes['DELETE'].remove(found)\n assert len(routes.routes['DELETE']) == start_ln",
"def test_delete_namespaced_route(self):\n pass",
"def delete_endpoint(EndpointName=None):\n pass",
"def remove_endpoint_from_sipserver(self, endpoint: str) -> None:",
"def test_delete_collection_namespaced_route(self):\n pass",
"def removeEndpoint(self, endpoint):\n # If endpoint not recognized, returns False, else True\n self.__lockobj.acquire()\n retval = False\n if endpoint in self.__endpoints.keys():\n del self.__endpoints[endpoint]\n retval = True\n self.__lockobj.acquire()\n return retval",
"def remove(self, *args, **kwargs):\n raise InvalidEndpointOperation(\n 'Not a valid operation on this endpoint.'\n )",
"def remove(self, *args, **kwargs):\n raise InvalidEndpointOperation(\n 'Not a valid operation on this endpoint.'\n )",
"def test_remove_one(self):\n pass",
"def test_remove(self):\n pass",
"def delete_handler(event, context):\n delete_endpoint_config(event)",
"def test_device_management_endpoints_removed(self) -> None:\n self.expect_unrecognized(\"POST\", \"/_matrix/client/v3/delete_devices\")\n self.expect_unrecognized(\"DELETE\", \"/_matrix/client/v3/devices/{DEVICE}\")",
"def delete_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if exists:\n self.endpoints.remove(exists)",
"def test_registration_endpoints_removed(self) -> None:\n self.expect_unrecognized(\n \"GET\", \"/_matrix/client/v1/register/m.login.registration_token/validity\"\n )\n # This is still available for AS registrations\n # self.expect_unrecognized(\"POST\", \"/_matrix/client/v3/register\")\n self.expect_unrecognized(\"GET\", \"/_matrix/client/v3/register/available\")\n self.expect_unrecognized(\n \"POST\", \"/_matrix/client/v3/register/email/requestToken\"\n )\n self.expect_unrecognized(\n \"POST\", \"/_matrix/client/v3/register/msisdn/requestToken\"\n )",
"def test_get():\n\n start_ln = len(routes.routes['GET'])\n\n @get('/s/foo')\n def foo_route(request):\n return 200, ''\n\n for path, fn in routes.routes['GET']:\n if fn == foo_route:\n found = (path, fn)\n assert found\n routes.routes['GET'].remove(found)\n assert len(routes.routes['GET']) == start_ln",
"def test_ipam_services_delete(self):\n pass",
"def delete_endpoint(self):\n logger.warning(f\"Deleting hosting endpoint '{self.endpoint_name}'...\")\n self._realtime_predictor.delete_endpoint()",
"def test_account_management_endpoints_removed(self) -> None:\n self.expect_unrecognized(\"POST\", \"/_matrix/client/v3/account/deactivate\")\n self.expect_unrecognized(\"POST\", \"/_matrix/client/v3/account/password\")\n self.expect_unrecognized(\n \"POST\", \"/_matrix/client/v3/account/password/email/requestToken\"\n )\n self.expect_unrecognized(\n \"POST\", \"/_matrix/client/v3/account/password/msisdn/requestToken\"\n )",
"def testListEndpoints(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Empty list\n status, response = self._http_get(\"/endpoints\")\n\n # Check result\n self.assertEqual(status, 200)\n self.assertListEqual(json.loads(response), [])\n\n # Register some endpoints\n svc_regs = []\n for _ in range(3):\n # Register a service\n svc_regs.append(\n context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"}))\n\n # Request the list of endpoints\n status, response = self._http_get(\"/endpoints\")\n\n # Check result\n self.assertEqual(status, 200)\n\n # Get all endpoints ID\n data = json.loads(response)\n local_uids = [endpoint.uid for endpoint in exporter.endpoints]\n servlet_uids = [item['uid'] for item in data]\n\n self.assertCountEqual(servlet_uids, local_uids)\n\n # Unregister them\n for svc_reg in svc_regs:\n # Unregister the service\n svc_reg.unregister()\n\n # Request the list of endpoints\n status, response = self._http_get(\"/endpoints\")\n\n # Check result\n self.assertEqual(status, 200)\n\n # Get all endpoints ID\n data = json.loads(response)\n local_uids = [endpoint.uid for endpoint in exporter.endpoints]\n servlet_uids = [item['uid'] for item in data]\n\n self.assertCountEqual(servlet_uids, local_uids)",
"def test_post():\n\n start_ln = len(routes.routes['POST'])\n\n @post('/s/foo')\n def foo_route(request):\n return 200, ''\n\n for path, fn in routes.routes['POST']:\n if fn == foo_route:\n found = (path, fn)\n assert found\n routes.routes['POST'].remove(found)\n assert len(routes.routes['POST']) == start_ln",
"def test_remove(self):\n\n message = {\"method\": \"remove\",\n \"params\": {\"elem\": self.container_to_remove}}\n response = yield self._get_response(message)\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"remove\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_remove\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertNotIn(container_name, containers.keys(),\n \"Container has found\")"
]
| [
"0.79431224",
"0.7918341",
"0.7908843",
"0.74060154",
"0.732542",
"0.72361135",
"0.71684533",
"0.7119523",
"0.6881436",
"0.684713",
"0.64847803",
"0.64341676",
"0.64324373",
"0.64073634",
"0.6357826",
"0.631801",
"0.631801",
"0.6308663",
"0.63035923",
"0.62522364",
"0.6231188",
"0.6194335",
"0.6169089",
"0.6163743",
"0.616104",
"0.6107964",
"0.6090907",
"0.60682315",
"0.6059462",
"0.5994048"
]
| 0.8064927 | 0 |
Test add the endpoint | def test_basic_add_endpoint(self):
args = self.get_args()
config = self.create_config_file()
self.write_config_file(config, args)
execute_tool(args, test_mode=True)
time.sleep(2)
mac = '00:11:22:33:33:33'
ip = '3.4.3.4'
self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
'l3out', 'intersite-testsuite-app-epg')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_basic_add_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n time.sleep(2)\n\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))",
"def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg2'))",
"def test_basic_add_endpoint(self):\n args = self.get_args()\n config = self.create_config_file('l3out1')\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n\n time.sleep(2)\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n config = self.create_config_file('l3out2')\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(4)\n\n self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))",
"def testEndpoint(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # With no UID given\n status, _ = self._http_get(\"/endpoint\")\n\n # Check result\n self.assertEqual(status, 404)\n\n # Register a service\n svc_reg = context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Request the details of the endpoint\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 200)\n\n # Check the content\n data = json.loads(response)\n for key, attr in (('uid', 'uid'), ('sender', 'framework'),\n ('name', 'name')):\n self.assertEqual(data[key], getattr(endpoint, attr))\n\n # Unregister it\n svc_reg.unregister()\n\n # Request the list of endpoints\n status, _ = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 404)",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()\n\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app2', 'epg2')\n mac3 = '00:11:22:33:33:36'\n ip3 = '3.4.3.7'\n self.add_endpoint(mac3, ip3, 'intersite-testsuite', 'app2', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app2-epg2'))\n self.assertTrue(self.verify_remote_site_has_entry(mac3, ip3, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app2-epg2'))",
"def test_add(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com'}, follow=True)\n self.assertShortURLCreated(response)",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file_before()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_before(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_before(mac2, ip2))\n\n config = self.create_config_file_after()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry_after(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_after(mac2, ip2))",
"def add_endpoint_hit(db_session, endpoint, time, test, version, job_id):\n endpoint_id = db_session.query(Endpoint.id).filter(Endpoint.name == endpoint).first().id\n test_id = db_session.query(Test.id).filter(Test.name == test).first().id\n db_session.add(TestEndpoint(endpoint_id=endpoint_id, test_id=test_id, duration=time, app_version=version,\n travis_job_id=job_id))",
"def testPostEndpoints(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Register an importer\n importer = ImportListener()\n context.register_service(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER,\n importer,\n {pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED:\n exporter.configs[0]})\n\n # Register a service\n context.register_service(\"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Get its representation\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n self.assertEqual(status, 200)\n\n # Change its UID and framework UID\n endpoint_data = json.loads(response)\n endpoint_data['uid'] = 'other-uid'\n endpoint_data['name'] = 'other-name'\n endpoint_data['sender'] = 'other-framework'\n\n # Send the 'discovered' event\n status, response = self._http_post(\"endpoints\",\n json.dumps([endpoint_data]))\n self.assertEqual(status, 200)\n self.assertEqual(response, 'OK')\n\n # Ensure that the service has been registered\n imported_endpoint = importer.endpoints[endpoint_data['uid']]\n self.assertEqual(imported_endpoint.uid, endpoint_data['uid'])\n self.assertEqual(imported_endpoint.framework, endpoint_data['sender'])\n self.assertEqual(imported_endpoint.name, endpoint_data['name'])",
"def test_add(self):\n query_string = [('x', 56),\n ('y', 56)]\n response = self.client.open('/addition-api/1.0.0/add',\n method='GET',\n query_string=query_string)\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file('l3out1')\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file('l3out2')\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))",
"def test_add_url(self):\n url = 'http://test.com/'\n info = self.api.add_url(url, tags=['asd'])\n self.assertEqual(info['value'], url)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_simple_request(self):\n urls = [\"https://api.omniture.com/admin/1.4/rest/\",\n \"https://api2.omniture.com/admin/1.4/rest/\",\n \"https://api3.omniture.com/admin/1.4/rest/\",\n \"https://api4.omniture.com/admin/1.4/rest/\",\n \"https://api5.omniture.com/admin/1.4/rest/\"]\n self.assertIn(self.analytics.request('Company', 'GetEndpoint'),urls, \"Company.GetEndpoint failed\" )",
"def test_basic_remove_endpoint(self):\n mac, ip = self.setup_with_endpoint()\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))",
"def test_admin_event_admin_add(self):\n response = self.client.get(\"/admin/appointment/event/add/\")\n self.assertEqual(response.status_code, 200)",
"def test_api(self):\n new_route = self.route.api(\"new\")\n assert new_route != self.route\n assert new_route.route[\"api\"] == \"new\"",
"def add_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if not exists:\n self.endpoints.append((endpoint, now()))",
"def test_add_virtual_service(self):\n pass",
"def test_endpoint_leading_slash(self, method):\n self._register_uri(method,\n uri=\"http://test.example.com/%s\" % self.test_endpoint)\n\n self.client = trovebox.Trovebox(host=\"http://test.example.com\",\n **self.test_oauth)\n response = GetOrPost(self.client, method).call(\"/\" + self.test_endpoint)\n self.assertIn(\"OAuth\", self._last_request().headers[\"authorization\"])\n self.assertEqual(response, self.test_data)\n self.assertEqual(self.client.last_url,\n \"http://test.example.com/%s\" % self.test_endpoint)\n self.assertEqual(self.client.last_response.json(), self.test_data)",
"def setup_with_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg1'))\n\n time.sleep(2)\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n return mac, ip",
"def testGrabEndpoint(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Register a service\n svc_reg = context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Tell the servlet to get this endpoint\n grabbed_endpoint = self.servlet.grab_endpoint(\"localhost\", self.port,\n self.servlet_path,\n endpoint.uid)\n\n # Check endpoint values\n self.assertIsNot(grabbed_endpoint, endpoint)\n self.assertEqual(grabbed_endpoint, endpoint)\n\n # Unregister the service\n svc_reg.unregister()\n\n # Check the result\n self.assertIsNone(self.servlet.grab_endpoint(\"localhost\", self.port,\n self.servlet_path,\n endpoint.uid))\n\n # Test on an invalid host/port\n self.assertIsNone(self.servlet.grab_endpoint(\"localhost\", -1,\n self.servlet_path,\n endpoint.uid))",
"def add_endpoint_to_sipserver(self, endpoint: str, password: str) -> None:",
"def test_basic_remove_one_of_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def test_basic_remove_one_of_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def add_endpoint(self, endpoint):\n name = endpoint.get_name()\n self._calls[name] = endpoint",
"def test_url_endpoint(self):\n url = url_for('create_user')\n assert url == '/users/create/'",
"def test_add_url_rule():\n\n application_services.add_url_rule('/tests/application/rule', view_func=mock_view_function,\n methods=HTTPMethodEnum.GET)"
]
| [
"0.80474126",
"0.75272715",
"0.75091356",
"0.7458578",
"0.7254947",
"0.72521925",
"0.71781117",
"0.70729285",
"0.7032229",
"0.70025855",
"0.6964659",
"0.67963433",
"0.67228585",
"0.6685355",
"0.6635145",
"0.6612205",
"0.6582083",
"0.65518564",
"0.6551546",
"0.6527288",
"0.65100354",
"0.6502431",
"0.64623255",
"0.64421624",
"0.634334",
"0.63421",
"0.6333676",
"0.63220567",
"0.6314105",
"0.6306727"
]
| 0.79640484 | 1 |
Test removing the endpoint | def test_basic_remove_endpoint(self):
args = self.get_args()
config = self.create_config_file()
self.write_config_file(config, args)
execute_tool(args, test_mode=True)
time.sleep(2)
mac = '00:11:22:33:33:33'
ip = '3.4.3.4'
self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
'l3out', 'intersite-testsuite-app-epg'))
self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')
time.sleep(2)
self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
'l3out', 'intersite-testsuite-app-epg')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_basic_remove_endpoint(self):\n mac, ip = self.setup_with_endpoint()\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))",
"def test_basic_remove_one_of_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def test_basic_remove_one_of_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def test_basic_remove_one_of_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app2', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app1-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app2-epg2'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app1-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app2-epg2'))",
"def delete_endpoint(self):\n logger.warning(f\"Deleting hosting endpoint '{self.endpoint_name}'...\")\n self._realtime_predictor.delete_endpoint()",
"def remove_endpoint_from_sipserver(self, endpoint: str) -> None:",
"def delete_endpoint(EndpointName=None):\n pass",
"def _remove_endpoint(self, endpoint):\n logger.debug('')\n with self._endpoint_lock:\n count = len(self._endpoints)\n self._endpoints = [e for e in self._endpoints if e != endpoint]\n return (count != len(self._endpoints))",
"def test_basic_remove_one_of_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n self.assertFalse(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))",
"def delete_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if exists:\n self.endpoints.remove(exists)",
"def test_delete_namespaced_route(self):\n pass",
"def removeEndpoint(self, endpoint):\n # If endpoint not recognized, returns False, else True\n self.__lockobj.acquire()\n retval = False\n if endpoint in self.__endpoints.keys():\n del self.__endpoints[endpoint]\n retval = True\n self.__lockobj.acquire()\n return retval",
"def delete_handler(event, context):\n delete_endpoint_config(event)",
"def test_admin_api_endpoints_removed(self) -> None:\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/registration_tokens\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/registration_tokens/new\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/registration_tokens/abcd\")\n self.expect_unrecognized(\"PUT\", \"/_synapse/admin/v1/registration_tokens/abcd\")\n self.expect_unrecognized(\n \"DELETE\", \"/_synapse/admin/v1/registration_tokens/abcd\"\n )\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/reset_password/foo\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/users/foo/login\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/register\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/register\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/users/foo/admin\")\n self.expect_unrecognized(\"PUT\", \"/_synapse/admin/v1/users/foo/admin\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/account_validity/validity\")",
"def remove(self, *args, **kwargs):\n raise InvalidEndpointOperation(\n 'Not a valid operation on this endpoint.'\n )",
"def remove(self, *args, **kwargs):\n raise InvalidEndpointOperation(\n 'Not a valid operation on this endpoint.'\n )",
"def access_gemini_url_delete_method(context, endpoint):\n url = urljoin(context.gemini_api_url, endpoint)\n context.response = requests.delete(url)",
"def delete_endpoint_config(EndpointConfigName=None):\n pass",
"def test_no_endpoint(self):\n self.os_fixture.v3_token.remove_service('monitoring')\n conn = self._get_conn()\n # Monasca is not in the service catalog\n self.assertRaises(\n ks_exc.catalog.EndpointNotFound, getattr, conn, 'monitoring'\n )",
"def test_delete():\n\n start_ln = len(routes.routes['DELETE'])\n\n @delete('/s/foo')\n def foo_route(request):\n return 200, ''\n\n for path, fn in routes.routes['DELETE']:\n if fn == foo_route:\n found = (path, fn)\n assert found\n routes.routes['DELETE'].remove(found)\n assert len(routes.routes['DELETE']) == start_ln",
"def test_remove(self):\n pass",
"def test_delete_collection_namespaced_route(self):\n pass",
"def test_delete_deployment(self):\n pass",
"def test_DELETE(self):\n if not self.url:\n return\n response = self.client.delete(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])",
"def delete_endpoint(self, endpoint_id):\n raise exception.NotImplemented() # pragma: no cover",
"def delete_dev_endpoint(self):\n self.glue_engine.delete_dev_endpoint(EndpointName=self.dev_endpoint_name)",
"def test_account_management_endpoints_removed(self) -> None:\n self.expect_unrecognized(\"POST\", \"/_matrix/client/v3/account/deactivate\")\n self.expect_unrecognized(\"POST\", \"/_matrix/client/v3/account/password\")\n self.expect_unrecognized(\n \"POST\", \"/_matrix/client/v3/account/password/email/requestToken\"\n )\n self.expect_unrecognized(\n \"POST\", \"/_matrix/client/v3/account/password/msisdn/requestToken\"\n )",
"def test_delete_virtual_service(self):\n pass"
]
| [
"0.828536",
"0.79282093",
"0.7899661",
"0.7463046",
"0.74537635",
"0.74340045",
"0.7390489",
"0.7344435",
"0.73168343",
"0.7312396",
"0.7302149",
"0.71278733",
"0.70067316",
"0.6982379",
"0.6963426",
"0.68355227",
"0.67464614",
"0.67464614",
"0.6661923",
"0.6657329",
"0.66452014",
"0.65968573",
"0.65892345",
"0.6548691",
"0.65333223",
"0.6507727",
"0.65060407",
"0.65021616",
"0.6488165",
"0.64785284"
]
| 0.79375327 | 1 |
Create the export policy | def create_export_policy():
config = {
"export": {
"tenant": "intersite-testsuite",
"app": "app",
"epg": "epg",
"remote_epg": "intersite-testsuite-app-epg",
"remote_sites": [
{
"site": {
"name": "Site2",
"interfaces": [
{
"l3out": {
"name": "l3out",
"tenant": "intersite-testsuite"
}
}
]
}
}
]
}
}
return config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_export_policy(l3out_name):\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": l3out_name,\n \"tenant\": \"intersite-testsuite\"\n }\n }\n ]\n }\n }\n ]\n }\n }\n return export_policy",
"def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:\n return dict(super(cls, cls).get_export_policy(), **{\n 'rules': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'rule_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'default_value': base_models.EXPORT_POLICY.NOT_APPLICABLE\n })",
"def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:\n return dict(super(cls, cls).get_export_policy(), **{\n 'sender_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'sender_email': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'recipient_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'intent': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'subject': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'html_body': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'sent_datetime': base_models.EXPORT_POLICY.NOT_APPLICABLE\n })",
"def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:\n return dict(super(cls, cls).get_export_policy(), **{\n 'recipient_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'recipient_email': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'sender_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'sender_email': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'intent': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'subject': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'html_body': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'sent_datetime': base_models.EXPORT_POLICY.NOT_APPLICABLE,\n 'email_hash': base_models.EXPORT_POLICY.NOT_APPLICABLE\n })",
"def export_policy(self):\n return self._export_policy",
"def export_policy_model(\n self,\n export_dir: str,\n policy_id: PolicyID = DEFAULT_POLICY_ID,\n onnx: Optional[int] = None,\n ) -> None:\n self.get_policy(policy_id).export_model(export_dir, onnx)",
"def get_export_policy(cls) -> Dict[str, base_models.EXPORT_POLICY]:\n return dict(super(cls, cls).get_export_policy(), **{\n 'value': base_models.EXPORT_POLICY.NOT_APPLICABLE\n })",
"def export_policy(self, path: Union[bytes, str]) -> str:\n path = _to_bytes_or_null(path)\n policy = ffi.new(\"char **\")\n ret = lib.Fapi_ExportPolicy(self._ctx, path, policy)\n _chkrc(ret)\n return ffi.string(_get_dptr(policy, lib.Fapi_Free)).decode()",
"def test_create_namespaced_policy(self):\n pass",
"def create_policy_request():\n return {\n 'public_key':\n r'BBLewg4VqLR38b38daE7Fj\\/uhr543uGrEpyoPFgmFZK6EZ9g2XdK\\/i65RrSJ6sJ96aXD3DJHY3Me2GJQO9\\/ifjE=',\n 'label':\n 'Integration Test Policy',\n 'operations': [{\n 'sensor_id': 10,\n 'action': 'SHARE',\n }, {\n 'sensor_id': 53,\n 'action': 'BIN',\n 'bins': [30.0, 60.0, 90.0]\n }, {\n 'sensor_id': 55,\n 'action': 'MOVING_AVG',\n 'interval': 300\n }]\n }",
"def create_policy(env, policy_type, policy_weights_file=None):\n input_size = env.observation_space.shape[0]\n output_size = env.action_space.shape[0]\n action_low = env.action_space.low\n action_high = env.action_space.high\n policy = policy_type(input_size=input_size,\n output_size=output_size,\n action_high=action_high,\n action_low=action_low)\n if policy_weights_file:\n policy.load_model(policy_weights_file)\n return policy",
"def create_policy(api_url, project_id, username, token, update_flag, validation_messages, json_files, scope, csv_flag,\n input_list):\n try:\n # policy loader log folder exists check\n log_path = '/opt/core/cache/tmp/policyloader_logs/'\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')\n log_filename = 'policyloader_' + timestamp\n my_file = open(log_path + log_filename, \"a\")\n\n # Print and write the log messages\n for message in validation_messages:\n my_file.write(\"%s\\n\" % message)\n\n success_policies = 0\n\n for metadata in json_files:\n # metadata Read\n json_file = open(metadata, 'r')\n file_name = list(metadata.split(\"/\"))\n file_name = file_name[-1]\n req_body = json.dumps(json_file.read()).encode('utf-8')\n req_body = json.loads(req_body)\n json_file.close()\n\n req_body = json.loads(req_body)\n if csv_flag:\n if input_list and req_body.get(\"name\") not in input_list:\n continue\n\n if scope != 'default':\n req_body['scope'] = scope\n\n req_body = json.dumps(req_body).encode('utf-8')\n\n url = \"%s%s/%s\" % (api_url, project_id, 'policies')\n http_client = httplib2.Http()\n headers = {\"X-Auth-User\": username, \"X-Auth-Token\": token}\n\n # call the create policy API\n resp, content = http_client.request(url, method=\"POST\", body=req_body, headers=headers)\n content = json.loads(content)\n\n if resp[\"status\"] == \"200\":\n success_policies += 1\n log_msg = \"%s%s%s - %s\" % (file_name[:-5], \" ==> status:\", content[\"status\"], content[\"message\"])\n sys.stdout.write(\"%s\\n\" % log_msg)\n elif resp[\"status\"] == \"400\" and update_flag:\n policy_id = None\n url = \"%s%s/%s\" % (api_url, project_id, 'policies')\n list_resp, list_content = http_client.request(url, method=\"GET\", headers=headers)\n list_content = json.loads(list_content)\n if list_resp[\"status\"] == \"200\":\n policy_list = list_content['data']['policies']\n for policy in policy_list:\n if policy['name'] == json.loads(req_body)['name']:\n policy_id = policy[\"id\"]\n url = \"%s%s/%s/%s\" % (api_url, project_id, 'policies', policy_id)\n # call the update policy API\n update_resp, update_content = http_client.request(url, method=\"PUT\", body=req_body,\n headers=headers)\n update_content = json.loads(update_content)\n log_msg = \"%s%s%s - %s\" % (file_name[:-5], \" ==> status:\", update_content[\"status\"],\n update_content[\"message\"])\n sys.stdout.write(\"%s\\n\" % log_msg)\n if update_resp[\"status\"] == \"200\":\n success_policies += 1\n break\n if not policy_id:\n policy_url = \"%s%s/%s?is_temp=true\" % (api_url, project_id, 'policies')\n list_resp, list_content = http_client.request(policy_url, method=\"GET\", headers=headers)\n list_content = json.loads(list_content)\n if list_resp[\"status\"] == \"200\":\n temp_policy_list = list_content['data']['policies']\n for policy in temp_policy_list:\n if policy['name'] == json.loads(req_body)['name']:\n # call the Update policy API\n policy_id = policy[\"id\"]\n url = \"%s%s/%s/%s\" % (api_url, project_id, 'policies', policy_id)\n update_resp, update_content = \\\n http_client.request(url, method=\"PUT\", body=req_body, headers=headers)\n update_content = json.loads(update_content)\n log_msg = \"%s%s%s - %s\" % (file_name[:-5], \" ==> status:\", update_content[\"status\"],\n update_content[\"message\"])\n sys.stdout.write(\"%s\\n\" % log_msg)\n if update_resp[\"status\"] == \"200\":\n success_policies += 1\n break\n if not policy_id:\n log_msg = \"%s%s%s - 
%s\" % (file_name[:-5], \" ==> status:\", content[\"status\"], content[\"message\"])\n sys.stderr.write(\"%s\\n\" % log_msg)\n my_file.write(\"%s\\n\" % log_msg)\n else:\n log_msg = \"%s%s%s - %s\" % (file_name[:-5], \" ==> status:\", content[\"status\"], content[\"message\"])\n sys.stderr.write(\"%s\\n\" % log_msg)\n my_file.write(\"%s\\n\" % log_msg)\n\n if not csv_flag:\n total_policies = len(json_files)\n failed_policies = total_policies - success_policies\n else:\n total_policies = len(input_list)\n failed_policies = total_policies - success_policies\n\n sys.stdout.write('Total Policies: ' + str(total_policies) + \"\\n\")\n sys.stdout.write(\"Success Policies: \" + str(success_policies) + \"\\n\")\n sys.stdout.write(\"Failed Policies: \" + str(failed_policies) + \"\\n\")\n\n my_file.write('Total Policies: ' + str(total_policies) + \"\\n\")\n my_file.write(\"Failed Policies: \" + str(failed_policies) + \"\\n\")\n my_file.close()\n\n except Exception as e:\n sys.stdout.write(e.message)\n exit(1)",
"def enforce_export_policy(\n self,\n policy_config: dict,\n filename: str,\n tenant: str,\n size: int,\n mime_type: str,\n ) -> bool:\n status = False # until proven otherwise\n try:\n file = os.path.basename(filename)\n check_filename(file, disallowed_start_chars=options.start_chars)\n except Exception as e:\n logging.error(f\"Illegal export filename: {file}\")\n return status\n try:\n any_path_islink(filename, opts=options)\n except Exception as e:\n logging.error(\n f\"Symlink in part of path '{filename}' requested by {self.requestor}: {str(e)}\"\n )\n return status\n if tenant in policy_config.keys():\n policy = policy_config[tenant]\n else:\n policy = policy_config[\"default\"]\n if not policy[\"enabled\"]:\n status = True\n return status\n if \"*\" in policy[\"allowed_mime_types\"]:\n status = True\n else:\n status = True if mime_type in policy[\"allowed_mime_types\"] else False\n if not status:\n logging.error(f\"not allowed to export file with MIME type: {mime_type}\")\n if policy[\"max_size\"] and size > policy[\"max_size\"]:\n logging.error(\n f\"{self.requestor} tried to export a file exceeding the maximum size limit\"\n )\n status = False\n return status",
"def cleanup_policy_create(ctx: click.Context, **kwargs):\n # TODO: use a click type for this check?\n criteria_keys = {'downloaded', 'updated', 'regex'}\n util.move_to_key(kwargs, 'criteria', criteria_keys)\n\n util.rename_keys(kwargs['criteria'], {\n 'downloaded': 'lastDownloaded',\n 'updated': 'lastBlobUpdated',\n })\n\n subcommand_cleanup_policy.cmd_create(ctx.obj, **kwargs)",
"def create_policy(self, fn_inputs):\n\n # determine if the policy is already in place\n response, err_msg = self._get_policy_by_sha256(fn_inputs.get('reaqta_sha256'))\n if err_msg:\n return {}, err_msg\n\n policy_info = response.json()\n if policy_info.get('result'):\n return {}, 'A policy already exists for this file hash: {0}. <a href=\"{1}\" target=\"blank\">{1}</a>'.format(\n fn_inputs.get('reaqta_sha256'),\n self.make_linkback_url(policy_info['result'][0]['id'], POLICY_DETAILS))\n\n params = {\n \"sha256\": fn_inputs.get('reaqta_sha256'),\n \"title\": fn_inputs.get('reaqta_policy_title', ''),\n \"description\": fn_inputs.get('reaqta_policy_description', ''),\n \"disable\": not fn_inputs.get('reaqta_policy_enabled', True),\n \"block\": fn_inputs.get('reaqta_policy_block', False),\n \"enabledGroups\": [],\n \"disabledGroups\": []\n }\n\n # collect all the group names and find the groupIds\n if fn_inputs.get('reaqta_policy_included_groups'):\n group_name_list = [ group.strip() for group in fn_inputs.get('reaqta_policy_included_groups', \"\").split(',') ]\n group_id_list = self.get_group_ids(group_name_list)\n if group_id_list:\n params['enabledGroups'] = group_id_list\n\n if fn_inputs.get('reaqta_policy_excluded_groups'):\n group_name_list = [ group.strip() for group in fn_inputs.get('reaqta_policy_excluded_groups', \"\").split(',') ]\n group_id_list = self.get_group_ids(group_name_list)\n if group_id_list:\n params['disabledGroups'] = group_id_list\n\n LOG.debug(\"create_policy: %s\", params)\n url = urljoin(POLICY_URI, \"trigger-on-process-hash\")\n return self.api_call(\"POST\", url, params)",
"def __init__(__self__, *,\n rules: Optional[pulumi.Input[Sequence[pulumi.Input['ExportPolicyRuleArgs']]]] = None):\n if rules is not None:\n pulumi.set(__self__, \"rules\", rules)",
"def _save_policy(policy, file_name, output_dir):\n assert os.path.isdir(output_dir)\n with open(os.path.join(output_dir, file_name + '.pickle'), 'wb') as file:\n pickle.dump(policy, file)",
"def create_export_object(xform, export_type, options):\n export_options = get_export_options(options)\n return Export(\n xform=xform,\n export_type=export_type,\n options=export_options,\n created_on=timezone.now(),\n )",
"def __create_policy_def(self):\n\n self.logger.info(f\"Creating policy definition {self.policy_id}\")\n policy_definition_res = self.interactor.put_policy_definition(\n self.policy_id, self.policy_json\n )\n\n # definition was not created, report and abort\n if policy_definition_res.status_code != 201:\n self.output_res[\"result\"][\"status\"] = \"ERROR\"\n self.output_res[\"result\"][\n \"message\"\n ] = f\"Policy definition {self.policy_id} could not be created - {policy_definition_res.status_code}: {policy_definition_res.text}\"\n\n self.running_evaluations[self.eval_id] = self.output_res\n return False\n\n return True",
"def build(self):\n if ((self.allowMethods is None or len(self.allowMethods) == 0) and\n (self.denyMethods is None or len(self.denyMethods) == 0)):\n raise NameError(\"No statements defined for the policy\")\n\n policy = {\n 'principalId': self.principalId,\n 'policyDocument': {\n 'Version': self.version,\n 'Statement': []\n }\n }\n\n policy['policyDocument']['Statement'].extend(\n self._getStatementForEffect(\"Allow\", self.allowMethods))\n policy['policyDocument']['Statement'].extend(\n self._getStatementForEffect(\"Deny\", self.denyMethods))\n\n return policy",
"def create_policy(policystore_url, create_policy_request, verbose):\n\n if verbose:\n logging.info('Creating policy')\n pprint.pprint(create_policy_request)\n\n create_url = policystore_url + POLICYSTORE_PREFIX + 'CreateEntitlementPolicy'\n\n r = requests.post(\n create_url, headers=headers(), json=create_policy_request)\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n\n sys.exit('Failed to create policy')\n\n resp = r.json()\n\n logging.info(\n f'SUCCESS: Created policy - ID: {resp[\"policy_id\"]}, Token: {resp[\"token\"]}'\n )\n\n return resp",
"def export_policy_checkpoint(\n self,\n export_dir: str,\n policy_id: PolicyID = DEFAULT_POLICY_ID,\n ) -> None:\n policy = self.get_policy(policy_id)\n if policy is None:\n raise KeyError(f\"Policy with ID {policy_id} not found in Algorithm!\")\n policy.export_checkpoint(export_dir)",
"def policy(agent):",
"def test_create_policy_for_all_namespaces(self):\n pass",
"def _schedule_export_config(handle, descr, sched_name,\n max_bkup_files, remote_enabled,\n protocol, hostname, file_path,\n username, password,\n parent_mo_or_dn):\n from ..mometa.mgmt.MgmtCfgExportPolicy import MgmtCfgExportPolicy, \\\n MgmtCfgExportPolicyConsts\n\n if remote_enabled:\n if not file_path:\n raise UcscValidationException(\"Missing file_path argument\")\n\n _validate_remote_host_args(protocol, hostname, username, password)\n proto = protocol\n host = hostname\n remote_file = file_path\n user = username\n pwd = password\n else:\n proto = MgmtCfgExportPolicyConsts.PROTO_NFS_COPY\n host = \"\"\n remote_file = \" \"\n user = \"\"\n pwd = \"\"\n\n cfg_export_pol = MgmtCfgExportPolicy(\n parent_mo_or_dn=parent_mo_or_dn,\n descr=descr,\n admin_state=MgmtCfgExportPolicyConsts.ADMIN_STATE_ENABLE,\n sched_name=sched_name,\n max_files=str(max_bkup_files),\n proto=proto,\n host=host,\n remote_file=remote_file,\n user=user,\n pwd=pwd,\n name=\"default\")\n\n handle.add_mo(cfg_export_pol, modify_present=True)\n handle.commit()\n\n return cfg_export_pol",
"def archiving_policy(self):\n raise RuntimeError(\"archiving_policy is not back compat\")",
"def test_create_cluster_policy(self):\n pass",
"def policy_create(request, **kwargs):\n body = {'policy': kwargs}\n policy = neutronclient(request).create_qos_policy(body=body).get('policy')\n return QoSPolicy(policy)",
"def __assign_policy_def(self):\n\n self.logger.info(\n f\"Creating policy assignment of definition {self.policy_id} to assignment {self.assignment_id}\"\n )\n policy_assignment_res = self.interactor.put_policy_assignment(\n self.policy_id, self.assignment_id\n )\n\n if policy_assignment_res.status_code != 201:\n self.output_res[\"result\"][\"status\"] = \"ERROR\"\n self.output_res[\"result\"][\n \"message\"\n ] = f\"Policy assignment {self.assignment_id} could not be created - {policy_assignment_res.status_code}: {policy_assignment_res.text}\"\n\n self.running_evaluations[self.eval_id] = self.output_res\n return False\n\n return True",
"def test_create_hyperflex_ext_fc_storage_policy(self):\n pass"
]
| [
"0.7591464",
"0.6972289",
"0.67560166",
"0.6715468",
"0.666488",
"0.66225404",
"0.6512321",
"0.59530276",
"0.59368205",
"0.58883494",
"0.5840365",
"0.5751171",
"0.57272696",
"0.5679028",
"0.56665695",
"0.56465685",
"0.56410825",
"0.56036294",
"0.56035244",
"0.5578853",
"0.5486981",
"0.5460568",
"0.5414942",
"0.5380686",
"0.5324405",
"0.5320846",
"0.5315223",
"0.53092074",
"0.52974933",
"0.52540076"
]
| 0.7640657 | 0 |
Test adding the endpoint | def test_basic_add_endpoint(self):
args = self.get_args()
config = self.create_config_file()
self.write_config_file(config, args)
collector = execute_tool(args, test_mode=True)
time.sleep(2)
config['config'].append(self.create_export_policy())
self.write_config_file(config, args)
collector.reload_config()
time.sleep(2)
mac = '00:11:22:33:33:33'
ip = '3.4.3.4'
self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',
'intersite-testsuite-app-epg')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_basic_add_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n time.sleep(2)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def test_basic_add_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg2'))",
"def test_basic_add_endpoint(self):\n args = self.get_args()\n config = self.create_config_file('l3out1')\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n\n time.sleep(2)\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n config = self.create_config_file('l3out2')\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(4)\n\n self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))",
"def testEndpoint(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # With no UID given\n status, _ = self._http_get(\"/endpoint\")\n\n # Check result\n self.assertEqual(status, 404)\n\n # Register a service\n svc_reg = context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Request the details of the endpoint\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 200)\n\n # Check the content\n data = json.loads(response)\n for key, attr in (('uid', 'uid'), ('sender', 'framework'),\n ('name', 'name')):\n self.assertEqual(data[key], getattr(endpoint, attr))\n\n # Unregister it\n svc_reg.unregister()\n\n # Request the list of endpoints\n status, _ = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n\n # Check result\n self.assertEqual(status, 404)",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))\n self.assertTrue(self.verify_remote_site_has_entry_with_provided_contract(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg', 'contract-1'))",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()\n\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app1', 'epg1')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app2', 'epg2')\n mac3 = '00:11:22:33:33:36'\n ip3 = '3.4.3.7'\n self.add_endpoint(mac3, ip3, 'intersite-testsuite', 'app2', 'epg2')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app2-epg2'))\n self.assertTrue(self.verify_remote_site_has_entry(mac3, ip3, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app2-epg2'))",
"def testPostEndpoints(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Register an importer\n importer = ImportListener()\n context.register_service(pelix.remote.SERVICE_IMPORT_ENDPOINT_LISTENER,\n importer,\n {pelix.remote.PROP_REMOTE_CONFIGS_SUPPORTED:\n exporter.configs[0]})\n\n # Register a service\n context.register_service(\"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Get its representation\n status, response = self._http_get(\"/endpoint/{0}\".format(endpoint.uid))\n self.assertEqual(status, 200)\n\n # Change its UID and framework UID\n endpoint_data = json.loads(response)\n endpoint_data['uid'] = 'other-uid'\n endpoint_data['name'] = 'other-name'\n endpoint_data['sender'] = 'other-framework'\n\n # Send the 'discovered' event\n status, response = self._http_post(\"endpoints\",\n json.dumps([endpoint_data]))\n self.assertEqual(status, 200)\n self.assertEqual(response, 'OK')\n\n # Ensure that the service has been registered\n imported_endpoint = importer.endpoints[endpoint_data['uid']]\n self.assertEqual(imported_endpoint.uid, endpoint_data['uid'])\n self.assertEqual(imported_endpoint.framework, endpoint_data['sender'])\n self.assertEqual(imported_endpoint.name, endpoint_data['name'])",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file_before()\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry_before(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_before(mac2, ip2))\n\n config = self.create_config_file_after()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry_after(mac1, ip1))\n self.assertTrue(self.verify_remote_site_has_entry_after(mac2, ip2))",
"def add_endpoint_hit(db_session, endpoint, time, test, version, job_id):\n endpoint_id = db_session.query(Endpoint.id).filter(Endpoint.name == endpoint).first().id\n test_id = db_session.query(Test.id).filter(Test.name == test).first().id\n db_session.add(TestEndpoint(endpoint_id=endpoint_id, test_id=test_id, duration=time, app_version=version,\n travis_job_id=job_id))",
"def test_simple_request(self):\n urls = [\"https://api.omniture.com/admin/1.4/rest/\",\n \"https://api2.omniture.com/admin/1.4/rest/\",\n \"https://api3.omniture.com/admin/1.4/rest/\",\n \"https://api4.omniture.com/admin/1.4/rest/\",\n \"https://api5.omniture.com/admin/1.4/rest/\"]\n self.assertIn(self.analytics.request('Company', 'GetEndpoint'),urls, \"Company.GetEndpoint failed\" )",
"def test_add(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com'}, follow=True)\n self.assertShortURLCreated(response)",
"def test_basic_remove_endpoint(self):\n mac, ip = self.setup_with_endpoint()\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def test_basic_add_multiple_endpoint(self):\n args = self.get_args()\n config = self.create_config_file('l3out1')\n self.write_config_file(config, args)\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac1 = '00:11:22:33:33:34'\n ip1 = '3.4.3.5'\n self.add_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out1', 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file('l3out2')\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out2', 'intersite-testsuite-app-epg'))",
"def test_endpoint_leading_slash(self, method):\n self._register_uri(method,\n uri=\"http://test.example.com/%s\" % self.test_endpoint)\n\n self.client = trovebox.Trovebox(host=\"http://test.example.com\",\n **self.test_oauth)\n response = GetOrPost(self.client, method).call(\"/\" + self.test_endpoint)\n self.assertIn(\"OAuth\", self._last_request().headers[\"authorization\"])\n self.assertEqual(response, self.test_data)\n self.assertEqual(self.client.last_url,\n \"http://test.example.com/%s\" % self.test_endpoint)\n self.assertEqual(self.client.last_response.json(), self.test_data)",
"def testGrabEndpoint(self):\n # Register an exporter\n context = self.framework.get_bundle_context()\n exporter = Exporter(context)\n context.register_service(pelix.remote.SERVICE_EXPORT_PROVIDER,\n exporter, {})\n\n # Register a service\n svc_reg = context.register_service(\n \"sample.spec\", object(),\n {pelix.remote.PROP_EXPORTED_INTERFACES: \"*\"})\n\n # Get the endpoint bean\n endpoint = exporter.endpoints[-1]\n\n # Tell the servlet to get this endpoint\n grabbed_endpoint = self.servlet.grab_endpoint(\"localhost\", self.port,\n self.servlet_path,\n endpoint.uid)\n\n # Check endpoint values\n self.assertIsNot(grabbed_endpoint, endpoint)\n self.assertEqual(grabbed_endpoint, endpoint)\n\n # Unregister the service\n svc_reg.unregister()\n\n # Check the result\n self.assertIsNone(self.servlet.grab_endpoint(\"localhost\", self.port,\n self.servlet_path,\n endpoint.uid))\n\n # Test on an invalid host/port\n self.assertIsNone(self.servlet.grab_endpoint(\"localhost\", -1,\n self.servlet_path,\n endpoint.uid))",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))",
"def test_add(self):\n query_string = [('x', 56),\n ('y', 56)]\n response = self.client.open('/addition-api/1.0.0/add',\n method='GET',\n query_string=query_string)\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def _endpointTest(self, service):\n options = Options()\n options.parseOptions([\"--\" + service, \"tcp:1234\"])\n self.assertEqual(len(options[service]), 1)\n self.assertIsInstance(options[service][0], endpoints.TCP4ServerEndpoint)",
"def test_api(self):\n new_route = self.route.api(\"new\")\n assert new_route != self.route\n assert new_route.route[\"api\"] == \"new\"",
"def setup_with_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg1'))\n\n time.sleep(2)\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n return mac, ip",
"def test_add_url(self):\n url = 'http://test.com/'\n info = self.api.add_url(url, tags=['asd'])\n self.assertEqual(info['value'], url)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def test_missing_endpoint(self, req):\n req.side_effect = ks_exc.EndpointNotFound()\n self.client._get_resource_provider(self.context, \"fake\")\n\n # reset the call count to demonstrate that future calls still\n # work\n req.reset_mock()\n self.client._get_resource_provider(self.context, \"fake\")\n self.assertTrue(req.called)",
"def _access_endpoint(self, endpoint, args, status_code, msg):\r\n url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n if endpoint in ['send_email']:\r\n response = self.client.post(url, args)\r\n else:\r\n response = self.client.get(url, args)\r\n self.assertEqual(\r\n response.status_code,\r\n status_code,\r\n msg=msg\r\n )",
"def test_basic_remove_one_of_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def test_basic_remove_one_of_multiple_endpoint(self):\n mac1, ip1 = self.setup_with_endpoint()\n mac2 = '00:11:22:33:33:35'\n ip2 = '3.4.3.6'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite', 'app', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n\n self.remove_endpoint(mac1, ip1, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac1, ip1, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def test_url_endpoint(self):\n url = url_for('create_user')\n assert url == '/users/create/'",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))",
"def test_add_virtual_service(self):\n pass",
"def test_config_endpoint(self):\n endpoint = settings.CONFIG_ENDPOINT\n access_token = config.ACCESS_TOKEN\n self.assertValidGetOicJsonEndpoint(endpoint, access_token)"
]
| [
"0.7961768",
"0.7546528",
"0.7531428",
"0.7487113",
"0.74692094",
"0.72555715",
"0.7200734",
"0.703249",
"0.7005394",
"0.69417787",
"0.6913453",
"0.6858982",
"0.6754575",
"0.6744913",
"0.6741133",
"0.6704777",
"0.6673568",
"0.6602921",
"0.6590168",
"0.6546669",
"0.6535335",
"0.6521049",
"0.650399",
"0.6486065",
"0.64621663",
"0.64576966",
"0.64575726",
"0.6440636",
"0.64266473",
"0.6423758"
]
| 0.80598587 | 0 |
Create a configuration with different EPGs | def create_diff_epg_config_file(self):
config = self.create_site_config()
export_policy = {
"export": {
"tenant": "intersite-testsuite",
"app": "app",
"epg": "epg",
"remote_epg": "intersite-testsuite-app-epg2",
"remote_sites": [
{
"site": {
"name": "Site2",
"interfaces": [
{
"l3out": {
"name": "l3out",
"tenant": "intersite-testsuite"
}
}
]
}
}
]
}
}
config['config'].append(export_policy)
return config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_configuration(EngineType=None, EngineVersion=None, Name=None, Tags=None):\n pass",
"def create_config(self) -> None:\n pass",
"def create_config(self) -> None:\n pass",
"def _gen_config():\n cfg = {\"frontends\": {}, \"backends\": {}}\n for machine in Machine.objects(\n monitoring__hasmonitoring=True,\n ):\n frontend, backend = _gen_machine_config(machine)\n cfg[\"frontends\"][machine.id] = frontend\n cfg[\"backends\"][machine.id] = backend\n return cfg",
"def _generate_config(self, type, org, node):\n args = {}\n if type == \"peer\":\n args.update({\"peer_id\": \"{}.{}\".format(node, org)})\n args.update({\"peer_address\": \"{}.{}:{}\".format(node, org, 7051)})\n args.update(\n {\"peer_gossip_externalEndpoint\": \"{}.{}:{}\".format(node, org, 7051)})\n args.update(\n {\"peer_chaincodeAddress\": \"{}.{}:{}\".format(node, org, 7052)})\n args.update({\"peer_tls_enabled\": True})\n args.update({\"peer_localMspId\": \"{}MSP\".format(org.capitalize())})\n\n a = NodeConfig(org)\n a.peer(node, **args)\n else:\n args.update({\"General_ListenPort\": 7050})\n args.update(\n {\"General_LocalMSPID\": \"{}OrdererMSP\".format(org.capitalize())})\n args.update({\"General_TLS_Enabled\": True})\n args.update({\"General_BootstrapFile\": \"genesis.block\"})\n\n a = NodeConfig(org)\n a.orderer(node, **args)",
"def create_config(self, context, mgmtport):\n pass",
"def get_config():\n name = 'dynamic_pricing'\n num_products = 5\n scale = 1\n noise_var = 10\n p_max = 1\n\n agents = collections.OrderedDict(\n [('bsPricing',\n functools.partial(BootstrapDynamicPricing,\n num_products, scale, noise_var, p_max))]\n )\n\n environments = collections.OrderedDict(\n [('env',\n functools.partial(DynamicPricing,\n num_products, scale, noise_var, p_max))]\n )\n experiments = collections.OrderedDict(\n [(name, ExperimentNoAction)]\n )\n n_steps = 80\n n_seeds = 2000\n config = Config(name, agents, environments, experiments, n_steps, n_seeds)\n return config",
"def generate_config(context):\n\n\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n\n network = context.properties.get('networkURL', generate_network_uri(\n project_id,\n context.properties.get('network','')\n ))\n target_vpn_gateway = context.env['name'] + '-tvpng'\n esp_rule = context.env['name'] + '-esp-rule'\n udp_500_rule = context.env['name'] + '-udp-500-rule'\n udp_4500_rule = context.env['name'] + '-udp-4500-rule'\n vpn_tunnel = context.env['name'] + '-vpn'\n router_vpn_binding = context.env['name'] + '-router-vpn-binding'\n resources = []\n if 'ipAddress' in context.properties:\n ip_address = context.properties['ipAddress']\n static_ip = ''\n else:\n static_ip = context.env['name'] + '-ip'\n resources.append({\n # The reserved address resource.\n 'name': static_ip,\n # https://cloud.google.com/compute/docs/reference/rest/v1/addresses\n 'type': 'gcp-types/compute-v1:addresses',\n 'properties': {\n 'name': properties.get('name', static_ip),\n 'project': project_id,\n 'region': context.properties['region']\n }\n })\n ip_address = '$(ref.' + static_ip + '.address)'\n\n resources.extend([\n {\n # The target VPN gateway resource.\n 'name': target_vpn_gateway,\n # https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways\n 'type': 'gcp-types/compute-v1:targetVpnGateways',\n 'properties':\n {\n 'name': properties.get('name', target_vpn_gateway),\n 'project': project_id,\n 'network': network,\n 'region': context.properties['region'],\n }\n },\n {\n # The forwarding rule resource for the ESP traffic.\n 'name': esp_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-esp'.format(properties.get('name')) if 'name' in properties else esp_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'ESP',\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 4500.\n 'name': udp_4500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-4500'.format(properties.get('name')) if 'name' in properties else udp_4500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 4500,\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 500\n 'name': udp_500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-500'.format(properties.get('name')) if 'name' in properties else udp_500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 500,\n 'region': context.properties['region'],\n 'target': '$(ref.' 
+ target_vpn_gateway + '.selfLink)',\n }\n },\n\n ])\n router_url_tag = 'routerURL'\n router_name_tag = 'router'\n\n if router_name_tag in context.properties:\n router_url = context.properties.get(router_url_tag, generate_router_uri(\n context.env['project'],\n context.properties['region'],\n context.properties[router_name_tag]))\n # Create dynamic routing VPN\n resources.extend([\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n # https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties':\n {\n 'name': properties.get('name', vpn_tunnel),\n 'project': project_id,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'router': router_url,\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)'\n },\n 'metadata': {\n 'dependsOn': [esp_rule,\n udp_500_rule,\n udp_4500_rule]\n }\n }])\n else:\n # Create static routing VPN\n resources.append(\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties': {\n 'name': vpn_tunnel,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)',\n 'localTrafficSelector':\n context.properties['localTrafficSelector'],\n 'remoteTrafficSelector':\n context.properties['remoteTrafficSelector'],\n\n },\n 'metadata': {\n 'dependsOn': [esp_rule, udp_500_rule, udp_4500_rule]\n }\n },\n )\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'targetVpnGateway',\n 'value': target_vpn_gateway\n },\n {\n 'name': 'staticIp',\n 'value': static_ip\n },\n {\n 'name': 'espRule',\n 'value': esp_rule\n },\n {\n 'name': 'udp500Rule',\n 'value': udp_500_rule\n },\n {\n 'name': 'udp4500Rule',\n 'value': udp_4500_rule\n },\n {\n 'name': 'vpnTunnel',\n 'value': vpn_tunnel\n },\n {\n 'name': 'vpnTunnelUri',\n 'value': '$(ref.'+vpn_tunnel+'.selfLink)'\n }\n ]\n }",
"def generate_config(self):\n\n # Change crypto-config.yaml and add organizations\n yaml = YAML()\n with open(os.path.join(self.config_path, \"crypto-config-template.yaml\"), \"r\") as crypto_config_file:\n config = yaml.load(crypto_config_file)\n\n config[\"OrdererOrgs\"][0][\"Specs\"] = []\n for orderer_index in range(1, self.num_validators + 1):\n orderer_host, _ = self.experiment.get_peer_ip_port_by_id(orderer_index)\n config[\"OrdererOrgs\"][0][\"Specs\"].append({\n \"Hostname\": \"orderer%d\" % orderer_index,\n \"SANS\": [orderer_host]\n })\n\n config[\"PeerOrgs\"] = []\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n organization_config = {\n \"Name\": \"Org%d\" % organization_index,\n \"Domain\": \"org%d.example.com\" % organization_index,\n \"EnableNodeOUs\": True,\n \"Template\": {\n \"Count\": 1,\n \"SANS\": [organization_host]\n },\n \"Users\": {\n \"Count\": 1\n }\n }\n config[\"PeerOrgs\"].append(organization_config)\n\n with open(os.path.join(self.config_path, \"crypto-config.yaml\"), \"w\") as crypto_config_file:\n yaml.dump(config, crypto_config_file)\n\n # Change configtx.yaml\n yaml = YAML()\n with open(os.path.join(self.config_path, \"configtx-template.yaml\"), \"r\") as configtx_file:\n config = yaml.load(configtx_file)\n\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n org_admin = \"Org%dMSP.admin\" % organization_index\n org_peer = \"Org%dMSP.peer\" % organization_index\n org_client = \"Org%dMSP.client\" % organization_index\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n\n organization_config = {\n \"Name\": \"Org%dMSP\" % organization_index,\n \"ID\": \"Org%dMSP\" % organization_index,\n \"MSPDir\": \"crypto-config/peerOrganizations/org%d.example.com/msp\" % organization_index,\n \"Policies\": {\n \"Readers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s', '%s')\" % (org_admin, org_peer, org_client)\n },\n \"Writers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s')\" % (org_admin, org_peer)\n },\n \"Admins\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s')\" % (org_admin)\n }\n },\n \"AnchorPeers\": [{\n \"Host\": organization_host,\n \"Port\": 7000 + organization_index\n }]\n }\n\n commented_map = CommentedMap(organization_config)\n commented_map.yaml_set_anchor(\"Org%d\" % organization_index, always_dump=True)\n config[\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"]\\\n .append(commented_map)\n\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n consenter_port = 7000 + organization_index\n consenter_info = {\n \"Host\": organization_host,\n \"Port\": consenter_port,\n \"ClientTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % 
organization_index,\n \"ServerTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index\n }\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"].append(consenter_info)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"].append(\n \"%s:%d\" % (organization_host, consenter_port))\n\n with open(os.path.join(self.config_path, \"configtx.yaml\"), \"w\") as configtx_file:\n round_trip_dump(config, configtx_file, Dumper=RoundTripDumper)",
"def _get_MindtPy_ECP_config():\n CONFIG = ConfigBlock('MindtPy-GOA')\n\n _add_common_configs(CONFIG)\n _add_ecp_configs(CONFIG)\n _add_oa_cuts_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_bound_configs(CONFIG)\n return CONFIG",
"def create_endpoint_config(EndpointConfigName=None, ProductionVariants=None, Tags=None, KmsKeyId=None):\n pass",
"def config():\n if app.args.ui_mode == \"jinja\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": None,\n \"show\": False,\n \"text\": None,\n \"url\": None\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"jinja2\"\n },\n \"title\": \"RENDER\",\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"Render\",\n \"url\": \"/render\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": 'text'\n },\n \"title\": \"RESULT\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n elif app.args.ui_mode == \"schema\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"schema\",\n \"url\": \"/schema\"\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"SCHEMA\",\n \"b1\": {\n \"icon\": \"check\",\n \"show\": True,\n \"text\": \"Validate\",\n \"url\": \"/validate\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"VALIDATION SUCCESS/ERRORS\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n return jsonify(ui_config)",
"def __init__(self):\n cfg = ConfigParser.ConfigParser()\n\n if sys.executable == sys.argv[0]: # Windows binary\n self.VISIONEGG_SYSTEM_DIR = os.curdir\n self.VISIONEGG_USER_DIR = os.curdir\n else:\n # non-standard VisionEgg installations\n try:\n self.VISIONEGG_SYSTEM_DIR = os.environ['VISIONEGG_SYSTEM_DIR']\n except KeyError:\n self.VISIONEGG_SYSTEM_DIR = os.path.split(__file__)[0]\n user_dir = os.path.expanduser(\"~\")\n self.VISIONEGG_USER_DIR = os.path.join(user_dir,\"VisionEgg\")\n\n # See if there's an environment variable for the config file\n if 'VISIONEGG_CONFIG_FILE' in os.environ.keys():\n configFile = os.environ['VISIONEGG_CONFIG_FILE']\n else:\n # Is there one in VISIONEGG_USER_DIR?\n configFile = os.path.join(self.VISIONEGG_USER_DIR,\"VisionEgg.cfg\")\n if not os.path.isfile(configFile):\n configFile = os.path.join(self.VISIONEGG_SYSTEM_DIR,\"VisionEgg.cfg\")\n if not os.path.isfile(configFile):\n configFile = None # No file, use defaults specified in environment variables then here\n\n if configFile:\n cfg.read(configFile)\n else:\n # pretend we have a config file\n cfg.add_section('General')\n for key in defaults.keys():\n cfg.set('General',key,str(defaults[key]))\n if sys.platform == 'darwin':\n cfg.add_section('darwin')\n for key in extra_darwin_defaults.keys():\n cfg.set('darwin',key,str(extra_darwin_defaults[key]))\n\n # Do the general stuff first\n # Set the default values\n for name in defaults.keys():\n if name in os.environ.keys():\n value = os.environ[name]\n else:\n value = defaults[name]\n if isinstance(defaults[name], int):\n\t\tif value == 'False':\n\t\t value = 0\n\t\telif value == 'True':\n\t\t value = 1\n setattr(self,name,int(value))\n elif isinstance(defaults[name], float):\n setattr(self,name,float(value))\n else:\n setattr(self,name,value)\n\n # Get the values from the configFile\n general_options = cfg.options('General')\n\n self._delayed_configuration_log_warnings = [] # chick and egg problem\n # set defaults from config file\n for option in general_options:\n name = option.upper()\n if name not in defaults.keys():\n self._delayed_configuration_log_warnings.append(\n \"While reading %s: The variable \\\"%s\\\" is not (anymore) a Vision Egg variable.\"%(os.path.abspath(configFile),option))\n continue\n value = cfg.get('General',option)\n if name in os.environ.keys():\n value = os.environ[name]\n if isinstance(defaults[name], int):\n\t\tif value == 'False':\n\t\t value = 0\n\t\telif value == 'True':\n\t\t value = 1\n setattr(self,name,int(value))\n elif isinstance(defaults[name], float):\n setattr(self,name,float(value))\n else:\n setattr(self,name,value)\n\n # Do platform specific stuff\n # Set the default values\n platform_name = sys.platform\n extra_name = \"extra_%s_defaults\"%(platform_name,)\n if extra_name in globals().keys():\n extra_defaults = globals()[extra_name]\n for name in extra_defaults.keys():\n setattr(self,name,extra_defaults[name])\n\n # Get the values from the configFile\n platform_options = cfg.options(platform_name)\n for option in platform_options:\n name = option.upper()\n if name not in extra_defaults.keys():\n raise KeyError(\"No Vision Egg configuration variable \\\"%s\\\"\"%option)\n value = cfg.get(platform_name,option)\n if name in os.environ.keys():\n value = os.environ[name]\n if isinstance(extra_defaults[name], int):\n\t\t if value == 'False':\n\t\t value = 0\n \t\t elif value == 'True':\n\t\t value = 1\n setattr(self,name,int(value))\n elif isinstance(extra_defaults[name], float):\n setattr(self,name,float(value))\n 
else:\n setattr(self,name,value)\n\n if(configFile):\n self.VISIONEGG_CONFIG_FILE = os.path.abspath(configFile)\n else:\n self.VISIONEGG_CONFIG_FILE = None",
"def get_epix_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config100aV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config100aV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10KV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def genConfig():\n\n cfg = open('/home/sevudan/Scripts/projects/topogen/result.cfg','w')\n template = getTemplate()\n G = topo.topology()\n gen_config_lo(G, cfg)\n # Get node from list nodes.\n for node in sorted(G.nodes):\n d = dict(G[node])\n hostname = node\n # Get attributes for node.\n peer = d.keys()\n for peer_node in peer:\n params = d.get(peer_node)\n conf = template.render(\n node=hostname,\n description = peer_node,\n ifd = params.get('ifd'),\n local_ifl = params.get('local_ifl'),\n peer_ifl = params.get('peer_ifl'),\n ifa = params.get('ip_address')\n )\n result = '{}{}'.format(conf,'\\n')\n cfg.write(result)\n cfg.close()",
"def configure_for_pokered(config=config):\n attrs = {\n \"version\": \"red\",\n\n \"map_dir\": os.path.join(config.path, 'maps/'),\n \"gfx_dir\": os.path.join(config.path, 'gfx/tilesets/'),\n \"to_gfx_name\": red_gfx_name,\n \"block_dir\": os.path.join(config.path, 'gfx/blocksets/'), # not used\n \"block_ext\": '.bst', # not used\n\n \"palettes_on\": False,\n\n \"constants_filename\": os.path.join(config.path, 'constants.asm'),\n\n \"time_of_day\": 1,\n }\n return attrs",
"def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue",
"def GenerateConfig(context):\n\n resources = [{\n 'name': context.env['name'],\n 'type': 'compute.v1.instance',\n 'properties': {\n 'zone': context.properties['zone'],\n 'machineType': ''.join([COMPUTE_URL_BASE, 'projects/',\n context.env['project'], '/zones/',\n context.properties['zone'], '/machineTypes/',\n context.properties['machineType']]),\n 'disks': [{\n 'deviceName': 'boot',\n 'type': 'PERSISTENT',\n 'boot': True,\n 'autoDelete': True,\n 'initializeParams': {\n 'sourceImage': ''.join([COMPUTE_URL_BASE, 'projects/',\n 'ubuntu-os-cloud/global/',\n 'images/family/ubuntu-1604-lts'])\n }\n }],\n 'networkInterfaces': [{\n 'network': '$(ref.' + context.properties['network']\n + '.selfLink)',\n 'accessConfigs': [{\n 'name': 'External NAT',\n 'type': 'ONE_TO_ONE_NAT'\n }]\n }],\n 'metadata': {\n 'items': [{\n 'key': 'startup-script',\n 'value': ''.join(['#!/bin/bash\\n',\n 'sudo apt-get install openjdk-9-jre-headless -y\\n',\n 'sudo python -m SimpleHTTPServer 80'])\n }]\n }\n }\n }]\n return {'resources': resources}",
"def configuration():",
"def generateConfig(run,subrun,conditions):\n \n configname = (conditions.numcdir + \"/\" + str(run) + \"/\" + str(subrun)\n + \"/numc_config_\" + str(run) + \"_\" + str(subrun) + \".cfg\")\n \n configContents = \"\"\n \n configContents += \"[software]\\n\"\n if conditions.oldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/setup.sh\\n\"\n elif conditions.newoldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/setup.sh\\n\"\n else:\n #configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.1_nd280/src/neutgeom/setup.sh\\n\"\n #configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.1_nd280_wBBBA05/src/neutgeom/setup.sh\\n\"\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/setup.sh\\n\"\n \n configContents += \"[geometry]\\n\"\n\n configContents += \"baseline = \" + conditions.geometry +\"\\n\"\n if conditions.waterair == \"water\":\n configContents += \"p0d_water_fill = 1\\n\"\n else:\n configContents += \"p0d_water_fill = 0\\n\"\n \n configContents += \"\"\"\n \n[configuration]\nmodule_list = neutMC\n\n[filenaming]\n\"\"\"\n configContents += \"comment = \" + conditions.comment + \"\\n\"\n configContents += \"run_number = \" + str(run) +\"\\n\"\n configContents += \"subrun = \" + str(subrun) + \"\\n\"\n\n if conditions.oldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/neut.card\n\"\"\"\n elif conditions.newoldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/neut.card\n\"\"\"\n else:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/neut.card\n\"\"\"\n\n configContents += \"flux_file = \" + conditions.ram_disk + \"/\" + conditions.flux_base + \"\\n\"\n\n#flux_file = flux_file\n#\"\"\"\n\n# configContents += \"flux_file_path = \" + conditions.ram_disk + \"/\" + conditions.flux_base\n\n# configContents += \"\"\" \n#flux_file_start = 1\n#flux_file_stop = 300\n#\"\"\"\n\n configContents += \"maxint_file = \" + conditions.maxint_file_local + \"\\n\"\n\n# default: 5e17 but for basket MC special production higher\n configContents += \"\"\" \npot = 5.0e17\nneutrino_type = beam\n\"\"\"\n if conditions.baskmagn == \"basket\":\n configContents += \"\"\" \nflux_region = basket\nmaster_volume = Basket \nrandom_start = 1\n\"\"\"\n elif conditions.baskmagn == \"magnet\":\n configContents += \"\"\" \nflux_region = magnet\nmaster_volume = Magnet \nrandom_start = 1\n\"\"\"\n else:\n print \"Unknown basket/magnet condition\"\n \n\n configContents += \"random_seed = \" + str(getRandom()) +\"\\n\"\n configContents += \"neut_seed1 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed2 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed3 = \" + str(getRandom())+\"\\n\" \n\n configContents += \"\\n\"\n configContents += \"[nd280mc]\\n\"\n configContents += \"mc_type=Neut_RooTracker \\n\"\n\n #print configContents\n\n try:\n macFile = open(configname,\"w\")\n macFile.write(configContents)\n \n except:\n print \"can't write config file\" \n \n\n return configname",
"def configure(task):\n r = task.run(\n name=\"Base Configuration\",\n task=template_file,\n template=\"base.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # r.result holds the result of rendering the template\n config = r.result\n\n r = task.run(\n name=\"Loading extra underlay data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/underlay.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"underlay\"] = r.result\n\n r = task.run(\n name=\"Loading extra evpn data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/evpn.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"evpn\"] = r.result\n\n r = task.run(\n name=\"Loading extra vxlan data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/vxlan.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"vxlan\"] = r.result\n\n r = task.run(\n name=\"Interfaces Configuration\",\n task=template_file,\n template=\"interfaces.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we append the generated configuration\n config += r.result\n\n r = task.run(\n name=\"Routing Configuration\",\n task=template_file,\n template=\"routing.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"EVPN Configuration\",\n task=template_file,\n template=\"evpn.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"Role-specific Configuration\",\n task=template_file,\n template=f\"{task.host['role']}.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we update our hosts' config\n config += r.result\n\n task.run(\n name=\"Loading Configuration on the device\",\n task=napalm_configure,\n replace=True,\n configuration=config,\n )",
"def test_start_instance_with_configuration(self):\n global configuration_instance\n databases = []\n databases.append({\"name\": \"firstdbconfig\", \"character_set\": \"latin2\",\n \"collate\": \"latin2_general_ci\"})\n databases.append({\"name\": \"db2\"})\n configuration_instance.databases = databases\n users = []\n users.append({\"name\": \"liteconf\", \"password\": \"liteconfpass\",\n \"databases\": [{\"name\": \"firstdbconfig\"}]})\n configuration_instance.users = users\n configuration_instance.name = \"TEST_\" + str(uuid.uuid4()) + \"_config\"\n flavor_href = instance_info.dbaas_flavor_href\n configuration_instance.dbaas_flavor_href = flavor_href\n configuration_instance.volume = instance_info.volume\n configuration_instance.dbaas_datastore = instance_info.dbaas_datastore\n configuration_instance.dbaas_datastore_version = \\\n instance_info.dbaas_datastore_version\n configuration_instance.nics = instance_info.nics\n\n result = instance_info.dbaas.instances.create(\n configuration_instance.name,\n configuration_instance.dbaas_flavor_href,\n configuration_instance.volume,\n configuration_instance.databases,\n configuration_instance.users,\n nics=configuration_instance.nics,\n availability_zone=\"nova\",\n datastore=configuration_instance.dbaas_datastore,\n datastore_version=configuration_instance.dbaas_datastore_version,\n configuration=configuration_href)\n assert_equal(200, instance_info.dbaas.last_http_code)\n assert_equal(\"BUILD\", result.status)\n configuration_instance.id = result.id",
"def test_configuration(self):\n space = Space()\n space.register(Integer(\"yolo1\", \"uniform\", -3, 6, shape=(2,)))\n space.register(Integer(\"yolo2\", \"uniform\", -3, 6, shape=(2,)))\n space.register(Real(\"yolo3\", \"norm\", 0.9))\n space.register(Categorical(\"yolo4\", (\"asdfa\", 2)))\n\n assert space.configuration == {\n \"yolo1\": \"uniform(-3, 3, shape=(2,), discrete=True)\",\n \"yolo2\": \"uniform(-3, 3, shape=(2,), discrete=True)\",\n \"yolo3\": \"normal(0.9)\",\n \"yolo4\": \"choices(['asdfa', 2])\",\n }",
"def makeconf():\n conf = {}\n available_locations = []\n print('Hi. We will help you make a config file.')\n input('Press ENTER to continue or CTRL+C to quit.... ')\n path = input('Please enter the path you want to save the config file. Default . (4nt direcoty) : ')\n time = input('How long, in hours, you would like the instance to leave? Default 24 (hours) : ')\n flavor = input('What is the Instance Flavor ID you would like to spawn? Default 201 (CPU:1, MEM:1gb, SSD:25gb) : ')\n operating_system = input('What is the Instance operating system ID you would like to install? Default 167 (CentOS 7) : ')\n currency = input('Which currency would you like to pay with? \"bitcoin, ethereum, litecoin or bitcoincash\". Default (bitcoin)')\n all_flavor = api_get('flavor')\n all_os = api_get('os')\n\n path = path if path else './instance-conf.json'\n time = time if time else '24'\n flavor = flavor if flavor else '201'\n operating_system = operating_system if operating_system else '167'\n currency = currency if currency else 'bitcoin'\n\n if not all_flavor.get('status') == 200 or not all_os.get('status') == 200:\n print(Bcolors.FAIL + 'ERROR: Something went wrong requesting facelesscloud server.' + Bcolors.ENDC)\n sys.exit(2)\n\n if not all_flavor['result'].get(flavor):\n print(Bcolors.FAIL + 'ERROR: flavor ID entered does not exist.' + Bcolors.ENDC)\n sys.exit(2)\n if not all_os['result'].get(operating_system):\n print(Bcolors.FAIL + 'ERROR: operating system ID entered does not exist.' + Bcolors.ENDC)\n sys.exit(2)\n\n available_locations = all_flavor['result'][flavor].get('available_locations')\n if not available_locations:\n print(Bcolors.FAIL + 'ERROR: No available location found for specified flavor.' + Bcolors.ENDC)\n sys.exit(2)\n\n i = 1\n location_num = {}\n for location in available_locations:\n location_num.update({i: location})\n i = i + 1\n\n region = input('Please select region to deploy instance ' + str(location_num) + ' : ')\n\n if int(region) not in location_num:\n print(Bcolors.FAIL + 'ERROR: Region ID selected not in displayed choice. Exiting no configuration file created.' + Bcolors.ENDC)\n sys.exit(2)\n\n region_id = get_region_id(location_num.get(int(region)))\n\n sshkey_path = input('Please enter the Public SSH Key path. (Let it blank if None.) : ')\n sshkey = file_to_string(sshkey_path) if sshkey_path else None\n kickstart_path = input('Please enter the kickstart Bash script path. (Let it blank if None.) : ')\n kickstart = file_to_string(kickstart_path) if kickstart_path else None\n\n if sshkey and not validate_ssh_key(sshkey):\n print(Bcolors.FAIL + 'ERROR: SSH-KEY format is bad ! Exiting no configuration file created.' + Bcolors.ENDC)\n sys.exit(2)\n\n conf.update({'hours_time': time, 'flavor': flavor, 'operating_system': operating_system, 'region': region_id, 'ssh_key': sshkey, 'kickstart': kickstart, 'currency': currency})\n try:\n with open(path, 'w') as conf_file:\n json.dump(conf, conf_file)\n conf_file.close()\n except FileNotFoundError as err: # Sublime give an error, but it's not.\n print(Bcolors.FAIL + 'ERROR: Config File path entered not found.' + Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n except PermissionError as err:\n print(Bcolors.FAIL + 'ERROR: Config File path entered, Permission Denied.' + Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n\n print(Bcolors.OKGREEN + 'SUCCESS, Config file writen to ' + path + Bcolors.ENDC)",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def save_config(self):\n\n h_config = configparser.ConfigParser()\n\n h_config[\"general\"] = {}\n if not self.configuration.interval:\n self.configuration.interval = __interval__\n h_config[\"general\"][\"interval\"] = str(self.configuration.interval)\n if not self.configuration.wifi_clients:\n self.configuration.wifi_clients = __wifi_clients_example__\n h_config[\"general\"][\"wifi_clients\"] = \",\".join(self.configuration.wifi_clients)\n if not self.configuration.schedules_names:\n self.configuration.schedules_names = __schedules_names_example__\n h_config[\"general\"][\"schedules_name\"] = \",\".join(self.configuration.schedules_names)\n\n h_config[\"unifi\"] = {}\n if not self.configuration.unifi_host:\n self.configuration.unifi_host = __unifi_controller_host__\n h_config[\"unifi\"][\"host\"] = self.configuration.unifi_host\n if not self.configuration.unifi_port:\n self.configuration.unifi_port = __unifi_controller_port__\n h_config[\"unifi\"][\"port\"] = str(self.configuration.unifi_port)\n if not self.configuration.unifi_username:\n self.configuration.unifi_username = __unifi_controller_user__\n h_config[\"unifi\"][\"username\"] = self.configuration.unifi_username\n if not self.configuration.unifi_password:\n self.configuration.unifi_password = __unifi_controller_pwd__\n h_config[\"unifi\"][\"password\"] = self.configuration.unifi_password\n\n h_config[\"hue\"] = {}\n if not self.configuration.hue_host:\n self.configuration.hue_host = __hue_hub_host__\n h_config[\"hue\"][\"host\"] = self.configuration.hue_host\n if not self.configuration.hue_port:\n self.configuration.hue_port = __hue_hub_port__\n h_config[\"hue\"][\"port\"] = str(self.configuration.hue_port)\n if not self.configuration.hue_key:\n self.configuration.hue_key = __hue_key__\n h_config[\"hue\"][\"key\"] = self.configuration.hue_key\n\n h_config[\"zmq\"] = {}\n if not self.configuration.pub_host:\n self.configuration.pub_host = __zmq_default_publishing_host__\n h_config[\"zmq\"][\"host\"] = self.configuration.pub_host\n if not self.configuration.pub_port:\n self.configuration.pub_port = __zmq_default_publishing_port__\n h_config[\"zmq\"][\"port\"] = str(self.configuration.pub_port)\n if \"no_pub\" in self.configuration:\n h_config[\"zmq\"][\"disabled\"] = str(int(self.configuration.no_pub))\n\n h_config[\"logging\"] = {}\n if self.configuration.syslog_host:\n h_config[\"logging\"][\"syslog_host\"] = self.configuration.syslog_host\n if self.configuration.syslog_port:\n h_config[\"logging\"][\"syslog_port\"] = str(self.configuration.syslog_port)\n if self.configuration.log_file:\n h_config[\"logging\"][\"log_file\"] = str(self.configuration.log_file)\n\n with self.config_file.open(mode='w') as configfile:\n h_config.write(configfile)\n logging.info(\"Configuration saved to {}\".format(str(self.config_file)))",
"def tpe_configspace(self):\n raise NotImplementedError(\"Overwrite for actual experiment\")",
"def create_config(output_dir='my-hls-test', project_name='myproject', backend='Vivado', version='1.0.0', **kwargs):\n backend_list = hls4ml.backends.get_available_backends()\n if backend.lower() not in backend_list:\n raise Exception(f'Unknown backend: {backend}')\n\n backend = hls4ml.backends.get_backend(backend)\n\n backend_config = backend.create_initial_config(**kwargs)\n\n config = {}\n config['OutputDir'] = output_dir\n config['ProjectName'] = project_name\n config['Backend'] = backend.name\n config['Version'] = version\n config.update(backend_config)\n\n return config",
"def build_configs():",
"def generate_config(self):\n self.log.debug(\"generate-config\")\n self.qemu.args = [\n \"-nodefaults\",\n \"-only-migratable\",\n \"-cpu {cpu_model},enforce\",\n # Watch out: kvm.name is used for sanity checking critical actions.\n \"-name {name},process=kvm.{name}\",\n \"-chroot {{chroot}}\",\n \"-runas nobody\",\n \"-serial file:/var/log/vm/{name}.log\",\n \"-display vnc={{vnc}}\",\n \"-pidfile {{pidfile}}\",\n \"-vga std\",\n # We use this '-m' flag to find what a running VM is actually\n # using at the moment. If this flag is changed then that code must\n # be adapted as well. This is used in incoming.py and qemu.py.\n \"-m {memory}\",\n \"-readconfig {{configfile}}\",\n ]\n self.qemu.args = [a.format(**self.cfg) for a in self.qemu.args]\n\n vhost = ' vhost = \"on\"' if self.vhost else \"\"\n\n netconfig = []\n for net, net_config in sorted(self.cfg[\"interfaces\"].items()):\n ifname = \"t{}{}\".format(net, self.cfg[\"id\"])\n netconfig.append(\n \"\"\"\n[device]\n driver = \"virtio-net-pci\"\n netdev = \"{ifname}\"\n mac = \"{mac}\"\n\n[netdev \"{ifname}\"]\n type = \"tap\"\n ifname = \"{ifname}\"\n script = \"/etc/kvm/kvm-ifup\"\n downscript = \"/etc/kvm/kvm-ifdown\"\n{vhost}\n\"\"\".format(\n ifname=ifname, mac=net_config[\"mac\"], vhost=vhost\n )\n )\n\n with open(self.vm_config_template) as f:\n tpl = f.read()\n accelerator = (\n ' accel = \"{}\"'.format(self.accelerator)\n if self.accelerator\n else \"\"\n )\n machine_type = detect_current_machine_type(self.machine_type)\n self.qemu.config = tpl.format(\n accelerator=accelerator,\n machine_type=machine_type,\n disk_cache_mode=self.qemu.disk_cache_mode,\n network=\"\".join(netconfig),\n **self.cfg,\n )"
]
| [
"0.6670641",
"0.62125176",
"0.62125176",
"0.6175586",
"0.6036967",
"0.6032907",
"0.5891014",
"0.5884348",
"0.58781654",
"0.5875689",
"0.5859736",
"0.5702923",
"0.57000494",
"0.56649536",
"0.559409",
"0.55909014",
"0.55751383",
"0.5568713",
"0.556752",
"0.55577177",
"0.5549555",
"0.5547436",
"0.5546312",
"0.553752",
"0.55349195",
"0.55182135",
"0.5502936",
"0.5491393",
"0.5487558",
"0.54829824"
]
| 0.66669893 | 1 |
Test changing the policy name | def test_basic_change_policy_name(self):
args = self.get_args()
config = self.create_config_file()
mac = '00:11:22:33:33:33'
ip = '3.4.3.4'
self.write_config_file(config, args)
collector = execute_tool(args, test_mode=True)
time.sleep(4)
self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',
'l3out', 'intersite-testsuite-app-epg'))
self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
'l3out', 'intersite-testsuite-app-epg'))
config = self.create_diff_epg_config_file()
self.write_config_file(config, args)
collector.reload_config()
time.sleep(4)
self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
'l3out', 'intersite-testsuite-app-epg'))
self.assertFalse(self.verify_remote_site_has_policy('intersite-testsuite',
'l3out', 'intersite-testsuite-app-epg'))
self.assertTrue(self.verify_remote_site_has_policy('intersite-testsuite',
'l3out', 'intersite-testsuite-app-epg2'))
self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',
'l3out', 'intersite-testsuite-app-epg2')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_policyname(self, policyname):\n self.options[\"policyname\"] = policyname",
"def set_policyname(self, policyname):\n self.options['policyname'] = policyname",
"def test_replace_namespaced_policy(self):\n pass",
"def policy_name(self, policy_name):\n\n self._policy_name = policy_name",
"def policy_name(self, policy_name):\n\n self._policy_name = policy_name",
"def test_update_ikepolicy(self):\r\n resource = 'ikepolicy'\r\n cmd = ikepolicy.UpdateIKEPolicy(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })",
"def test_patch_namespaced_policy(self):\n pass",
"def test_create_namespaced_policy(self):\n pass",
"def test_update_bios_policy(self):\n pass",
"def test_update_firewall_policy(self):\r\n resource = 'firewall_policy'\r\n cmd = firewallpolicy.UpdateFirewallPolicy(test_cli20.MyApp(sys.stdout),\r\n None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })",
"def test_update_ipsecpolicy(self):\r\n resource = 'ipsecpolicy'\r\n cmd = ipsecpolicy.UpdateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })",
"def test_change_name_without_name(self):\r\n self.client.login(username=self.student.username, password='test')\r\n change_name_url = self.get_url()\r\n resp = self.client.post(change_name_url, {\r\n 'new_name': '',\r\n 'rationale': 'change identity'\r\n })\r\n response_data = json.loads(resp.content)\r\n self.assertFalse(response_data['success'])",
"def test_read_namespaced_policy(self):\n pass",
"def test_patch_bios_policy(self):\n pass",
"def test_patch_namespaced_policy_binding(self):\n pass",
"def test_replace_namespaced_policy_binding(self):\n pass",
"def update_policy(self):\n pass",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def policy_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_name\")",
"def update_policy(self, *args, **kwargs):\r\n pass",
"def set_policy(self, name, policy):\n client = self.connect(VAULT_TOKEN)\n client.set_policy(name, policy)",
"def test_replace_cluster_policy(self):\n pass",
"def test_create_bios_policy(self):\n pass",
"def policy_name(self):\n return self._policy_name",
"def policy_name(self) -> Optional[str]:\n return pulumi.get(self, \"policy_name\")"
]
| [
"0.73485625",
"0.7300705",
"0.7236852",
"0.7149512",
"0.7149512",
"0.71392226",
"0.7054928",
"0.6891003",
"0.6654521",
"0.663868",
"0.6620467",
"0.65188605",
"0.65013367",
"0.6472393",
"0.6470784",
"0.64379805",
"0.6436578",
"0.63770264",
"0.63770264",
"0.63770264",
"0.63770264",
"0.63770264",
"0.63770264",
"0.63770264",
"0.63573635",
"0.63547003",
"0.62686604",
"0.6222702",
"0.62013066",
"0.61935824"
]
| 0.7603073 | 0 |