repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
cozy/python_cozy_management | cozy_management/ssl.py | make_links | def make_links(current_cn):
'''
Create symlink for nginx
'''
if not os.path.isfile(CURRENT_CERTIFICATE_PATH):
target = '{}/{}.crt'.format(CERTIFICATES_PATH, current_cn)
print 'Create symlink {} -> {}'.format(CURRENT_CERTIFICATE_PATH,
target)
os.symlink(target, CURRENT_CERTIFICATE_PATH)
if not os.path.isfile(CURRENT_PRIVATE_KEY_PATH):
target = '{}/{}.key'.format(CERTIFICATES_PATH, current_cn)
print 'Create symlink {} -> {}'.format(CURRENT_PRIVATE_KEY_PATH,
target)
os.symlink(target, CURRENT_PRIVATE_KEY_PATH) | python | def make_links(current_cn):
'''
Create symlink for nginx
'''
if not os.path.isfile(CURRENT_CERTIFICATE_PATH):
target = '{}/{}.crt'.format(CERTIFICATES_PATH, current_cn)
print 'Create symlink {} -> {}'.format(CURRENT_CERTIFICATE_PATH,
target)
os.symlink(target, CURRENT_CERTIFICATE_PATH)
if not os.path.isfile(CURRENT_PRIVATE_KEY_PATH):
target = '{}/{}.key'.format(CERTIFICATES_PATH, current_cn)
print 'Create symlink {} -> {}'.format(CURRENT_PRIVATE_KEY_PATH,
target)
os.symlink(target, CURRENT_PRIVATE_KEY_PATH) | [
"def",
"make_links",
"(",
"current_cn",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"CURRENT_CERTIFICATE_PATH",
")",
":",
"target",
"=",
"'{}/{}.crt'",
".",
"format",
"(",
"CERTIFICATES_PATH",
",",
"current_cn",
")",
"print",
"'Create symlink {} -> {}'",
".",
"format",
"(",
"CURRENT_CERTIFICATE_PATH",
",",
"target",
")",
"os",
".",
"symlink",
"(",
"target",
",",
"CURRENT_CERTIFICATE_PATH",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"CURRENT_PRIVATE_KEY_PATH",
")",
":",
"target",
"=",
"'{}/{}.key'",
".",
"format",
"(",
"CERTIFICATES_PATH",
",",
"current_cn",
")",
"print",
"'Create symlink {} -> {}'",
".",
"format",
"(",
"CURRENT_PRIVATE_KEY_PATH",
",",
"target",
")",
"os",
".",
"symlink",
"(",
"target",
",",
"CURRENT_PRIVATE_KEY_PATH",
")"
]
| Create symlink for nginx | [
"Create",
"symlink",
"for",
"nginx"
]
| 820cea58458ae3e067fa8cc2da38edbda4681dac | https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/ssl.py#L310-L324 | train |
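
A hedged usage sketch for the `make_links` record above. The recorded function is Python 2 (`print` statement), and the module-level path constants are defined elsewhere in `cozy_management/ssl.py`, so the values below are illustrative assumptions only.

```python
import os

# Assumed paths -- the real constants live elsewhere in cozy_management/ssl.py.
CERTIFICATES_PATH = '/etc/cozy/certs'
CURRENT_CERTIFICATE_PATH = '/etc/cozy/server.crt'
CURRENT_PRIVATE_KEY_PATH = '/etc/cozy/server.key'

def make_links(current_cn):
    """Python 3 port of the recorded function: point nginx's expected
    certificate/key paths at the files named after current_cn."""
    if not os.path.isfile(CURRENT_CERTIFICATE_PATH):
        target = '{}/{}.crt'.format(CERTIFICATES_PATH, current_cn)
        print('Create symlink {} -> {}'.format(CURRENT_CERTIFICATE_PATH, target))
        os.symlink(target, CURRENT_CERTIFICATE_PATH)
    if not os.path.isfile(CURRENT_PRIVATE_KEY_PATH):
        target = '{}/{}.key'.format(CERTIFICATES_PATH, current_cn)
        print('Create symlink {} -> {}'.format(CURRENT_PRIVATE_KEY_PATH, target))
        os.symlink(target, CURRENT_PRIVATE_KEY_PATH)

make_links('example.com')  # would link server.crt -> certs/example.com.crt
```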
assamite/creamas | creamas/core/simulation.py | Simulation.create | def create(self, agent_cls=None, n_agents=10, agent_kwargs={},
env_cls=Environment, env_kwargs={}, callback=None, conns=0,
log_folder=None):
"""A convenience function to create simple simulations.
    Method first creates the environment, then instantiates agents into it
    with given arguments, and finally creates a simulation for the
environment.
:param agent_cls:
class for agents, or list of classes. If list, then **n_agents**
and **agent_kwargs** are expected to be lists also.
:param n_agents:
        number of agents for the simulation, or a list of numbers
:param agent_kwargs:
keyword arguments passed to agents at creation time, or list of
keyword arguments.
:param env_cls:
environment class for simulation
:type env_cls:
:py:class:`~creamas.core.environment.Environment`
:param dict env_kwargs:
keyword arguments passed to environment at creation time
:param callable callback:
optional callable to call after each simulation step
:param conns:
        Create **conns** initial (random) connections for agents
in the simulation environment.
:param str log_folder:
folder for possible logging. This overwrites *log_folder* keyword
argument from **agent_kwargs** and **env_kwargs**.
"""
if not issubclass(env_cls, Environment):
raise TypeError("Environment class must be derived from ({}"
.format(Environment.__class__.__name__))
if callback is not None and not hasattr(callback, '__call__'):
raise TypeError("Callback must be callable.")
if hasattr(agent_cls, '__iter__'):
for e in agent_cls:
if not issubclass(e, CreativeAgent):
raise TypeError("All agent classes must be derived from {}"
.format(CreativeAgent.__class__.__name__))
else:
if not issubclass(agent_cls, CreativeAgent):
raise TypeError("Agent class must be derived from {}"
.format(CreativeAgent.__class__.__name__))
env = env_cls.create(**env_kwargs)
agents = []
if hasattr(agent_cls, '__iter__'):
for i in range(len(n_agents)):
agent_kwargs[i]['environment'] = env
agent_kwargs[i]['log_folder'] = log_folder
agents = agents + [agent_cls[i](**agent_kwargs[i]) for e in
range(n_agents[i])]
else:
agent_kwargs['environment'] = env
agent_kwargs['log_folder'] = log_folder
agents = [agent_cls(**agent_kwargs) for e in range(n_agents)]
if conns > 0:
env.create_random_connections(n=conns)
return Simulation(env, callback, log_folder) | python | def create(self, agent_cls=None, n_agents=10, agent_kwargs={},
env_cls=Environment, env_kwargs={}, callback=None, conns=0,
log_folder=None):
"""A convenience function to create simple simulations.
    Method first creates the environment, then instantiates agents into it
    with given arguments, and finally creates a simulation for the
environment.
:param agent_cls:
class for agents, or list of classes. If list, then **n_agents**
and **agent_kwargs** are expected to be lists also.
:param n_agents:
        number of agents for the simulation, or a list of numbers
:param agent_kwargs:
keyword arguments passed to agents at creation time, or list of
keyword arguments.
:param env_cls:
environment class for simulation
:type env_cls:
:py:class:`~creamas.core.environment.Environment`
:param dict env_kwargs:
keyword arguments passed to environment at creation time
:param callable callback:
optional callable to call after each simulation step
:param conns:
        Create **conns** initial (random) connections for agents
in the simulation environment.
:param str log_folder:
folder for possible logging. This overwrites *log_folder* keyword
argument from **agent_kwargs** and **env_kwargs**.
"""
if not issubclass(env_cls, Environment):
raise TypeError("Environment class must be derived from ({}"
.format(Environment.__class__.__name__))
if callback is not None and not hasattr(callback, '__call__'):
raise TypeError("Callback must be callable.")
if hasattr(agent_cls, '__iter__'):
for e in agent_cls:
if not issubclass(e, CreativeAgent):
raise TypeError("All agent classes must be derived from {}"
.format(CreativeAgent.__class__.__name__))
else:
if not issubclass(agent_cls, CreativeAgent):
raise TypeError("Agent class must be derived from {}"
.format(CreativeAgent.__class__.__name__))
env = env_cls.create(**env_kwargs)
agents = []
if hasattr(agent_cls, '__iter__'):
for i in range(len(n_agents)):
agent_kwargs[i]['environment'] = env
agent_kwargs[i]['log_folder'] = log_folder
agents = agents + [agent_cls[i](**agent_kwargs[i]) for e in
range(n_agents[i])]
else:
agent_kwargs['environment'] = env
agent_kwargs['log_folder'] = log_folder
agents = [agent_cls(**agent_kwargs) for e in range(n_agents)]
if conns > 0:
env.create_random_connections(n=conns)
return Simulation(env, callback, log_folder) | [
"def",
"create",
"(",
"self",
",",
"agent_cls",
"=",
"None",
",",
"n_agents",
"=",
"10",
",",
"agent_kwargs",
"=",
"{",
"}",
",",
"env_cls",
"=",
"Environment",
",",
"env_kwargs",
"=",
"{",
"}",
",",
"callback",
"=",
"None",
",",
"conns",
"=",
"0",
",",
"log_folder",
"=",
"None",
")",
":",
"if",
"not",
"issubclass",
"(",
"env_cls",
",",
"Environment",
")",
":",
"raise",
"TypeError",
"(",
"\"Environment class must be derived from ({}\"",
".",
"format",
"(",
"Environment",
".",
"__class__",
".",
"__name__",
")",
")",
"if",
"callback",
"is",
"not",
"None",
"and",
"not",
"hasattr",
"(",
"callback",
",",
"'__call__'",
")",
":",
"raise",
"TypeError",
"(",
"\"Callback must be callable.\"",
")",
"if",
"hasattr",
"(",
"agent_cls",
",",
"'__iter__'",
")",
":",
"for",
"e",
"in",
"agent_cls",
":",
"if",
"not",
"issubclass",
"(",
"e",
",",
"CreativeAgent",
")",
":",
"raise",
"TypeError",
"(",
"\"All agent classes must be derived from {}\"",
".",
"format",
"(",
"CreativeAgent",
".",
"__class__",
".",
"__name__",
")",
")",
"else",
":",
"if",
"not",
"issubclass",
"(",
"agent_cls",
",",
"CreativeAgent",
")",
":",
"raise",
"TypeError",
"(",
"\"Agent class must be derived from {}\"",
".",
"format",
"(",
"CreativeAgent",
".",
"__class__",
".",
"__name__",
")",
")",
"env",
"=",
"env_cls",
".",
"create",
"(",
"*",
"*",
"env_kwargs",
")",
"agents",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"agent_cls",
",",
"'__iter__'",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"n_agents",
")",
")",
":",
"agent_kwargs",
"[",
"i",
"]",
"[",
"'environment'",
"]",
"=",
"env",
"agent_kwargs",
"[",
"i",
"]",
"[",
"'log_folder'",
"]",
"=",
"log_folder",
"agents",
"=",
"agents",
"+",
"[",
"agent_cls",
"[",
"i",
"]",
"(",
"*",
"*",
"agent_kwargs",
"[",
"i",
"]",
")",
"for",
"e",
"in",
"range",
"(",
"n_agents",
"[",
"i",
"]",
")",
"]",
"else",
":",
"agent_kwargs",
"[",
"'environment'",
"]",
"=",
"env",
"agent_kwargs",
"[",
"'log_folder'",
"]",
"=",
"log_folder",
"agents",
"=",
"[",
"agent_cls",
"(",
"*",
"*",
"agent_kwargs",
")",
"for",
"e",
"in",
"range",
"(",
"n_agents",
")",
"]",
"if",
"conns",
">",
"0",
":",
"env",
".",
"create_random_connections",
"(",
"n",
"=",
"conns",
")",
"return",
"Simulation",
"(",
"env",
",",
"callback",
",",
"log_folder",
")"
]
| A convenience function to create simple simulations.
Method first creates the environment, then instantiates agents into it
with given arguments, and finally creates a simulation for the
environment.
:param agent_cls:
class for agents, or list of classes. If list, then **n_agents**
and **agent_kwargs** are expected to be lists also.
:param n_agents:
number of agents for the simulation, or a list of numbers
:param agent_kwargs:
keyword arguments passed to agents at creation time, or list of
keyword arguments.
:param env_cls:
environment class for simulation
:type env_cls:
:py:class:`~creamas.core.environment.Environment`
:param dict env_kwargs:
keyword arguments passed to environment at creation time
:param callable callback:
optional callable to call after each simulation step
:param conns:
Create **conns** initial (random) connections for agents
in the simulation environment.
:param str log_folder:
folder for possible logging. This overwrites *log_folder* keyword
argument from **agent_kwargs** and **env_kwargs**. | [
"A",
"convenience",
"function",
"to",
"create",
"simple",
"simulations",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/simulation.py#L28-L101 | train |
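
A hypothetical call sketch for `Simulation.create`. It assumes creamas is installed, that `create` is exposed as a factory on `Simulation`, and that the agent's `act` hook is a coroutine; `MyAgent` is invented for illustration and none of these details are confirmed by this row.

```python
from creamas.core.agent import CreativeAgent
from creamas.core.simulation import Simulation

class MyAgent(CreativeAgent):
    async def act(self, *args, **kwargs):
        pass  # a no-op action once per simulation step

# Environment with five agents and two initial random connections,
# stepped ten times and then torn down.
sim = Simulation.create(agent_cls=MyAgent, n_agents=5, conns=2)
sim.steps(10)
sim.end()
```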
assamite/creamas | creamas/core/simulation.py | Simulation._init_step | def _init_step(self):
"""Initialize next step of simulation to be run.
"""
self._age += 1
self.env.age = self._age
self._log(logging.INFO, "")
self._log(logging.INFO, "\t***** Step {:0>4} *****". format(self.age))
self._log(logging.INFO, "")
self._agents_to_act = self._get_order_agents()
self._step_processing_time = 0.0
self._step_start_time = time.time() | python | def _init_step(self):
"""Initialize next step of simulation to be run.
"""
self._age += 1
self.env.age = self._age
self._log(logging.INFO, "")
self._log(logging.INFO, "\t***** Step {:0>4} *****". format(self.age))
self._log(logging.INFO, "")
self._agents_to_act = self._get_order_agents()
self._step_processing_time = 0.0
self._step_start_time = time.time() | [
"def",
"_init_step",
"(",
"self",
")",
":",
"self",
".",
"_age",
"+=",
"1",
"self",
".",
"env",
".",
"age",
"=",
"self",
".",
"_age",
"self",
".",
"_log",
"(",
"logging",
".",
"INFO",
",",
"\"\"",
")",
"self",
".",
"_log",
"(",
"logging",
".",
"INFO",
",",
"\"\\t***** Step {:0>4} *****\"",
".",
"format",
"(",
"self",
".",
"age",
")",
")",
"self",
".",
"_log",
"(",
"logging",
".",
"INFO",
",",
"\"\"",
")",
"self",
".",
"_agents_to_act",
"=",
"self",
".",
"_get_order_agents",
"(",
")",
"self",
".",
"_step_processing_time",
"=",
"0.0",
"self",
".",
"_step_start_time",
"=",
"time",
".",
"time",
"(",
")"
]
| Initialize next step of simulation to be run. | [
"Initialize",
"next",
"step",
"of",
"simulation",
"to",
"be",
"run",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/simulation.py#L186-L196 | train |
assamite/creamas | creamas/core/simulation.py | Simulation._finalize_step | def _finalize_step(self):
"""Finalize simulation step after all agents have acted for the current
step.
"""
t = time.time()
if self._callback is not None:
self._callback(self.age)
t2 = time.time()
self._step_processing_time += t2 - t
self._log(logging.INFO, "Step {} run in: {:.3f}s ({:.3f}s of "
"actual processing time used)"
.format(self.age, self._step_processing_time,
t2 - self._step_start_time))
self._processing_time += self._step_processing_time | python | def _finalize_step(self):
"""Finalize simulation step after all agents have acted for the current
step.
"""
t = time.time()
if self._callback is not None:
self._callback(self.age)
t2 = time.time()
self._step_processing_time += t2 - t
self._log(logging.INFO, "Step {} run in: {:.3f}s ({:.3f}s of "
"actual processing time used)"
.format(self.age, self._step_processing_time,
t2 - self._step_start_time))
self._processing_time += self._step_processing_time | [
"def",
"_finalize_step",
"(",
"self",
")",
":",
"t",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"_callback",
"is",
"not",
"None",
":",
"self",
".",
"_callback",
"(",
"self",
".",
"age",
")",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"_step_processing_time",
"+=",
"t2",
"-",
"t",
"self",
".",
"_log",
"(",
"logging",
".",
"INFO",
",",
"\"Step {} run in: {:.3f}s ({:.3f}s of \"",
"\"actual processing time used)\"",
".",
"format",
"(",
"self",
".",
"age",
",",
"self",
".",
"_step_processing_time",
",",
"t2",
"-",
"self",
".",
"_step_start_time",
")",
")",
"self",
".",
"_processing_time",
"+=",
"self",
".",
"_step_processing_time"
]
| Finalize simulation step after all agents have acted for the current
step. | [
"Finalize",
"simulation",
"step",
"after",
"all",
"agents",
"have",
"acted",
"for",
"the",
"current",
"step",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/simulation.py#L198-L211 | train |
assamite/creamas | creamas/core/simulation.py | Simulation.async_step | def async_step(self):
"""Progress simulation by running all agents once asynchronously.
"""
assert len(self._agents_to_act) == 0
self._init_step()
t = time.time()
aiomas.run(until=self.env.trigger_all())
self._agents_to_act = []
self._step_processing_time = time.time() - t
self._finalize_step() | python | def async_step(self):
"""Progress simulation by running all agents once asynchronously.
"""
assert len(self._agents_to_act) == 0
self._init_step()
t = time.time()
aiomas.run(until=self.env.trigger_all())
self._agents_to_act = []
self._step_processing_time = time.time() - t
self._finalize_step() | [
"def",
"async_step",
"(",
"self",
")",
":",
"assert",
"len",
"(",
"self",
".",
"_agents_to_act",
")",
"==",
"0",
"self",
".",
"_init_step",
"(",
")",
"t",
"=",
"time",
".",
"time",
"(",
")",
"aiomas",
".",
"run",
"(",
"until",
"=",
"self",
".",
"env",
".",
"trigger_all",
"(",
")",
")",
"self",
".",
"_agents_to_act",
"=",
"[",
"]",
"self",
".",
"_step_processing_time",
"=",
"time",
".",
"time",
"(",
")",
"-",
"t",
"self",
".",
"_finalize_step",
"(",
")"
]
| Progress simulation by running all agents once asynchronously. | [
"Progress",
"simulation",
"by",
"running",
"all",
"agents",
"once",
"asynchronously",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/simulation.py#L220-L229 | train |
assamite/creamas | creamas/core/simulation.py | Simulation.steps | def steps(self, n):
"""Progress simulation with given amount of steps.
    Cannot be called when some of the agents have not acted for the
current step.
:param int n: amount of steps to run
"""
assert len(self._agents_to_act) == 0
for _ in range(n):
self.step() | python | def steps(self, n):
"""Progress simulation with given amount of steps.
    Cannot be called when some of the agents have not acted for the
current step.
:param int n: amount of steps to run
"""
assert len(self._agents_to_act) == 0
for _ in range(n):
self.step() | [
"def",
"steps",
"(",
"self",
",",
"n",
")",
":",
"assert",
"len",
"(",
"self",
".",
"_agents_to_act",
")",
"==",
"0",
"for",
"_",
"in",
"range",
"(",
"n",
")",
":",
"self",
".",
"step",
"(",
")"
]
| Progress simulation with given amount of steps.
Cannot be called when some of the agents have not acted for the
current step.
:param int n: amount of steps to run | [
"Progress",
"simulation",
"with",
"given",
"amount",
"of",
"steps",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/simulation.py#L231-L241 | train |
assamite/creamas | creamas/core/simulation.py | Simulation.step | def step(self):
"""Progress simulation with a single step.
    Cannot be called when some of the agents have not acted for the
current step.
"""
assert len(self._agents_to_act) == 0
self.next()
while len(self._agents_to_act) > 0:
self.next() | python | def step(self):
"""Progress simulation with a single step.
    Cannot be called when some of the agents have not acted for the
current step.
"""
assert len(self._agents_to_act) == 0
self.next()
while len(self._agents_to_act) > 0:
self.next() | [
"def",
"step",
"(",
"self",
")",
":",
"assert",
"len",
"(",
"self",
".",
"_agents_to_act",
")",
"==",
"0",
"self",
".",
"next",
"(",
")",
"while",
"len",
"(",
"self",
".",
"_agents_to_act",
")",
">",
"0",
":",
"self",
".",
"next",
"(",
")"
]
| Progress simulation with a single step.
Cannot be called when some of the agents have not acted for the
current step. | [
"Progress",
"simulation",
"with",
"a",
"single",
"step",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/simulation.py#L243-L252 | train |
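
`step()` above drains `_agents_to_act` with repeated `next()` calls, and `steps(n)` repeats that `n` times. A standalone sketch of the same draining pattern; the names here are illustrative, not the creamas API.

```python
class MiniSim:
    """Toy model of the step()/next() contract in the rows above."""

    def __init__(self, agents):
        self.agents = list(agents)
        self._agents_to_act = []

    def next(self):
        # Begin a fresh step when the queue is empty, then run one agent.
        if not self._agents_to_act:
            self._agents_to_act = list(self.agents)
        self._agents_to_act.pop(0)()

    def step(self):
        assert len(self._agents_to_act) == 0
        self.next()
        while len(self._agents_to_act) > 0:
            self.next()

sim = MiniSim([lambda: print('a acts'), lambda: print('b acts')])
sim.step()  # each agent acts exactly once per step
```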
assamite/creamas | creamas/core/simulation.py | Simulation.end | def end(self, folder=None):
"""End the simulation and destroy the current simulation environment.
"""
ret = self.env.destroy(folder=folder)
self._end_time = time.time()
self._log(logging.DEBUG, "Simulation run with {} steps took {:.3f}s to"
" complete, while actual processing time was {:.3f}s."
.format(self.age, self._end_time - self._start_time,
self._processing_time))
return ret | python | def end(self, folder=None):
"""End the simulation and destroy the current simulation environment.
"""
ret = self.env.destroy(folder=folder)
self._end_time = time.time()
self._log(logging.DEBUG, "Simulation run with {} steps took {:.3f}s to"
" complete, while actual processing time was {:.3f}s."
.format(self.age, self._end_time - self._start_time,
self._processing_time))
return ret | [
"def",
"end",
"(",
"self",
",",
"folder",
"=",
"None",
")",
":",
"ret",
"=",
"self",
".",
"env",
".",
"destroy",
"(",
"folder",
"=",
"folder",
")",
"self",
".",
"_end_time",
"=",
"time",
".",
"time",
"(",
")",
"self",
".",
"_log",
"(",
"logging",
".",
"DEBUG",
",",
"\"Simulation run with {} steps took {:.3f}s to\"",
"\" complete, while actual processing time was {:.3f}s.\"",
".",
"format",
"(",
"self",
".",
"age",
",",
"self",
".",
"_end_time",
"-",
"self",
".",
"_start_time",
",",
"self",
".",
"_processing_time",
")",
")",
"return",
"ret"
]
| End the simulation and destroy the current simulation environment. | [
"End",
"the",
"simulation",
"and",
"destroy",
"the",
"current",
"simulation",
"environment",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/core/simulation.py#L282-L291 | train |
Kortemme-Lab/klab | klab/db/mysql.py | DatabaseInterface.get_unique_record | def get_unique_record(self, sql, parameters = None, quiet = False, locked = False):
'''I use this pattern a lot. Return the single record corresponding to the query.'''
results = self.execute_select(sql, parameters = parameters, quiet = quiet, locked = locked)
assert(len(results) == 1)
return results[0] | python | def get_unique_record(self, sql, parameters = None, quiet = False, locked = False):
'''I use this pattern a lot. Return the single record corresponding to the query.'''
results = self.execute_select(sql, parameters = parameters, quiet = quiet, locked = locked)
assert(len(results) == 1)
return results[0] | [
"def",
"get_unique_record",
"(",
"self",
",",
"sql",
",",
"parameters",
"=",
"None",
",",
"quiet",
"=",
"False",
",",
"locked",
"=",
"False",
")",
":",
"results",
"=",
"self",
".",
"execute_select",
"(",
"sql",
",",
"parameters",
"=",
"parameters",
",",
"quiet",
"=",
"quiet",
",",
"locked",
"=",
"locked",
")",
"assert",
"(",
"len",
"(",
"results",
")",
"==",
"1",
")",
"return",
"results",
"[",
"0",
"]"
]
| I use this pattern a lot. Return the single record corresponding to the query. | [
"I",
"use",
"this",
"pattern",
"a",
"lot",
".",
"Return",
"the",
"single",
"record",
"corresponding",
"to",
"the",
"query",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/db/mysql.py#L167-L171 | train |
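
The single-row contract of `get_unique_record`, factored out as a runnable sketch; the example query and row are invented.

```python
def get_unique_record(rows):
    """Mirror of the assertion above: exactly one matching row, returned bare."""
    assert len(rows) == 1
    return rows[0]

# e.g. rows as returned by execute_select('SELECT * FROM Users WHERE ID=%s', (42,)):
print(get_unique_record([{'ID': 42, 'Name': 'alice'}]))  # -> the one dict
```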
Kortemme-Lab/klab | klab/db/mysql.py | DatabaseInterface.run_transaction | def run_transaction(self, command_list, do_commit=True):
'''This can be used to stage multiple commands and roll back the transaction if an error occurs. This is useful
if you want to remove multiple records in multiple tables for one entity but do not want the deletion to occur
    if the entity is tied to a table not specified in the list of commands. Performing this as a transaction avoids
the situation where the records are partially removed. If do_commit is false, the entire transaction is cancelled.'''
pass
# I decided against creating this for now.
# It may be more useful to create a stored procedure like in e.g. _create_protein_deletion_stored_procedure
# in the DDGadmin project and then use callproc
for c in command_list:
if c.find(";") != -1 or c.find("\\G") != -1:
# Catches *some* injections
raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % c)
if do_commit:
sql = "START TRANSACTION;\n%s;\nCOMMIT" % "\n".join(command_list)
else:
sql = "START TRANSACTION;\n%s;" % "\n".join(command_list)
#print(sql)
return | python | def run_transaction(self, command_list, do_commit=True):
'''This can be used to stage multiple commands and roll back the transaction if an error occurs. This is useful
if you want to remove multiple records in multiple tables for one entity but do not want the deletion to occur
    if the entity is tied to a table not specified in the list of commands. Performing this as a transaction avoids
the situation where the records are partially removed. If do_commit is false, the entire transaction is cancelled.'''
pass
# I decided against creating this for now.
# It may be more useful to create a stored procedure like in e.g. _create_protein_deletion_stored_procedure
# in the DDGadmin project and then use callproc
for c in command_list:
if c.find(";") != -1 or c.find("\\G") != -1:
# Catches *some* injections
raise Exception("The SQL command '%s' contains a semi-colon or \\G. This is a potential SQL injection." % c)
if do_commit:
sql = "START TRANSACTION;\n%s;\nCOMMIT" % "\n".join(command_list)
else:
sql = "START TRANSACTION;\n%s;" % "\n".join(command_list)
#print(sql)
return | [
"def",
"run_transaction",
"(",
"self",
",",
"command_list",
",",
"do_commit",
"=",
"True",
")",
":",
"pass",
"# I decided against creating this for now.",
"# It may be more useful to create a stored procedure like in e.g. _create_protein_deletion_stored_procedure",
"# in the DDGadmin project and then use callproc",
"for",
"c",
"in",
"command_list",
":",
"if",
"c",
".",
"find",
"(",
"\";\"",
")",
"!=",
"-",
"1",
"or",
"c",
".",
"find",
"(",
"\"\\\\G\"",
")",
"!=",
"-",
"1",
":",
"# Catches *some* injections",
"raise",
"Exception",
"(",
"\"The SQL command '%s' contains a semi-colon or \\\\G. This is a potential SQL injection.\"",
"%",
"c",
")",
"if",
"do_commit",
":",
"sql",
"=",
"\"START TRANSACTION;\\n%s;\\nCOMMIT\"",
"%",
"\"\\n\"",
".",
"join",
"(",
"command_list",
")",
"else",
":",
"sql",
"=",
"\"START TRANSACTION;\\n%s;\"",
"%",
"\"\\n\"",
".",
"join",
"(",
"command_list",
")",
"#print(sql)",
"return"
]
| This can be used to stage multiple commands and roll back the transaction if an error occurs. This is useful
if you want to remove multiple records in multiple tables for one entity but do not want the deletion to occur
if the entity is tied to a table not specified in the list of commands. Performing this as a transaction avoids
the situation where the records are partially removed. If do_commit is false, the entire transaction is cancelled. | [
"This",
"can",
"be",
"used",
"to",
"stage",
"multiple",
"commands",
"and",
"roll",
"back",
"the",
"transaction",
"if",
"an",
"error",
"occurs",
".",
"This",
"is",
"useful",
"if",
"you",
"want",
"to",
"remove",
"multiple",
"records",
"in",
"multiple",
"tables",
"for",
"one",
"entity",
"but",
"do",
"not",
"want",
"the",
"deletion",
"to",
"occur",
"if",
"the",
"entity",
"is",
"tied",
"to",
"table",
"not",
"specified",
"in",
"the",
"list",
"of",
"commands",
".",
"Performing",
"this",
"as",
"a",
"transaction",
"avoids",
"the",
"situation",
"where",
"the",
"records",
"are",
"partially",
"removed",
".",
"If",
"do_commit",
"is",
"false",
"the",
"entire",
"transaction",
"is",
"cancelled",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/db/mysql.py#L327-L347 | train |
Kortemme-Lab/klab | klab/db/mysql.py | DatabaseInterface.callproc | def callproc(self, procname, parameters=(), quiet=False, expect_return_value=False):
"""Calls a MySQL stored procedure procname and returns the return values. This uses DictCursor.
To get return values back out of a stored procedure, prefix the parameter with a @ character.
"""
self.procedures_run += 1
i = 0
errcode = 0
caughte = None
out_param_indices = []
for j in range(len(parameters)):
p = parameters[j]
if type(p) == type('') and p[0] == '@':
assert(p.find(' ') == -1)
out_param_indices.append(j)
if procname not in self.list_stored_procedures():
raise Exception("The stored procedure '%s' does not exist." % procname)
if not re.match("^\s*\w+\s*$", procname):
raise Exception("Expected a stored procedure name in callproc but received '%s'." % procname)
while i < self.numTries:
i += 1
try:
self._get_connection()
cursor = self.connection.cursor()
if type(parameters) != type(()):
parameters = (parameters,)
errcode = cursor.callproc(procname, parameters)
self.lastrowid = int(cursor.lastrowid)
cursor.close()
# Get the out parameters
out_param_results = []
if out_param_indices:
out_param_results = self.execute('SELECT %s' % ", ".join(['@_%s_%d AS %s' % (procname, pindex, parameters[pindex][1:]) for pindex in out_param_indices]))
return out_param_results
except MySQLdb.OperationalError, e:
self._close_connection()
errcode = e[0]
caughte = e
continue
except:
self._close_connection()
traceback.print_exc()
break
if not quiet:
sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (
procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte) | python | def callproc(self, procname, parameters=(), quiet=False, expect_return_value=False):
"""Calls a MySQL stored procedure procname and returns the return values. This uses DictCursor.
To get return values back out of a stored procedure, prefix the parameter with a @ character.
"""
self.procedures_run += 1
i = 0
errcode = 0
caughte = None
out_param_indices = []
for j in range(len(parameters)):
p = parameters[j]
if type(p) == type('') and p[0] == '@':
assert(p.find(' ') == -1)
out_param_indices.append(j)
if procname not in self.list_stored_procedures():
raise Exception("The stored procedure '%s' does not exist." % procname)
if not re.match("^\s*\w+\s*$", procname):
raise Exception("Expected a stored procedure name in callproc but received '%s'." % procname)
while i < self.numTries:
i += 1
try:
self._get_connection()
cursor = self.connection.cursor()
if type(parameters) != type(()):
parameters = (parameters,)
errcode = cursor.callproc(procname, parameters)
self.lastrowid = int(cursor.lastrowid)
cursor.close()
# Get the out parameters
out_param_results = []
if out_param_indices:
out_param_results = self.execute('SELECT %s' % ", ".join(['@_%s_%d AS %s' % (procname, pindex, parameters[pindex][1:]) for pindex in out_param_indices]))
return out_param_results
except MySQLdb.OperationalError, e:
self._close_connection()
errcode = e[0]
caughte = e
continue
except:
self._close_connection()
traceback.print_exc()
break
if not quiet:
sys.stderr.write("\nSQL execution error call stored procedure %s at %s:" % (
procname, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
sys.stderr.write("\nErrorcode/Error: %d - '%s'.\n" % (errcode, str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte) | [
"def",
"callproc",
"(",
"self",
",",
"procname",
",",
"parameters",
"=",
"(",
")",
",",
"quiet",
"=",
"False",
",",
"expect_return_value",
"=",
"False",
")",
":",
"self",
".",
"procedures_run",
"+=",
"1",
"i",
"=",
"0",
"errcode",
"=",
"0",
"caughte",
"=",
"None",
"out_param_indices",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"parameters",
")",
")",
":",
"p",
"=",
"parameters",
"[",
"j",
"]",
"if",
"type",
"(",
"p",
")",
"==",
"type",
"(",
"''",
")",
"and",
"p",
"[",
"0",
"]",
"==",
"'@'",
":",
"assert",
"(",
"p",
".",
"find",
"(",
"' '",
")",
"==",
"-",
"1",
")",
"out_param_indices",
".",
"append",
"(",
"j",
")",
"if",
"procname",
"not",
"in",
"self",
".",
"list_stored_procedures",
"(",
")",
":",
"raise",
"Exception",
"(",
"\"The stored procedure '%s' does not exist.\"",
"%",
"procname",
")",
"if",
"not",
"re",
".",
"match",
"(",
"\"^\\s*\\w+\\s*$\"",
",",
"procname",
")",
":",
"raise",
"Exception",
"(",
"\"Expected a stored procedure name in callproc but received '%s'.\"",
"%",
"procname",
")",
"while",
"i",
"<",
"self",
".",
"numTries",
":",
"i",
"+=",
"1",
"try",
":",
"self",
".",
"_get_connection",
"(",
")",
"cursor",
"=",
"self",
".",
"connection",
".",
"cursor",
"(",
")",
"if",
"type",
"(",
"parameters",
")",
"!=",
"type",
"(",
"(",
")",
")",
":",
"parameters",
"=",
"(",
"parameters",
",",
")",
"errcode",
"=",
"cursor",
".",
"callproc",
"(",
"procname",
",",
"parameters",
")",
"self",
".",
"lastrowid",
"=",
"int",
"(",
"cursor",
".",
"lastrowid",
")",
"cursor",
".",
"close",
"(",
")",
"# Get the out parameters",
"out_param_results",
"=",
"[",
"]",
"if",
"out_param_indices",
":",
"out_param_results",
"=",
"self",
".",
"execute",
"(",
"'SELECT %s'",
"%",
"\", \"",
".",
"join",
"(",
"[",
"'@_%s_%d AS %s'",
"%",
"(",
"procname",
",",
"pindex",
",",
"parameters",
"[",
"pindex",
"]",
"[",
"1",
":",
"]",
")",
"for",
"pindex",
"in",
"out_param_indices",
"]",
")",
")",
"return",
"out_param_results",
"except",
"MySQLdb",
".",
"OperationalError",
",",
"e",
":",
"self",
".",
"_close_connection",
"(",
")",
"errcode",
"=",
"e",
"[",
"0",
"]",
"caughte",
"=",
"e",
"continue",
"except",
":",
"self",
".",
"_close_connection",
"(",
")",
"traceback",
".",
"print_exc",
"(",
")",
"break",
"if",
"not",
"quiet",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\\nSQL execution error call stored procedure %s at %s:\"",
"%",
"(",
"procname",
",",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
")",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\\nErrorcode/Error: %d - '%s'.\\n\"",
"%",
"(",
"errcode",
",",
"str",
"(",
"caughte",
")",
")",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"raise",
"MySQLdb",
".",
"OperationalError",
"(",
"caughte",
")"
]
| Calls a MySQL stored procedure procname and returns the return values. This uses DictCursor.
To get return values back out of a stored procedure, prefix the parameter with a @ character. | [
"Calls",
"a",
"MySQL",
"stored",
"procedure",
"procname",
"and",
"returns",
"the",
"return",
"values",
".",
"This",
"uses",
"DictCursor",
".",
"To",
"get",
"return",
"values",
"back",
"out",
"of",
"a",
"stored",
"procedure",
"prefix",
"the",
"parameter",
"with",
"a"
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/db/mysql.py#L462-L514 | train |
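
How `callproc` reads OUT parameters back, shown standalone: MySQLdb rewrites procedure arguments into session variables named `@_<procname>_<index>`, and the method above SELECTs those for every argument the caller prefixed with `@`. Procedure and parameter names are invented.

```python
procname, parameters = 'GetUserCount', ('active', '@user_count')

# Indices of caller-marked OUT parameters (the '@' prefix from the docstring).
out_indices = [j for j, p in enumerate(parameters)
               if isinstance(p, str) and p.startswith('@')]

# The read-back query built by the method above.
select = 'SELECT %s' % ', '.join(
    '@_%s_%d AS %s' % (procname, j, parameters[j][1:]) for j in out_indices)
assert select == 'SELECT @_GetUserCount_1 AS user_count'
```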
Kortemme-Lab/klab | klab/db/mysql.py | DatabaseInterface.t_insert_dict_if_new | def t_insert_dict_if_new(self, tblname, d, PKfields, fields=None):
'''A version of insertDictIfNew for transactions. This does not call commit.'''
SQL, values = self._insert_dict_if_new_inner(tblname, d, PKfields, fields=fields)
if SQL != False:
self.execute_select(SQL, parameters=values, locked=True)
return True, d
return False, values | python | def t_insert_dict_if_new(self, tblname, d, PKfields, fields=None):
'''A version of insertDictIfNew for transactions. This does not call commit.'''
SQL, values = self._insert_dict_if_new_inner(tblname, d, PKfields, fields=fields)
if SQL != False:
self.execute_select(SQL, parameters=values, locked=True)
return True, d
return False, values | [
"def",
"t_insert_dict_if_new",
"(",
"self",
",",
"tblname",
",",
"d",
",",
"PKfields",
",",
"fields",
"=",
"None",
")",
":",
"SQL",
",",
"values",
"=",
"self",
".",
"_insert_dict_if_new_inner",
"(",
"tblname",
",",
"d",
",",
"PKfields",
",",
"fields",
"=",
"fields",
")",
"if",
"SQL",
"!=",
"False",
":",
"self",
".",
"execute_select",
"(",
"SQL",
",",
"parameters",
"=",
"values",
",",
"locked",
"=",
"True",
")",
"return",
"True",
",",
"d",
"return",
"False",
",",
"values"
]
| A version of insertDictIfNew for transactions. This does not call commit. | [
"A",
"version",
"of",
"insertDictIfNew",
"for",
"transactions",
".",
"This",
"does",
"not",
"call",
"commit",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/db/mysql.py#L538-L544 | train |
Kortemme-Lab/klab | klab/db/mysql.py | DatabaseInterface.create_insert_dict_string | def create_insert_dict_string(self, tblname, d, PKfields=[], fields=None, check_existing = False):
'''The main function of the insert_dict functions.
This creates and returns the SQL query and parameters used by the other functions but does not insert any data into the database.
Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
(False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
is returned.'''
if type(PKfields) == type(""):
PKfields = [PKfields]
if fields == None:
fields = sorted(d.keys())
values = None
SQL = None
try:
# Search for existing records
wherestr = []
PKvalues = []
for PKfield in PKfields:
if d[PKfield] == None:
wherestr.append("%s IS NULL" % PKfield)
else:
wherestr.append("%s=%%s" % PKfield)
PKvalues.append(d[PKfield])
PKfields = join(PKfields, ",")
wherestr = join(wherestr, " AND ")
record_exists = None
if check_existing:
record_exists = not(not(self.execute_select("SELECT %s FROM %s" % (PKfields, tblname) + " WHERE %s" % wherestr, parameters=tuple(PKvalues), locked = False)))
SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (
tblname, join(fields, ", "), join(['%s' for x in range(len(fields))], ','))
values = tuple([d[k] for k in fields])
return SQL, values, record_exists
except Exception, e:
raise Exception("Error occurred during database insertion: '%s'. %s" % (str(e), traceback.format_exc())) | python | def create_insert_dict_string(self, tblname, d, PKfields=[], fields=None, check_existing = False):
'''The main function of the insert_dict functions.
This creates and returns the SQL query and parameters used by the other functions but does not insert any data into the database.
Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
(False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
is returned.'''
if type(PKfields) == type(""):
PKfields = [PKfields]
if fields == None:
fields = sorted(d.keys())
values = None
SQL = None
try:
# Search for existing records
wherestr = []
PKvalues = []
for PKfield in PKfields:
if d[PKfield] == None:
wherestr.append("%s IS NULL" % PKfield)
else:
wherestr.append("%s=%%s" % PKfield)
PKvalues.append(d[PKfield])
PKfields = join(PKfields, ",")
wherestr = join(wherestr, " AND ")
record_exists = None
if check_existing:
record_exists = not(not(self.execute_select("SELECT %s FROM %s" % (PKfields, tblname) + " WHERE %s" % wherestr, parameters=tuple(PKvalues), locked = False)))
SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (
tblname, join(fields, ", "), join(['%s' for x in range(len(fields))], ','))
values = tuple([d[k] for k in fields])
return SQL, values, record_exists
except Exception, e:
raise Exception("Error occurred during database insertion: '%s'. %s" % (str(e), traceback.format_exc())) | [
"def",
"create_insert_dict_string",
"(",
"self",
",",
"tblname",
",",
"d",
",",
"PKfields",
"=",
"[",
"]",
",",
"fields",
"=",
"None",
",",
"check_existing",
"=",
"False",
")",
":",
"if",
"type",
"(",
"PKfields",
")",
"==",
"type",
"(",
"\"\"",
")",
":",
"PKfields",
"=",
"[",
"PKfields",
"]",
"if",
"fields",
"==",
"None",
":",
"fields",
"=",
"sorted",
"(",
"d",
".",
"keys",
"(",
")",
")",
"values",
"=",
"None",
"SQL",
"=",
"None",
"try",
":",
"# Search for existing records",
"wherestr",
"=",
"[",
"]",
"PKvalues",
"=",
"[",
"]",
"for",
"PKfield",
"in",
"PKfields",
":",
"if",
"d",
"[",
"PKfield",
"]",
"==",
"None",
":",
"wherestr",
".",
"append",
"(",
"\"%s IS NULL\"",
"%",
"PKfield",
")",
"else",
":",
"wherestr",
".",
"append",
"(",
"\"%s=%%s\"",
"%",
"PKfield",
")",
"PKvalues",
".",
"append",
"(",
"d",
"[",
"PKfield",
"]",
")",
"PKfields",
"=",
"join",
"(",
"PKfields",
",",
"\",\"",
")",
"wherestr",
"=",
"join",
"(",
"wherestr",
",",
"\" AND \"",
")",
"record_exists",
"=",
"None",
"if",
"check_existing",
":",
"record_exists",
"=",
"not",
"(",
"not",
"(",
"self",
".",
"execute_select",
"(",
"\"SELECT %s FROM %s\"",
"%",
"(",
"PKfields",
",",
"tblname",
")",
"+",
"\" WHERE %s\"",
"%",
"wherestr",
",",
"parameters",
"=",
"tuple",
"(",
"PKvalues",
")",
",",
"locked",
"=",
"False",
")",
")",
")",
"SQL",
"=",
"'INSERT INTO %s (%s) VALUES (%s)'",
"%",
"(",
"tblname",
",",
"join",
"(",
"fields",
",",
"\", \"",
")",
",",
"join",
"(",
"[",
"'%s'",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"fields",
")",
")",
"]",
",",
"','",
")",
")",
"values",
"=",
"tuple",
"(",
"[",
"d",
"[",
"k",
"]",
"for",
"k",
"in",
"fields",
"]",
")",
"return",
"SQL",
",",
"values",
",",
"record_exists",
"except",
"Exception",
",",
"e",
":",
"raise",
"Exception",
"(",
"\"Error occurred during database insertion: '%s'. %s\"",
"%",
"(",
"str",
"(",
"e",
")",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
")"
]
| The main function of the insert_dict functions.
This creates and returns the SQL query and parameters used by the other functions but does not insert any data into the database.
Simple function for inserting a dictionary whose keys match the fieldnames of tblname. The function returns two values, the
second of which is a dict containing the primary keys of the record. If a record already exists then no insertion is performed and
(False, the dictionary of existing primary keys) is returned. Otherwise, the record is inserted into the database and (True, d)
is returned. | [
"The",
"main",
"function",
"of",
"the",
"insert_dict",
"functions",
".",
"This",
"creates",
"and",
"returns",
"the",
"SQL",
"query",
"and",
"parameters",
"used",
"by",
"the",
"other",
"functions",
"but",
"does",
"not",
"insert",
"any",
"data",
"into",
"the",
"database",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/db/mysql.py#L649-L687 | train |
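
What the builder above produces for a two-column insert, reproduced standalone. Note that `join(seq, sep)` in the recorded Python 2 code is the old `string.join`, i.e. `sep.join(seq)`; the table and values here are invented.

```python
d = {'ID': 1, 'Name': 'alice'}
fields = sorted(d.keys())                                  # ['ID', 'Name']
sql = 'INSERT INTO %s (%s) VALUES (%s)' % (
    'Users', ', '.join(fields), ','.join('%s' for _ in fields))
values = tuple(d[k] for k in fields)

assert sql == 'INSERT INTO Users (ID, Name) VALUES (%s,%s)'
assert values == (1, 'alice')
# With check_existing=False (the default) the third return value is None.
```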
ronhanson/python-tbx | tbx/bytes.py | bytes_to_int | def bytes_to_int(byte_array, big_endian=True, signed=False):
"""
Converts a byte array to an integer.
"""
if six.PY3:
order = 'little'
if big_endian:
order = 'big'
return int.from_bytes(byte_array, byteorder=order, signed=signed)
else:
length = len(byte_array)
if length == 1:
code = 'B'
elif length == 2:
code = 'H'
elif length == 4:
code = 'L'
elif length == 8:
code = 'Q'
else:
raise Exception("bytes_to_int : length of byte_array should be 1, 2, 4, or 8")
if big_endian:
code = '>'+code
else:
code = '<'+code
if signed:
code = code.lower()
return struct.unpack(code, byte_array)[0] | python | def bytes_to_int(byte_array, big_endian=True, signed=False):
"""
Converts a byte array to an integer.
"""
if six.PY3:
order = 'little'
if big_endian:
order = 'big'
return int.from_bytes(byte_array, byteorder=order, signed=signed)
else:
length = len(byte_array)
if length == 1:
code = 'B'
elif length == 2:
code = 'H'
elif length == 4:
code = 'L'
elif length == 8:
code = 'Q'
else:
raise Exception("bytes_to_int : length of byte_array should be 1, 2, 4, or 8")
if big_endian:
code = '>'+code
else:
code = '<'+code
if signed:
code = code.lower()
return struct.unpack(code, byte_array)[0] | [
"def",
"bytes_to_int",
"(",
"byte_array",
",",
"big_endian",
"=",
"True",
",",
"signed",
"=",
"False",
")",
":",
"if",
"six",
".",
"PY3",
":",
"order",
"=",
"'little'",
"if",
"big_endian",
":",
"order",
"=",
"'big'",
"return",
"int",
".",
"from_bytes",
"(",
"byte_array",
",",
"byteorder",
"=",
"order",
",",
"signed",
"=",
"signed",
")",
"else",
":",
"length",
"=",
"len",
"(",
"byte_array",
")",
"if",
"length",
"==",
"1",
":",
"code",
"=",
"'B'",
"elif",
"length",
"==",
"2",
":",
"code",
"=",
"'H'",
"elif",
"length",
"==",
"4",
":",
"code",
"=",
"'L'",
"elif",
"length",
"==",
"8",
":",
"code",
"=",
"'Q'",
"else",
":",
"raise",
"Exception",
"(",
"\"bytes_to_int : length of byte_array should be 1, 2, 4, or 8\"",
")",
"if",
"big_endian",
":",
"code",
"=",
"'>'",
"+",
"code",
"else",
":",
"code",
"=",
"'<'",
"+",
"code",
"if",
"signed",
":",
"code",
"=",
"code",
".",
"lower",
"(",
")",
"return",
"struct",
".",
"unpack",
"(",
"code",
",",
"byte_array",
")",
"[",
"0",
"]"
]
| Converts a byte array to an integer. | [
"Converts",
"a",
"byte",
"array",
"to",
"an",
"integer",
"."
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/bytes.py#L20-L48 | train |
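
A worked example covering both branches of `bytes_to_int` above.

```python
import struct

# Python 2 branch: a 2-byte input selects struct code 'H'.
assert struct.unpack('>H', b'\x01\x00')[0] == 256    # big-endian
assert struct.unpack('<H', b'\x01\x00')[0] == 1      # little-endian
# Python 3 branch delegates to int.from_bytes.
assert int.from_bytes(b'\x01\x00', byteorder='big') == 256
assert int.from_bytes(b'\xff', byteorder='big', signed=True) == -1
```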
ronhanson/python-tbx | tbx/bytes.py | ip_to_bytes | def ip_to_bytes(ip_str, big_endian=True):
"""
Converts an IP given as a string to a byte sequence
"""
if big_endian:
code = '>L'
else:
code = '<L'
return bytes(struct.unpack(code, socket.inet_aton(ip_str))[0]) | python | def ip_to_bytes(ip_str, big_endian=True):
"""
Converts an IP given as a string to a byte sequence
"""
if big_endian:
code = '>L'
else:
code = '<L'
return bytes(struct.unpack(code, socket.inet_aton(ip_str))[0]) | [
"def",
"ip_to_bytes",
"(",
"ip_str",
",",
"big_endian",
"=",
"True",
")",
":",
"if",
"big_endian",
":",
"code",
"=",
"'>L'",
"else",
":",
"code",
"=",
"'<L'",
"return",
"bytes",
"(",
"struct",
".",
"unpack",
"(",
"code",
",",
"socket",
".",
"inet_aton",
"(",
"ip_str",
")",
")",
"[",
"0",
"]",
")"
]
| Converts an IP given as a string to a byte sequence | [
"Converts",
"an",
"IP",
"given",
"as",
"a",
"string",
"to",
"a",
"byte",
"sequence"
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/bytes.py#L169-L177 | train |
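
The core conversion inside `ip_to_bytes`, checked against a concrete address. The recorded function then wraps the unpacked integer in `bytes()`, which only behaves as intended on Python 2 (where `bytes` is `str`).

```python
import socket
import struct

packed = socket.inet_aton('192.168.1.1')             # b'\xc0\xa8\x01\x01'
assert struct.unpack('>L', packed)[0] == 3232235777  # big-endian integer
assert struct.unpack('<L', packed)[0] == 16885952    # little-endian integer
```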
assamite/creamas | creamas/logging.py | ObjectLogger.get_file | def get_file(self, attr_name):
'''Return absolute path to logging file for obj's attribute.'''
return os.path.abspath(os.path.join(self.folder, "{}.log"
.format(attr_name))) | python | def get_file(self, attr_name):
'''Return absolute path to logging file for obj's attribute.'''
return os.path.abspath(os.path.join(self.folder, "{}.log"
.format(attr_name))) | [
"def",
"get_file",
"(",
"self",
",",
"attr_name",
")",
":",
"return",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"folder",
",",
"\"{}.log\"",
".",
"format",
"(",
"attr_name",
")",
")",
")"
]
| Return absolute path to logging file for obj's attribute. | [
"Return",
"absolute",
"path",
"to",
"logging",
"file",
"for",
"obj",
"s",
"attribute",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/logging.py#L115-L118 | train |
assamite/creamas | creamas/logging.py | ObjectLogger.log_attr | def log_attr(self, level, attr_name):
'''Log attribute to file and pass the message to underlying logger.
:param int level: logging level
:param str attr_name: attribute's name to be logged
'''
msg = self.write(attr_name)
self.log(level, msg) | python | def log_attr(self, level, attr_name):
'''Log attribute to file and pass the message to underlying logger.
:param int level: logging level
:param str attr_name: attribute's name to be logged
'''
msg = self.write(attr_name)
self.log(level, msg) | [
"def",
"log_attr",
"(",
"self",
",",
"level",
",",
"attr_name",
")",
":",
"msg",
"=",
"self",
".",
"write",
"(",
"attr_name",
")",
"self",
".",
"log",
"(",
"level",
",",
"msg",
")"
]
| Log attribute to file and pass the message to underlying logger.
:param int level: logging level
:param str attr_name: attribute's name to be logged | [
"Log",
"attribute",
"to",
"file",
"and",
"pass",
"the",
"message",
"to",
"underlying",
"logger",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/logging.py#L120-L127 | train |
assamite/creamas | creamas/logging.py | ObjectLogger.write | def write(self, attr_name, prefix=None):
'''Write attribute's value to a file.
:param str attr_name:
Attribute's name to be logged
:param str prefix:
Optional. Attribute's name that is prefixed to logging message,
defaults to ``None``.
:returns: message written to file
:rtype: str
'''
if self._folder is None:
return
separator = "\t"
attr = getattr(self.obj, attr_name)
if hasattr(attr, '__iter__'):
msg = separator.join([str(e) for e in attr])
else:
msg = str(attr)
if prefix is not None:
msg = "{}\t{}".format(getattr(self.obj, prefix), msg)
path = self.get_file(attr_name)
with open(path, 'a') as f:
f.write("{}\n".format(msg))
return msg | python | def write(self, attr_name, prefix=None):
'''Write attribute's value to a file.
:param str attr_name:
Attribute's name to be logged
:param str prefix:
Optional. Attribute's name that is prefixed to logging message,
defaults to ``None``.
:returns: message written to file
:rtype: str
'''
if self._folder is None:
return
separator = "\t"
attr = getattr(self.obj, attr_name)
if hasattr(attr, '__iter__'):
msg = separator.join([str(e) for e in attr])
else:
msg = str(attr)
if prefix is not None:
msg = "{}\t{}".format(getattr(self.obj, prefix), msg)
path = self.get_file(attr_name)
with open(path, 'a') as f:
f.write("{}\n".format(msg))
return msg | [
"def",
"write",
"(",
"self",
",",
"attr_name",
",",
"prefix",
"=",
"None",
")",
":",
"if",
"self",
".",
"_folder",
"is",
"None",
":",
"return",
"separator",
"=",
"\"\\t\"",
"attr",
"=",
"getattr",
"(",
"self",
".",
"obj",
",",
"attr_name",
")",
"if",
"hasattr",
"(",
"attr",
",",
"'__iter__'",
")",
":",
"msg",
"=",
"separator",
".",
"join",
"(",
"[",
"str",
"(",
"e",
")",
"for",
"e",
"in",
"attr",
"]",
")",
"else",
":",
"msg",
"=",
"str",
"(",
"attr",
")",
"if",
"prefix",
"is",
"not",
"None",
":",
"msg",
"=",
"\"{}\\t{}\"",
".",
"format",
"(",
"getattr",
"(",
"self",
".",
"obj",
",",
"prefix",
")",
",",
"msg",
")",
"path",
"=",
"self",
".",
"get_file",
"(",
"attr_name",
")",
"with",
"open",
"(",
"path",
",",
"'a'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"{}\\n\"",
".",
"format",
"(",
"msg",
")",
")",
"return",
"msg"
]
| Write attribute's value to a file.
:param str attr_name:
Attribute's name to be logged
:param str prefix:
Optional. Attribute's name that is prefixed to logging message,
defaults to ``None``.
:returns: message written to file
:rtype: str | [
"Write",
"attribute",
"s",
"value",
"to",
"a",
"file",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/logging.py#L133-L163 | train |
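
The formatting rule from `write` above, shown standalone: iterable attribute values are tab-joined, scalars are passed through `str()`, and the optional prefix attribute is tab-prepended.

```python
def format_attr(attr, prefix=None):
    """Mirror of the message construction in ObjectLogger.write."""
    if hasattr(attr, '__iter__'):
        msg = '\t'.join(str(e) for e in attr)
    else:
        msg = str(attr)
    if prefix is not None:
        msg = '{}\t{}'.format(prefix, msg)
    return msg

assert format_attr([1, 2, 3]) == '1\t2\t3'
assert format_attr(0.5) == '0.5'
assert format_attr([1, 2], prefix='agent1') == 'agent1\t1\t2'
```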
adaptive-learning/proso-apps | proso/func.py | is_lambda | def is_lambda(fun):
"""
Check whether the given function is a lambda function.
.. testsetup::
from proso.func import is_lambda
.. testcode::
def not_lambda_fun():
return 1
lambda_fun = lambda: 1
print(
is_lambda(not_lambda_fun),
is_lambda(lambda_fun)
)
.. testoutput::
False True
Args:
fun (function)
Returns:
bool: True if the given function is a lambda function, False otherwise
"""
return isinstance(fun, type(LAMBDA)) and fun.__name__ == LAMBDA.__name__ | python | def is_lambda(fun):
"""
Check whether the given function is a lambda function.
.. testsetup::
from proso.func import is_lambda
.. testcode::
def not_lambda_fun():
return 1
lambda_fun = lambda: 1
print(
is_lambda(not_lambda_fun),
is_lambda(lambda_fun)
)
.. testoutput::
False True
Args:
fun (function)
Returns:
bool: True if the given function is a lambda function, False otherwise
"""
return isinstance(fun, type(LAMBDA)) and fun.__name__ == LAMBDA.__name__ | [
"def",
"is_lambda",
"(",
"fun",
")",
":",
"return",
"isinstance",
"(",
"fun",
",",
"type",
"(",
"LAMBDA",
")",
")",
"and",
"fun",
".",
"__name__",
"==",
"LAMBDA",
".",
"__name__"
]
| Check whether the given function is a lambda function.
.. testsetup::
from proso.func import is_lambda
.. testcode::
def not_lambda_fun():
return 1
lambda_fun = lambda: 1
print(
is_lambda(not_lambda_fun),
is_lambda(lambda_fun)
)
.. testoutput::
False True
Args:
fun (function)
Returns:
bool: True if the given function is a lambda function, False otherwise | [
"Check",
"whether",
"the",
"given",
"function",
"is",
"a",
"lambda",
"function",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso/func.py#L4-L33 | train |
adaptive-learning/proso-apps | proso/func.py | fixed_point | def fixed_point(is_zero, plus, minus, f, x):
"""
Get the least fixed point when it can be computed piecewise.
.. testsetup::
from proso.func import fixed_point
.. doctest::
>>> sorted(fixed_point(
... is_zero=lambda xs: len(xs) == 0,
... plus=lambda xs, ys: xs + ys,
... minus=lambda xs, ys: [x for x in xs if x not in ys],
... f=lambda xs: [x + 1 for x in xs if x < 10],
... x=[0, 5, 8]
... ))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Args:
is_zero: function returning True if the given value is zero
plus: function taking two values and returning their addition
    minus: function taking two values and returning their difference
f: function computing the expected value
x: initial value
Returns:
The least fixed point.
"""
@memo_Y
def _fixed_point(fixed_point_fun):
def __fixed_point(collected, new):
diff = minus(new, collected)
if is_zero(diff):
return collected
return fixed_point_fun(plus(collected, diff), f(diff))
return __fixed_point
return _fixed_point(x, f(x)) | python | def fixed_point(is_zero, plus, minus, f, x):
"""
Get the least fixed point when it can be computed piecewise.
.. testsetup::
from proso.func import fixed_point
.. doctest::
>>> sorted(fixed_point(
... is_zero=lambda xs: len(xs) == 0,
... plus=lambda xs, ys: xs + ys,
... minus=lambda xs, ys: [x for x in xs if x not in ys],
... f=lambda xs: [x + 1 for x in xs if x < 10],
... x=[0, 5, 8]
... ))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Args:
is_zero: function returning True if the given value is zero
plus: function taking two values and returning their addition
    minus: function taking two values and returning their difference
f: function computing the expected value
x: initial value
Returns:
The least fixed point.
"""
@memo_Y
def _fixed_point(fixed_point_fun):
def __fixed_point(collected, new):
diff = minus(new, collected)
if is_zero(diff):
return collected
return fixed_point_fun(plus(collected, diff), f(diff))
return __fixed_point
return _fixed_point(x, f(x)) | [
"def",
"fixed_point",
"(",
"is_zero",
",",
"plus",
",",
"minus",
",",
"f",
",",
"x",
")",
":",
"@",
"memo_Y",
"def",
"_fixed_point",
"(",
"fixed_point_fun",
")",
":",
"def",
"__fixed_point",
"(",
"collected",
",",
"new",
")",
":",
"diff",
"=",
"minus",
"(",
"new",
",",
"collected",
")",
"if",
"is_zero",
"(",
"diff",
")",
":",
"return",
"collected",
"return",
"fixed_point_fun",
"(",
"plus",
"(",
"collected",
",",
"diff",
")",
",",
"f",
"(",
"diff",
")",
")",
"return",
"__fixed_point",
"return",
"_fixed_point",
"(",
"x",
",",
"f",
"(",
"x",
")",
")"
]
| Get the least fixed point when it can be computed piecewise.
.. testsetup::
from proso.func import fixed_point
.. doctest::
>>> sorted(fixed_point(
... is_zero=lambda xs: len(xs) == 0,
... plus=lambda xs, ys: xs + ys,
... minus=lambda xs, ys: [x for x in xs if x not in ys],
... f=lambda xs: [x + 1 for x in xs if x < 10],
... x=[0, 5, 8]
... ))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Args:
is_zero: function returning True if the given value is zero
plus: function taking two values and returning their addition
minus: function taking two values and returning their difference
f: function computing the expected value
x: initial value
Returns:
The least fixed point. | [
"Get",
"the",
"least",
"fixed",
"point",
"when",
"it",
"can",
"be",
"computed",
"piecewise",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso/func.py#L44-L83 | train |
adaptive-learning/proso-apps | proso/func.py | memo_Y | def memo_Y(f):
"""
Memoized Y combinator.
.. testsetup::
from proso.func import memo_Y
.. testcode::
@memo_Y
def fib(f):
def inner_fib(n):
if n > 1:
return f(n - 1) + f(n - 2)
else:
return n
return inner_fib
print(fib(100))
.. testoutput::
354224848179261915075
"""
sub = {}
def Yf(*args):
hashable_args = tuple([repr(x) for x in args])
if args:
if hashable_args not in sub:
ret = sub[hashable_args] = f(Yf)(*args)
else:
ret = sub[hashable_args]
return ret
return f(Yf)()
return f(Yf) | python | def memo_Y(f):
"""
Memoized Y combinator.
.. testsetup::
from proso.func import memo_Y
.. testcode::
@memo_Y
def fib(f):
def inner_fib(n):
if n > 1:
return f(n - 1) + f(n - 2)
else:
return n
return inner_fib
print(fib(100))
.. testoutput::
354224848179261915075
"""
sub = {}
def Yf(*args):
hashable_args = tuple([repr(x) for x in args])
if args:
if hashable_args not in sub:
ret = sub[hashable_args] = f(Yf)(*args)
else:
ret = sub[hashable_args]
return ret
return f(Yf)()
return f(Yf) | [
"def",
"memo_Y",
"(",
"f",
")",
":",
"sub",
"=",
"{",
"}",
"def",
"Yf",
"(",
"*",
"args",
")",
":",
"hashable_args",
"=",
"tuple",
"(",
"[",
"repr",
"(",
"x",
")",
"for",
"x",
"in",
"args",
"]",
")",
"if",
"args",
":",
"if",
"hashable_args",
"not",
"in",
"sub",
":",
"ret",
"=",
"sub",
"[",
"hashable_args",
"]",
"=",
"f",
"(",
"Yf",
")",
"(",
"*",
"args",
")",
"else",
":",
"ret",
"=",
"sub",
"[",
"hashable_args",
"]",
"return",
"ret",
"return",
"f",
"(",
"Yf",
")",
"(",
")",
"return",
"f",
"(",
"Yf",
")"
]
| Memoized Y combinator.
.. testsetup::
from proso.func import memo_Y
.. testcode::
@memo_Y
def fib(f):
def inner_fib(n):
if n > 1:
return f(n - 1) + f(n - 2)
else:
return n
return inner_fib
print(fib(100))
.. testoutput::
354224848179261915075 | [
"Memoized",
"Y",
"combinator",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso/func.py#L86-L122 | train |
sprockets/sprockets.mixins.mediatype | sprockets/mixins/mediatype/content.py | install | def install(application, default_content_type, encoding=None):
"""
Install the media type management settings.
:param tornado.web.Application application: the application to
install a :class:`.ContentSettings` object into.
:param str|NoneType default_content_type:
:param str|NoneType encoding:
:returns: the content settings instance
:rtype: sprockets.mixins.mediatype.content.ContentSettings
"""
try:
settings = application.settings[SETTINGS_KEY]
except KeyError:
settings = application.settings[SETTINGS_KEY] = ContentSettings()
settings.default_content_type = default_content_type
settings.default_encoding = encoding
return settings | python | def install(application, default_content_type, encoding=None):
"""
Install the media type management settings.
:param tornado.web.Application application: the application to
install a :class:`.ContentSettings` object into.
:param str|NoneType default_content_type: the content type to use by default
:param str|NoneType encoding: the character encoding to use by default
:returns: the content settings instance
:rtype: sprockets.mixins.mediatype.content.ContentSettings
"""
try:
settings = application.settings[SETTINGS_KEY]
except KeyError:
settings = application.settings[SETTINGS_KEY] = ContentSettings()
settings.default_content_type = default_content_type
settings.default_encoding = encoding
return settings | [
"def",
"install",
"(",
"application",
",",
"default_content_type",
",",
"encoding",
"=",
"None",
")",
":",
"try",
":",
"settings",
"=",
"application",
".",
"settings",
"[",
"SETTINGS_KEY",
"]",
"except",
"KeyError",
":",
"settings",
"=",
"application",
".",
"settings",
"[",
"SETTINGS_KEY",
"]",
"=",
"ContentSettings",
"(",
")",
"settings",
".",
"default_content_type",
"=",
"default_content_type",
"settings",
".",
"default_encoding",
"=",
"encoding",
"return",
"settings"
]
| Install the media type management settings.
:param tornado.web.Application application: the application to
install a :class:`.ContentSettings` object into.
:param str|NoneType default_content_type: the content type to use by default
:param str|NoneType encoding: the character encoding to use by default
:returns: the content settings instance
:rtype: sprockets.mixins.mediatype.content.ContentSettings | [
"Install",
"the",
"media",
"type",
"management",
"settings",
"."
]
| c034e04f674201487a8d6ce9f8ce36f3f5de07d8 | https://github.com/sprockets/sprockets.mixins.mediatype/blob/c034e04f674201487a8d6ce9f8ce36f3f5de07d8/sprockets/mixins/mediatype/content.py#L127-L146 | train |
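A minimal sketch of calling install() at application start-up; the empty route table and the chosen defaults are illustrative, not mandated by the source.

from tornado import web
from sprockets.mixins.mediatype import content

app = web.Application([])  # routes omitted for brevity
content.install(app, 'application/json', 'utf-8')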
sprockets/sprockets.mixins.mediatype | sprockets/mixins/mediatype/content.py | get_settings | def get_settings(application, force_instance=False):
"""
Retrieve the media type settings for an application.
:param tornado.web.Application application:
:keyword bool force_instance: if :data:`True` then create the
instance if it does not exist
:return: the content settings instance
:rtype: sprockets.mixins.mediatype.content.ContentSettings
"""
try:
return application.settings[SETTINGS_KEY]
except KeyError:
if not force_instance:
return None
return install(application, None) | python | def get_settings(application, force_instance=False):
"""
Retrieve the media type settings for an application.
:param tornado.web.Application application:
:keyword bool force_instance: if :data:`True` then create the
instance if it does not exist
:return: the content settings instance
:rtype: sprockets.mixins.mediatype.content.ContentSettings
"""
try:
return application.settings[SETTINGS_KEY]
except KeyError:
if not force_instance:
return None
return install(application, None) | [
"def",
"get_settings",
"(",
"application",
",",
"force_instance",
"=",
"False",
")",
":",
"try",
":",
"return",
"application",
".",
"settings",
"[",
"SETTINGS_KEY",
"]",
"except",
"KeyError",
":",
"if",
"not",
"force_instance",
":",
"return",
"None",
"return",
"install",
"(",
"application",
",",
"None",
")"
]
| Retrieve the media type settings for an application.
:param tornado.web.Application application:
:keyword bool force_instance: if :data:`True` then create the
instance if it does not exist
:return: the content settings instance
:rtype: sprockets.mixins.mediatype.content.ContentSettings | [
"Retrieve",
"the",
"media",
"type",
"settings",
"for",
"a",
"application",
"."
]
| c034e04f674201487a8d6ce9f8ce36f3f5de07d8 | https://github.com/sprockets/sprockets.mixins.mediatype/blob/c034e04f674201487a8d6ce9f8ce36f3f5de07d8/sprockets/mixins/mediatype/content.py#L149-L166 | train |
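Illustrating the force_instance switch documented above; a sketch in which a fresh application stands in for a real one.

from tornado import web
from sprockets.mixins.mediatype import content

app = web.Application([])
print(content.get_settings(app))  # None until install() has run
settings = content.get_settings(app, force_instance=True)  # created on demand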
sprockets/sprockets.mixins.mediatype | sprockets/mixins/mediatype/content.py | add_binary_content_type | def add_binary_content_type(application, content_type, pack, unpack):
"""
Add handler for a binary content type.
:param tornado.web.Application application: the application to modify
:param str content_type: the content type to add
:param pack: function that packs a dictionary to a byte string.
``pack(dict) -> bytes``
:param unpack: function that takes a byte string and returns a
dictionary. ``unpack(bytes) -> dict``
"""
add_transcoder(application,
handlers.BinaryContentHandler(content_type, pack, unpack)) | python | def add_binary_content_type(application, content_type, pack, unpack):
"""
Add handler for a binary content type.
:param tornado.web.Application application: the application to modify
:param str content_type: the content type to add
:param pack: function that packs a dictionary to a byte string.
``pack(dict) -> bytes``
:param unpack: function that takes a byte string and returns a
dictionary. ``unpack(bytes) -> dict``
"""
add_transcoder(application,
handlers.BinaryContentHandler(content_type, pack, unpack)) | [
"def",
"add_binary_content_type",
"(",
"application",
",",
"content_type",
",",
"pack",
",",
"unpack",
")",
":",
"add_transcoder",
"(",
"application",
",",
"handlers",
".",
"BinaryContentHandler",
"(",
"content_type",
",",
"pack",
",",
"unpack",
")",
")"
]
| Add handler for a binary content type.
:param tornado.web.Application application: the application to modify
:param str content_type: the content type to add
:param pack: function that packs a dictionary to a byte string.
``pack(dict) -> bytes``
:param unpack: function that takes a byte string and returns a
dictionary. ``unpack(bytes) -> dict`` | [
"Add",
"handler",
"for",
"a",
"binary",
"content",
"type",
"."
]
| c034e04f674201487a8d6ce9f8ce36f3f5de07d8 | https://github.com/sprockets/sprockets.mixins.mediatype/blob/c034e04f674201487a8d6ce9f8ce36f3f5de07d8/sprockets/mixins/mediatype/content.py#L169-L182 | train |
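A sketch registering MessagePack support through the function above; it assumes the third-party msgpack package, whose packb/unpackb functions already have the required dict-to-bytes shape.

import msgpack
from tornado import web
from sprockets.mixins.mediatype import content

app = web.Application([])
content.add_binary_content_type(
    app, 'application/msgpack',
    pack=msgpack.packb,      # dict -> bytes
    unpack=msgpack.unpackb)  # bytes -> dict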
sprockets/sprockets.mixins.mediatype | sprockets/mixins/mediatype/content.py | add_text_content_type | def add_text_content_type(application, content_type, default_encoding,
dumps, loads):
"""
Add handler for a text content type.
:param tornado.web.Application application: the application to modify
:param str content_type: the content type to add
:param str default_encoding: encoding to use when one is unspecified
:param dumps: function that dumps a dictionary to a string.
``dumps(dict, encoding:str) -> str``
:param loads: function that loads a dictionary from a string.
``loads(str, encoding:str) -> dict``
Note that the ``charset`` parameter is stripped from `content_type`
if it is present.
"""
parsed = headers.parse_content_type(content_type)
parsed.parameters.pop('charset', None)
normalized = str(parsed)
add_transcoder(application,
handlers.TextContentHandler(normalized, dumps, loads,
default_encoding)) | python | def add_text_content_type(application, content_type, default_encoding,
dumps, loads):
"""
Add handler for a text content type.
:param tornado.web.Application application: the application to modify
:param str content_type: the content type to add
:param str default_encoding: encoding to use when one is unspecified
:param dumps: function that dumps a dictionary to a string.
``dumps(dict, encoding:str) -> str``
:param loads: function that loads a dictionary from a string.
``loads(str, encoding:str) -> dict``
Note that the ``charset`` parameter is stripped from `content_type`
if it is present.
"""
parsed = headers.parse_content_type(content_type)
parsed.parameters.pop('charset', None)
normalized = str(parsed)
add_transcoder(application,
handlers.TextContentHandler(normalized, dumps, loads,
default_encoding)) | [
"def",
"add_text_content_type",
"(",
"application",
",",
"content_type",
",",
"default_encoding",
",",
"dumps",
",",
"loads",
")",
":",
"parsed",
"=",
"headers",
".",
"parse_content_type",
"(",
"content_type",
")",
"parsed",
".",
"parameters",
".",
"pop",
"(",
"'charset'",
",",
"None",
")",
"normalized",
"=",
"str",
"(",
"parsed",
")",
"add_transcoder",
"(",
"application",
",",
"handlers",
".",
"TextContentHandler",
"(",
"normalized",
",",
"dumps",
",",
"loads",
",",
"default_encoding",
")",
")"
]
| Add handler for a text content type.
:param tornado.web.Application application: the application to modify
:param str content_type: the content type to add
:param str default_encoding: encoding to use when one is unspecified
:param dumps: function that dumps a dictionary to a string.
``dumps(dict, encoding:str) -> str``
:param loads: function that loads a dictionary from a string.
``loads(str, encoding:str) -> dict``
Note that the ``charset`` parameter is stripped from `content_type`
if it is present. | [
"Add",
"handler",
"for",
"a",
"text",
"content",
"type",
"."
]
| c034e04f674201487a8d6ce9f8ce36f3f5de07d8 | https://github.com/sprockets/sprockets.mixins.mediatype/blob/c034e04f674201487a8d6ce9f8ce36f3f5de07d8/sprockets/mixins/mediatype/content.py#L185-L207 | train |
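A sketch using the standard-library json module, following the dumps/loads signatures stated in the docstring; the encoding parameter is accepted but deliberately unused in these wrappers.

import json
from tornado import web
from sprockets.mixins.mediatype import content

app = web.Application([])
content.add_text_content_type(
    app, 'application/json', 'utf-8',
    dumps=lambda body, encoding='utf-8': json.dumps(body),
    loads=lambda raw, encoding='utf-8': json.loads(raw))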
sprockets/sprockets.mixins.mediatype | sprockets/mixins/mediatype/content.py | add_transcoder | def add_transcoder(application, transcoder, content_type=None):
"""
Register a transcoder for a specific content type.
:param tornado.web.Application application: the application to modify
:param transcoder: object that translates between :class:`bytes` and
:class:`object` instances
:param str content_type: the content type to add. If this is
unspecified or :data:`None`, then the transcoder's ``content_type``
attribute is used.
The `transcoder` instance is required to implement the following
simple protocol:
.. attribute:: transcoder.content_type
:class:`str` that identifies the MIME type that the transcoder
implements.
.. method:: transcoder.to_bytes(inst_data, encoding=None) -> bytes
:param object inst_data: the object to encode
:param str encoding: character encoding to apply or :data:`None`
:returns: the encoded :class:`bytes` instance
.. method:: transcoder.from_bytes(data_bytes, encoding=None) -> object
:param bytes data_bytes: the :class:`bytes` instance to decode
:param str encoding: character encoding to use or :data:`None`
:returns: the decoded :class:`object` instance
"""
settings = get_settings(application, force_instance=True)
settings[content_type or transcoder.content_type] = transcoder | python | def add_transcoder(application, transcoder, content_type=None):
"""
Register a transcoder for a specific content type.
:param tornado.web.Application application: the application to modify
:param transcoder: object that translates between :class:`bytes` and
:class:`object` instances
:param str content_type: the content type to add. If this is
unspecified or :data:`None`, then the transcoder's ``content_type``
attribute is used.
The `transcoder` instance is required to implement the following
simple protocol:
.. attribute:: transcoder.content_type
:class:`str` that identifies the MIME type that the transcoder
implements.
.. method:: transcoder.to_bytes(inst_data, encoding=None) -> bytes
:param object inst_data: the object to encode
:param str encoding: character encoding to apply or :data:`None`
:returns: the encoded :class:`bytes` instance
.. method:: transcoder.from_bytes(data_bytes, encoding=None) -> object
:param bytes data_bytes: the :class:`bytes` instance to decode
:param str encoding: character encoding to use or :data:`None`
:returns: the decoded :class:`object` instance
"""
settings = get_settings(application, force_instance=True)
settings[content_type or transcoder.content_type] = transcoder | [
"def",
"add_transcoder",
"(",
"application",
",",
"transcoder",
",",
"content_type",
"=",
"None",
")",
":",
"settings",
"=",
"get_settings",
"(",
"application",
",",
"force_instance",
"=",
"True",
")",
"settings",
"[",
"content_type",
"or",
"transcoder",
".",
"content_type",
"]",
"=",
"transcoder"
]
| Register a transcoder for a specific content type.
:param tornado.web.Application application: the application to modify
:param transcoder: object that translates between :class:`bytes` and
:class:`object` instances
:param str content_type: the content type to add. If this is
unspecified or :data:`None`, then the transcoder's ``content_type``
attribute is used.
The `transcoder` instance is required to implement the following
simple protocol:
.. attribute:: transcoder.content_type
:class:`str` that identifies the MIME type that the transcoder
implements.
.. method:: transcoder.to_bytes(inst_data, encoding=None) -> bytes
:param object inst_data: the object to encode
:param str encoding: character encoding to apply or :data:`None`
:returns: the encoded :class:`bytes` instance
.. method:: transcoder.from_bytes(data_bytes, encoding=None) -> object
:param bytes data_bytes: the :class:`bytes` instance to decode
:param str encoding: character encoding to use or :data:`None`
:returns: the decoded :class:`object` instance | [
"Register",
"a",
"transcoder",
"for",
"a",
"specific",
"content",
"type",
"."
]
| c034e04f674201487a8d6ce9f8ce36f3f5de07d8 | https://github.com/sprockets/sprockets.mixins.mediatype/blob/c034e04f674201487a8d6ce9f8ce36f3f5de07d8/sprockets/mixins/mediatype/content.py#L210-L243 | train |
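The package also ships ready-made transcoders satisfying this protocol; a sketch of the usual registration pattern, assuming a transcoders module with a JSONTranscoder class exists alongside this one.

from tornado import web
from sprockets.mixins.mediatype import content, transcoders

app = web.Application([])
content.add_transcoder(app, transcoders.JSONTranscoder())
# The same transcoder can also be registered under an extra name:
content.add_transcoder(app, transcoders.JSONTranscoder(), 'text/json')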
sprockets/sprockets.mixins.mediatype | sprockets/mixins/mediatype/content.py | set_default_content_type | def set_default_content_type(application, content_type, encoding=None):
"""
Store the default content type for an application.
:param tornado.web.Application application: the application to modify
:param str content_type: the content type to default to
:param str|None encoding: encoding to use when one is unspecified
"""
settings = get_settings(application, force_instance=True)
settings.default_content_type = content_type
settings.default_encoding = encoding | python | def set_default_content_type(application, content_type, encoding=None):
"""
Store the default content type for an application.
:param tornado.web.Application application: the application to modify
:param str content_type: the content type to default to
:param str|None encoding: encoding to use when one is unspecified
"""
settings = get_settings(application, force_instance=True)
settings.default_content_type = content_type
settings.default_encoding = encoding | [
"def",
"set_default_content_type",
"(",
"application",
",",
"content_type",
",",
"encoding",
"=",
"None",
")",
":",
"settings",
"=",
"get_settings",
"(",
"application",
",",
"force_instance",
"=",
"True",
")",
"settings",
".",
"default_content_type",
"=",
"content_type",
"settings",
".",
"default_encoding",
"=",
"encoding"
]
| Store the default content type for an application.
:param tornado.web.Application application: the application to modify
:param str content_type: the content type to default to
:param str|None encoding: encoding to use when one is unspecified | [
"Store",
"the",
"default",
"content",
"type",
"for",
"an",
"application",
"."
]
| c034e04f674201487a8d6ce9f8ce36f3f5de07d8 | https://github.com/sprockets/sprockets.mixins.mediatype/blob/c034e04f674201487a8d6ce9f8ce36f3f5de07d8/sprockets/mixins/mediatype/content.py#L246-L257 | train |
sprockets/sprockets.mixins.mediatype | sprockets/mixins/mediatype/content.py | ContentMixin.get_response_content_type | def get_response_content_type(self):
"""Figure out what content type will be used in the response."""
if self._best_response_match is None:
settings = get_settings(self.application, force_instance=True)
acceptable = headers.parse_accept(
self.request.headers.get(
'Accept',
settings.default_content_type
if settings.default_content_type else '*/*'))
try:
selected, _ = algorithms.select_content_type(
acceptable, settings.available_content_types)
self._best_response_match = '/'.join(
[selected.content_type, selected.content_subtype])
if selected.content_suffix is not None:
self._best_response_match = '+'.join(
[self._best_response_match, selected.content_suffix])
except errors.NoMatch:
self._best_response_match = settings.default_content_type
return self._best_response_match | python | def get_response_content_type(self):
"""Figure out what content type will be used in the response."""
if self._best_response_match is None:
settings = get_settings(self.application, force_instance=True)
acceptable = headers.parse_accept(
self.request.headers.get(
'Accept',
settings.default_content_type
if settings.default_content_type else '*/*'))
try:
selected, _ = algorithms.select_content_type(
acceptable, settings.available_content_types)
self._best_response_match = '/'.join(
[selected.content_type, selected.content_subtype])
if selected.content_suffix is not None:
self._best_response_match = '+'.join(
[self._best_response_match, selected.content_suffix])
except errors.NoMatch:
self._best_response_match = settings.default_content_type
return self._best_response_match | [
"def",
"get_response_content_type",
"(",
"self",
")",
":",
"if",
"self",
".",
"_best_response_match",
"is",
"None",
":",
"settings",
"=",
"get_settings",
"(",
"self",
".",
"application",
",",
"force_instance",
"=",
"True",
")",
"acceptable",
"=",
"headers",
".",
"parse_accept",
"(",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"'Accept'",
",",
"settings",
".",
"default_content_type",
"if",
"settings",
".",
"default_content_type",
"else",
"'*/*'",
")",
")",
"try",
":",
"selected",
",",
"_",
"=",
"algorithms",
".",
"select_content_type",
"(",
"acceptable",
",",
"settings",
".",
"available_content_types",
")",
"self",
".",
"_best_response_match",
"=",
"'/'",
".",
"join",
"(",
"[",
"selected",
".",
"content_type",
",",
"selected",
".",
"content_subtype",
"]",
")",
"if",
"selected",
".",
"content_suffix",
"is",
"not",
"None",
":",
"self",
".",
"_best_response_match",
"=",
"'+'",
".",
"join",
"(",
"[",
"self",
".",
"_best_response_match",
",",
"selected",
".",
"content_suffix",
"]",
")",
"except",
"errors",
".",
"NoMatch",
":",
"self",
".",
"_best_response_match",
"=",
"settings",
".",
"default_content_type",
"return",
"self",
".",
"_best_response_match"
]
| Figure out what content type will be used in the response. | [
"Figure",
"out",
"what",
"content",
"type",
"will",
"be",
"used",
"in",
"the",
"response",
"."
]
| c034e04f674201487a8d6ce9f8ce36f3f5de07d8 | https://github.com/sprockets/sprockets.mixins.mediatype/blob/c034e04f674201487a8d6ce9f8ce36f3f5de07d8/sprockets/mixins/mediatype/content.py#L287-L307 | train |
sprockets/sprockets.mixins.mediatype | sprockets/mixins/mediatype/content.py | ContentMixin.send_response | def send_response(self, body, set_content_type=True):
"""
Serialize and send ``body`` in the response.
:param dict body: the body to serialize
:param bool set_content_type: should the :http:header:`Content-Type`
header be set? Defaults to :data:`True`
"""
settings = get_settings(self.application, force_instance=True)
handler = settings[self.get_response_content_type()]
content_type, data_bytes = handler.to_bytes(body)
if set_content_type:
self.set_header('Content-Type', content_type)
self.add_header('Vary', 'Accept')
self.write(data_bytes) | python | def send_response(self, body, set_content_type=True):
"""
Serialize and send ``body`` in the response.
:param dict body: the body to serialize
:param bool set_content_type: should the :http:header:`Content-Type`
header be set? Defaults to :data:`True`
"""
settings = get_settings(self.application, force_instance=True)
handler = settings[self.get_response_content_type()]
content_type, data_bytes = handler.to_bytes(body)
if set_content_type:
self.set_header('Content-Type', content_type)
self.add_header('Vary', 'Accept')
self.write(data_bytes) | [
"def",
"send_response",
"(",
"self",
",",
"body",
",",
"set_content_type",
"=",
"True",
")",
":",
"settings",
"=",
"get_settings",
"(",
"self",
".",
"application",
",",
"force_instance",
"=",
"True",
")",
"handler",
"=",
"settings",
"[",
"self",
".",
"get_response_content_type",
"(",
")",
"]",
"content_type",
",",
"data_bytes",
"=",
"handler",
".",
"to_bytes",
"(",
"body",
")",
"if",
"set_content_type",
":",
"self",
".",
"set_header",
"(",
"'Content-Type'",
",",
"content_type",
")",
"self",
".",
"add_header",
"(",
"'Vary'",
",",
"'Accept'",
")",
"self",
".",
"write",
"(",
"data_bytes",
")"
]
| Serialize and send ``body`` in the response.
:param dict body: the body to serialize
:param bool set_content_type: should the :http:header:`Content-Type`
header be set? Defaults to :data:`True` | [
"Serialize",
"and",
"send",
"body",
"in",
"the",
"response",
"."
]
| c034e04f674201487a8d6ce9f8ce36f3f5de07d8 | https://github.com/sprockets/sprockets.mixins.mediatype/blob/c034e04f674201487a8d6ce9f8ce36f3f5de07d8/sprockets/mixins/mediatype/content.py#L344-L359 | train |
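Putting the two ContentMixin methods together, a minimal handler sketch; the route and payload are illustrative.

from tornado import web
from sprockets.mixins.mediatype import content

class ThingHandler(content.ContentMixin, web.RequestHandler):
    def get(self):
        # Serialized with the transcoder negotiated from the Accept header.
        self.send_response({'thing': 'value'})

app = web.Application([('/thing', ThingHandler)])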
assamite/creamas | creamas/nx.py | connections_from_graph | def connections_from_graph(env, G, edge_data=False):
"""Create connections for agents in the given environment from the given
NetworkX graph structure.
:param env:
Environment where the agents live. The environment should be derived
from :class:`~creamas.core.environment.Environment`,
:class:`~creamas.mp.MultiEnvironment` or
:class:`~creamas.ds.DistributedEnvironment`.
:param G:
NetworkX graph structure, either :class:`networkx.graph.Graph` or
:class:`networkx.digraph.DiGraph`. The graph needs to have the same
number of nodes as the environment has agents (excluding the managers).
:param bool edge_data:
If ``True``, edge data from the given graph is copied to the agents'
:attr:`connections`.
.. note::
By design, manager agents are excluded from the connections and should
not be counted towards environment's agent count.
The created connections are stored in each agent's
:attr:`~creamas.core.agent.CreativeAgent.connections` and the possible
edge data is stored as key-value pairs in the connection dictionary.
The agents are sorted by their environments' hosts and ports before each
agent is mapped to a node in **G**. This should cause some network
generation methods in NetworkX, e.g.
:func:`~networkx.generators.random_graphs.connected_watts_strogatz_graph`,
to create more connections between agents in the same environment and/or
node when using :class:`~creamas.mp.MultiEnvironment` or
:class:`~creamas.ds.DistributedEnvironment`.
"""
if not issubclass(G.__class__, (Graph, DiGraph)):
raise TypeError("Graph structure must be derived from Networkx's "
"Graph or DiGraph.")
if not hasattr(env, 'get_agents'):
raise TypeError("Parameter 'env' must have get_agents.")
addrs = env.get_agents(addr=True)
if len(addrs) != len(G):
raise ValueError("The number of graph nodes and agents in the "
"environment (excluding the manager agent) must "
"match. Now got {} nodes and {} agents."
.format(len(G), len(addrs)))
# Sort agent addresses to the order they were added to the environment.
addrs = sort_addrs(addrs)
_addrs2nodes(addrs, G)
conn_map = _edges2conns(G, edge_data)
env.create_connections(conn_map) | python | def connections_from_graph(env, G, edge_data=False):
"""Create connections for agents in the given environment from the given
NetworkX graph structure.
:param env:
Environment where the agents live. The environment should be derived
from :class:`~creamas.core.environment.Environment`,
:class:`~creamas.mp.MultiEnvironment` or
:class:`~creamas.ds.DistributedEnvironment`.
:param G:
NetworkX graph structure, either :class:`networkx.graph.Graph` or
:class:`networkx.digraph.DiGraph`. The graph needs to have the same
number of nodes as the environment has agents (excluding the managers).
:param bool edge_data:
If ``True``, edge data from the given graph is copied to the agents'
:attr:`connections`.
.. note::
By design, manager agents are excluded from the connections and should
not be counted towards environment's agent count.
The created connections are stored in each agent's
:attr:`~creamas.core.agent.CreativeAgent.connections` and the possible
edge data is stored as key-value pairs in the connection dictionary.
The agents are sorted by their environments' hosts and ports before each
agent is mapped to a node in **G**. This should cause some network
generation methods in NetworkX, e.g.
:func:`~networkx.generators.random_graphs.connected_watts_strogatz_graph`,
to create more connections between agents in the same environment and/or
node when using :class:`~creamas.mp.MultiEnvironment` or
:class:`~creamas.ds.DistributedEnvironment`.
"""
if not issubclass(G.__class__, (Graph, DiGraph)):
raise TypeError("Graph structure must be derived from Networkx's "
"Graph or DiGraph.")
if not hasattr(env, 'get_agents'):
raise TypeError("Parameter 'env' must have get_agents.")
addrs = env.get_agents(addr=True)
if len(addrs) != len(G):
raise ValueError("The number of graph nodes and agents in the "
"environment (excluding the manager agent) must "
"match. Now got {} nodes and {} agents."
.format(len(G), len(addrs)))
# Sort agent addresses to the order they were added to the environment.
addrs = sort_addrs(addrs)
_addrs2nodes(addrs, G)
conn_map = _edges2conns(G, edge_data)
env.create_connections(conn_map) | [
"def",
"connections_from_graph",
"(",
"env",
",",
"G",
",",
"edge_data",
"=",
"False",
")",
":",
"if",
"not",
"issubclass",
"(",
"G",
".",
"__class__",
",",
"(",
"Graph",
",",
"DiGraph",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Graph structure must be derived from Networkx's \"",
"\"Graph or DiGraph.\"",
")",
"if",
"not",
"hasattr",
"(",
"env",
",",
"'get_agents'",
")",
":",
"raise",
"TypeError",
"(",
"\"Parameter 'env' must have get_agents.\"",
")",
"addrs",
"=",
"env",
".",
"get_agents",
"(",
"addr",
"=",
"True",
")",
"if",
"len",
"(",
"addrs",
")",
"!=",
"len",
"(",
"G",
")",
":",
"raise",
"ValueError",
"(",
"\"The number of graph nodes and agents in the \"",
"\"environment (excluding the manager agent) must \"",
"\"match. Now got {} nodes and {} agents.\"",
".",
"format",
"(",
"len",
"(",
"G",
")",
",",
"len",
"(",
"addrs",
")",
")",
")",
"# Sort agent addresses to the order they were added to the environment.",
"addrs",
"=",
"sort_addrs",
"(",
"addrs",
")",
"_addrs2nodes",
"(",
"addrs",
",",
"G",
")",
"conn_map",
"=",
"_edges2conns",
"(",
"G",
",",
"edge_data",
")",
"env",
".",
"create_connections",
"(",
"conn_map",
")"
]
| Create connections for agents in the given environment from the given
NetworkX graph structure.
:param env:
Environment where the agents live. The environment should be derived
from :class:`~creamas.core.environment.Environment`,
:class:`~creamas.mp.MultiEnvironment` or
:class:`~creamas.ds.DistributedEnvironment`.
:param G:
NetworkX graph structure, either :class:`networkx.graph.Graph` or
:class:`networkx.digraph.DiGraph`. The graph needs to have the same
number of nodes as the environment has agents (excluding the managers).
:param bool edge_data:
If ``True``, edge data from the given graph is copied to the agents'
:attr:`connections`.
.. note::
By design, manager agents are excluded from the connections and should
not be counted towards environment's agent count.
The created connections are stored in each agent's
:attr:`~creamas.core.agent.CreativeAgent.connections` and the possible
edge data is stored as key-value pairs in the connection dictionary.
The agents are sorted by their environments' hosts and ports before each
agent is mapped to a node in **G**. This should cause some network
generation methods in NetworkX, e.g.
:func:`~networkx.generators.random_graphs.connected_watts_strogatz_graph`,
to create more connections between agents in the same environment and/or
node when using :class:`~creamas.mp.MultiEnvironment` or
:class:`~creamas.ds.DistributedEnvironment`. | [
"Create",
"connections",
"for",
"agents",
"in",
"the",
"given",
"environment",
"from",
"the",
"given",
"NetworkX",
"graph",
"structure",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/nx.py#L22-L74 | train |
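A sketch wiring a small-world topology into an environment; it assumes env already holds exactly ten non-manager agents, matching the node-count check above.

import networkx as nx
from creamas.nx import connections_from_graph

# env: a creamas Environment already populated with agents (setup elided).
G = nx.connected_watts_strogatz_graph(10, k=4, p=0.1)
connections_from_graph(env, G, edge_data=False)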
assamite/creamas | creamas/nx.py | graph_from_connections | def graph_from_connections(env, directed=False):
"""Create NetworkX graph from agent connections in a given environment.
:param env:
Environment where the agents live. The environment must be derived from
:class:`~creamas.core.environment.Environment`,
:class:`~creamas.mp.MultiEnvironment` or
:class:`~creamas.ds.DistributedEnvironment`.
:param bool directed:
If ``True``, creates an instance of :class:`~networkx.digraph.DiGraph`,
otherwise creates an instance of :class:`~networkx.graph.Graph`.
:returns: The created NetworkX graph.
:rtype:
:class:`~networkx.digraph.DiGraph` or :class:`~networkx.graph.Graph`
.. note::
If the created graph is undirected and two connected agents have
different data stored for each other, then the data for the given edge
is chosen randomly between the two agents.
"""
G = DiGraph() if directed else Graph()
conn_list = env.get_connections(data=True)
for agent, conns in conn_list:
G.add_node(agent)
ebunch = []
for nb, data in conns.items():
ebunch.append((agent, nb, data))
if len(ebunch) > 0:
G.add_edges_from(ebunch)
return G | python | def graph_from_connections(env, directed=False):
"""Create NetworkX graph from agent connections in a given environment.
:param env:
Environment where the agents live. The environment must be derived from
:class:`~creamas.core.environment.Environment`,
:class:`~creamas.mp.MultiEnvironment` or
:class:`~creamas.ds.DistributedEnvironment`.
:param bool directed:
If ``True``, creates an instance of :class:`~networkx.digraph.DiGraph`,
otherwise creates an instance of :class:`~networkx.graph.Graph`.
:returns: The created NetworkX graph.
:rtype:
:class:`~networkx.digraph.DiGraph` or :class:`~networkx.graph.Graph`
.. note::
If the created graph is undirected and two connected agents have
different data stored for each other, then the data for the given edge
is chosen randomly between the two agents.
"""
G = DiGraph() if directed else Graph()
conn_list = env.get_connections(data=True)
for agent, conns in conn_list:
G.add_node(agent)
ebunch = []
for nb, data in conns.items():
ebunch.append((agent, nb, data))
if len(ebunch) > 0:
G.add_edges_from(ebunch)
return G | [
"def",
"graph_from_connections",
"(",
"env",
",",
"directed",
"=",
"False",
")",
":",
"G",
"=",
"DiGraph",
"(",
")",
"if",
"directed",
"else",
"Graph",
"(",
")",
"conn_list",
"=",
"env",
".",
"get_connections",
"(",
"data",
"=",
"True",
")",
"for",
"agent",
",",
"conns",
"in",
"conn_list",
":",
"G",
".",
"add_node",
"(",
"agent",
")",
"ebunch",
"=",
"[",
"]",
"for",
"nb",
",",
"data",
"in",
"conns",
".",
"items",
"(",
")",
":",
"ebunch",
".",
"append",
"(",
"(",
"agent",
",",
"nb",
",",
"data",
")",
")",
"if",
"len",
"(",
"ebunch",
")",
">",
"0",
":",
"G",
".",
"add_edges_from",
"(",
"ebunch",
")",
"return",
"G"
]
| Create NetworkX graph from agent connections in a given environment.
:param env:
Environment where the agents live. The environment must be derived from
:class:`~creamas.core.environment.Environment`,
:class:`~creamas.mp.MultiEnvironment` or
:class:`~creamas.ds.DistributedEnvironment`.
:param bool directed:
If ``True``, creates an instance of :class:`~networkx.digraph.DiGraph`,
otherwise creates an instance of :class:`~networkx.graph.Graph`.
:returns: The created NetworkX graph.
:rtype:
:class:`~networkx.digraph.DiGraph` or :class:`~networkx.graph.Graph`
.. note::
If the created graph is undirected and two connected agents have
different data stored for each other, then the data for the given edge
is chosen randomly between the two agents. | [
"Create",
"NetworkX",
"graph",
"from",
"agent",
"connections",
"in",
"a",
"given",
"environment",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/nx.py#L77-L109 | train |
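The inverse direction, sketched with the same assumed env; handy for inspecting or persisting the society's topology.

from creamas.nx import graph_from_connections

G = graph_from_connections(env, directed=True)
print(G.number_of_nodes(), G.number_of_edges())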
assamite/creamas | creamas/nx.py | _addrs2nodes | def _addrs2nodes(addrs, G):
"""Map agent addresses to nodes in the graph.
"""
for i, n in enumerate(G.nodes()):
G.node[n]['addr'] = addrs[i] | python | def _addrs2nodes(addrs, G):
"""Map agent addresses to nodes in the graph.
"""
for i, n in enumerate(G.nodes()):
G.node[n]['addr'] = addrs[i] | [
"def",
"_addrs2nodes",
"(",
"addrs",
",",
"G",
")",
":",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"G",
".",
"nodes",
"(",
")",
")",
":",
"G",
".",
"node",
"[",
"n",
"]",
"[",
"'addr'",
"]",
"=",
"addrs",
"[",
"i",
"]"
]
| Map agent addresses to nodes in the graph. | [
"Map",
"agent",
"addresses",
"to",
"nodes",
"in",
"the",
"graph",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/nx.py#L112-L116 | train |
assamite/creamas | creamas/nx.py | _edges2conns | def _edges2conns(G, edge_data=False):
"""Create a mapping from graph edges to agent connections to be created.
:param G:
NetworkX's Graph or DiGraph which has :attr:`addr` attribute for each
node.
:param bool edge_data:
If ``True``, stores also edge data to the returned dictionary.
:returns:
A dictionary where keys are agent addresses and values are lists of
addresses to which key-agent should create connections in order to
recreate the graph structure in an agent society.
:rtype: dict
"""
cm = {}
for n in G.nodes(data=True):
if edge_data:
cm[n[1]['addr']] = [(G.node[nb]['addr'], G[n[0]][nb])
for nb in G[n[0]]]
else:
cm[n[1]['addr']] = [(G.node[nb]['addr'], {}) for nb in G[n[0]]]
return cm | python | def _edges2conns(G, edge_data=False):
"""Create a mapping from graph edges to agent connections to be created.
:param G:
NetworkX's Graph or DiGraph which has :attr:`addr` attribute for each
node.
:param bool edge_data:
If ``True``, stores also edge data to the returned dictionary.
:returns:
A dictionary where keys are agent addresses and values are lists of
addresses to which key-agent should create connections in order to
recreate the graph structure in an agent society.
:rtype: dict
"""
cm = {}
for n in G.nodes(data=True):
if edge_data:
cm[n[1]['addr']] = [(G.node[nb]['addr'], G[n[0]][nb])
for nb in G[n[0]]]
else:
cm[n[1]['addr']] = [(G.node[nb]['addr'], {}) for nb in G[n[0]]]
return cm | [
"def",
"_edges2conns",
"(",
"G",
",",
"edge_data",
"=",
"False",
")",
":",
"cm",
"=",
"{",
"}",
"for",
"n",
"in",
"G",
".",
"nodes",
"(",
"data",
"=",
"True",
")",
":",
"if",
"edge_data",
":",
"cm",
"[",
"n",
"[",
"1",
"]",
"[",
"'addr'",
"]",
"]",
"=",
"[",
"(",
"G",
".",
"node",
"[",
"nb",
"]",
"[",
"'addr'",
"]",
",",
"G",
"[",
"n",
"[",
"0",
"]",
"]",
"[",
"nb",
"]",
")",
"for",
"nb",
"in",
"G",
"[",
"n",
"[",
"0",
"]",
"]",
"]",
"else",
":",
"cm",
"[",
"n",
"[",
"1",
"]",
"[",
"'addr'",
"]",
"]",
"=",
"[",
"(",
"G",
".",
"node",
"[",
"nb",
"]",
"[",
"'addr'",
"]",
",",
"{",
"}",
")",
"for",
"nb",
"in",
"G",
"[",
"n",
"[",
"0",
"]",
"]",
"]",
"return",
"cm"
]
| Create a mapping from graph edges to agent connections to be created.
:param G:
NetworkX's Graph or DiGraph which has :attr:`addr` attribute for each
node.
:param bool edge_data:
If ``True``, stores also edge data to the returned dictionary.
:returns:
A dictionary where keys are agent addresses and values are lists of
addresses to which key-agent should create connections in order to
recreate the graph structure in an agent society.
:rtype: dict | [
"Create",
"a",
"mapping",
"from",
"graph",
"edges",
"to",
"agent",
"connections",
"to",
"be",
"created",
"."
]
| 54dc3e31c97a3f938e58272f8ab80b6bcafeff58 | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/nx.py#L119-L143 | train |
adaptive-learning/proso-apps | proso_user/views.py | profile | def profile(request, status=200):
"""
Get the user's profile. If the user has no assigned profile, the HTTP 404
is returned. Make a POST request to modify the user's profile.
GET parameters:
html
turn on the HTML version of the API
username:
username of user (only for users with public profile)
stats:
attach additional user statistics
POST parameters (JSON):
send_emails:
switch turning on sending e-mails to the user
public:
switch making the user's profile publicly available
user:
password:
user's password
password_check:
user's password again to check it
first_name (optional):
user's first name
last_name (optional):
user's last name
"""
if request.method == 'GET':
if request.GET.get("username", False):
try:
user_profile = User.objects.get(username=request.GET.get("username"),
userprofile__public=True).userprofile
except ObjectDoesNotExist:
raise Http404("user not found or has no public profile")
else:
user_id = get_user_id(request)
if get_config('proso_user', 'google.openid.migration', default=True) and not is_user_id_overridden(request):
migrated_user = migrate_google_openid_user(request.user)
if migrated_user is not None:
auth.logout(request)
migrated_user.backend = 'social.backends.google.GoogleOAuth2'
auth.login(request, migrated_user)
user_profile = get_object_or_404(UserProfile, user_id=user_id)
return render_json(
request, user_profile, status=status,
template='user_profile.html', help_text=profile.__doc__)
elif request.method == 'POST':
with transaction.atomic():
to_save = json_body(request.body.decode("utf-8"))
user_id = get_user_id(request)
user_profile = get_object_or_404(UserProfile, user_id=user_id)
user = to_save.get('user', None)
if 'send_emails' in to_save:
user_profile.send_emails = bool(to_save['send_emails'])
if 'public' in to_save:
user_profile.public = bool(to_save['public'])
if user:
error = _save_user(request, user, new=False)
if error:
return render_json(request, error, template='user_json.html', status=400)
if 'properties' in to_save:
user_profile.save_properties(to_save['properties'])
user_profile.save()
request.method = "GET"
return profile(request, status=202)
else:
return HttpResponseBadRequest("method {} is not allowed".format(request.method)) | python | def profile(request, status=200):
"""
Get the user's profile. If the user has no assigned profile, the HTTP 404
is returned. Make a POST request to modify the user's profile.
GET parameters:
html
turn on the HTML version of the API
username:
username of user (only for users with public profile)
stats:
attach additional user statistics
POST parameters (JSON):
send_emails:
switch turning on sending e-mails to the user
public:
switch making the user's profile publicly available
user:
password:
user's password
password_check:
user's password again to check it
first_name (optional):
user's first name
last_name (optional):
user's last name
"""
if request.method == 'GET':
if request.GET.get("username", False):
try:
user_profile = User.objects.get(username=request.GET.get("username"),
userprofile__public=True).userprofile
except ObjectDoesNotExist:
raise Http404("user not found or has no public profile")
else:
user_id = get_user_id(request)
if get_config('proso_user', 'google.openid.migration', default=True) and not is_user_id_overridden(request):
migrated_user = migrate_google_openid_user(request.user)
if migrated_user is not None:
auth.logout(request)
migrated_user.backend = 'social.backends.google.GoogleOAuth2'
auth.login(request, migrated_user)
user_profile = get_object_or_404(UserProfile, user_id=user_id)
return render_json(
request, user_profile, status=status,
template='user_profile.html', help_text=profile.__doc__)
elif request.method == 'POST':
with transaction.atomic():
to_save = json_body(request.body.decode("utf-8"))
user_id = get_user_id(request)
user_profile = get_object_or_404(UserProfile, user_id=user_id)
user = to_save.get('user', None)
if 'send_emails' in to_save:
user_profile.send_emails = bool(to_save['send_emails'])
if 'public' in to_save:
user_profile.public = bool(to_save['public'])
if user:
error = _save_user(request, user, new=False)
if error:
return render_json(request, error, template='user_json.html', status=400)
if 'properties' in to_save:
user_profile.save_properties(to_save['properties'])
user_profile.save()
request.method = "GET"
return profile(request, status=202)
else:
return HttpResponseBadRequest("method {} is not allowed".format(request.method)) | [
"def",
"profile",
"(",
"request",
",",
"status",
"=",
"200",
")",
":",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"if",
"request",
".",
"GET",
".",
"get",
"(",
"\"username\"",
",",
"False",
")",
":",
"try",
":",
"user_profile",
"=",
"User",
".",
"objects",
".",
"get",
"(",
"username",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"\"username\"",
")",
",",
"userprofile__public",
"=",
"True",
")",
".",
"userprofile",
"except",
"ObjectDoesNotExist",
":",
"raise",
"Http404",
"(",
"\"user not found or have not public profile\"",
")",
"else",
":",
"user_id",
"=",
"get_user_id",
"(",
"request",
")",
"if",
"get_config",
"(",
"'proso_user'",
",",
"'google.openid.migration'",
",",
"default",
"=",
"True",
")",
"and",
"not",
"is_user_id_overridden",
"(",
"request",
")",
":",
"migrated_user",
"=",
"migrate_google_openid_user",
"(",
"request",
".",
"user",
")",
"if",
"migrated_user",
"is",
"not",
"None",
":",
"auth",
".",
"logout",
"(",
"request",
")",
"migrated_user",
".",
"backend",
"=",
"'social.backends.google.GoogleOAuth2'",
"auth",
".",
"login",
"(",
"request",
",",
"migrated_user",
")",
"user_profile",
"=",
"get_object_or_404",
"(",
"UserProfile",
",",
"user_id",
"=",
"user_id",
")",
"return",
"render_json",
"(",
"request",
",",
"user_profile",
",",
"status",
"=",
"status",
",",
"template",
"=",
"'user_profile.html'",
",",
"help_text",
"=",
"profile",
".",
"__doc__",
")",
"elif",
"request",
".",
"method",
"==",
"'POST'",
":",
"with",
"transaction",
".",
"atomic",
"(",
")",
":",
"to_save",
"=",
"json_body",
"(",
"request",
".",
"body",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"user_id",
"=",
"get_user_id",
"(",
"request",
")",
"user_profile",
"=",
"get_object_or_404",
"(",
"UserProfile",
",",
"user_id",
"=",
"user_id",
")",
"user",
"=",
"to_save",
".",
"get",
"(",
"'user'",
",",
"None",
")",
"if",
"'send_emails'",
"in",
"to_save",
":",
"user_profile",
".",
"send_emails",
"=",
"bool",
"(",
"to_save",
"[",
"'send_emails'",
"]",
")",
"if",
"'public'",
"in",
"to_save",
":",
"user_profile",
".",
"public",
"=",
"bool",
"(",
"to_save",
"[",
"'public'",
"]",
")",
"if",
"user",
":",
"error",
"=",
"_save_user",
"(",
"request",
",",
"user",
",",
"new",
"=",
"False",
")",
"if",
"error",
":",
"return",
"render_json",
"(",
"request",
",",
"error",
",",
"template",
"=",
"'user_json.html'",
",",
"status",
"=",
"400",
")",
"if",
"'properties'",
"in",
"to_save",
":",
"user_profile",
".",
"save_properties",
"(",
"to_save",
"[",
"'properties'",
"]",
")",
"user_profile",
".",
"save",
"(",
")",
"request",
".",
"method",
"=",
"\"GET\"",
"return",
"profile",
"(",
"request",
",",
"status",
"=",
"202",
")",
"else",
":",
"return",
"HttpResponseBadRequest",
"(",
"\"method %s is not allowed\"",
".",
"format",
"(",
"request",
".",
"method",
")",
")"
]
| Get the user's profile. If the user has no assigned profile, the HTTP 404
is returned. Make a POST request to modify the user's profile.
GET parameters:
html
turn on the HTML version of the API
username:
username of user (only for users with public profile)
stats:
attach additional user statistics
POST parameters (JSON):
send_emails:
switch turning on sending e-mails to the user
public:
switch making the user's profile publicly available
user:
password:
user's password
password_check:
user's password again to check it
first_name (optional):
user's first name
last_name (optional):
user's last name | [
"Get",
"the",
"user",
"s",
"profile",
".",
"If",
"the",
"user",
"has",
"no",
"assigned",
"profile",
"the",
"HTTP",
"404",
"is",
"returned",
".",
"Make",
"a",
"POST",
"request",
"to",
"modify",
"the",
"user",
"s",
"profile",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_user/views.py#L22-L89 | train |
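An illustrative client call against the endpoint above; the host and URL prefix are assumptions, not taken from the source.

import requests

r = requests.get('https://example.org/user/profile',
                 params={'username': 'alice'})
# 200 with the profile JSON, or 404 when the user is missing or not public.
print(r.status_code)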
adaptive-learning/proso-apps | proso_user/views.py | signup | def signup(request):
"""
Create a new user with the given credentials.
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
username:
user's name
email:
user's e-mail
password:
user's password
password_check:
user's password again to check it
first_name (optional):
user's first name
last_name (optional):
user's last name
"""
if request.method == 'GET':
return render(request, 'user_signup.html', {}, help_text=signup.__doc__)
elif request.method == 'POST':
if request.user.is_authenticated() and hasattr(request.user, "userprofile"):
return render_json(request, {
'error': _('User already logged in'),
'error_type': 'username_logged'
}, template='user_json.html', status=400)
credentials = json_body(request.body.decode("utf-8"))
error = _save_user(request, credentials, new=True)
if error is not None:
return render_json(request, error, template='user_json.html', status=400)
else:
auth.login(request, request.user)
request.method = "GET"
return profile(request, status=201)
else:
return HttpResponseBadRequest("method {} is not allowed".format(request.method)) | python | def signup(request):
"""
Create a new user with the given credentials.
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
username:
user's name
email:
user's e-mail
password:
user's password
password_check:
user's password again to check it
first_name (optional):
user's first name
last_name (optional):
user's last name
"""
if request.method == 'GET':
return render(request, 'user_signup.html', {}, help_text=signup.__doc__)
elif request.method == 'POST':
if request.user.is_authenticated() and hasattr(request.user, "userprofile"):
return render_json(request, {
'error': _('User already logged in'),
'error_type': 'username_logged'
}, template='user_json.html', status=400)
credentials = json_body(request.body.decode("utf-8"))
error = _save_user(request, credentials, new=True)
if error is not None:
return render_json(request, error, template='user_json.html', status=400)
else:
auth.login(request, request.user)
request.method = "GET"
return profile(request, status=201)
else:
return HttpResponseBadRequest("method {} is not allowed".format(request.method)) | [
"def",
"signup",
"(",
"request",
")",
":",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"return",
"render",
"(",
"request",
",",
"'user_signup.html'",
",",
"{",
"}",
",",
"help_text",
"=",
"signup",
".",
"__doc__",
")",
"elif",
"request",
".",
"method",
"==",
"'POST'",
":",
"if",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
"and",
"hasattr",
"(",
"request",
".",
"user",
",",
"\"userprofile\"",
")",
":",
"return",
"render_json",
"(",
"request",
",",
"{",
"'error'",
":",
"_",
"(",
"'User already logged in'",
")",
",",
"'error_type'",
":",
"'username_logged'",
"}",
",",
"template",
"=",
"'user_json.html'",
",",
"status",
"=",
"400",
")",
"credentials",
"=",
"json_body",
"(",
"request",
".",
"body",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"error",
"=",
"_save_user",
"(",
"request",
",",
"credentials",
",",
"new",
"=",
"True",
")",
"if",
"error",
"is",
"not",
"None",
":",
"return",
"render_json",
"(",
"request",
",",
"error",
",",
"template",
"=",
"'user_json.html'",
",",
"status",
"=",
"400",
")",
"else",
":",
"auth",
".",
"login",
"(",
"request",
",",
"request",
".",
"user",
")",
"request",
".",
"method",
"=",
"\"GET\"",
"return",
"profile",
"(",
"request",
",",
"status",
"=",
"201",
")",
"else",
":",
"return",
"HttpResponseBadRequest",
"(",
"\"method %s is not allowed\"",
".",
"format",
"(",
"request",
".",
"method",
")",
")"
]
| Create a new user with the given credentials.
GET parameters:
html
turn on the HTML version of the API
POST parameters (JSON):
username:
user's name
email:
user's e-mail
password:
user's password
password_check:
user's password again to check it
first_name (optional):
user's first name
last_name (optional):
user's last name | [
"Create",
"a",
"new",
"user",
"with",
"the",
"given",
"credentials",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_user/views.py#L243-L282 | train |
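A matching sketch for the signup endpoint, with the same hypothetical host; the field names follow the docstring above.

import requests

r = requests.post('https://example.org/user/signup', json={
    'username': 'alice',
    'email': 'alice@example.org',
    'password': 's3cret',
    'password_check': 's3cret',
})
# 201 on success; 400 with an error object on validation failure.
print(r.status_code)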
adaptive-learning/proso-apps | proso_user/views.py | session | def session(request):
"""
Get the information about the current session or modify the current session.
GET parameters:
html
turn on the HTML version of the API
POST parameters:
locale:
client's locale
time_zone:
client's time zone
display_width:
width of the client's display
display_height:
height of the client's display
"""
if request.user.id is None: # Google Bot
return render_json(request, {
'error': _('There is no user available to create a session.'),
'error_type': 'user_undefined'
}, status=400, template='user_json.html')
if request.method == 'GET':
return render_json(
request,
Session.objects.get_current_session(),
template='user_session.html', help_text=session.__doc__)
elif request.method == 'POST':
current_session = Session.objects.get_current_session()
if current_session is None:
return HttpResponseBadRequest("there is no current session to modify")
data = json_body(request.body.decode("utf-8"))
locale = data.get('locale', None)
time_zone = data.get('time_zone', None)
display_width = data.get('display_width', None)
display_height = data.get('display_height', None)
if locale:
current_session.locale = locale
if time_zone:
current_session.time_zone = TimeZone.objects.from_content(time_zone)
if display_width:
current_session.display_width = display_width
if display_height:
current_session.display_height = display_height
current_session.save()
return HttpResponse('ok', status=202)
else:
return HttpResponseBadRequest("method {} is not allowed".format(request.method)) | python | def session(request):
"""
Get the information about the current session or modify the current session.
GET parameters:
html
turn on the HTML version of the API
POST parameters:
locale:
client's locale
time_zone:
client's time zone
display_width:
width of the client's display
display_height:
height of the client's display
"""
if request.user.id is None: # Google Bot
return render_json(request, {
'error': _('There is no user available to create a session.'),
'error_type': 'user_undefined'
}, status=400, template='user_json.html')
if request.method == 'GET':
return render_json(
request,
Session.objects.get_current_session(),
template='user_session.html', help_text=session.__doc__)
elif request.method == 'POST':
current_session = Session.objects.get_current_session()
if current_session is None:
return HttpResponseBadRequest("there is no current session to modify")
data = json_body(request.body.decode("utf-8"))
locale = data.get('locale', None)
time_zone = data.get('time_zone', None)
display_width = data.get('display_width', None)
display_height = data.get('display_height', None)
if locale:
current_session.locale = locale
if time_zone:
current_session.time_zone = TimeZone.objects.from_content(time_zone)
if display_width:
current_session.display_width = display_width
if display_height:
current_session.display_height = display_height
current_session.save()
return HttpResponse('ok', status=202)
else:
return HttpResponseBadRequest("method {} is not allowed".format(request.method)) | [
"def",
"session",
"(",
"request",
")",
":",
"if",
"request",
".",
"user",
".",
"id",
"is",
"None",
":",
"# Google Bot",
"return",
"render_json",
"(",
"request",
",",
"{",
"'error'",
":",
"_",
"(",
"'There is no user available to create a session.'",
")",
",",
"'error_type'",
":",
"'user_undefined'",
"}",
",",
"status",
"=",
"400",
",",
"template",
"=",
"'user_json.html'",
")",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"return",
"render_json",
"(",
"request",
",",
"Session",
".",
"objects",
".",
"get_current_session",
"(",
")",
",",
"template",
"=",
"'user_session.html'",
",",
"help_text",
"=",
"session",
".",
"__doc__",
")",
"elif",
"request",
".",
"method",
"==",
"'POST'",
":",
"current_session",
"=",
"Session",
".",
"objects",
".",
"get_current_session",
"(",
")",
"if",
"current_session",
"is",
"None",
":",
"return",
"HttpResponseBadRequest",
"(",
"\"there is no current session to modify\"",
")",
"data",
"=",
"json_body",
"(",
"request",
".",
"body",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"locale",
"=",
"data",
".",
"get",
"(",
"'locale'",
",",
"None",
")",
"time_zone",
"=",
"data",
".",
"get",
"(",
"'time_zone'",
",",
"None",
")",
"display_width",
"=",
"data",
".",
"get",
"(",
"'display_width'",
",",
"None",
")",
"display_height",
"=",
"data",
".",
"get",
"(",
"'display_height'",
",",
"None",
")",
"if",
"locale",
":",
"current_session",
".",
"locale",
"=",
"locale",
"if",
"time_zone",
":",
"current_session",
".",
"time_zone",
"=",
"TimeZone",
".",
"objects",
".",
"from_content",
"(",
"time_zone",
")",
"if",
"display_width",
":",
"current_session",
".",
"display_width",
"=",
"display_width",
"if",
"display_height",
":",
"current_session",
".",
"display_height",
"=",
"display_height",
"current_session",
".",
"save",
"(",
")",
"return",
"HttpResponse",
"(",
"'ok'",
",",
"status",
"=",
"202",
")",
"else",
":",
"return",
"HttpResponseBadRequest",
"(",
"\"method %s is not allowed\"",
".",
"format",
"(",
"request",
".",
"method",
")",
")"
]
| Get the information about the current session or modify the current session.
GET parameters:
html
turn on the HTML version of the API
POST parameters:
locale:
client's locale
time_zone:
client's time zone
display_width:
width of the client's display
display_height:
height of the client's display | [
"Get",
"the",
"information",
"about",
"the",
"current",
"session",
"or",
"modify",
"the",
"current",
"session",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_user/views.py#L288-L338 | train |
adaptive-learning/proso-apps | proso_user/views.py | initmobile_view | def initmobile_view(request):
"""
Create lazy user with a password. Used from the Android app.
Also returns csrf token.
GET parameters:
username:
user's name
password:
user's password
"""
if 'username' in request.GET and 'password' in request.GET:
username = request.GET['username']
password = request.GET['password']
user = auth.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
else:
user = request.user
response = {
'username': user.username,
'csrftoken': get_token(request),
}
if not user.has_usable_password():
password = User.objects.make_random_password()
user.set_password(password)
user.save()
response['password'] = password
return HttpResponse(json.dumps(response)) | python | def initmobile_view(request):
"""
Create lazy user with a password. Used from the Android app.
Also returns csrf token.
GET parameters:
username:
user's name
password:
user's password
"""
if 'username' in request.GET and 'password' in request.GET:
username = request.GET['username']
password = request.GET['password']
user = auth.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
else:
user = request.user
response = {
'username': user.username,
'csrftoken': get_token(request),
}
if not user.has_usable_password():
password = User.objects.make_random_password()
user.set_password(password)
user.save()
response['password'] = password
return HttpResponse(json.dumps(response)) | [
"def",
"initmobile_view",
"(",
"request",
")",
":",
"if",
"'username'",
"in",
"request",
".",
"GET",
"and",
"'password'",
"in",
"request",
".",
"GET",
":",
"username",
"=",
"request",
".",
"GET",
"[",
"'username'",
"]",
"password",
"=",
"request",
".",
"GET",
"[",
"'password'",
"]",
"user",
"=",
"auth",
".",
"authenticate",
"(",
"username",
"=",
"username",
",",
"password",
"=",
"password",
")",
"if",
"user",
"is",
"not",
"None",
":",
"if",
"user",
".",
"is_active",
":",
"login",
"(",
"request",
",",
"user",
")",
"else",
":",
"user",
"=",
"request",
".",
"user",
"response",
"=",
"{",
"'username'",
":",
"user",
".",
"username",
",",
"'csrftoken'",
":",
"get_token",
"(",
"request",
")",
",",
"}",
"if",
"not",
"user",
".",
"has_usable_password",
"(",
")",
":",
"password",
"=",
"User",
".",
"objects",
".",
"make_random_password",
"(",
")",
"user",
".",
"set_password",
"(",
"password",
")",
"user",
".",
"save",
"(",
")",
"response",
"[",
"'password'",
"]",
"=",
"password",
"return",
"HttpResponse",
"(",
"json",
".",
"dumps",
"(",
"response",
")",
")"
]
| Create lazy user with a password. Used from the Android app.
Also returns csrf token.
GET parameters:
username:
user's name
password:
user's password | [
"Create",
"lazy",
"user",
"with",
"a",
"password",
".",
"Used",
"from",
"the",
"Android",
"app",
".",
"Also",
"returns",
"csrf",
"token",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_user/views.py#L341-L370 | train |
truveris/py-mdstat | mdstat/disk.py | parse_device_disk | def parse_device_disk(token):
"""Parse a single disk from the header line.
Each disk has at least a device name and a unique number in its array,
after which a list of special flags may follow:
(W) write-mostly
(S) spare disk
(F) faulty disk
(R) replacement disk
Some are mutually exclusive (e.g. can't be spare and faulty).
"""
name, token = token.split("[", 1)
number, flags = token.split("]", 1)
return name, {
"number": int(number),
"write_mostly": "W" in flags,
"faulty": "F" in flags,
"spare": "S" in flags,
"replacement": "R" in flags,
} | python | def parse_device_disk(token):
"""Parse a single disk from the header line.
Each disk has at least a device name and a unique number in its array,
after which a list of special flags may follow:
(W) write-mostly
(S) spare disk
(F) faulty disk
(R) replacement disk
Some are mutually exclusive (e.g. can't be spare and faulty).
"""
name, token = token.split("[", 1)
number, flags = token.split("]", 1)
return name, {
"number": int(number),
"write_mostly": "W" in flags,
"faulty": "F" in flags,
"spare": "S" in flags,
"replacement": "R" in flags,
} | [
"def",
"parse_device_disk",
"(",
"token",
")",
":",
"name",
",",
"token",
"=",
"token",
".",
"split",
"(",
"\"[\"",
",",
"1",
")",
"number",
",",
"flags",
"=",
"token",
".",
"split",
"(",
"\"]\"",
",",
"1",
")",
"return",
"name",
",",
"{",
"\"number\"",
":",
"int",
"(",
"number",
")",
",",
"\"write_mostly\"",
":",
"\"W\"",
"in",
"flags",
",",
"\"faulty\"",
":",
"\"F\"",
"in",
"flags",
",",
"\"spare\"",
":",
"\"S\"",
"in",
"flags",
",",
"\"replacement\"",
":",
"\"R\"",
"in",
"flags",
",",
"}"
]
| Parse a single disk from the header line.
Each disk has at least a device name and a unique number in its array,
after which a list of special flags may follow:
(W) write-mostly
(S) spare disk
(F) faulty disk
(R) replacement disk
Some are mutually exclusive (e.g. can't be spare and faulty). | [
"Parse",
"a",
"single",
"disk",
"from",
"the",
"header",
"line",
"."
]
| 881af99d1168694d2f38e606af377ef6cabe2297 | https://github.com/truveris/py-mdstat/blob/881af99d1168694d2f38e606af377ef6cabe2297/mdstat/disk.py#L6-L27 | train |
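A minimal usage sketch for parse_device_disk; the token "sdb1[0](F)" is an illustrative /proc/mdstat header entry for a faulty member:

name, info = parse_device_disk("sdb1[0](F)")
assert name == "sdb1"
assert info["number"] == 0 and info["faulty"] is True
assert not (info["spare"] or info["write_mostly"] or info["replacement"])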
adaptive-learning/proso-apps | proso/list.py | group_by | def group_by(what, by):
"""
Take a list and apply the given function on each of its values, then group the
values by the function results.
.. testsetup::
from proso.list import group_by
.. doctest::
>>> group_by([i for i in range(10)], by=lambda x: x % 2 == 0)
{False: [1, 3, 5, 7, 9], True: [0, 2, 4, 6, 8]}
Args:
what: a list which will be transformed
by: a function which will be applied on values of the given list
Returns:
dict: values grouped by the function results
"""
return proso.dict.group_keys_by_values({x: by(x) for x in what}) | python | def group_by(what, by):
"""
Take a list and apply the given function on each of its values, then group the
values by the function results.
.. testsetup::
from proso.list import group_by
.. doctest::
>>> group_by([i for i in range(10)], by=lambda x: x % 2 == 0)
{False: [1, 3, 5, 7, 9], True: [0, 2, 4, 6, 8]}
Args:
what: a list which will be transformed
by: a function which will be applied on values of the given list
Returns:
dict: values grouped by the function results
"""
return proso.dict.group_keys_by_values({x: by(x) for x in what}) | [
"def",
"group_by",
"(",
"what",
",",
"by",
")",
":",
"return",
"proso",
".",
"dict",
".",
"group_keys_by_values",
"(",
"{",
"x",
":",
"by",
"(",
"x",
")",
"for",
"x",
"in",
"what",
"}",
")"
]
| Take a list and apply the given function on each of its values, then group the
values by the function results.
.. testsetup::
from proso.list import group_by
.. doctest::
>>> group_by([i for i in range(10)], by=lambda x: x % 2 == 0)
{False: [1, 3, 5, 7, 9], True: [0, 2, 4, 6, 8]}
Args:
what: a list which will be transformed
by: a function which will be applied on values of the given list
Returns:
dict: values grouped by the function results | [
"Take",
"a",
"list",
"and",
"apply",
"the",
"given",
"function",
"on",
"each",
"its",
"value",
"then",
"group",
"the",
"values",
"by",
"the",
"function",
"results",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso/list.py#L25-L46 | train |
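group_by delegates to proso.dict.group_keys_by_values, which this record does not include; a plausible stand-in with the behavior the doctest implies (an assumption, for illustration only) is:

from collections import defaultdict

def group_keys_by_values(mapping):
    # Invert {key: group} into {group: [keys, ...]}, preserving key order.
    grouped = defaultdict(list)
    for key, group in mapping.items():
        grouped[group].append(key)
    return dict(grouped)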
mardix/Mocha | mocha/cli.py | copy_resource_dir | def copy_resource_dir(src, dest):
"""
To copy package data directory to destination
"""
package_name = "mocha"
dest = (dest + "/" + os.path.basename(src)).rstrip("/")
if pkg_resources.resource_isdir(package_name, src):
if not os.path.isdir(dest):
os.makedirs(dest)
for res in pkg_resources.resource_listdir(__name__, src):
copy_resource_dir(src + "/" + res, dest)
else:
if not os.path.isfile(dest) and os.path.splitext(src)[1] not in [".pyc"]:
copy_resource_file(src, dest) | python | def copy_resource_dir(src, dest):
"""
To copy package data directory to destination
"""
package_name = "mocha"
dest = (dest + "/" + os.path.basename(src)).rstrip("/")
if pkg_resources.resource_isdir(package_name, src):
if not os.path.isdir(dest):
os.makedirs(dest)
for res in pkg_resources.resource_listdir(__name__, src):
copy_resource_dir(src + "/" + res, dest)
else:
if not os.path.isfile(dest) and os.path.splitext(src)[1] not in [".pyc"]:
copy_resource_file(src, dest) | [
"def",
"copy_resource_dir",
"(",
"src",
",",
"dest",
")",
":",
"package_name",
"=",
"\"mocha\"",
"dest",
"=",
"(",
"dest",
"+",
"\"/\"",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"src",
")",
")",
".",
"rstrip",
"(",
"\"/\"",
")",
"if",
"pkg_resources",
".",
"resource_isdir",
"(",
"package_name",
",",
"src",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dest",
")",
":",
"os",
".",
"makedirs",
"(",
"dest",
")",
"for",
"res",
"in",
"pkg_resources",
".",
"resource_listdir",
"(",
"__name__",
",",
"src",
")",
":",
"copy_resource_dir",
"(",
"src",
"+",
"\"/\"",
"+",
"res",
",",
"dest",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"dest",
")",
"and",
"os",
".",
"path",
".",
"splitext",
"(",
"src",
")",
"[",
"1",
"]",
"not",
"in",
"[",
"\".pyc\"",
"]",
":",
"copy_resource_file",
"(",
"src",
",",
"dest",
")"
]
| To copy package data directory to destination | [
"To",
"copy",
"package",
"data",
"directory",
"to",
"destination"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/cli.py#L71-L84 | train |
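A hedged usage sketch for copy_resource_dir; both arguments are illustrative (src must be a data path inside the installed "mocha" package):

copy_resource_dir("skel/create", "/tmp/new-project")
# recursively copies the packaged tree into /tmp/new-project/create,
# skipping files that already exist and compiled .pyc resources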
mardix/Mocha | mocha/cli.py | init | def init():
""" Setup Mocha in the current directory """
mochapyfile = os.path.join(os.path.join(CWD, "brew.py"))
header("Initializing Mocha ...")
if os.path.isfile(mochapyfile):
print("WARNING: It seems like Mocha is already setup!")
print("*" * 80)
else:
print("")
print("Copying files to the current directory...")
copy_resource_dir(SKELETON_DIR + "/create/", CWD)
print("")
_npm_install_static()
print("")
print("----- Your Mocha is ready! ----")
print("")
print("> What's next?")
print("- Edit the config [ application/config.py ] ")
print("- If necessary setup your model database [ mocha :initdb ]")
print("- Launch app on development mode, run [ mocha :serve ]")
print("")
print("*" * 80) | python | def init():
""" Setup Mocha in the current directory """
mochapyfile = os.path.join(os.path.join(CWD, "brew.py"))
header("Initializing Mocha ...")
if os.path.isfile(mochapyfile):
print("WARNING: It seems like Mocha is already setup!")
print("*" * 80)
else:
print("")
print("Copying files to the current directory...")
copy_resource_dir(SKELETON_DIR + "/create/", CWD)
print("")
_npm_install_static()
print("")
print("----- Your Mocha is ready! ----")
print("")
print("> What's next?")
print("- Edit the config [ application/config.py ] ")
print("- If necessary setup your model database [ mocha :initdb ]")
print("- Launch app on development mode, run [ mocha :serve ]")
print("")
print("*" * 80) | [
"def",
"init",
"(",
")",
":",
"mochapyfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"join",
"(",
"CWD",
",",
"\"brew.py\"",
")",
")",
"header",
"(",
"\"Initializing Mocha ...\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"mochapyfile",
")",
":",
"print",
"(",
"\"WARNING: It seems like Mocha is already setup!\"",
")",
"print",
"(",
"\"*\"",
"*",
"80",
")",
"else",
":",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"Copying files to the current directory...\"",
")",
"copy_resource_dir",
"(",
"SKELETON_DIR",
"+",
"\"/create/\"",
",",
"CWD",
")",
"print",
"(",
"\"\"",
")",
"_npm_install_static",
"(",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"----- Your Mocha is ready! ----\"",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"> What's next?\"",
")",
"print",
"(",
"\"- Edit the config [ application/config.py ] \"",
")",
"print",
"(",
"\"- If necessary setup your model database [ mocha :initdb ]\"",
")",
"print",
"(",
"\"- Launch app on development mode, run [ mocha :serve ]\"",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"*\"",
"*",
"80",
")"
]
| Setup Mocha in the current directory | [
"Setup",
"Mocha",
"in",
"the",
"current",
"directory"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/cli.py#L154-L178 | train |
mardix/Mocha | mocha/cli.py | add_view | def add_view(name, no_template):
""" Create a new view and template page """
app_dest = APPLICATION_DIR
viewsrc = "%s/create-view/view.py" % SKELETON_DIR
tplsrc = "%s/create-view/template.jade" % SKELETON_DIR
viewdest_dir = os.path.join(app_dest, "views")
viewdest = os.path.join(viewdest_dir, "%s.py" % name)
tpldest_dir = os.path.join(app_dest, "templates/%s/Index" % name)
tpldest = os.path.join(tpldest_dir, "index.jade")
header("Adding New View")
print("View: %s" % viewdest.replace(CWD, ""))
if not no_template:
print("Template: %s" % tpldest.replace(CWD, ""))
else:
print("* Template will not be created because of the flag --no-template| -t")
if os.path.isfile(viewdest) or os.path.isfile(tpldest):
print("*** ERROR: View or Template file exist already")
else:
if not os.path.isdir(viewdest_dir):
utils.make_dirs(viewdest_dir)
copy_resource_file(viewsrc, viewdest)
with open(viewdest, "r+") as vd:
content = vd.read()\
.replace("%ROUTE%", name.lower())\
.replace("%NAV_TITLE%", name.capitalize())
vd.seek(0)
vd.write(content)
vd.truncate()
if not no_template:
if not os.path.isdir(tpldest_dir):
utils.make_dirs(tpldest_dir)
copy_resource_file(tplsrc, tpldest)
print("")
print("*" * 80) | python | def add_view(name, no_template):
""" Create a new view and template page """
app_dest = APPLICATION_DIR
viewsrc = "%s/create-view/view.py" % SKELETON_DIR
tplsrc = "%s/create-view/template.jade" % SKELETON_DIR
viewdest_dir = os.path.join(app_dest, "views")
viewdest = os.path.join(viewdest_dir, "%s.py" % name)
tpldest_dir = os.path.join(app_dest, "templates/%s/Index" % name)
tpldest = os.path.join(tpldest_dir, "index.jade")
header("Adding New View")
print("View: %s" % viewdest.replace(CWD, ""))
if not no_template:
print("Template: %s" % tpldest.replace(CWD, ""))
else:
print("* Template will not be created because of the flag --no-template| -t")
if os.path.isfile(viewdest) or os.path.isfile(tpldest):
print("*** ERROR: View or Template file exist already")
else:
if not os.path.isdir(viewdest_dir):
utils.make_dirs(viewdest_dir)
copy_resource_file(viewsrc, viewdest)
with open(viewdest, "r+") as vd:
content = vd.read()\
.replace("%ROUTE%", name.lower())\
.replace("%NAV_TITLE%", name.capitalize())
vd.seek(0)
vd.write(content)
vd.truncate()
if not no_template:
if not os.path.isdir(tpldest_dir):
utils.make_dirs(tpldest_dir)
copy_resource_file(tplsrc, tpldest)
print("")
print("*" * 80) | [
"def",
"add_view",
"(",
"name",
",",
"no_template",
")",
":",
"app_dest",
"=",
"APPLICATION_DIR",
"viewsrc",
"=",
"\"%s/create-view/view.py\"",
"%",
"SKELETON_DIR",
"tplsrc",
"=",
"\"%s/create-view/template.jade\"",
"%",
"SKELETON_DIR",
"viewdest_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app_dest",
",",
"\"views\"",
")",
"viewdest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"viewdest_dir",
",",
"\"%s.py\"",
"%",
"name",
")",
"tpldest_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app_dest",
",",
"\"templates/%s/Index\"",
"%",
"name",
")",
"tpldest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tpldest_dir",
",",
"\"index.jade\"",
")",
"header",
"(",
"\"Adding New View\"",
")",
"print",
"(",
"\"View: %s\"",
"%",
"viewdest",
".",
"replace",
"(",
"CWD",
",",
"\"\"",
")",
")",
"if",
"not",
"no_template",
":",
"print",
"(",
"\"Template: %s\"",
"%",
"tpldest",
".",
"replace",
"(",
"CWD",
",",
"\"\"",
")",
")",
"else",
":",
"print",
"(",
"\"* Template will not be created because of the flag --no-template| -t\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"viewdest",
")",
"or",
"os",
".",
"path",
".",
"isfile",
"(",
"tpldest",
")",
":",
"print",
"(",
"\"*** ERROR: View or Template file exist already\"",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"viewdest_dir",
")",
":",
"utils",
".",
"make_dirs",
"(",
"viewdest_dir",
")",
"copy_resource_file",
"(",
"viewsrc",
",",
"viewdest",
")",
"with",
"open",
"(",
"viewdest",
",",
"\"r+\"",
")",
"as",
"vd",
":",
"content",
"=",
"vd",
".",
"read",
"(",
")",
".",
"replace",
"(",
"\"%ROUTE%\"",
",",
"name",
".",
"lower",
"(",
")",
")",
".",
"replace",
"(",
"\"%NAV_TITLE%\"",
",",
"name",
".",
"capitalize",
"(",
")",
")",
"vd",
".",
"seek",
"(",
"0",
")",
"vd",
".",
"write",
"(",
"content",
")",
"vd",
".",
"truncate",
"(",
")",
"if",
"not",
"no_template",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"tpldest_dir",
")",
":",
"utils",
".",
"make_dirs",
"(",
"tpldest_dir",
")",
"copy_resource_file",
"(",
"tplsrc",
",",
"tpldest",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"*\"",
"*",
"80",
")"
]
| Create a new view and template page | [
"Create",
"a",
"new",
"view",
"and",
"template",
"page"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/cli.py#L184-L221 | train |
mardix/Mocha | mocha/cli.py | initdb | def initdb():
""" Sync database Create new tables etc... """
print("Syncing up database...")
cwd_to_sys_path()
if db and hasattr(db, "Model"):
db.create_all()
for m in db.Model.__subclasses__():
if hasattr(m, "initialize__"):
print("Sync up model: %s ..." % m.__name__)
getattr(m, "initialize__")()
print("Done") | python | def initdb():
""" Sync database Create new tables etc... """
print("Syncing up database...")
cwd_to_sys_path()
if db and hasattr(db, "Model"):
db.create_all()
for m in db.Model.__subclasses__():
if hasattr(m, "initialize__"):
print("Sync up model: %s ..." % m.__name__)
getattr(m, "initialize__")()
print("Done") | [
"def",
"initdb",
"(",
")",
":",
"print",
"(",
"\"Syncing up database...\"",
")",
"cwd_to_sys_path",
"(",
")",
"if",
"db",
"and",
"hasattr",
"(",
"db",
",",
"\"Model\"",
")",
":",
"db",
".",
"create_all",
"(",
")",
"for",
"m",
"in",
"db",
".",
"Model",
".",
"__subclasses__",
"(",
")",
":",
"if",
"hasattr",
"(",
"m",
",",
"\"initialize__\"",
")",
":",
"print",
"(",
"\"Sync up model: %s ...\"",
"%",
"m",
".",
"__name__",
")",
"getattr",
"(",
"m",
",",
"\"initialize__\"",
")",
"(",
")",
"print",
"(",
"\"Done\"",
")"
]
| Sync database Create new tables etc... | [
"Sync",
"database",
"Create",
"new",
"tables",
"etc",
"..."
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/cli.py#L239-L251 | train |
mardix/Mocha | mocha/cli.py | _set_flask_alembic | def _set_flask_alembic():
from flask_alembic import Alembic
""" Add the SQLAlchemy object in the global extension """
application.app.extensions["sqlalchemy"] = type('', (), {"db": db})
alembic = Alembic()
alembic.init_app(application.app)
return alembic | python | def _set_flask_alembic():
from flask_alembic import Alembic
""" Add the SQLAlchemy object in the global extension """
application.app.extensions["sqlalchemy"] = type('', (), {"db": db})
alembic = Alembic()
alembic.init_app(application.app)
return alembic | [
"def",
"_set_flask_alembic",
"(",
")",
":",
"from",
"flask_alembic",
"import",
"Alembic",
"application",
".",
"app",
".",
"extensions",
"[",
"\"sqlalchemy\"",
"]",
"=",
"type",
"(",
"''",
",",
"(",
")",
",",
"{",
"\"db\"",
":",
"db",
"}",
")",
"alembic",
"=",
"Alembic",
"(",
")",
"alembic",
".",
"init_app",
"(",
"application",
".",
"app",
")",
"return",
"alembic"
]
| Add the SQLAlchemy object in the global extension | [
"Add",
"the",
"SQLAlchemy",
"object",
"in",
"the",
"global",
"extension"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/cli.py#L254-L261 | train |
mardix/Mocha | mocha/cli.py | assets2s3 | def assets2s3():
""" Upload assets files to S3 """
import flask_s3
header("Assets2S3...")
print("")
print("Building assets files..." )
print("")
build_assets(application.app)
print("")
print("Uploading assets files to S3 ...")
flask_s3.create_all(application.app)
print("") | python | def assets2s3():
""" Upload assets files to S3 """
import flask_s3
header("Assets2S3...")
print("")
print("Building assets files..." )
print("")
build_assets(application.app)
print("")
print("Uploading assets files to S3 ...")
flask_s3.create_all(application.app)
print("") | [
"def",
"assets2s3",
"(",
")",
":",
"import",
"flask_s3",
"header",
"(",
"\"Assets2S3...\"",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"Building assets files...\"",
")",
"print",
"(",
"\"\"",
")",
"build_assets",
"(",
"application",
".",
"app",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"Uploading assets files to S3 ...\"",
")",
"flask_s3",
".",
"create_all",
"(",
"application",
".",
"app",
")",
"print",
"(",
"\"\"",
")"
]
| Upload assets files to S3 | [
"Upload",
"assets",
"files",
"to",
"S3"
]
| bce481cb31a0972061dd99bc548701411dcb9de3 | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/cli.py#L285-L297 | train |
swharden/webinspect | webinspect/webinspect.py | launch | def launch(thing,title=False):
"""analyze a thing, create a nice HTML document, and launch it."""
html=htmlFromThing(thing,title=title)
if not html:
print("no HTML was generated.")
return
fname="%s/%s.html"%(tempfile.gettempdir(),str(time.time()))
with open(fname,'w') as f:
f.write(html)
webbrowser.open(fname) | python | def launch(thing,title=False):
"""analyze a thing, create a nice HTML document, and launch it."""
html=htmlFromThing(thing,title=title)
if not html:
print("no HTML was generated.")
return
fname="%s/%s.html"%(tempfile.gettempdir(),str(time.time()))
with open(fname,'w') as f:
f.write(html)
webbrowser.open(fname) | [
"def",
"launch",
"(",
"thing",
",",
"title",
"=",
"False",
")",
":",
"html",
"=",
"htmlFromThing",
"(",
"thing",
",",
"title",
"=",
"title",
")",
"if",
"not",
"html",
":",
"print",
"(",
"\"no HTML was generated.\"",
")",
"return",
"fname",
"=",
"\"%s/%s.html\"",
"%",
"(",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"str",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"html",
")",
"webbrowser",
".",
"open",
"(",
"fname",
")"
]
| analyze a thing, create a nice HTML document, and launch it. | [
"analyze",
"a",
"thing",
"create",
"a",
"nice",
"HTML",
"document",
"and",
"launch",
"it",
"."
]
| 432674b61666d66e5be330b61f9fad0b46dac84e | https://github.com/swharden/webinspect/blob/432674b61666d66e5be330b61f9fad0b46dac84e/webinspect/webinspect.py#L24-L33 | train |
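A quick usage sketch; any Python object can be inspected:

import datetime
launch(datetime.datetime.now(), title="datetime inspection")
# writes a one-off HTML report under the temp directory and opens it in the default browser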
swharden/webinspect | webinspect/webinspect.py | analyzeThing | def analyzeThing(originalThing2):
"""analyze an object and all its attributes. Returns a dictionary."""
originalThing = copy.copy(originalThing2)
things={}
for name in sorted(dir(originalThing)):
print("analyzing",name)
thing = copy.copy(originalThing)
if name in webinspect.blacklist or name.lower() in webinspect.blacklist:
item="DID NOT EVALUATE (this will appear as a string)"
else:
item=getattr(thing,name)
itemType=type(item).__name__
itemStr=thingToString(item)
itemEval=""
if "method" in itemStr:
if name in webinspect.blacklist or name.lower() in webinspect.blacklist:
itemEval="DID NOT EVALUATE"
else:
print("executing %s()"%name)
print("I'm about to try...")
try:
itemEval=thingToString(getattr(thing,name)())
except Exception as e:
exceptionToString(e)
#print("[%s] (%s) %s {%s}"%(name,itemType,itemStr,itemEval))
things[name]=[itemType,itemStr,itemEval]
return things | python | def analyzeThing(originalThing2):
"""analyze an object and all its attributes. Returns a dictionary."""
originalThing = copy.copy(originalThing2)
things={}
for name in sorted(dir(originalThing)):
print("analyzing",name)
thing = copy.copy(originalThing)
if name in webinspect.blacklist or name.lower() in webinspect.blacklist:
item="DID NOT EVALUATE (this will appear as a string)"
else:
item=getattr(thing,name)
itemType=type(item).__name__
itemStr=thingToString(item)
itemEval=""
if "method" in itemStr:
if name in webinspect.blacklist or name.lower() in webinspect.blacklist:
itemEval="DID NOT EVALUATE"
else:
print("executing %s()"%name)
print("I'm about to try...")
try:
itemEval=thingToString(getattr(thing,name)())
except Exception as e:
exceptionToString(e)
#print("[%s] (%s) %s {%s}"%(name,itemType,itemStr,itemEval))
things[name]=[itemType,itemStr,itemEval]
return things | [
"def",
"analyzeThing",
"(",
"originalThing2",
")",
":",
"originalThing",
"=",
"copy",
".",
"copy",
"(",
"originalThing2",
")",
"things",
"=",
"{",
"}",
"for",
"name",
"in",
"sorted",
"(",
"dir",
"(",
"originalThing",
")",
")",
":",
"print",
"(",
"\"analyzing\"",
",",
"name",
")",
"thing",
"=",
"copy",
".",
"copy",
"(",
"originalThing",
")",
"if",
"name",
"in",
"webinspect",
".",
"blacklist",
"or",
"name",
".",
"lower",
"(",
")",
"in",
"webinspect",
".",
"blacklist",
":",
"item",
"=",
"\"DID NOT EVALUATE (this will appear as a string)\"",
"else",
":",
"item",
"=",
"getattr",
"(",
"thing",
",",
"name",
")",
"itemType",
"=",
"type",
"(",
"item",
")",
".",
"__name__",
"itemStr",
"=",
"thingToString",
"(",
"item",
")",
"itemEval",
"=",
"\"\"",
"if",
"\"method\"",
"in",
"itemStr",
":",
"if",
"name",
"in",
"webinspect",
".",
"blacklist",
"or",
"name",
".",
"lower",
"(",
")",
"in",
"webinspect",
".",
"blacklist",
":",
"itemEval",
"=",
"\"DID NOT EVALUATE\"",
"else",
":",
"print",
"(",
"\"executing %s()\"",
"%",
"name",
")",
"print",
"(",
"\"I'm about to try...\"",
")",
"try",
":",
"itemEval",
"=",
"thingToString",
"(",
"getattr",
"(",
"thing",
",",
"name",
")",
"(",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"exceptionToString",
"(",
"e",
")",
"#print(\"[%s] (%s) %s {%s}\"%(name,itemType,itemStr,itemEval))\r",
"things",
"[",
"name",
"]",
"=",
"[",
"itemType",
",",
"itemStr",
",",
"itemEval",
"]",
"return",
"things"
]
| analyze an object and all its attributes. Returns a dictionary. | [
"analyze",
"an",
"object",
"and",
"all",
"its",
"attirbutes",
".",
"Returns",
"a",
"dictionary",
"."
]
| 432674b61666d66e5be330b61f9fad0b46dac84e | https://github.com/swharden/webinspect/blob/432674b61666d66e5be330b61f9fad0b46dac84e/webinspect/webinspect.py#L66-L94 | train |
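A sketch of the returned mapping, assuming the module-level helpers (thingToString, webinspect.blacklist) behave as referenced above; the concrete values are illustrative:

report = analyzeThing("hello")
item_type, item_repr, item_eval = report["upper"]
# item_type is the attribute's type name (e.g. 'builtin_function_or_method'),
# item_repr its string form, and item_eval the stringified result of calling it ('HELLO')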
swharden/webinspect | webinspect/webinspect.py | websafe | def websafe(s):
"""return a string with HTML-safe text"""
s=s.replace("<","&lt;").replace(">","&gt;")
s=s.replace(r'\x',r' \x')
s=s.replace("\n","<br>")
return s | python | def websafe(s):
"""return a string with HTML-safe text"""
s=s.replace("<","&lt;").replace(">","&gt;")
s=s.replace(r'\x',r' \x')
s=s.replace("\n","<br>")
return s | [
"def",
"websafe",
"(",
"s",
")",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"\"<\"",
",",
"\"<\"",
")",
".",
"replace",
"(",
"\">\"",
",",
"\">\"",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"r'\\x'",
",",
"r' \\x'",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"<br>\"",
")",
"return",
"s"
]
| return a string with HTML-safe text | [
"return",
"a",
"string",
"with",
"HTML",
"-",
"safe",
"text"
]
| 432674b61666d66e5be330b61f9fad0b46dac84e | https://github.com/swharden/webinspect/blob/432674b61666d66e5be330b61f9fad0b46dac84e/webinspect/webinspect.py#L96-L101 | train |
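A short example of the escaping (the &lt;/&gt; replacements plus the newline-to-<br> rewrite):

print(websafe("<b>1 < 2</b>\nnext"))
# &lt;b&gt;1 &lt; 2&lt;/b&gt;<br>next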
ronhanson/python-tbx | tbx/text.py | slugify | def slugify(text, delim='-'):
"""Generates an slightly worse ASCII-only slug."""
punctuation_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.:]+')
result = []
for word in punctuation_re.split(text.lower()):
word = normalize_text(word)
if word:
result.append(word)
return delim.join(result) | python | def slugify(text, delim='-'):
"""Generates an slightly worse ASCII-only slug."""
punctuation_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.:]+')
result = []
for word in punctuation_re.split(text.lower()):
word = normalize_text(word)
if word:
result.append(word)
return delim.join(result) | [
"def",
"slugify",
"(",
"text",
",",
"delim",
"=",
"'-'",
")",
":",
"punctuation_re",
"=",
"re",
".",
"compile",
"(",
"r'[\\t !\"#$%&\\'()*\\-/<=>?@\\[\\\\\\]^_`{|},.:]+'",
")",
"result",
"=",
"[",
"]",
"for",
"word",
"in",
"punctuation_re",
".",
"split",
"(",
"text",
".",
"lower",
"(",
")",
")",
":",
"word",
"=",
"normalize_text",
"(",
"word",
")",
"if",
"word",
":",
"result",
".",
"append",
"(",
"word",
")",
"return",
"delim",
".",
"join",
"(",
"result",
")"
]
| Generates an slightly worse ASCII-only slug. | [
"Generates",
"an",
"slightly",
"worse",
"ASCII",
"-",
"only",
"slug",
"."
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/text.py#L48-L57 | train |
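A usage sketch, assuming normalize_text (defined elsewhere in tbx/text.py) passes plain ASCII words through unchanged:

slugify("Hello, World!")           # -> 'hello-world'
slugify("Foo/Bar Baz", delim="_")  # -> 'foo_bar_baz'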
ronhanson/python-tbx | tbx/text.py | javascript_escape | def javascript_escape(s, quote_double_quotes=True):
"""
Escape characters for javascript strings.
"""
ustring_re = re.compile(u"([\u0080-\uffff])")
def fix(match):
return r"\u%04x" % ord(match.group(1))
if type(s) == str:
s = s.decode('utf-8')
elif type(s) != six.text_type:
raise TypeError(s)
s = s.replace('\\', '\\\\')
s = s.replace('\r', '\\r')
s = s.replace('\n', '\\n')
s = s.replace('\t', '\\t')
s = s.replace("'", "\\'")
if quote_double_quotes:
s = s.replace('"', '&quot;')
return str(ustring_re.sub(fix, s)) | python | def javascript_escape(s, quote_double_quotes=True):
"""
Escape characters for javascript strings.
"""
ustring_re = re.compile(u"([\u0080-\uffff])")
def fix(match):
return r"\u%04x" % ord(match.group(1))
if type(s) == str:
s = s.decode('utf-8')
elif type(s) != six.text_type:
raise TypeError(s)
s = s.replace('\\', '\\\\')
s = s.replace('\r', '\\r')
s = s.replace('\n', '\\n')
s = s.replace('\t', '\\t')
s = s.replace("'", "\\'")
if quote_double_quotes:
s = s.replace('"', '&quot;')
return str(ustring_re.sub(fix, s)) | [
"def",
"javascript_escape",
"(",
"s",
",",
"quote_double_quotes",
"=",
"True",
")",
":",
"ustring_re",
"=",
"re",
".",
"compile",
"(",
"u\"([\\u0080-\\uffff])\"",
")",
"def",
"fix",
"(",
"match",
")",
":",
"return",
"r\"\\u%04x\"",
"%",
"ord",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"if",
"type",
"(",
"s",
")",
"==",
"str",
":",
"s",
"=",
"s",
".",
"decode",
"(",
"'utf-8'",
")",
"elif",
"type",
"(",
"s",
")",
"!=",
"six",
".",
"text_type",
":",
"raise",
"TypeError",
"(",
"s",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"'\\r'",
",",
"'\\\\r'",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"'\\n'",
",",
"'\\\\n'",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"'\\t'",
",",
"'\\\\t'",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"\"'\"",
",",
"\"\\\\'\"",
")",
"if",
"quote_double_quotes",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"'\"'",
",",
"'"'",
")",
"return",
"str",
"(",
"ustring_re",
".",
"sub",
"(",
"fix",
",",
"s",
")",
")"
]
| Escape characters for javascript strings. | [
"Escape",
"characters",
"for",
"javascript",
"strings",
"."
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/text.py#L84-L104 | train |
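An example under Python 2 semantics, which this function targets (the str branch calls .decode and six.text_type is unicode there):

javascript_escape(u'He said "hi"\nbye')
# -> 'He said &quot;hi&quot;\\nbye'  (literal backslash-n; double quotes become &quot;)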
ronhanson/python-tbx | tbx/text.py | seconds_to_hms_verbose | def seconds_to_hms_verbose(t):
"""
Converts seconds float to 'H hours 8 minutes, 30 seconds' format
"""
hours = int((t / 3600))
mins = int((t / 60) % 60)
secs = int(t % 60)
return ' '.join([
(str(hours) + ' hour' + ('s' if hours > 1 else '')) if hours > 0 else '',
(str(mins) + ' minute' + ('s' if mins > 1 else '')) if mins > 0 else '',
(str(secs) + ' second' + ('s' if secs > 1 else '')) if secs > 0 else ''
]) | python | def seconds_to_hms_verbose(t):
"""
Converts seconds float to 'H hours 8 minutes, 30 seconds' format
"""
hours = int((t / 3600))
mins = int((t / 60) % 60)
secs = int(t % 60)
return ' '.join([
(str(hours) + ' hour' + ('s' if hours > 1 else '')) if hours > 0 else '',
(str(mins) + ' minute' + ('s' if mins > 1 else '')) if mins > 0 else '',
(str(secs) + ' second' + ('s' if secs > 1 else '')) if secs > 0 else ''
]) | [
"def",
"seconds_to_hms_verbose",
"(",
"t",
")",
":",
"hours",
"=",
"int",
"(",
"(",
"t",
"/",
"3600",
")",
")",
"mins",
"=",
"int",
"(",
"(",
"t",
"/",
"60",
")",
"%",
"60",
")",
"secs",
"=",
"int",
"(",
"t",
"%",
"60",
")",
"return",
"' '",
".",
"join",
"(",
"[",
"(",
"hours",
"+",
"' hour'",
"+",
"(",
"'s'",
"if",
"hours",
">",
"1",
"else",
"''",
")",
")",
"if",
"hours",
">",
"0",
"else",
"''",
",",
"(",
"mins",
"+",
"' minute'",
"+",
"(",
"'s'",
"if",
"mins",
">",
"1",
"else",
"''",
")",
")",
"if",
"mins",
">",
"0",
"else",
"''",
",",
"(",
"secs",
"+",
"' second'",
"+",
"(",
"'s'",
"if",
"secs",
">",
"1",
"else",
"''",
")",
")",
"if",
"secs",
">",
"0",
"else",
"''",
"]",
")"
]
| Converts seconds float to 'H hours 8 minutes, 30 seconds' format | [
"Converts",
"seconds",
"float",
"to",
"H",
"hours",
"8",
"minutes",
"30",
"seconds",
"format"
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/text.py#L158-L169 | train |
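Worked examples; note that zero-valued components still contribute their join separator, so small inputs keep a leading space:

seconds_to_hms_verbose(3725)  # -> '1 hour 2 minutes 5 seconds'
seconds_to_hms_verbose(65)    # -> ' 1 minute 5 seconds' (empty hours slot keeps its space)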
ronhanson/python-tbx | tbx/text.py | pretty_render | def pretty_render(data, format='text', indent=0):
"""
Render a dict based on a format
"""
if format == 'json':
return render_json(data)
elif format == 'html':
return render_html(data)
elif format == 'xml':
return render_xml(data)
else:
return dict_to_plaintext(data, indent=indent) | python | def pretty_render(data, format='text', indent=0):
"""
Render a dict based on a format
"""
if format == 'json':
return render_json(data)
elif format == 'html':
return render_html(data)
elif format == 'xml':
return render_xml(data)
else:
return dict_to_plaintext(data, indent=indent) | [
"def",
"pretty_render",
"(",
"data",
",",
"format",
"=",
"'text'",
",",
"indent",
"=",
"0",
")",
":",
"if",
"format",
"==",
"'json'",
":",
"return",
"render_json",
"(",
"data",
")",
"elif",
"format",
"==",
"'html'",
":",
"return",
"render_html",
"(",
"data",
")",
"elif",
"format",
"==",
"'xml'",
":",
"return",
"render_xml",
"(",
"data",
")",
"else",
":",
"return",
"dict_to_plaintext",
"(",
"data",
",",
"indent",
"=",
"indent",
")"
]
| Render a dict based on a format | [
"Render",
"a",
"dict",
"based",
"on",
"a",
"format"
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/text.py#L263-L274 | train |
ronhanson/python-tbx | tbx/text.py | dict_to_xml | def dict_to_xml(xml_dict):
"""
Converts a dictionary to an XML ElementTree Element
"""
import lxml.etree as etree
root_tag = list(xml_dict.keys())[0]
root = etree.Element(root_tag)
_dict_to_xml_recurse(root, xml_dict[root_tag])
return root | python | def dict_to_xml(xml_dict):
"""
Converts a dictionary to an XML ElementTree Element
"""
import lxml.etree as etree
root_tag = list(xml_dict.keys())[0]
root = etree.Element(root_tag)
_dict_to_xml_recurse(root, xml_dict[root_tag])
return root | [
"def",
"dict_to_xml",
"(",
"xml_dict",
")",
":",
"import",
"lxml",
".",
"etree",
"as",
"etree",
"root_tag",
"=",
"list",
"(",
"xml_dict",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"root",
"=",
"etree",
".",
"Element",
"(",
"root_tag",
")",
"_dict_to_xml_recurse",
"(",
"root",
",",
"xml_dict",
"[",
"root_tag",
"]",
")",
"return",
"root"
]
| Converts a dictionary to an XML ElementTree Element | [
"Converts",
"a",
"dictionary",
"to",
"an",
"XML",
"ElementTree",
"Element"
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/text.py#L310-L318 | train |
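_dict_to_xml_recurse is not included in this record; assuming it maps nested dicts to child elements and strings to element text (a reasonable reading of the call), usage would look like:

import lxml.etree as etree
root = dict_to_xml({"config": {"host": "localhost", "port": "5984"}})
etree.tostring(root)  # b'<config><host>localhost</host><port>5984</port></config>' (child order follows dict insertion order)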
ronhanson/python-tbx | tbx/text.py | xml_get_tag | def xml_get_tag(xml, tag, parent_tag=None, multi_line=False):
"""
Returns the tag data for the first instance of the named tag, or for all instances if multi_line is true.
If a parent tag is specified, then that will be required before the tag.
"""
expr_str = '[<:]' + tag + '.*?>(?P<matched_text>.+?)<'
if parent_tag:
expr_str = '[<:]' + parent_tag + '.*?>.*?' + expr_str
expr = re.compile(expr_str, re.DOTALL | re.IGNORECASE)
if multi_line:
return expr.findall(xml)
else:
if expr.search(xml):
return expr.search(xml).group('matched_text').strip()
else:
return None | python | def xml_get_tag(xml, tag, parent_tag=None, multi_line=False):
"""
Returns the tag data for the first instance of the named tag, or for all instances if multi_line is true.
If a parent tag is specified, then that will be required before the tag.
"""
expr_str = '[<:]' + tag + '.*?>(?P<matched_text>.+?)<'
if parent_tag:
expr_str = '[<:]' + parent_tag + '.*?>.*?' + expr_str
expr = re.compile(expr_str, re.DOTALL | re.IGNORECASE)
if multi_line:
return expr.findall(xml)
else:
if expr.search(xml):
return expr.search(xml).group('matched_text').strip()
else:
return None | [
"def",
"xml_get_tag",
"(",
"xml",
",",
"tag",
",",
"parent_tag",
"=",
"None",
",",
"multi_line",
"=",
"False",
")",
":",
"expr_str",
"=",
"'[<:]'",
"+",
"tag",
"+",
"'.*?>(?P<matched_text>.+?)<'",
"if",
"parent_tag",
":",
"expr_str",
"=",
"'[<:]'",
"+",
"parent_tag",
"+",
"'.*?>.*?'",
"+",
"expr_str",
"expr",
"=",
"re",
".",
"compile",
"(",
"expr_str",
",",
"re",
".",
"DOTALL",
"|",
"re",
".",
"IGNORECASE",
")",
"if",
"multi_line",
":",
"return",
"expr",
".",
"findall",
"(",
"xml",
")",
"else",
":",
"if",
"expr",
".",
"search",
"(",
"xml",
")",
":",
"return",
"expr",
".",
"search",
"(",
"xml",
")",
".",
"group",
"(",
"'matched_text'",
")",
".",
"strip",
"(",
")",
"else",
":",
"return",
"None"
]
| Returns the tag data for the first instance of the named tag, or for all instances if multi_line is true.
If a parent tag is specified, then that will be required before the tag. | [
"Returns",
"the",
"tag",
"data",
"for",
"the",
"first",
"instance",
"of",
"the",
"named",
"tag",
"or",
"for",
"all",
"instances",
"if",
"multi",
"is",
"true",
".",
"If",
"a",
"parent",
"tag",
"is",
"specified",
"then",
"that",
"will",
"be",
"required",
"before",
"the",
"tag",
"."
]
| 87f72ae0cadecafbcd144f1e930181fba77f6b83 | https://github.com/ronhanson/python-tbx/blob/87f72ae0cadecafbcd144f1e930181fba77f6b83/tbx/text.py#L505-L520 | train |
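Quick examples of the regex-based lookup (the leading '[<:]' also accepts namespace-prefixed tags):

xml_get_tag("<entry><title>Hello</title></entry>", "title")         # -> 'Hello'
xml_get_tag("<r><t>1</t><t>2</t></r>", "t", multi_line=True)        # -> ['1', '2']
xml_get_tag("<a><t>x</t></a><b><t>y</t></b>", "t", parent_tag="b")  # -> 'y'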
clement-alexandre/TotemBionet | totembionet/src/resource_table/resource_table.py | ResourceTable._build_table | def _build_table(self) -> Dict[State, Tuple[Multiplex, ...]]:
""" Private method which builds the table which maps a State to the active multiplexes. """
result: Dict[State, Tuple[Multiplex, ...]] = {}
for state in self.influence_graph.all_states():
result[state] = tuple(multiplex for multiplex in self.influence_graph.multiplexes
if multiplex.is_active(state))
return result | python | def _build_table(self) -> Dict[State, Tuple[Multiplex, ...]]:
""" Private method which builds the table which maps a State to the active multiplexes. """
result: Dict[State, Tuple[Multiplex, ...]] = {}
for state in self.influence_graph.all_states():
result[state] = tuple(multiplex for multiplex in self.influence_graph.multiplexes
if multiplex.is_active(state))
return result | [
"def",
"_build_table",
"(",
"self",
")",
"->",
"Dict",
"[",
"State",
",",
"Tuple",
"[",
"Multiplex",
",",
"...",
"]",
"]",
":",
"result",
":",
"Dict",
"[",
"State",
",",
"Tuple",
"[",
"Multiplex",
",",
"...",
"]",
"]",
"=",
"{",
"}",
"for",
"state",
"in",
"self",
".",
"influence_graph",
".",
"all_states",
"(",
")",
":",
"result",
"[",
"state",
"]",
"=",
"tuple",
"(",
"multiplex",
"for",
"multiplex",
"in",
"self",
".",
"influence_graph",
".",
"multiplexes",
"if",
"multiplex",
".",
"is_active",
"(",
"state",
")",
")",
"return",
"result"
]
| Private method which builds the table which maps a State to the active multiplexes. | [
"Private",
"method",
"which",
"build",
"the",
"table",
"which",
"map",
"a",
"State",
"to",
"the",
"active",
"multiplex",
"."
]
| f37a2f9358c1ce49f21c4a868b904da5dcd4614f | https://github.com/clement-alexandre/TotemBionet/blob/f37a2f9358c1ce49f21c4a868b904da5dcd4614f/totembionet/src/resource_table/resource_table.py#L19-L25 | train |
abnerjacobsen/tinydb-jsonorm | src/tinydb_jsonorm/cuid.py | _to_base36 | def _to_base36(number):
"""
Convert a positive integer to a base36 string.
Taken from Stack Overflow and modified.
"""
if number < 0:
raise ValueError("Cannot encode negative numbers")
chars = ""
while number != 0:
number, i = divmod(number, 36) # 36-character alphabet
chars = _alphabet[i] + chars
return chars or "0" | python | def _to_base36(number):
"""
Convert a positive integer to a base36 string.
Taken from Stack Overflow and modified.
"""
if number < 0:
raise ValueError("Cannot encode negative numbers")
chars = ""
while number != 0:
number, i = divmod(number, 36) # 36-character alphabet
chars = _alphabet[i] + chars
return chars or "0" | [
"def",
"_to_base36",
"(",
"number",
")",
":",
"if",
"number",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Cannot encode negative numbers\"",
")",
"chars",
"=",
"\"\"",
"while",
"number",
"!=",
"0",
":",
"number",
",",
"i",
"=",
"divmod",
"(",
"number",
",",
"36",
")",
"# 36-character alphabet",
"chars",
"=",
"_alphabet",
"[",
"i",
"]",
"+",
"chars",
"return",
"chars",
"or",
"\"0\""
]
| Convert a positive integer to a base36 string.
Taken from Stack Overflow and modified. | [
"Convert",
"a",
"positive",
"integer",
"to",
"a",
"base36",
"string",
"."
]
| 704d3f887cc8963769ffbb116eb7e6909deeaecd | https://github.com/abnerjacobsen/tinydb-jsonorm/blob/704d3f887cc8963769ffbb116eb7e6909deeaecd/src/tinydb_jsonorm/cuid.py#L20-L34 | train |
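Assuming _alphabet is the usual '0123456789abcdefghijklmnopqrstuvwxyz' (it is defined elsewhere in cuid.py):

_to_base36(0)   # -> '0'
_to_base36(35)  # -> 'z'
_to_base36(36)  # -> '10'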
abnerjacobsen/tinydb-jsonorm | src/tinydb_jsonorm/cuid.py | _pad | def _pad(string, size):
"""
'Pad' a string with leading zeroes to fit the given size, truncating
if necessary.
"""
strlen = len(string)
if strlen == size:
return string
if strlen < size:
return _padding[0:size-strlen] + string
return string[-size:] | python | def _pad(string, size):
"""
'Pad' a string with leading zeroes to fit the given size, truncating
if necessary.
"""
strlen = len(string)
if strlen == size:
return string
if strlen < size:
return _padding[0:size-strlen] + string
return string[-size:] | [
"def",
"_pad",
"(",
"string",
",",
"size",
")",
":",
"strlen",
"=",
"len",
"(",
"string",
")",
"if",
"strlen",
"==",
"size",
":",
"return",
"string",
"if",
"strlen",
"<",
"size",
":",
"return",
"_padding",
"[",
"0",
":",
"size",
"-",
"strlen",
"]",
"+",
"string",
"return",
"string",
"[",
"-",
"size",
":",
"]"
]
| 'Pad' a string with leading zeroes to fit the given size, truncating
if necessary. | [
"Pad",
"a",
"string",
"with",
"leading",
"zeroes",
"to",
"fit",
"the",
"given",
"size",
"truncating",
"if",
"necessary",
"."
]
| 704d3f887cc8963769ffbb116eb7e6909deeaecd | https://github.com/abnerjacobsen/tinydb-jsonorm/blob/704d3f887cc8963769ffbb116eb7e6909deeaecd/src/tinydb_jsonorm/cuid.py#L38-L48 | train |
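Assuming _padding is a module-level run of '0' characters, padding and truncation behave like so (truncation keeps the least significant end):

_pad("ab", 4)      # -> '00ab'
_pad("abcdef", 4)  # -> 'cdef'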
abnerjacobsen/tinydb-jsonorm | src/tinydb_jsonorm/cuid.py | _random_block | def _random_block():
"""
Generate a random string of `BLOCK_SIZE` length.
"""
# TODO: Use a better RNG than random.randint
random_number = random.randint(0, DISCRETE_VALUES)
random_string = _to_base36(random_number)
return _pad(random_string, BLOCK_SIZE) | python | def _random_block():
"""
Generate a random string of `BLOCK_SIZE` length.
"""
# TODO: Use a better RNG than random.randint
random_number = random.randint(0, DISCRETE_VALUES)
random_string = _to_base36(random_number)
return _pad(random_string, BLOCK_SIZE) | [
"def",
"_random_block",
"(",
")",
":",
"# TODO: Use a better RNG than random.randint",
"random_number",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"DISCRETE_VALUES",
")",
"random_string",
"=",
"_to_base36",
"(",
"random_number",
")",
"return",
"_pad",
"(",
"random_string",
",",
"BLOCK_SIZE",
")"
]
| Generate a random string of `BLOCK_SIZE` length. | [
"Generate",
"a",
"random",
"string",
"of",
"BLOCK_SIZE",
"length",
"."
]
| 704d3f887cc8963769ffbb116eb7e6909deeaecd | https://github.com/abnerjacobsen/tinydb-jsonorm/blob/704d3f887cc8963769ffbb116eb7e6909deeaecd/src/tinydb_jsonorm/cuid.py#L51-L58 | train |
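The TODO above flags the non-cryptographic RNG; a hardening sketch (an assumption, not part of the package) swaps in the OS entropy pool and keeps the value strictly below DISCRETE_VALUES, since randint is inclusive:

import random
_sysrand = random.SystemRandom()  # backed by os.urandom

def _random_block():
    # Same contract as above: a BLOCK_SIZE-character base36 string.
    return _pad(_to_base36(_sysrand.randint(0, DISCRETE_VALUES - 1)), BLOCK_SIZE)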
abnerjacobsen/tinydb-jsonorm | src/tinydb_jsonorm/cuid.py | get_process_fingerprint | def get_process_fingerprint():
"""
Extract a unique fingerprint for the current process, using a
combination of the process PID and the system's hostname.
"""
pid = os.getpid()
hostname = socket.gethostname()
padded_pid = _pad(_to_base36(pid), 2)
hostname_hash = sum([ord(x) for x in hostname]) + len(hostname) + 36
padded_hostname = _pad(_to_base36(hostname_hash), 2)
return padded_pid + padded_hostname | python | def get_process_fingerprint():
"""
Extract a unique fingerprint for the current process, using a
combination of the process PID and the system's hostname.
"""
pid = os.getpid()
hostname = socket.gethostname()
padded_pid = _pad(_to_base36(pid), 2)
hostname_hash = sum([ord(x) for x in hostname]) + len(hostname) + 36
padded_hostname = _pad(_to_base36(hostname_hash), 2)
return padded_pid + padded_hostname | [
"def",
"get_process_fingerprint",
"(",
")",
":",
"pid",
"=",
"os",
".",
"getpid",
"(",
")",
"hostname",
"=",
"socket",
".",
"gethostname",
"(",
")",
"padded_pid",
"=",
"_pad",
"(",
"_to_base36",
"(",
"pid",
")",
",",
"2",
")",
"hostname_hash",
"=",
"sum",
"(",
"[",
"ord",
"(",
"x",
")",
"for",
"x",
"in",
"hostname",
"]",
")",
"+",
"len",
"(",
"hostname",
")",
"+",
"36",
"padded_hostname",
"=",
"_pad",
"(",
"_to_base36",
"(",
"hostname_hash",
")",
",",
"2",
")",
"return",
"padded_pid",
"+",
"padded_hostname"
]
| Extract a unique fingerprint for the current process, using a
combination of the process PID and the system's hostname. | [
"Extract",
"a",
"unique",
"fingerprint",
"for",
"the",
"current",
"process",
"using",
"a",
"combination",
"of",
"the",
"process",
"PID",
"and",
"the",
"system",
"s",
"hostname",
"."
]
| 704d3f887cc8963769ffbb116eb7e6909deeaecd | https://github.com/abnerjacobsen/tinydb-jsonorm/blob/704d3f887cc8963769ffbb116eb7e6909deeaecd/src/tinydb_jsonorm/cuid.py#L64-L74 | train |
abnerjacobsen/tinydb-jsonorm | src/tinydb_jsonorm/cuid.py | CuidGenerator.counter | def counter(self):
"""
Rolling counter that ensures same-machine and same-time
cuids don't collide.
"""
self._counter += 1
if self._counter >= DISCRETE_VALUES:
self._counter = 0
return self._counter | python | def counter(self):
"""
Rolling counter that ensures same-machine and same-time
cuids don't collide.
"""
self._counter += 1
if self._counter >= DISCRETE_VALUES:
self._counter = 0
return self._counter | [
"def",
"counter",
"(",
"self",
")",
":",
"self",
".",
"_counter",
"+=",
"1",
"if",
"self",
".",
"_counter",
">=",
"DISCRETE_VALUES",
":",
"self",
".",
"_counter",
"=",
"0",
"return",
"self",
".",
"_counter"
]
| Rolling counter that ensures same-machine and same-time
cuids don't collide. | [
"Rolling",
"counter",
"that",
"ensures",
"same",
"-",
"machine",
"and",
"same",
"-",
"time",
"cuids",
"don",
"t",
"collide",
"."
]
| 704d3f887cc8963769ffbb116eb7e6909deeaecd | https://github.com/abnerjacobsen/tinydb-jsonorm/blob/704d3f887cc8963769ffbb116eb7e6909deeaecd/src/tinydb_jsonorm/cuid.py#L103-L111 | train |
abnerjacobsen/tinydb-jsonorm | src/tinydb_jsonorm/cuid.py | CuidGenerator.cuid | def cuid(self):
"""
Generate a full-length cuid as a string.
"""
# start with a hardcoded lowercase c
identifier = "c"
# add a timestamp in milliseconds since the epoch, in base 36
millis = int(time.time() * 1000)
identifier += _to_base36(millis)
# use a counter to ensure no collisions on the same machine
# in the same millisecond
count = _pad(_to_base36(self.counter), BLOCK_SIZE)
identifier += count
# add the process fingerprint
identifier += self.fingerprint
# add a couple of random blocks
identifier += _random_block()
identifier += _random_block()
return identifier | python | def cuid(self):
"""
Generate a full-length cuid as a string.
"""
# start with a hardcoded lowercase c
identifier = "c"
# add a timestamp in milliseconds since the epoch, in base 36
millis = int(time.time() * 1000)
identifier += _to_base36(millis)
# use a counter to ensure no collisions on the same machine
# in the same millisecond
count = _pad(_to_base36(self.counter), BLOCK_SIZE)
identifier += count
# add the process fingerprint
identifier += self.fingerprint
# add a couple of random blocks
identifier += _random_block()
identifier += _random_block()
return identifier | [
"def",
"cuid",
"(",
"self",
")",
":",
"# start with a hardcoded lowercase c",
"identifier",
"=",
"\"c\"",
"# add a timestamp in milliseconds since the epoch, in base 36",
"millis",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
"identifier",
"+=",
"_to_base36",
"(",
"millis",
")",
"# use a counter to ensure no collisions on the same machine",
"# in the same millisecond",
"count",
"=",
"_pad",
"(",
"_to_base36",
"(",
"self",
".",
"counter",
")",
",",
"BLOCK_SIZE",
")",
"identifier",
"+=",
"count",
"# add the process fingerprint",
"identifier",
"+=",
"self",
".",
"fingerprint",
"# add a couple of random blocks",
"identifier",
"+=",
"_random_block",
"(",
")",
"identifier",
"+=",
"_random_block",
"(",
")",
"return",
"identifier"
]
| Generate a full-length cuid as a string. | [
"Generate",
"a",
"full",
"-",
"length",
"cuid",
"as",
"a",
"string",
"."
]
| 704d3f887cc8963769ffbb116eb7e6909deeaecd | https://github.com/abnerjacobsen/tinydb-jsonorm/blob/704d3f887cc8963769ffbb116eb7e6909deeaecd/src/tinydb_jsonorm/cuid.py#L113-L132 | train |
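A usage sketch, assuming the generator's constructor fills in the process fingerprint (as get_process_fingerprint above suggests) and that BLOCK_SIZE is 4, as in the reference cuid implementation. For current timestamps the result is 25 characters: 'c', 8 timestamp digits, 4 counter, 4 fingerprint and two 4-character random blocks:

gen = CuidGenerator()
gen.cuid()  # e.g. 'cjld2cjxh0000qzrmn831i7rn' (illustrative value)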
PBR/MQ2 | MQ2/plugins/xls_plugin.py | read_excel_file | def read_excel_file(inputfile, sheet_name):
""" Return a matrix containing all the information present in the
excel sheet of the specified excel document.
:arg inputfile: excel document to read
:arg sheet_name: the name of the excel sheet to return
"""
workbook = xlrd.open_workbook(inputfile)
output = []
found = False
for sheet in workbook.sheets():
if sheet.name == sheet_name:
found = True
for row in range(sheet.nrows):
values = []
for col in range(sheet.ncols):
values.append(sheet.cell(row, col).value)
output.append(values)
if not found: # pragma: no cover
raise MQ2Exception('Invalid session identifier provided')
return output | python | def read_excel_file(inputfile, sheet_name):
""" Return a matrix containing all the information present in the
excel sheet of the specified excel document.
:arg inputfile: excel document to read
:arg sheet_name: the name of the excel sheet to return
"""
workbook = xlrd.open_workbook(inputfile)
output = []
found = False
for sheet in workbook.sheets():
if sheet.name == sheet_name:
found = True
for row in range(sheet.nrows):
values = []
for col in range(sheet.ncols):
values.append(sheet.cell(row, col).value)
output.append(values)
if not found: # pragma: no cover
raise MQ2Exception('Invalid session identifier provided')
return output | [
"def",
"read_excel_file",
"(",
"inputfile",
",",
"sheet_name",
")",
":",
"workbook",
"=",
"xlrd",
".",
"open_workbook",
"(",
"inputfile",
")",
"output",
"=",
"[",
"]",
"found",
"=",
"False",
"for",
"sheet",
"in",
"workbook",
".",
"sheets",
"(",
")",
":",
"if",
"sheet",
".",
"name",
"==",
"sheet_name",
":",
"found",
"=",
"True",
"for",
"row",
"in",
"range",
"(",
"sheet",
".",
"nrows",
")",
":",
"values",
"=",
"[",
"]",
"for",
"col",
"in",
"range",
"(",
"sheet",
".",
"ncols",
")",
":",
"values",
".",
"append",
"(",
"sheet",
".",
"cell",
"(",
"row",
",",
"col",
")",
".",
"value",
")",
"output",
".",
"append",
"(",
"values",
")",
"if",
"not",
"found",
":",
"# pragma: no cover",
"raise",
"MQ2Exception",
"(",
"'Invalid session identifier provided'",
")",
"return",
"output"
]
| Return a matrix containing all the information present in the
excel sheet of the specified excel document.
:arg inputfile: excel document to read
:arg sheet_name: the name of the excel sheet to return | [
"Return",
"a",
"matrix",
"containing",
"all",
"the",
"information",
"present",
"in",
"the",
"excel",
"sheet",
"of",
"the",
"specified",
"excel",
"document",
"."
]
| 6d84dea47e6751333004743f588f03158e35c28d | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/plugins/xls_plugin.py#L66-L87 | train |
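A hedged usage sketch; the workbook path and sheet name are hypothetical:

rows = read_excel_file("results.xls", "Sheet1")
header, data = rows[0], rows[1:]  # each row is a list of cell values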
PBR/MQ2 | MQ2/plugins/xls_plugin.py | XslPlugin.get_session_identifiers | def get_session_identifiers(cls, folder=None, inputfile=None):
""" Retrieve the list of session identifiers contained in the
data on the folder or the inputfile.
For this plugin, it returns the list of excel sheet available.
:kwarg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg inputfile: the path to the input file to use
"""
sessions = []
if inputfile and folder:
raise MQ2Exception(
'You should specify either a folder or a file')
if folder:
if not os.path.isdir(folder):
return sessions
for root, dirs, files in os.walk(folder):
for filename in files:
filename = os.path.join(root, filename)
for ext in SUPPORTED_FILES:
if filename.endswith(ext):
wbook = xlrd.open_workbook(filename)
for sheet in wbook.sheets():
if sheet.name not in sessions:
sessions.append(sheet.name)
elif inputfile:
if os.path.isdir(inputfile):
return sessions
for ext in SUPPORTED_FILES:
if inputfile.endswith(ext):
wbook = xlrd.open_workbook(inputfile)
for sheet in wbook.sheets():
if sheet.name not in sessions:
sessions.append(sheet.name)
return sessions | python | def get_session_identifiers(cls, folder=None, inputfile=None):
""" Retrieve the list of session identifiers contained in the
data on the folder or the inputfile.
For this plugin, it returns the list of excel sheet available.
:kwarg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg inputfile: the path to the input file to use
"""
sessions = []
if inputfile and folder:
raise MQ2Exception(
'You should specify either a folder or a file')
if folder:
if not os.path.isdir(folder):
return sessions
for root, dirs, files in os.walk(folder):
for filename in files:
filename = os.path.join(root, filename)
for ext in SUPPORTED_FILES:
if filename.endswith(ext):
wbook = xlrd.open_workbook(filename)
for sheet in wbook.sheets():
if sheet.name not in sessions:
sessions.append(sheet.name)
elif inputfile:
if os.path.isdir(inputfile):
return sessions
for ext in SUPPORTED_FILES:
if inputfile.endswith(ext):
wbook = xlrd.open_workbook(inputfile)
for sheet in wbook.sheets():
if sheet.name not in sessions:
sessions.append(sheet.name)
return sessions | [
"def",
"get_session_identifiers",
"(",
"cls",
",",
"folder",
"=",
"None",
",",
"inputfile",
"=",
"None",
")",
":",
"sessions",
"=",
"[",
"]",
"if",
"inputfile",
"and",
"folder",
":",
"raise",
"MQ2Exception",
"(",
"'You should specify either a folder or a file'",
")",
"if",
"folder",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"folder",
")",
":",
"return",
"sessions",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"folder",
")",
":",
"for",
"filename",
"in",
"files",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
"for",
"ext",
"in",
"SUPPORTED_FILES",
":",
"if",
"filename",
".",
"endswith",
"(",
"ext",
")",
":",
"wbook",
"=",
"xlrd",
".",
"open_workbook",
"(",
"filename",
")",
"for",
"sheet",
"in",
"wbook",
".",
"sheets",
"(",
")",
":",
"if",
"sheet",
".",
"name",
"not",
"in",
"sessions",
":",
"sessions",
".",
"append",
"(",
"sheet",
".",
"name",
")",
"elif",
"inputfile",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"inputfile",
")",
":",
"return",
"sessions",
"for",
"ext",
"in",
"SUPPORTED_FILES",
":",
"if",
"inputfile",
".",
"endswith",
"(",
"ext",
")",
":",
"wbook",
"=",
"xlrd",
".",
"open_workbook",
"(",
"inputfile",
")",
"for",
"sheet",
"in",
"wbook",
".",
"sheets",
"(",
")",
":",
"if",
"sheet",
".",
"name",
"not",
"in",
"sessions",
":",
"sessions",
".",
"append",
"(",
"sheet",
".",
"name",
")",
"return",
"sessions"
]
| Retrieve the list of session identifiers contained in the
data on the folder or the inputfile.
For this plugin, it returns the list of excel sheet available.
:kwarg folder: the path to the folder containing the files to
check. This folder may contain sub-folders.
:kwarg inputfile: the path to the input file to use | [
"Retrieve",
"the",
"list",
"of",
"session",
"identifiers",
"contained",
"in",
"the",
"data",
"on",
"the",
"folder",
"or",
"the",
"inputfile",
".",
"For",
"this",
"plugin",
"it",
"returns",
"the",
"list",
"of",
"excel",
"sheet",
"available",
"."
]
| 6d84dea47e6751333004743f588f03158e35c28d | https://github.com/PBR/MQ2/blob/6d84dea47e6751333004743f588f03158e35c28d/MQ2/plugins/xls_plugin.py#L205-L240 | train |
cozy/python_cozy_management | cozy_management/helpers.py | file_rights | def file_rights(filepath, mode=None, uid=None, gid=None):
'''
Change file rights
'''
file_handle = os.open(filepath, os.O_RDONLY)
if mode:
os.fchmod(file_handle, mode)
if uid:
if not gid:
gid = 0
os.fchown(file_handle, uid, gid)
os.close(file_handle) | python | def file_rights(filepath, mode=None, uid=None, gid=None):
'''
Change file rights
'''
file_handle = os.open(filepath, os.O_RDONLY)
if mode:
os.fchmod(file_handle, mode)
if uid:
if not gid:
gid = 0
os.fchown(file_handle, uid, gid)
os.close(file_handle) | [
"def",
"file_rights",
"(",
"filepath",
",",
"mode",
"=",
"None",
",",
"uid",
"=",
"None",
",",
"gid",
"=",
"None",
")",
":",
"file_handle",
"=",
"os",
".",
"open",
"(",
"filepath",
",",
"os",
".",
"O_RDONLY",
")",
"if",
"mode",
":",
"os",
".",
"fchmod",
"(",
"file_handle",
",",
"mode",
")",
"if",
"uid",
":",
"if",
"not",
"gid",
":",
"gid",
"=",
"0",
"os",
".",
"fchown",
"(",
"file_handle",
",",
"uid",
",",
"gid",
")",
"os",
".",
"close",
"(",
"file_handle",
")"
]
| Change file rights | [
"Change",
"file",
"rights"
]
| 820cea58458ae3e067fa8cc2da38edbda4681dac | https://github.com/cozy/python_cozy_management/blob/820cea58458ae3e067fa8cc2da38edbda4681dac/cozy_management/helpers.py#L16-L27 | train |
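A usage sketch with an illustrative path. Note that the uid guard is a truthiness check, so uid=0 (root) is silently skipped as written:

file_rights('/tmp/example.conf', mode=0o640, uid=106, gid=107)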
DavidDoukhan/py_sonicvisualiser | py_sonicvisualiser/SVEnv.py | SVEnv.init_from_wave_file | def init_from_wave_file(wavpath):
"""Init a sonic visualiser environment structure based on the analysis
of the main audio file. The audio file has to be encoded in wave
Args:
wavpath(str): the full path to the wavfile
"""
try:
samplerate, data = SW.read(wavpath)
nframes = data.shape[0]
except:
# scipy cannot handle 24 bit wav files
# and wave cannot handle 32 bit wav files
try:
w = wave.open(wavpath)
samplerate = w.getframerate()
nframes = w.getnframes()
except:
raise Exception('Cannot decode wavefile ' + wavpath)
return SVEnv(samplerate, nframes, wavpath) | python | def init_from_wave_file(wavpath):
"""Init a sonic visualiser environment structure based on the analysis
of the main audio file. The audio file has to be encoded in wave
Args:
wavpath(str): the full path to the wavfile
"""
try:
samplerate, data = SW.read(wavpath)
nframes = data.shape[0]
except:
# scipy cannot handle 24 bit wav files
# and wave cannot handle 32 bit wav files
try:
w = wave.open(wavpath)
samplerate = w.getframerate()
nframes = w.getnframes()
except:
raise Exception('Cannot decode wavefile ' + wavpath)
return SVEnv(samplerate, nframes, wavpath) | [
"def",
"init_from_wave_file",
"(",
"wavpath",
")",
":",
"try",
":",
"samplerate",
",",
"data",
"=",
"SW",
".",
"read",
"(",
"wavpath",
")",
"nframes",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"except",
":",
"# scipy cannot handle 24 bit wav files",
"# and wave cannot handle 32 bit wav files",
"try",
":",
"w",
"=",
"wave",
".",
"open",
"(",
"wavpath",
")",
"samplerate",
"=",
"w",
".",
"getframerate",
"(",
")",
"nframes",
"=",
"w",
".",
"getnframes",
"(",
")",
"except",
":",
"raise",
"Exception",
"(",
"'Cannot decode wavefile '",
"+",
"wavpath",
")",
"return",
"SVEnv",
"(",
"samplerate",
",",
"nframes",
",",
"wavpath",
")"
]
| Init a sonic visualiser environment structure based on the analysis
of the main audio file. The audio file has to be encoded in wave
Args:
wavpath(str): the full path to the wavfile | [
"Init",
"a",
"sonic",
"visualiser",
"environment",
"structure",
"based",
"the",
"analysis",
"of",
"the",
"main",
"audio",
"file",
".",
"The",
"audio",
"file",
"have",
"to",
"be",
"encoded",
"in",
"wave"
]
| ebe83bd7dffb0275393255dcbcc6671cf0ade4a5 | https://github.com/DavidDoukhan/py_sonicvisualiser/blob/ebe83bd7dffb0275393255dcbcc6671cf0ade4a5/py_sonicvisualiser/SVEnv.py#L79-L100 | train |
DavidDoukhan/py_sonicvisualiser | py_sonicvisualiser/SVEnv.py | SVEnv.add_continuous_annotations | def add_continuous_annotations(self, x, y, colourName='Purple', colour='#c832ff', name='', view=None, vscale=None, presentationName=None):
"""
add a continuous annotation layer
Args:
x (float iterable): temporal indices of the dataset
y (float iterable): values of the dataset
Kwargs:
view (<DOM Element: view>): environment view used to display the annotation layer, if set to None, a new view is created
Returns:
<DOM Element: view>: the view used to store the annotation layer
"""
model = self.data.appendChild(self.doc.createElement('model'))
imodel = self.nbdata
for atname, atval in [('id', imodel + 1),
('dataset', imodel),
('name', name),
('sampleRate', self.samplerate),
('start', int(min(x) * self.samplerate)),
('end', int(max(x) * self.samplerate)),
('type', 'sparse'),
('dimensions', '2'),
('resolution', '1'),
('notifyOnAdd', 'true'),
('minimum', min(y)),
('maximum', max(y)),
('units', '')
]:
model.setAttribute(atname, str(atval))
# dataset = self.data.appendChild(self.doc.createElement('dataset'))
# dataset.setAttribute('id', str(imodel))
# dataset.setAttribute('dimensions', '2')
# self.nbdata += 2
# datasetnode = SVDataset2D(self.doc, str(imodel), self.samplerate)
# datasetnode.set_data_from_iterable(map(int, np.array(x) * self.samplerate), y)
# data = dataset.appendChild(datasetnode)
dataset = self.data.appendChild(SVDataset2D(self.doc, str(imodel), self.samplerate))
dataset.set_data_from_iterable(map(int, np.array(x) * self.samplerate), y)
self.nbdata += 2
###### add layers
valruler = self.__add_time_ruler()
vallayer = self.__add_val_layer(imodel + 1)
vallayer.setAttribute('colourName', colourName)
vallayer.setAttribute('colour', colour)
if presentationName:
vallayer.setAttribute('presentationName', presentationName)
if vscale is None:
vallayer.setAttribute('verticalScale', '0')
vallayer.setAttribute('scaleMinimum', str(min(y)))
vallayer.setAttribute('scaleMaximum', str(max(y)))
else:
vallayer.setAttribute('verticalScale', '0')
vallayer.setAttribute('scaleMinimum', str(vscale[0]))
vallayer.setAttribute('scaleMaximum', str(vscale[1]))
if view is None:
view = self.__add_view()
self.__add_layer_reference(view, valruler)
self.__add_layer_reference(view, vallayer)
return view | python | def add_continuous_annotations(self, x, y, colourName='Purple', colour='#c832ff', name='', view=None, vscale=None, presentationName=None):
"""
add a continuous annotation layer
Args:
x (float iterable): temporal indices of the dataset
y (float iterable): values of the dataset
Kwargs:
view (<DOM Element: view>): environment view used to display the annotation layer, if set to None, a new view is created
Returns:
<DOM Element: view>: the view used to store the annotation layer
"""
model = self.data.appendChild(self.doc.createElement('model'))
imodel = self.nbdata
for atname, atval in [('id', imodel + 1),
('dataset', imodel),
('name', name),
('sampleRate', self.samplerate),
('start', int(min(x) * self.samplerate)),
('end', int(max(x) * self.samplerate)),
('type', 'sparse'),
('dimensions', '2'),
('resolution', '1'),
('notifyOnAdd', 'true'),
('minimum', min(y)),
('maximum', max(y)),
('units', '')
]:
model.setAttribute(atname, str(atval))
# dataset = self.data.appendChild(self.doc.createElement('dataset'))
# dataset.setAttribute('id', str(imodel))
# dataset.setAttribute('dimensions', '2')
# self.nbdata += 2
# datasetnode = SVDataset2D(self.doc, str(imodel), self.samplerate)
# datasetnode.set_data_from_iterable(map(int, np.array(x) * self.samplerate), y)
# data = dataset.appendChild(datasetnode)
dataset = self.data.appendChild(SVDataset2D(self.doc, str(imodel), self.samplerate))
dataset.set_data_from_iterable(map(int, np.array(x) * self.samplerate), y)
self.nbdata += 2
###### add layers
valruler = self.__add_time_ruler()
vallayer = self.__add_val_layer(imodel + 1)
vallayer.setAttribute('colourName', colourName)
vallayer.setAttribute('colour', colour)
if presentationName:
vallayer.setAttribute('presentationName', presentationName)
if vscale is None:
vallayer.setAttribute('verticalScale', '0')
vallayer.setAttribute('scaleMinimum', str(min(y)))
vallayer.setAttribute('scaleMaximum', str(max(y)))
else:
vallayer.setAttribute('verticalScale', '0')
vallayer.setAttribute('scaleMinimum', str(vscale[0]))
vallayer.setAttribute('scaleMaximum', str(vscale[1]))
if view is None:
view = self.__add_view()
self.__add_layer_reference(view, valruler)
self.__add_layer_reference(view, vallayer)
return view | [
"def",
"add_continuous_annotations",
"(",
"self",
",",
"x",
",",
"y",
",",
"colourName",
"=",
"'Purple'",
",",
"colour",
"=",
"'#c832ff'",
",",
"name",
"=",
"''",
",",
"view",
"=",
"None",
",",
"vscale",
"=",
"None",
",",
"presentationName",
"=",
"None",
")",
":",
"model",
"=",
"self",
".",
"data",
".",
"appendChild",
"(",
"self",
".",
"doc",
".",
"createElement",
"(",
"'model'",
")",
")",
"imodel",
"=",
"self",
".",
"nbdata",
"for",
"atname",
",",
"atval",
"in",
"[",
"(",
"'id'",
",",
"imodel",
"+",
"1",
")",
",",
"(",
"'dataset'",
",",
"imodel",
")",
",",
"(",
"'name'",
",",
"name",
")",
",",
"(",
"'sampleRate'",
",",
"self",
".",
"samplerate",
")",
",",
"(",
"'start'",
",",
"int",
"(",
"min",
"(",
"x",
")",
"*",
"self",
".",
"samplerate",
")",
")",
",",
"(",
"'end'",
",",
"int",
"(",
"max",
"(",
"x",
")",
"*",
"self",
".",
"samplerate",
")",
")",
",",
"(",
"'type'",
",",
"'sparse'",
")",
",",
"(",
"'dimensions'",
",",
"'2'",
")",
",",
"(",
"'resolution'",
",",
"'1'",
")",
",",
"(",
"'notifyOnAdd'",
",",
"'true'",
")",
",",
"(",
"'minimum'",
",",
"min",
"(",
"y",
")",
")",
",",
"(",
"'maximum'",
",",
"max",
"(",
"y",
")",
")",
",",
"(",
"'units'",
",",
"''",
")",
"]",
":",
"model",
".",
"setAttribute",
"(",
"atname",
",",
"str",
"(",
"atval",
")",
")",
"# dataset = self.data.appendChild(self.doc.createElement('dataset'))",
"# dataset.setAttribute('id', str(imodel))",
"# dataset.setAttribute('dimensions', '2')",
"# self.nbdata += 2",
"# datasetnode = SVDataset2D(self.doc, str(imodel), self.samplerate)",
"# datasetnode.set_data_from_iterable(map(int, np.array(x) * self.samplerate), y)",
"# data = dataset.appendChild(datasetnode)",
"dataset",
"=",
"self",
".",
"data",
".",
"appendChild",
"(",
"SVDataset2D",
"(",
"self",
".",
"doc",
",",
"str",
"(",
"imodel",
")",
",",
"self",
".",
"samplerate",
")",
")",
"dataset",
".",
"set_data_from_iterable",
"(",
"map",
"(",
"int",
",",
"np",
".",
"array",
"(",
"x",
")",
"*",
"self",
".",
"samplerate",
")",
",",
"y",
")",
"self",
".",
"nbdata",
"+=",
"2",
"###### add layers",
"valruler",
"=",
"self",
".",
"__add_time_ruler",
"(",
")",
"vallayer",
"=",
"self",
".",
"__add_val_layer",
"(",
"imodel",
"+",
"1",
")",
"vallayer",
".",
"setAttribute",
"(",
"'colourName'",
",",
"colourName",
")",
"vallayer",
".",
"setAttribute",
"(",
"'colour'",
",",
"colour",
")",
"if",
"presentationName",
":",
"vallayer",
".",
"setAttribute",
"(",
"'presentationName'",
",",
"presentationName",
")",
"if",
"vscale",
"is",
"None",
":",
"vallayer",
".",
"setAttribute",
"(",
"'verticalScale'",
",",
"'0'",
")",
"vallayer",
".",
"setAttribute",
"(",
"'scaleMinimum'",
",",
"str",
"(",
"min",
"(",
"y",
")",
")",
")",
"vallayer",
".",
"setAttribute",
"(",
"'scaleMaximum'",
",",
"str",
"(",
"max",
"(",
"y",
")",
")",
")",
"else",
":",
"vallayer",
".",
"setAttribute",
"(",
"'verticalScale'",
",",
"'0'",
")",
"vallayer",
".",
"setAttribute",
"(",
"'scaleMinimum'",
",",
"str",
"(",
"vscale",
"[",
"0",
"]",
")",
")",
"vallayer",
".",
"setAttribute",
"(",
"'scaleMaximum'",
",",
"str",
"(",
"vscale",
"[",
"1",
"]",
")",
")",
"if",
"view",
"is",
"None",
":",
"view",
"=",
"self",
".",
"__add_view",
"(",
")",
"self",
".",
"__add_layer_reference",
"(",
"view",
",",
"valruler",
")",
"self",
".",
"__add_layer_reference",
"(",
"view",
",",
"vallayer",
")",
"return",
"view"
]
| add a continuous annotation layer
Args:
x (float iterable): temporal indices of the dataset
y (float iterable): values of the dataset
Kwargs:
view (<DOM Element: view>): environment view used to display the annotation layer, if set to None, a new view is created
Returns:
<DOM Element: view>: the view used to store the annotation layer | [
"add",
"a",
"continous",
"annotation",
"layer"
]
| ebe83bd7dffb0275393255dcbcc6671cf0ade4a5 | https://github.com/DavidDoukhan/py_sonicvisualiser/blob/ebe83bd7dffb0275393255dcbcc6671cf0ade4a5/py_sonicvisualiser/SVEnv.py#L145-L212 | train |
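A sketch of the call above that attaches a synthetic curve as a value layer; the wav path and layer name are assumptions:

import numpy as np

env = SVEnv.init_from_wave_file('/tmp/recording.wav')  # assumed input file
x = np.arange(0.0, 10.0, 0.1)  # annotation times in seconds
y = np.sin(2.0 * np.pi * x)    # arbitrary values to display
view = env.add_continuous_annotations(x, y, name='1 Hz sine')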
DavidDoukhan/py_sonicvisualiser | py_sonicvisualiser/SVEnv.py | SVEnv.add_interval_annotations | def add_interval_annotations(self, temp_idx, durations, labels, values=None, colourName='Purple', colour='#c832ff', name='', view=None, presentationName = None):
"""
add a labelled interval annotation layer
Args:
temp_idx (float iterable): The temporal indices of intervals
durations (float iterable): interval durations
labels (string iterable): interval labels
values (int iterable): interval numeric values, if set to None, values are set to 0
Kwargs:
view (<DOM Element: view>): environment view used to display the annotation layer, if set to None, a new view is created
"""
model = self.data.appendChild(self.doc.createElement('model'))
imodel = self.nbdata
for atname, atval in [('id', imodel + 1),
('dataset', imodel),
('name', name),
('sampleRate', self.samplerate),
('type', 'sparse'),
('dimensions', '3'),
('subtype', 'region'),
('resolution', '1'),
('notifyOnAdd', 'true'),
('units', ''),
('valueQuantization', '0')
]:
model.setAttribute(atname, str(atval))
dataset = self.data.appendChild(SVDataset3D(self.doc, str(imodel), self.samplerate))
if values is None:
values = ([0] * len(temp_idx))
dataset.set_data_from_iterable(map(int, np.array(temp_idx) * self.samplerate), values, map(int, np.array(durations) * self.samplerate), labels)
# dataset = self.data.appendChild(self.doc.createElement('dataset'))
# dataset.setAttribute('id', str(imodel))
# dataset.setAttribute('dimensions', '3')
self.nbdata += 2
valruler = self.__add_time_ruler()
vallayer = self.__add_region_layer(imodel + 1, name)
vallayer.setAttribute('colourName', colourName)
vallayer.setAttribute('colour', colour)
if presentationName:
vallayer.setAttribute('presentationName', presentationName)
if view is None:
view = self.__add_view()
self.__add_layer_reference(view, valruler)
self.__add_layer_reference(view, vallayer)
# if values is None:
# values = ([0] * len(temp_idx))
# for t, d, l, v in zip(temp_idx, durations, labels, values):
# point = dataset.appendChild(self.doc.createElement('point'))
# point.setAttribute('label', l)
# point.setAttribute('frame', str(int(t * self.samplerate)))
# point.setAttribute('duration', str(int(d * self.samplerate)))
# point.setAttribute('value', str(v))
return view | python | def add_interval_annotations(self, temp_idx, durations, labels, values=None, colourName='Purple', colour='#c832ff', name='', view=None, presentationName = None):
"""
add a labelled interval annotation layer
Args:
temp_idx (float iterable): The temporal indices of intervals
durations (float iterable): interval durations
labels (string iterable): interval labels
values (int iterable): interval numeric values, if set to None, values are set to 0
Kwargs:
view (<DOM Element: view>): environment view used to display the annotation layer, if set to None, a new view is created
"""
model = self.data.appendChild(self.doc.createElement('model'))
imodel = self.nbdata
for atname, atval in [('id', imodel + 1),
('dataset', imodel),
('name', name),
('sampleRate', self.samplerate),
('type', 'sparse'),
('dimensions', '3'),
('subtype', 'region'),
('resolution', '1'),
('notifyOnAdd', 'true'),
('units', ''),
('valueQuantization', '0')
]:
model.setAttribute(atname, str(atval))
dataset = self.data.appendChild(SVDataset3D(self.doc, str(imodel), self.samplerate))
if values is None:
values = ([0] * len(temp_idx))
dataset.set_data_from_iterable(map(int, np.array(temp_idx) * self.samplerate), values, map(int, np.array(durations) * self.samplerate), labels)
# dataset = self.data.appendChild(self.doc.createElement('dataset'))
# dataset.setAttribute('id', str(imodel))
# dataset.setAttribute('dimensions', '3')
self.nbdata += 2
valruler = self.__add_time_ruler()
vallayer = self.__add_region_layer(imodel + 1, name)
vallayer.setAttribute('colourName', colourName)
vallayer.setAttribute('colour', colour)
if presentationName:
vallayer.setAttribute('presentationName', presentationName)
if view is None:
view = self.__add_view()
self.__add_layer_reference(view, valruler)
self.__add_layer_reference(view, vallayer)
# if values is None:
# values = ([0] * len(temp_idx))
# for t, d, l, v in zip(temp_idx, durations, labels, values):
# point = dataset.appendChild(self.doc.createElement('point'))
# point.setAttribute('label', l)
# point.setAttribute('frame', str(int(t * self.samplerate)))
# point.setAttribute('duration', str(int(d * self.samplerate)))
# point.setAttribute('value', str(v))
return view | [
"def",
"add_interval_annotations",
"(",
"self",
",",
"temp_idx",
",",
"durations",
",",
"labels",
",",
"values",
"=",
"None",
",",
"colourName",
"=",
"'Purple'",
",",
"colour",
"=",
"'#c832ff'",
",",
"name",
"=",
"''",
",",
"view",
"=",
"None",
",",
"presentationName",
"=",
"None",
")",
":",
"model",
"=",
"self",
".",
"data",
".",
"appendChild",
"(",
"self",
".",
"doc",
".",
"createElement",
"(",
"'model'",
")",
")",
"imodel",
"=",
"self",
".",
"nbdata",
"for",
"atname",
",",
"atval",
"in",
"[",
"(",
"'id'",
",",
"imodel",
"+",
"1",
")",
",",
"(",
"'dataset'",
",",
"imodel",
")",
",",
"(",
"'name'",
",",
"name",
")",
",",
"(",
"'sampleRate'",
",",
"self",
".",
"samplerate",
")",
",",
"(",
"'type'",
",",
"'sparse'",
")",
",",
"(",
"'dimensions'",
",",
"'3'",
")",
",",
"(",
"'subtype'",
",",
"'region'",
")",
",",
"(",
"'resolution'",
",",
"'1'",
")",
",",
"(",
"'notifyOnAdd'",
",",
"'true'",
")",
",",
"(",
"'units'",
",",
"''",
")",
",",
"(",
"'valueQuantization'",
",",
"'0'",
")",
"]",
":",
"model",
".",
"setAttribute",
"(",
"atname",
",",
"str",
"(",
"atval",
")",
")",
"dataset",
"=",
"self",
".",
"data",
".",
"appendChild",
"(",
"SVDataset3D",
"(",
"self",
".",
"doc",
",",
"str",
"(",
"imodel",
")",
",",
"self",
".",
"samplerate",
")",
")",
"if",
"values",
"is",
"None",
":",
"values",
"=",
"(",
"[",
"0",
"]",
"*",
"len",
"(",
"temp_idx",
")",
")",
"dataset",
".",
"set_data_from_iterable",
"(",
"map",
"(",
"int",
",",
"np",
".",
"array",
"(",
"temp_idx",
")",
"*",
"self",
".",
"samplerate",
")",
",",
"values",
",",
"map",
"(",
"int",
",",
"np",
".",
"array",
"(",
"durations",
")",
"*",
"self",
".",
"samplerate",
")",
",",
"labels",
")",
"# dataset = self.data.appendChild(self.doc.createElement('dataset'))",
"# dataset.setAttribute('id', str(imodel))",
"# dataset.setAttribute('dimensions', '3')",
"self",
".",
"nbdata",
"+=",
"2",
"valruler",
"=",
"self",
".",
"__add_time_ruler",
"(",
")",
"vallayer",
"=",
"self",
".",
"__add_region_layer",
"(",
"imodel",
"+",
"1",
",",
"name",
")",
"vallayer",
".",
"setAttribute",
"(",
"'colourName'",
",",
"colourName",
")",
"vallayer",
".",
"setAttribute",
"(",
"'colour'",
",",
"colour",
")",
"if",
"presentationName",
":",
"vallayer",
".",
"setAttribute",
"(",
"'presentationName'",
",",
"presentationName",
")",
"if",
"view",
"is",
"None",
":",
"view",
"=",
"self",
".",
"__add_view",
"(",
")",
"self",
".",
"__add_layer_reference",
"(",
"view",
",",
"valruler",
")",
"self",
".",
"__add_layer_reference",
"(",
"view",
",",
"vallayer",
")",
"# if values is None:",
"# values = ([0] * len(temp_idx))",
"# for t, d, l, v in zip(temp_idx, durations, labels, values):",
"# point = dataset.appendChild(self.doc.createElement('point'))",
"# point.setAttribute('label', l)",
"# point.setAttribute('frame', str(int(t * self.samplerate)))",
"# point.setAttribute('duration', str(int(d * self.samplerate)))",
"# point.setAttribute('value', str(v))",
"return",
"view"
]
| add a labelled interval annotation layer
Args:
temp_idx (float iterable): The temporal indices of intervals
durations (float iterable): interval durations
labels (string iterable): interval labels
values (int iterable): interval numeric values, if set to None, values are set to 0
Kwargs:
view (<DOM Element: view>): environment view used to display the annotation layer, if set to None, a new view is created | [
"add",
"a",
"labelled",
"interval",
"annotation",
"layer"
]
| ebe83bd7dffb0275393255dcbcc6671cf0ade4a5 | https://github.com/DavidDoukhan/py_sonicvisualiser/blob/ebe83bd7dffb0275393255dcbcc6671cf0ade4a5/py_sonicvisualiser/SVEnv.py#L215-L277 | train |
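A sketch reusing the env object from the previous example to mark two labelled segments; all times and labels are invented:

starts = [0.5, 4.2]      # interval onsets in seconds
durations = [1.3, 2.0]   # interval lengths in seconds
labels = ['speaker A', 'speaker B']
env.add_interval_annotations(starts, durations, labels, name='speech turns')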
DsixTools/python-smeftrunner | smeftrunner/classes.py | SMEFT.load_initial | def load_initial(self, streams):
"""Load the initial values for parameters and Wilson coefficients from
one or several files.
`streams` should be a tuple of file-like objects or strings."""
d = {}
for stream in streams:
s = io.load(stream)
if 'BLOCK' not in s:
raise ValueError("No BLOCK found")
d.update(s['BLOCK'])
d = {'BLOCK': d}
C = io.wc_lha2dict(d)
sm = io.sm_lha2dict(d)
C.update(sm)
C = definitions.symmetrize(C)
self.C_in = C | python | def load_initial(self, streams):
"""Load the initial values for parameters and Wilson coefficients from
one or several files.
`streams` should be a tuple of file-like objects or strings."""
d = {}
for stream in streams:
s = io.load(stream)
if 'BLOCK' not in s:
raise ValueError("No BLOCK found")
d.update(s['BLOCK'])
d = {'BLOCK': d}
C = io.wc_lha2dict(d)
sm = io.sm_lha2dict(d)
C.update(sm)
C = definitions.symmetrize(C)
self.C_in = C | [
"def",
"load_initial",
"(",
"self",
",",
"streams",
")",
":",
"d",
"=",
"{",
"}",
"for",
"stream",
"in",
"streams",
":",
"s",
"=",
"io",
".",
"load",
"(",
"stream",
")",
"if",
"'BLOCK'",
"not",
"in",
"s",
":",
"raise",
"ValueError",
"(",
"\"No BLOCK found\"",
")",
"d",
".",
"update",
"(",
"s",
"[",
"'BLOCK'",
"]",
")",
"d",
"=",
"{",
"'BLOCK'",
":",
"d",
"}",
"C",
"=",
"io",
".",
"wc_lha2dict",
"(",
"d",
")",
"sm",
"=",
"io",
".",
"sm_lha2dict",
"(",
"d",
")",
"C",
".",
"update",
"(",
"sm",
")",
"C",
"=",
"definitions",
".",
"symmetrize",
"(",
"C",
")",
"self",
".",
"C_in",
"=",
"C"
]
| Load the initial values for parameters and Wilson coefficients from
one or several files.
`streams` should be a tuple of file-like objects or strings. | [
"Load",
"the",
"initial",
"values",
"for",
"parameters",
"and",
"Wilson",
"coefficients",
"from",
"one",
"or",
"several",
"files",
"."
]
| 4c9130e53ad4f7bbb526657a82150ca9d57c4b37 | https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/classes.py#L31-L47 | train |
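A sketch of the loading step above; the card name is an assumption, and the two scale attributes match the ones checked by _check_initial further down:

smeft = SMEFT()  # assumption: the default constructor needs no arguments
with open('initial_conditions.lha') as f:  # assumed SLHA-format input card
    smeft.load_initial((f,))
smeft.scale_in = 1000.0    # GeV
smeft.scale_high = 1000.0  # GeV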
DsixTools/python-smeftrunner | smeftrunner/classes.py | SMEFT.load_wcxf | def load_wcxf(self, stream, get_smpar=True):
"""Load the initial values for Wilson coefficients from
a file-like object or a string in WCxf format.
Note that Standard Model parameters have to be provided separately
and are assumed to be in the weak basis used for the Warsaw basis as
defined in WCxf, i.e. in the basis where the down-type and charged
lepton mass matrices are diagonal."""
import wcxf
wc = wcxf.WC.load(stream)
self.set_initial_wcxf(wc, get_smpar=get_smpar) | python | def load_wcxf(self, stream, get_smpar=True):
"""Load the initial values for Wilson coefficients from
a file-like object or a string in WCxf format.
Note that Standard Model parameters have to be provided separately
and are assumed to be in the weak basis used for the Warsaw basis as
defined in WCxf, i.e. in the basis where the down-type and charged
lepton mass matrices are diagonal."""
import wcxf
wc = wcxf.WC.load(stream)
self.set_initial_wcxf(wc, get_smpar=get_smpar) | [
"def",
"load_wcxf",
"(",
"self",
",",
"stream",
",",
"get_smpar",
"=",
"True",
")",
":",
"import",
"wcxf",
"wc",
"=",
"wcxf",
".",
"WC",
".",
"load",
"(",
"stream",
")",
"self",
".",
"set_initial_wcxf",
"(",
"wc",
",",
"get_smpar",
"=",
"get_smpar",
")"
]
| Load the initial values for Wilson coefficients from
a file-like object or a string in WCxf format.
Note that Standard Model parameters have to be provided separately
and are assumed to be in the weak basis used for the Warsaw basis as
defined in WCxf, i.e. in the basis where the down-type and charged
lepton mass matrices are diagonal. | [
"Load",
"the",
"initial",
"values",
"for",
"Wilson",
"coefficients",
"from",
"a",
"file",
"-",
"like",
"object",
"or",
"a",
"string",
"in",
"WCxf",
"format",
"."
]
| 4c9130e53ad4f7bbb526657a82150ca9d57c4b37 | https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/classes.py#L105-L115 | train |
DsixTools/python-smeftrunner | smeftrunner/classes.py | SMEFT.dump_wcxf | def dump_wcxf(self, C_out, scale_out, fmt='yaml', stream=None, **kwargs):
"""Return a string representation of the Wilson coefficients `C_out`
in WCxf format. If `stream` is specified, export it to a file.
`fmt` defaults to `yaml`, but can also be `json`.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal."""
wc = self.get_wcxf(C_out, scale_out)
return wc.dump(fmt=fmt, stream=stream, **kwargs) | python | def dump_wcxf(self, C_out, scale_out, fmt='yaml', stream=None, **kwargs):
"""Return a string representation of the Wilson coefficients `C_out`
in WCxf format. If `stream` is specified, export it to a file.
`fmt` defaults to `yaml`, but can also be `json`.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal."""
wc = self.get_wcxf(C_out, scale_out)
return wc.dump(fmt=fmt, stream=stream, **kwargs) | [
"def",
"dump_wcxf",
"(",
"self",
",",
"C_out",
",",
"scale_out",
",",
"fmt",
"=",
"'yaml'",
",",
"stream",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"wc",
"=",
"self",
".",
"get_wcxf",
"(",
"C_out",
",",
"scale_out",
")",
"return",
"wc",
".",
"dump",
"(",
"fmt",
"=",
"fmt",
",",
"stream",
"=",
"stream",
",",
"*",
"*",
"kwargs",
")"
]
| Return a string representation of the Wilson coefficients `C_out`
in WCxf format. If `stream` is specified, export it to a file.
`fmt` defaults to `yaml`, but can also be `json`.
Note that the Wilson coefficients are rotated into the Warsaw basis
as defined in WCxf, i.e. to the basis where the down-type and charged
lepton mass matrices are diagonal. | [
"Return",
"a",
"string",
"representation",
"of",
"the",
"Wilson",
"coefficients",
"C_out",
"in",
"WCxf",
"format",
".",
"If",
"stream",
"is",
"specified",
"export",
"it",
"to",
"a",
"file",
".",
"fmt",
"defaults",
"to",
"yaml",
"but",
"can",
"also",
"be",
"json",
"."
]
| 4c9130e53ad4f7bbb526657a82150ca9d57c4b37 | https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/classes.py#L158-L167 | train |
DsixTools/python-smeftrunner | smeftrunner/classes.py | SMEFT.rgevolve_leadinglog | def rgevolve_leadinglog(self, scale_out):
"""Compute the leading logarithmix approximation to the solution
of the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients.
Much faster but less precise than `rgevolve`.
"""
self._check_initial()
return rge.smeft_evolve_leadinglog(C_in=self.C_in,
scale_high=self.scale_high,
scale_in=self.scale_in,
scale_out=scale_out) | python | def rgevolve_leadinglog(self, scale_out):
"""Compute the leading logarithmix approximation to the solution
of the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients.
Much faster but less precise than `rgevolve`.
"""
self._check_initial()
return rge.smeft_evolve_leadinglog(C_in=self.C_in,
scale_high=self.scale_high,
scale_in=self.scale_in,
scale_out=scale_out) | [
"def",
"rgevolve_leadinglog",
"(",
"self",
",",
"scale_out",
")",
":",
"self",
".",
"_check_initial",
"(",
")",
"return",
"rge",
".",
"smeft_evolve_leadinglog",
"(",
"C_in",
"=",
"self",
".",
"C_in",
",",
"scale_high",
"=",
"self",
".",
"scale_high",
",",
"scale_in",
"=",
"self",
".",
"scale_in",
",",
"scale_out",
"=",
"scale_out",
")"
]
| Compute the leading logarithmic approximation to the solution
of the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients.
Much faster but less precise than `rgevolve`. | [
"Compute",
"the",
"leading",
"logarithmix",
"approximation",
"to",
"the",
"solution",
"of",
"the",
"SMEFT",
"RGEs",
"from",
"the",
"initial",
"scale",
"to",
"scale_out",
".",
"Returns",
"a",
"dictionary",
"with",
"parameters",
"and",
"Wilson",
"coefficients",
".",
"Much",
"faster",
"but",
"less",
"precise",
"that",
"rgevolve",
"."
]
| 4c9130e53ad4f7bbb526657a82150ca9d57c4b37 | https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/classes.py#L181-L191 | train |
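A sketch that runs the leading-log evolution above on the smeft object prepared in the load_initial example, then serializes the result with dump_wcxf from the previous record; the target scale of 91.1876 GeV (the Z mass) is illustrative:

C_out = smeft.rgevolve_leadinglog(scale_out=91.1876)
yaml_text = smeft.dump_wcxf(C_out, 91.1876, fmt='yaml')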
DsixTools/python-smeftrunner | smeftrunner/classes.py | SMEFT._check_initial | def _check_initial(self):
"""Check if initial values and scale as well as the new physics scale
have been set."""
if self.C_in is None:
raise Exception("You have to specify the initial conditions first.")
if self.scale_in is None:
raise Exception("You have to specify the initial scale first.")
if self.scale_high is None:
raise Exception("You have to specify the high scale first.") | python | def _check_initial(self):
"""Check if initial values and scale as well as the new physics scale
have been set."""
if self.C_in is None:
raise Exception("You have to specify the initial conditions first.")
if self.scale_in is None:
raise Exception("You have to specify the initial scale first.")
if self.scale_high is None:
raise Exception("You have to specify the high scale first.") | [
"def",
"_check_initial",
"(",
"self",
")",
":",
"if",
"self",
".",
"C_in",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"You have to specify the initial conditions first.\"",
")",
"if",
"self",
".",
"scale_in",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"You have to specify the initial scale first.\"",
")",
"if",
"self",
".",
"scale_high",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"You have to specify the high scale first.\"",
")"
]
| Check if initial values and scale as well as the new physics scale
have been set. | [
"Check",
"if",
"initial",
"values",
"and",
"scale",
"as",
"well",
"as",
"the",
"new",
"physics",
"scale",
"have",
"been",
"set",
"."
]
| 4c9130e53ad4f7bbb526657a82150ca9d57c4b37 | https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/classes.py#L193-L201 | train |
DsixTools/python-smeftrunner | smeftrunner/classes.py | SMEFT.rotate_defaultbasis | def rotate_defaultbasis(self, C):
"""Rotate all parameters to the basis where the running down-type quark
and charged lepton mass matrices are diagonal and where the running
up-type quark mass matrix has the form V.S, with V unitary and S real
diagonal, and where the CKM and PMNS matrices have the standard
phase convention."""
v = sqrt(2*C['m2'].real/C['Lambda'].real)
Mep = v/sqrt(2) * (C['Ge'] - C['ephi'] * v**2/self.scale_high**2/2)
Mup = v/sqrt(2) * (C['Gu'] - C['uphi'] * v**2/self.scale_high**2/2)
Mdp = v/sqrt(2) * (C['Gd'] - C['dphi'] * v**2/self.scale_high**2/2)
Mnup = -v**2 * C['llphiphi']
UeL, Me, UeR = ckmutil.diag.msvd(Mep)
UuL, Mu, UuR = ckmutil.diag.msvd(Mup)
UdL, Md, UdR = ckmutil.diag.msvd(Mdp)
Unu, Mnu = ckmutil.diag.mtakfac(Mnup)
UuL, UdL, UuR, UdR = ckmutil.phases.rephase_standard(UuL, UdL, UuR, UdR)
Unu, UeL, UeR = ckmutil.phases.rephase_pmns_standard(Unu, UeL, UeR)
return definitions.flavor_rotation(C, Uq=UdL, Uu=UuR, Ud=UdR, Ul=UeL, Ue=UeR) | python | def rotate_defaultbasis(self, C):
"""Rotate all parameters to the basis where the running down-type quark
and charged lepton mass matrices are diagonal and where the running
up-type quark mass matrix has the form V.S, with V unitary and S real
diagonal, and where the CKM and PMNS matrices have the standard
phase convention."""
v = sqrt(2*C['m2'].real/C['Lambda'].real)
Mep = v/sqrt(2) * (C['Ge'] - C['ephi'] * v**2/self.scale_high**2/2)
Mup = v/sqrt(2) * (C['Gu'] - C['uphi'] * v**2/self.scale_high**2/2)
Mdp = v/sqrt(2) * (C['Gd'] - C['dphi'] * v**2/self.scale_high**2/2)
Mnup = -v**2 * C['llphiphi']
UeL, Me, UeR = ckmutil.diag.msvd(Mep)
UuL, Mu, UuR = ckmutil.diag.msvd(Mup)
UdL, Md, UdR = ckmutil.diag.msvd(Mdp)
Unu, Mnu = ckmutil.diag.mtakfac(Mnup)
UuL, UdL, UuR, UdR = ckmutil.phases.rephase_standard(UuL, UdL, UuR, UdR)
Unu, UeL, UeR = ckmutil.phases.rephase_pmns_standard(Unu, UeL, UeR)
return definitions.flavor_rotation(C, Uq=UdL, Uu=UuR, Ud=UdR, Ul=UeL, Ue=UeR) | [
"def",
"rotate_defaultbasis",
"(",
"self",
",",
"C",
")",
":",
"v",
"=",
"sqrt",
"(",
"2",
"*",
"C",
"[",
"'m2'",
"]",
".",
"real",
"/",
"C",
"[",
"'Lambda'",
"]",
".",
"real",
")",
"Mep",
"=",
"v",
"/",
"sqrt",
"(",
"2",
")",
"*",
"(",
"C",
"[",
"'Ge'",
"]",
"-",
"C",
"[",
"'ephi'",
"]",
"*",
"v",
"**",
"2",
"/",
"self",
".",
"scale_high",
"**",
"2",
"/",
"2",
")",
"Mup",
"=",
"v",
"/",
"sqrt",
"(",
"2",
")",
"*",
"(",
"C",
"[",
"'Gu'",
"]",
"-",
"C",
"[",
"'uphi'",
"]",
"*",
"v",
"**",
"2",
"/",
"self",
".",
"scale_high",
"**",
"2",
"/",
"2",
")",
"Mdp",
"=",
"v",
"/",
"sqrt",
"(",
"2",
")",
"*",
"(",
"C",
"[",
"'Gd'",
"]",
"-",
"C",
"[",
"'dphi'",
"]",
"*",
"v",
"**",
"2",
"/",
"self",
".",
"scale_high",
"**",
"2",
"/",
"2",
")",
"Mnup",
"=",
"-",
"v",
"**",
"2",
"*",
"C",
"[",
"'llphiphi'",
"]",
"UeL",
",",
"Me",
",",
"UeR",
"=",
"ckmutil",
".",
"diag",
".",
"msvd",
"(",
"Mep",
")",
"UuL",
",",
"Mu",
",",
"UuR",
"=",
"ckmutil",
".",
"diag",
".",
"msvd",
"(",
"Mup",
")",
"UdL",
",",
"Md",
",",
"UdR",
"=",
"ckmutil",
".",
"diag",
".",
"msvd",
"(",
"Mdp",
")",
"Unu",
",",
"Mnu",
"=",
"ckmutil",
".",
"diag",
".",
"mtakfac",
"(",
"Mnup",
")",
"UuL",
",",
"UdL",
",",
"UuR",
",",
"UdR",
"=",
"ckmutil",
".",
"phases",
".",
"rephase_standard",
"(",
"UuL",
",",
"UdL",
",",
"UuR",
",",
"UdR",
")",
"Unu",
",",
"UeL",
",",
"UeR",
"=",
"ckmutil",
".",
"phases",
".",
"rephase_pmns_standard",
"(",
"Unu",
",",
"UeL",
",",
"UeR",
")",
"return",
"definitions",
".",
"flavor_rotation",
"(",
"C",
",",
"Uq",
"=",
"UdL",
",",
"Uu",
"=",
"UuR",
",",
"Ud",
"=",
"UdR",
",",
"Ul",
"=",
"UeL",
",",
"Ue",
"=",
"UeR",
")"
]
| Rotate all parameters to the basis where the running down-type quark
and charged lepton mass matrices are diagonal and where the running
up-type quark mass matrix has the form V.S, with V unitary and S real
diagonal, and where the CKM and PMNS matrices have the standard
phase convention. | [
"Rotate",
"all",
"parameters",
"to",
"the",
"basis",
"where",
"the",
"running",
"down",
"-",
"type",
"quark",
"and",
"charged",
"lepton",
"mass",
"matrices",
"are",
"diagonal",
"and",
"where",
"the",
"running",
"up",
"-",
"type",
"quark",
"mass",
"matrix",
"has",
"the",
"form",
"V",
".",
"S",
"with",
"V",
"unitary",
"and",
"S",
"real",
"diagonal",
"and",
"where",
"the",
"CKM",
"and",
"PMNS",
"matrices",
"have",
"the",
"standard",
"phase",
"convention",
"."
]
| 4c9130e53ad4f7bbb526657a82150ca9d57c4b37 | https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/classes.py#L203-L220 | train |
TheGhouls/oct | oct/results/models.py | set_database | def set_database(db_url, proxy, config):
"""Initialize the peewee database with the given configuration
If the given db_url is a regular file, it will be used as sqlite database
:param str db_url: the connection string for database or path if sqlite file
:param peewee.Proxy proxy: the peewee proxy to initialise
:param dict config: the configuration dictionary
"""
db_config = config.get('results_database', {}).get('params', {})
if 'testing' in config and config['testing'] is True:
database = connect('sqlite:////tmp/results.sqlite', check_same_thread=False, threadlocals=True)
else:
if os.path.isfile(db_url) or os.path.isdir(os.path.dirname(db_url)):
db_url = "sqlite:///" + db_url
db_config.update(check_same_thread=False, threadlocals=True)
database = connect(db_url, **db_config)
proxy.initialize(database) | python | def set_database(db_url, proxy, config):
"""Initialize the peewee database with the given configuration
If the given db_url is a regular file, it will be used as sqlite database
:param str db_url: the connection string for database or path if sqlite file
:param peewee.Proxy proxy: the peewee proxy to initialise
:param dict config: the configuration dictionary
"""
db_config = config.get('results_database', {}).get('params', {})
if 'testing' in config and config['testing'] is True:
database = connect('sqlite:////tmp/results.sqlite', check_same_thread=False, threadlocals=True)
else:
if os.path.isfile(db_url) or os.path.isdir(os.path.dirname(db_url)):
db_url = "sqlite:///" + db_url
db_config.update(check_same_thread=False, threadlocals=True)
database = connect(db_url, **db_config)
proxy.initialize(database) | [
"def",
"set_database",
"(",
"db_url",
",",
"proxy",
",",
"config",
")",
":",
"db_config",
"=",
"config",
".",
"get",
"(",
"'results_database'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'params'",
",",
"{",
"}",
")",
"if",
"'testing'",
"in",
"config",
"and",
"config",
"[",
"'testing'",
"]",
"is",
"True",
":",
"database",
"=",
"connect",
"(",
"'sqlite:////tmp/results.sqlite'",
",",
"check_same_thread",
"=",
"False",
",",
"threadlocals",
"=",
"True",
")",
"else",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"db_url",
")",
"or",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"db_url",
")",
")",
":",
"db_url",
"=",
"\"sqlite:///\"",
"+",
"db_url",
"db_config",
".",
"update",
"(",
"check_same_thread",
"=",
"False",
",",
"threadlocals",
"=",
"True",
")",
"database",
"=",
"connect",
"(",
"db_url",
",",
"*",
"*",
"db_config",
")",
"proxy",
".",
"initialize",
"(",
"database",
")"
]
| Initialize the peewee database with the given configuration
If the given db_url is a regular file, it will be used as sqlite database
:param str db_url: the connection string for database or path if sqlite file
:param peewee.Proxy proxy: the peewee proxy to initialise
:param dict config: the configuration dictionary | [
"Initialize",
"the",
"peewee",
"database",
"with",
"the",
"given",
"configuration"
]
| 7e9bddeb3b8495a26442b1c86744e9fb187fe88f | https://github.com/TheGhouls/oct/blob/7e9bddeb3b8495a26442b1c86744e9fb187fe88f/oct/results/models.py#L65-L83 | train |
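A sketch of the initialisation above against a throwaway sqlite file; the Proxy here stands in for the module-level proxy that the result models are normally bound to:

from peewee import Proxy

proxy = Proxy()  # assumption: stand-in for the shared results proxy
set_database('/tmp/oct_results.sqlite', proxy, {})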
Kortemme-Lab/klab | klab/cluster/simple_qtop.py | sh | def sh(cmd):
"""
Run the given command in a shell.
The command should be a single string containing a shell command. If the
command contains the names of any local variables enclosed in braces, the
actual values of the named variables will be filled in. (Note that this
works on variables defined in the calling scope, which is a little bit
magical.) Regular braces must be escaped as you would with str.format().
Also be aware that this approach is vulnerable to shell injection attacks.
"""
# Figure out what local variables are defined in the calling scope.
import inspect
frame = inspect.currentframe()
try: locals = frame.f_back.f_locals
finally: del frame
# Run the given command in a shell. Return everything written to stdout if
# the command returns an error code of 0, otherwise raise an exception.
from subprocess import Popen, PIPE, CalledProcessError
process = Popen(cmd.format(**locals), shell=True, stdout=PIPE)
stdout, unused_stderr = process.communicate()
retcode = process.poll()
if retcode:
error = CalledProcessError(retcode, cmd)  # use the name imported above; bare 'subprocess' is not in scope here
error.output = stdout
raise error
return stdout.strip() | python | def sh(cmd):
"""
Run the given command in a shell.
The command should be a single string containing a shell command. If the
command contains the names of any local variables enclosed in braces, the
actual values of the named variables will be filled in. (Note that this
works on variables defined in the calling scope, which is a little bit
magical.) Regular braces must be escaped as you would with str.format().
Also be aware that this approach is vulnerable to shell injection attacks.
"""
# Figure out what local variables are defined in the calling scope.
import inspect
frame = inspect.currentframe()
try: locals = frame.f_back.f_locals
finally: del frame
# Run the given command in a shell. Return everything written to stdout if
# the command returns an error code of 0, otherwise raise an exception.
from subprocess import Popen, PIPE, CalledProcessError
process = Popen(cmd.format(**locals), shell=True, stdout=PIPE)
stdout, unused_stderr = process.communicate()
retcode = process.poll()
if retcode:
error = CalledProcessError(retcode, cmd)  # use the name imported above; bare 'subprocess' is not in scope here
error.output = stdout
raise error
return stdout.strip() | [
"def",
"sh",
"(",
"cmd",
")",
":",
"# Figure out what local variables are defined in the calling scope.",
"import",
"inspect",
"frame",
"=",
"inspect",
".",
"currentframe",
"(",
")",
"try",
":",
"locals",
"=",
"frame",
".",
"f_back",
".",
"f_locals",
"finally",
":",
"del",
"frame",
"# Run the given command in a shell. Return everything written to stdout if",
"# the command returns an error code of 0, otherwise raise an exception.",
"from",
"subprocess",
"import",
"Popen",
",",
"PIPE",
",",
"CalledProcessError",
"process",
"=",
"Popen",
"(",
"cmd",
".",
"format",
"(",
"*",
"*",
"locals",
")",
",",
"shell",
"=",
"True",
",",
"stdout",
"=",
"PIPE",
")",
"stdout",
",",
"unused_stderr",
"=",
"process",
".",
"communicate",
"(",
")",
"retcode",
"=",
"process",
".",
"poll",
"(",
")",
"if",
"retcode",
":",
"error",
"=",
"subprocess",
".",
"CalledProcessError",
"(",
"retcode",
",",
"cmd",
")",
"error",
".",
"output",
"=",
"stdout",
"raise",
"error",
"return",
"stdout",
".",
"strip",
"(",
")"
]
| Run the given command in a shell.
The command should be a single string containing a shell command. If the
command contains the names of any local variables enclosed in braces, the
actual values of the named variables will be filled in. (Note that this
works on variables defined in the calling scope, which is a little bit
magical.) Regular braces must be escaped as you would with str.format().
Also be aware that this approach is vulnerable to shell injection attacks. | [
"Run",
"the",
"given",
"command",
"in",
"a",
"shell",
"."
]
| 6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b | https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/cluster/simple_qtop.py#L24-L54 | train |
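A sketch of the local-variable interpolation described in the docstring above; name is defined in the calling scope and filled into the braces:

name = 'world'
print(sh('echo hello {name}'))  # prints: hello world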
uw-it-aca/uw-restclients-sws | uw_sws/curriculum.py | get_curricula_by_department | def get_curricula_by_department(
department, future_terms=0, view_unpublished=False):
"""
Returns a list of restclients.Curriculum models for the passed
Department model.
"""
if not isinstance(future_terms, int):
raise ValueError(future_terms)
if future_terms < 0 or future_terms > 2:
raise ValueError(future_terms)
view_unpublished = "true" if view_unpublished else "false"
url = "{}?{}".format(
curriculum_search_url_prefix,
urlencode([("department_abbreviation", department.label,),
("future_terms", future_terms,),
("view_unpublished", view_unpublished,)]))
return _json_to_curricula(get_resource(url)) | python | def get_curricula_by_department(
department, future_terms=0, view_unpublished=False):
"""
Returns a list of restclients.Curriculum models for the passed
Department model.
"""
if not isinstance(future_terms, int):
raise ValueError(future_terms)
if future_terms < 0 or future_terms > 2:
raise ValueError(future_terms)
view_unpublished = "true" if view_unpublished else "false"
url = "{}?{}".format(
curriculum_search_url_prefix,
urlencode([("department_abbreviation", department.label,),
("future_terms", future_terms,),
("view_unpublished", view_unpublished,)]))
return _json_to_curricula(get_resource(url)) | [
"def",
"get_curricula_by_department",
"(",
"department",
",",
"future_terms",
"=",
"0",
",",
"view_unpublished",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"future_terms",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"future_terms",
")",
"if",
"future_terms",
"<",
"0",
"or",
"future_terms",
">",
"2",
":",
"raise",
"ValueError",
"(",
"future_terms",
")",
"view_unpublished",
"=",
"\"true\"",
"if",
"view_unpublished",
"else",
"\"false\"",
"url",
"=",
"\"{}?{}\"",
".",
"format",
"(",
"curriculum_search_url_prefix",
",",
"urlencode",
"(",
"[",
"(",
"\"department_abbreviation\"",
",",
"department",
".",
"label",
",",
")",
",",
"(",
"\"future_terms\"",
",",
"future_terms",
",",
")",
",",
"(",
"\"view_unpublished\"",
",",
"view_unpublished",
",",
")",
"]",
")",
")",
"return",
"_json_to_curricula",
"(",
"get_resource",
"(",
"url",
")",
")"
]
| Returns a list of restclients.Curriculum models for the passed
Department model. | [
"Returns",
"a",
"list",
"of",
"restclients",
".",
"Curriculum",
"models",
"for",
"the",
"passed",
"Department",
"model",
"."
]
| 4d36776dcca36855fc15c1b8fe7650ae045194cf | https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/curriculum.py#L14-L33 | train |
uw-it-aca/uw-restclients-sws | uw_sws/curriculum.py | get_curricula_by_term | def get_curricula_by_term(term, view_unpublished=False):
"""
Returns a list of restclients.Curriculum models for the passed
Term model.
"""
view_unpublished = "true" if view_unpublished else "false"
url = "{}?{}".format(
curriculum_search_url_prefix,
urlencode([
("quarter", term.quarter.lower(),),
("year", term.year,),
("view_unpublished", view_unpublished,)]))
return _json_to_curricula(get_resource(url)) | python | def get_curricula_by_term(term, view_unpublished=False):
"""
Returns a list of restclients.Curriculum models for the passed
Term model.
"""
view_unpublished = "true" if view_unpublished else "false"
url = "{}?{}".format(
curriculum_search_url_prefix,
urlencode([
("quarter", term.quarter.lower(),),
("year", term.year,),
("view_unpublished", view_unpublished,)]))
return _json_to_curricula(get_resource(url)) | [
"def",
"get_curricula_by_term",
"(",
"term",
",",
"view_unpublished",
"=",
"False",
")",
":",
"view_unpublished",
"=",
"\"true\"",
"if",
"view_unpublished",
"else",
"\"false\"",
"url",
"=",
"\"{}?{}\"",
".",
"format",
"(",
"curriculum_search_url_prefix",
",",
"urlencode",
"(",
"[",
"(",
"\"quarter\"",
",",
"term",
".",
"quarter",
".",
"lower",
"(",
")",
",",
")",
",",
"(",
"\"year\"",
",",
"term",
".",
"year",
",",
")",
",",
"(",
"\"view_unpublished\"",
",",
"view_unpublished",
",",
")",
"]",
")",
")",
"return",
"_json_to_curricula",
"(",
"get_resource",
"(",
"url",
")",
")"
]
| Returns a list of restclients.Curriculum models for the passed
Term model. | [
"Returns",
"a",
"list",
"of",
"restclients",
".",
"Curriculum",
"models",
"for",
"the",
"passed",
"Term",
"model",
"."
]
| 4d36776dcca36855fc15c1b8fe7650ae045194cf | https://github.com/uw-it-aca/uw-restclients-sws/blob/4d36776dcca36855fc15c1b8fe7650ae045194cf/uw_sws/curriculum.py#L36-L48 | train |
AtmaHou/atma | bleu.py | BP | def BP(candidate, references):
"""
calculate brevity penalty
"""
c = len(candidate)
ref_lens = (len(reference) for reference in references)
r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len))
if c > r:
return 1
else:
return math.exp(1 - r / c) | python | def BP(candidate, references):
"""
calculate brevity penalty
"""
c = len(candidate)
ref_lens = (len(reference) for reference in references)
r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len))
if c > r:
return 1
else:
return math.exp(1 - r / c) | [
"def",
"BP",
"(",
"candidate",
",",
"references",
")",
":",
"c",
"=",
"len",
"(",
"candidate",
")",
"ref_lens",
"=",
"(",
"len",
"(",
"reference",
")",
"for",
"reference",
"in",
"references",
")",
"r",
"=",
"min",
"(",
"ref_lens",
",",
"key",
"=",
"lambda",
"ref_len",
":",
"(",
"abs",
"(",
"ref_len",
"-",
"c",
")",
",",
"ref_len",
")",
")",
"if",
"c",
">",
"r",
":",
"return",
"1",
"else",
":",
"return",
"math",
".",
"exp",
"(",
"1",
"-",
"r",
"/",
"c",
")"
]
| calculate brevity penalty | [
"calculate",
"brevity",
"penalty"
]
| 41cd8ea9443a9c3b2dd71432f46f44a0f83093c7 | https://github.com/AtmaHou/atma/blob/41cd8ea9443a9c3b2dd71432f46f44a0f83093c7/bleu.py#L10-L21 | train |
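A worked example of the brevity penalty above; candidate and shortest reference have equal length here, so no penalty applies:

candidate = 'the cat sat on the mat'.split()
references = ['the cat is on the mat'.split()]
print(BP(candidate, references))  # c == r == 6, so the penalty is exp(0) == 1.0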
AtmaHou/atma | bleu.py | MP | def MP(candidate, references, n):
"""
calculate modified precision
"""
counts = Counter(ngrams(candidate, n))
if not counts:
return 0
max_counts = {}
for reference in references:
reference_counts = Counter(ngrams(reference, n))
for ngram in counts:
max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])
clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in counts.items())
return sum(clipped_counts.values()) / sum(counts.values()) | python | def MP(candidate, references, n):
"""
calculate modified precision
"""
counts = Counter(ngrams(candidate, n))
if not counts:
return 0
max_counts = {}
for reference in references:
reference_counts = Counter(ngrams(reference, n))
for ngram in counts:
max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])
clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in counts.items())
return sum(clipped_counts.values()) / sum(counts.values()) | [
"def",
"MP",
"(",
"candidate",
",",
"references",
",",
"n",
")",
":",
"counts",
"=",
"Counter",
"(",
"ngrams",
"(",
"candidate",
",",
"n",
")",
")",
"if",
"not",
"counts",
":",
"return",
"0",
"max_counts",
"=",
"{",
"}",
"for",
"reference",
"in",
"references",
":",
"reference_counts",
"=",
"Counter",
"(",
"ngrams",
"(",
"reference",
",",
"n",
")",
")",
"for",
"ngram",
"in",
"counts",
":",
"max_counts",
"[",
"ngram",
"]",
"=",
"max",
"(",
"max_counts",
".",
"get",
"(",
"ngram",
",",
"0",
")",
",",
"reference_counts",
"[",
"ngram",
"]",
")",
"clipped_counts",
"=",
"dict",
"(",
"(",
"ngram",
",",
"min",
"(",
"count",
",",
"max_counts",
"[",
"ngram",
"]",
")",
")",
"for",
"ngram",
",",
"count",
"in",
"counts",
".",
"items",
"(",
")",
")",
"return",
"sum",
"(",
"clipped_counts",
".",
"values",
"(",
")",
")",
"/",
"sum",
"(",
"counts",
".",
"values",
"(",
")",
")"
]
| calculate modified precision | [
"calculate",
"modified",
"precision"
]
| 41cd8ea9443a9c3b2dd71432f46f44a0f83093c7 | https://github.com/AtmaHou/atma/blob/41cd8ea9443a9c3b2dd71432f46f44a0f83093c7/bleu.py#L24-L40 | train |
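A worked example of the clipped (modified) precision above, assuming the module's own ngrams helper (e.g. nltk.util.ngrams) and true division as in Python 3; the repeated 'the' is clipped to its single occurrence in the reference:

candidate = 'the the the'.split()
references = ['the cat'.split()]
print(MP(candidate, references, 1))  # 1 clipped match / 3 unigrams = 1/3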
trendels/rhino | rhino/mapper.py | template2regex | def template2regex(template, ranges=None):
"""Convert a URL template to a regular expression.
Converts a template, such as /{name}/ to a regular expression, e.g.
/(?P<name>[^/]+)/ and a list of the named parameters found in the template
(e.g. ['name']). Ranges are given after a colon in a template name to
indicate a restriction on the characters that can appear there. For
example, in the template:
"/user/{id:alpha}"
The `id` must contain only characters from a-zA-Z. Other characters there
will cause the pattern not to match.
The ranges parameter is an optional dictionary that maps range names to
regular expressions. New range names can be added, or old range names can
be redefined using this parameter.
Example:
>>> import rhino.mapper
>>> rhino.mapper.template2regex("{fred}")
('^(?P<fred>[^/]+)$', ['fred'])
"""
if len(template) and -1 < template.find('|') < len(template) - 1:
raise InvalidTemplateError("'|' may only appear at the end, found at position %d in %s" % (template.find('|'), template))
if ranges is None:
ranges = DEFAULT_RANGES
anchor = True
state = S_PATH
if len(template) and template[-1] == '|':
anchor = False
params = []
bracketdepth = 0
result = ['^']
name = ""
pattern = "[^/]+"
rangename = None
for c in template_splitter.split(template):
if state == S_PATH:
if len(c) > 1:
result.append(re.escape(c))
elif c == '[':
result.append("(")
bracketdepth += 1
elif c == ']':
bracketdepth -= 1
if bracketdepth < 0:
raise InvalidTemplateError("Mismatched brackets in %s" % template)
result.append(")?")
elif c == '{':
name = ""
state = S_TEMPLATE
elif c == '}':
raise InvalidTemplateError("Mismatched braces in %s" % template)
elif c == '|':
pass
else:
result.append(re.escape(c))
else:
if c == '}':
if rangename and rangename in ranges:
result.append("(?P<%s>%s)" % (name, ranges[rangename]))
else:
result.append("(?P<%s>%s)" % (name, pattern))
params.append(name)
state = S_PATH
rangename = None
else:
name = c
if name.find(":") > -1:
name, rangename = name.split(":")
if bracketdepth != 0:
raise InvalidTemplateError("Mismatched brackets in %s" % template)
if state == S_TEMPLATE:
raise InvalidTemplateError("Mismatched braces in %s" % template)
if anchor:
result.append('$')
return "".join(result), params | python | def template2regex(template, ranges=None):
"""Convert a URL template to a regular expression.
Converts a template, such as /{name}/ to a regular expression, e.g.
/(?P<name>[^/]+)/ and a list of the named parameters found in the template
(e.g. ['name']). Ranges are given after a colon in a template name to
indicate a restriction on the characters that can appear there. For
example, in the template:
"/user/{id:alpha}"
The `id` must contain only characters from a-zA-Z. Other characters there
will cause the pattern not to match.
The ranges parameter is an optional dictionary that maps range names to
regular expressions. New range names can be added, or old range names can
be redefined using this parameter.
Example:
>>> import rhino.mapper
>>> rhino.mapper.template2regex("{fred}")
('^(?P<fred>[^/]+)$', ['fred'])
"""
if len(template) and -1 < template.find('|') < len(template) - 1:
raise InvalidTemplateError("'|' may only appear at the end, found at position %d in %s" % (template.find('|'), template))
if ranges is None:
ranges = DEFAULT_RANGES
anchor = True
state = S_PATH
if len(template) and template[-1] == '|':
anchor = False
params = []
bracketdepth = 0
result = ['^']
name = ""
pattern = "[^/]+"
rangename = None
for c in template_splitter.split(template):
if state == S_PATH:
if len(c) > 1:
result.append(re.escape(c))
elif c == '[':
result.append("(")
bracketdepth += 1
elif c == ']':
bracketdepth -= 1
if bracketdepth < 0:
raise InvalidTemplateError("Mismatched brackets in %s" % template)
result.append(")?")
elif c == '{':
name = ""
state = S_TEMPLATE
elif c == '}':
raise InvalidTemplateError("Mismatched braces in %s" % template)
elif c == '|':
pass
else:
result.append(re.escape(c))
else:
if c == '}':
if rangename and rangename in ranges:
result.append("(?P<%s>%s)" % (name, ranges[rangename]))
else:
result.append("(?P<%s>%s)" % (name, pattern))
params.append(name)
state = S_PATH
rangename = None
else:
name = c
if name.find(":") > -1:
name, rangename = name.split(":")
if bracketdepth != 0:
raise InvalidTemplateError("Mismatched brackets in %s" % template)
if state == S_TEMPLATE:
raise InvalidTemplateError("Mismatched braces in %s" % template)
if anchor:
result.append('$')
return "".join(result), params | [
"def",
"template2regex",
"(",
"template",
",",
"ranges",
"=",
"None",
")",
":",
"if",
"len",
"(",
"template",
")",
"and",
"-",
"1",
"<",
"template",
".",
"find",
"(",
"'|'",
")",
"<",
"len",
"(",
"template",
")",
"-",
"1",
":",
"raise",
"InvalidTemplateError",
"(",
"\"'|' may only appear at the end, found at position %d in %s\"",
"%",
"(",
"template",
".",
"find",
"(",
"'|'",
")",
",",
"template",
")",
")",
"if",
"ranges",
"is",
"None",
":",
"ranges",
"=",
"DEFAULT_RANGES",
"anchor",
"=",
"True",
"state",
"=",
"S_PATH",
"if",
"len",
"(",
"template",
")",
"and",
"template",
"[",
"-",
"1",
"]",
"==",
"'|'",
":",
"anchor",
"=",
"False",
"params",
"=",
"[",
"]",
"bracketdepth",
"=",
"0",
"result",
"=",
"[",
"'^'",
"]",
"name",
"=",
"\"\"",
"pattern",
"=",
"\"[^/]+\"",
"rangename",
"=",
"None",
"for",
"c",
"in",
"template_splitter",
".",
"split",
"(",
"template",
")",
":",
"if",
"state",
"==",
"S_PATH",
":",
"if",
"len",
"(",
"c",
")",
">",
"1",
":",
"result",
".",
"append",
"(",
"re",
".",
"escape",
"(",
"c",
")",
")",
"elif",
"c",
"==",
"'['",
":",
"result",
".",
"append",
"(",
"\"(\"",
")",
"bracketdepth",
"+=",
"1",
"elif",
"c",
"==",
"']'",
":",
"bracketdepth",
"-=",
"1",
"if",
"bracketdepth",
"<",
"0",
":",
"raise",
"InvalidTemplateError",
"(",
"\"Mismatched brackets in %s\"",
"%",
"template",
")",
"result",
".",
"append",
"(",
"\")?\"",
")",
"elif",
"c",
"==",
"'{'",
":",
"name",
"=",
"\"\"",
"state",
"=",
"S_TEMPLATE",
"elif",
"c",
"==",
"'}'",
":",
"raise",
"InvalidTemplateError",
"(",
"\"Mismatched braces in %s\"",
"%",
"template",
")",
"elif",
"c",
"==",
"'|'",
":",
"pass",
"else",
":",
"result",
".",
"append",
"(",
"re",
".",
"escape",
"(",
"c",
")",
")",
"else",
":",
"if",
"c",
"==",
"'}'",
":",
"if",
"rangename",
"and",
"rangename",
"in",
"ranges",
":",
"result",
".",
"append",
"(",
"\"(?P<%s>%s)\"",
"%",
"(",
"name",
",",
"ranges",
"[",
"rangename",
"]",
")",
")",
"else",
":",
"result",
".",
"append",
"(",
"\"(?P<%s>%s)\"",
"%",
"(",
"name",
",",
"pattern",
")",
")",
"params",
".",
"append",
"(",
"name",
")",
"state",
"=",
"S_PATH",
"rangename",
"=",
"None",
"else",
":",
"name",
"=",
"c",
"if",
"name",
".",
"find",
"(",
"\":\"",
")",
">",
"-",
"1",
":",
"name",
",",
"rangename",
"=",
"name",
".",
"split",
"(",
"\":\"",
")",
"if",
"bracketdepth",
"!=",
"0",
":",
"raise",
"InvalidTemplateError",
"(",
"\"Mismatched brackets in %s\"",
"%",
"template",
")",
"if",
"state",
"==",
"S_TEMPLATE",
":",
"raise",
"InvalidTemplateError",
"(",
"\"Mismatched braces in %s\"",
"%",
"template",
")",
"if",
"anchor",
":",
"result",
".",
"append",
"(",
"'$'",
")",
"return",
"\"\"",
".",
"join",
"(",
"result",
")",
",",
"params"
]
| Convert a URL template to a regular expression.
Converts a template, such as /{name}/ to a regular expression, e.g.
/(?P<name>[^/]+)/ and a list of the named parameters found in the template
(e.g. ['name']). Ranges are given after a colon in a template name to
indicate a restriction on the characters that can appear there. For
example, in the template:
"/user/{id:alpha}"
The `id` must contain only characters from a-zA-Z. Other characters there
will cause the pattern not to match.
The ranges parameter is an optional dictionary that maps range names to
regular expressions. New range names can be added, or old range names can
be redefined using this parameter.
Example:
>>> import rhino.mapper
>>> rhino.mapper.template2regex("{fred}")
('^(?P<fred>[^/]+)$', ['fred']) | [
"Convert",
"a",
"URL",
"template",
"to",
"a",
"regular",
"expression",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L119-L199 | train |
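A sketch exercising the range syntax from the docstring above; the 'alpha' pattern is passed explicitly so the example does not rely on the contents of DEFAULT_RANGES:

import re

regex, params = template2regex('/user/{id:alpha}', ranges={'alpha': '[a-zA-Z]+'})
match = re.match(regex, '/user/Fred')
print(params, match.group('id'))  # ['id'] Fred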
trendels/rhino | rhino/mapper.py | Context.add_callback | def add_callback(self, phase, fn):
"""Adds a callback to the context.
The `phase` determines when and if the callback is executed, and which
positional arguments are passed in:
'enter'
: Called from `rhino.Resource`, after a handler for the current
request has been resolved, but before the handler is called.
Arguments: request
'leave'
: Called from `rhino.Resource`, after the handler has returned
successfully.
Arguments: request, response
'finalize'
: Called from `Mapper`, before the WSGI response is finalized.
Arguments: request, response
'teardown'
: Called from `Mapper`, before control is passed back to the
WSGI layer.
Arguments: -
'close'
: Called when the WSGI server calls `close()` on the response
iterator.
Arguments: -
'teardown' callbacks are guaranteed to be called at the end of every
request, and are suitable for cleanup tasks like closing database
handles, etc. If a teardown callback raises an exception, it is
logged to the server log but does not cause other teardown callbacks
to be skipped.
'enter', 'leave' and 'finalize' callbacks are only called if no
exception occurred before they are reached, including exceptions raised
in other callbacks.
Whether or not 'close' callbacks are called depends on whether
a WSGI response could be generated successfully, and if the WSGI
server calls '.close()' on the returned iterator, as required by the
spec. If that happens, all 'close' callbacks are called regardless
of exceptions, like 'teardown' callbacks.
"""
try:
self.__callbacks[phase].append(fn)
except KeyError:
raise KeyError("Invalid callback phase '%s'. Must be one of %s" % (phase, _callback_phases)) | python | def add_callback(self, phase, fn):
"""Adds a callback to the context.
The `phase` determines when and if the callback is executed, and which
positional arguments are passed in:
'enter'
: Called from `rhino.Resource`, after a handler for the current
request has been resolved, but before the handler is called.
Arguments: request
'leave'
: Called from `rhino.Resource`, after the handler has returned
successfully.
Arguments: request, response
'finalize'
: Called from `Mapper`, before the WSGI response is finalized.
Arguments: request, response
'teardown'
: Called from `Mapper`, before control is passed back to the
WSGI layer.
Arguments: -
'close'
: Called when the WSGI server calls `close()` on the response
iterator.
Arguments: -
'teardown' callbacks are guaranteed to be called at the end of every
request, and are suitable for cleanup tasks like closing database
handles, etc. If a teardown callback raises an exception, it is
logged to the server log but does not cause other teardown callbacks
to be skipped.
'enter', 'leave' and 'finalize' callbacks are only called if no
exception occurred before they are reached, including exceptions raised
in other callbacks.
Whether or not 'close' callbacks are called depends on whether
a WSGI response could be generated successfully, and if the WSGI
server calls '.close()' on the returned iterator, as required by the
spec. If that happens, all 'close' callbacks are called regardless
of exceptions, like 'teardown' callbacks.
"""
try:
self.__callbacks[phase].append(fn)
except KeyError:
raise KeyError("Invalid callback phase '%s'. Must be one of %s" % (phase, _callback_phases)) | [
"def",
"add_callback",
"(",
"self",
",",
"phase",
",",
"fn",
")",
":",
"try",
":",
"self",
".",
"__callbacks",
"[",
"phase",
"]",
".",
"append",
"(",
"fn",
")",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"Invalid callback phase '%s'. Must be one of %s\"",
"%",
"(",
"phase",
",",
"_callback_phases",
")",
")"
]
| Adds a callback to the context.
The `phase` determines when and if the callback is executed, and which
positional arguments are passed in:
'enter'
: Called from `rhino.Resource`, after a handler for the current
request has been resolved, but before the handler is called.
Arguments: request
'leave'
: Called from `rhino.Resource`, after the handler has returned
successfully.
Arguments: request, response
'finalize'
: Called from `Mapper`, before the WSGI response is finalized.
Arguments: request, response
'teardown'
: Called from `Mapper`, before control is passed back to the
WSGI layer.
Arguments: -
'close'
: Called when the WSGI server calls `close()` on the response
iterator.
Arguments: -
'teardown' callbacks are guaranteed to be called at the end of every
request, and are suitable for cleanup tasks like closing database
handles, etc. If a teardown callback raises an exception, it is
logged to the server log but does not cause other teardown callbacks
to be skipped.
'enter', 'leave' and 'finalize' callbacks are only called if no
exception occurred before they are reached, including exceptions raised
in other callbacks.
Whether or not 'close' callbacks are called depends on whether
a WSGI response could be generated successfully, and if the WSGI
server calls '.close()' on the returned iterator, as required by the
spec. If that happens, all 'close' callbacks are called regardless
of exceptions, like 'teardown' callbacks. | [
"Adds",
"a",
"callback",
"to",
"the",
"context",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L336-L390 | train |
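A minimal sketch of registering callbacks for the phases documented above; `ctx` stands for the Context that Mapper.wsgi builds per request, and the hook bodies are illustrative:

    import logging

    log = logging.getLogger(__name__)

    def install_hooks(ctx):
        def on_teardown():
            log.info("request finished")              # runs at the end of every request
        def on_finalize(request, response):           # 'finalize' receives (request, response)
            log.info("finalizing %s", request)
        ctx.add_callback('teardown', on_teardown)
        ctx.add_callback('finalize', on_finalize)
        # ctx.add_callback('cleanup', on_teardown)    # unknown phase -> KeyError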
trendels/rhino | rhino/mapper.py | Context.add_property | def add_property(self, name, fn, cached=True):
"""Adds a property to the Context.
See `Mapper.add_ctx_property`, which uses this method to install
the properties added on the Mapper level.
"""
if name in self.__properties:
raise KeyError("Trying to add a property '%s' that already exists on this %s object." % (name, self.__class__.__name__))
self.__properties[name] = (fn, cached) | python | def add_property(self, name, fn, cached=True):
"""Adds a property to the Context.
See `Mapper.add_ctx_property`, which uses this method to install
the properties added on the Mapper level.
"""
if name in self.__properties:
raise KeyError("Trying to add a property '%s' that already exists on this %s object." % (name, self.__class__.__name__))
self.__properties[name] = (fn, cached) | [
"def",
"add_property",
"(",
"self",
",",
"name",
",",
"fn",
",",
"cached",
"=",
"True",
")",
":",
"if",
"name",
"in",
"self",
".",
"__properties",
":",
"raise",
"KeyError",
"(",
"\"Trying to add a property '%s' that already exists on this %s object.\"",
"%",
"(",
"name",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"self",
".",
"__properties",
"[",
"name",
"]",
"=",
"(",
"fn",
",",
"cached",
")"
]
| Adds a property to the Context.
See `Mapper.add_ctx_property`, which uses this method to install
the properties added on the Mapper level. | [
"Adds",
"a",
"property",
"to",
"the",
"Context",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L405-L413 | train |
trendels/rhino | rhino/mapper.py | Route.path | def path(self, args, kw):
"""Builds the URL path fragment for this route."""
params = self._pop_params(args, kw)
if args or kw:
raise InvalidArgumentError("Extra parameters (%s, %s) when building path for %s" % (args, kw, self.template))
return self.build_url(**params) | python | def path(self, args, kw):
"""Builds the URL path fragment for this route."""
params = self._pop_params(args, kw)
if args or kw:
raise InvalidArgumentError("Extra parameters (%s, %s) when building path for %s" % (args, kw, self.template))
return self.build_url(**params) | [
"def",
"path",
"(",
"self",
",",
"args",
",",
"kw",
")",
":",
"params",
"=",
"self",
".",
"_pop_params",
"(",
"args",
",",
"kw",
")",
"if",
"args",
"or",
"kw",
":",
"raise",
"InvalidArgumentError",
"(",
"\"Extra parameters (%s, %s) when building path for %s\"",
"%",
"(",
"args",
",",
"kw",
",",
"self",
".",
"template",
")",
")",
"return",
"self",
".",
"build_url",
"(",
"*",
"*",
"params",
")"
]
| Builds the URL path fragment for this route. | [
"Builds",
"the",
"URL",
"path",
"fragment",
"for",
"this",
"route",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L477-L482 | train |
trendels/rhino | rhino/mapper.py | Mapper.add | def add(self, template, resource, name=None):
"""Add a route to a resource.
The optional `name` assigns a name to this route that can be used when
building URLs. The name must be unique within this Mapper object.
"""
# Special case for standalone handler functions
if hasattr(resource, '_rhino_meta'):
route = Route(
template, Resource(resource), name=name, ranges=self.ranges)
else:
route = Route(
template, resource, name=name, ranges=self.ranges)
obj_id = id(resource)
if obj_id not in self._lookup:
# It's ok to have multiple routes for the same object id, the
# lookup will return the first one.
self._lookup[obj_id] = route
if name is not None:
if name in self.named_routes:
raise InvalidArgumentError("A route named '%s' already exists in this %s object."
% (name, self.__class__.__name__))
self.named_routes[name] = route
self.routes.append(route) | python | def add(self, template, resource, name=None):
"""Add a route to a resource.
The optional `name` assigns a name to this route that can be used when
building URLs. The name must be unique within this Mapper object.
"""
# Special case for standalone handler functions
if hasattr(resource, '_rhino_meta'):
route = Route(
template, Resource(resource), name=name, ranges=self.ranges)
else:
route = Route(
template, resource, name=name, ranges=self.ranges)
obj_id = id(resource)
if obj_id not in self._lookup:
# It's ok to have multiple routes for the same object id, the
# lookup will return the first one.
self._lookup[obj_id] = route
if name is not None:
if name in self.named_routes:
raise InvalidArgumentError("A route named '%s' already exists in this %s object."
% (name, self.__class__.__name__))
self.named_routes[name] = route
self.routes.append(route) | [
"def",
"add",
"(",
"self",
",",
"template",
",",
"resource",
",",
"name",
"=",
"None",
")",
":",
"# Special case for standalone handler functions",
"if",
"hasattr",
"(",
"resource",
",",
"'_rhino_meta'",
")",
":",
"route",
"=",
"Route",
"(",
"template",
",",
"Resource",
"(",
"resource",
")",
",",
"name",
"=",
"name",
",",
"ranges",
"=",
"self",
".",
"ranges",
")",
"else",
":",
"route",
"=",
"Route",
"(",
"template",
",",
"resource",
",",
"name",
"=",
"name",
",",
"ranges",
"=",
"self",
".",
"ranges",
")",
"obj_id",
"=",
"id",
"(",
"resource",
")",
"if",
"obj_id",
"not",
"in",
"self",
".",
"_lookup",
":",
"# It's ok to have multiple routes for the same object id, the",
"# lookup will return the first one.",
"self",
".",
"_lookup",
"[",
"obj_id",
"]",
"=",
"route",
"if",
"name",
"is",
"not",
"None",
":",
"if",
"name",
"in",
"self",
".",
"named_routes",
":",
"raise",
"InvalidArgumentError",
"(",
"\"A route named '%s' already exists in this %s object.\"",
"%",
"(",
"name",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"self",
".",
"named_routes",
"[",
"name",
"]",
"=",
"route",
"self",
".",
"routes",
".",
"append",
"(",
"route",
")"
]
| Add a route to a resource.
The optional `name` assigns a name to this route that can be used when
building URLs. The name must be unique within this Mapper object. | [
"Add",
"a",
"route",
"to",
"a",
"resource",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L553-L576 | train |
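A registration sketch, assuming `hello_resource` is a valid rhino resource (plain handler functions carrying the `_rhino_meta` marker are wrapped in Resource automatically, as the code above shows):

    from rhino.mapper import Mapper

    mapper = Mapper()
    mapper.add('/hello/{name}', hello_resource, name='hello')  # named route, usable for URL building
    mapper.add('/hi/{name}', hello_resource)                   # same resource under a second template is fine
    # mapper.add('/x', other_resource, name='hello')           # duplicate name -> InvalidArgumentError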
trendels/rhino | rhino/mapper.py | Mapper.add_ctx_property | def add_ctx_property(self, name, fn, cached=True):
"""Install a context property.
A context property is a factory function whose return value will be
available as a property named `name` on `Context` objects passing
through this mapper. The result will be cached unless `cached` is
False.
The factory function will be called without arguments, or with the
context object if it requests an argument named 'ctx'.
"""
if name in [item[0] for item in self._ctx_properties]:
raise InvalidArgumentError("A context property name '%s' already exists." % name)
self._ctx_properties.append([name, (fn, cached)]) | python | def add_ctx_property(self, name, fn, cached=True):
"""Install a context property.
A context property is a factory function whose return value will be
available as a property named `name` on `Context` objects passing
through this mapper. The result will be cached unless `cached` is
False.
The factory function will be called without arguments, or with the
context object if it requests an argument named 'ctx'.
"""
if name in [item[0] for item in self._ctx_properties]:
raise InvalidArgumentError("A context property name '%s' already exists." % name)
self._ctx_properties.append([name, (fn, cached)]) | [
"def",
"add_ctx_property",
"(",
"self",
",",
"name",
",",
"fn",
",",
"cached",
"=",
"True",
")",
":",
"if",
"name",
"in",
"[",
"item",
"[",
"0",
"]",
"for",
"item",
"in",
"self",
".",
"_ctx_properties",
"]",
":",
"raise",
"InvalidArgumentError",
"(",
"\"A context property name '%s' already exists.\"",
"%",
"name",
")",
"self",
".",
"_ctx_properties",
".",
"append",
"(",
"[",
"name",
",",
"(",
"fn",
",",
"cached",
")",
"]",
")"
]
| Install a context property.
A context property is a factory function whose return value will be
available as a property named `name` on `Context` objects passing
through this mapper. The result will be cached unless `cached` is
False.
The factory function will be called without arguments, or with the
context object if it requests an argument named 'ctx'. | [
"Install",
"a",
"context",
"property",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L614-L627 | train |
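A sketch of installing per-request context properties on the `mapper` from the previous example; per the docstring, a factory may also request the context by naming its parameter 'ctx':

    import time

    def request_cache():
        return {}                                              # a per-request scratch dict

    mapper.add_ctx_property('cache', request_cache)            # cached: created once per request
    mapper.add_ctx_property('now', time.time, cached=False)    # recomputed on every access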
trendels/rhino | rhino/mapper.py | Mapper.path | def path(self, target, args, kw):
"""Build a URL path fragment for a resource or route.
Possible values for `target`:
A string that does not start with a '.' and does not contain ':'.
: Looks up the route of the same name on this mapper and returns its
path.
A string of the form 'a:b', 'a:b:c', etc.
: Follows the route to nested mappers by splitting off consecutive
segments. Returns the path of the route found by looking up the
final segment on the last mapper.
A `Route` object
: Returns the path for the route.
A resource that was added previously
: Looks up the first route that points to this resource and
returns its path.
"""
if type(target) in string_types:
if ':' in target:
# Build path a nested route name
prefix, rest = target.split(':', 1)
route = self.named_routes[prefix]
prefix_params = route._pop_params(args, kw)
prefix_path = route.path([], prefix_params)
next_mapper = route.resource
return prefix_path + next_mapper.path(rest, args, kw)
else:
# Build path for a named route
return self.named_routes[target].path(args, kw)
elif isinstance(target, Route):
# Build path for a route instance, used by build_url('.')
for route in self.routes:
if route is target:
return route.path(args, kw)
raise InvalidArgumentError("Route '%s' not found in this %s object." % (target, self.__class__.__name__))
else:
# Build path for resource by object id
target_id = id(target)
if target_id in self._lookup:
return self._lookup[target_id].path(args, kw)
raise InvalidArgumentError("No Route found for target '%s' in this %s object." % (target, self.__class__.__name__)) | python | def path(self, target, args, kw):
"""Build a URL path fragment for a resource or route.
Possible values for `target`:
A string that does not start with a '.' and does not contain ':'.
: Looks up the route of the same name on this mapper and returns its
path.
A string of the form 'a:b', 'a:b:c', etc.
: Follows the route to nested mappers by splitting off consecutive
segments. Returns the path of the route found by looking up the
final segment on the last mapper.
A `Route` object
: Returns the path for the route.
A resource that was added previously
: Looks up the first route that points to this resource and
returns its path.
"""
if type(target) in string_types:
if ':' in target:
# Build path for a nested route name
prefix, rest = target.split(':', 1)
route = self.named_routes[prefix]
prefix_params = route._pop_params(args, kw)
prefix_path = route.path([], prefix_params)
next_mapper = route.resource
return prefix_path + next_mapper.path(rest, args, kw)
else:
# Build path for a named route
return self.named_routes[target].path(args, kw)
elif isinstance(target, Route):
# Build path for a route instance, used by build_url('.')
for route in self.routes:
if route is target:
return route.path(args, kw)
raise InvalidArgumentError("Route '%s' not found in this %s object." % (target, self.__class__.__name__))
else:
# Build path for resource by object id
target_id = id(target)
if target_id in self._lookup:
return self._lookup[target_id].path(args, kw)
raise InvalidArgumentError("No Route found for target '%s' in this %s object." % (target, self.__class__.__name__)) | [
"def",
"path",
"(",
"self",
",",
"target",
",",
"args",
",",
"kw",
")",
":",
"if",
"type",
"(",
"target",
")",
"in",
"string_types",
":",
"if",
"':'",
"in",
"target",
":",
"# Build path a nested route name",
"prefix",
",",
"rest",
"=",
"target",
".",
"split",
"(",
"':'",
",",
"1",
")",
"route",
"=",
"self",
".",
"named_routes",
"[",
"prefix",
"]",
"prefix_params",
"=",
"route",
".",
"_pop_params",
"(",
"args",
",",
"kw",
")",
"prefix_path",
"=",
"route",
".",
"path",
"(",
"[",
"]",
",",
"prefix_params",
")",
"next_mapper",
"=",
"route",
".",
"resource",
"return",
"prefix_path",
"+",
"next_mapper",
".",
"path",
"(",
"rest",
",",
"args",
",",
"kw",
")",
"else",
":",
"# Build path for a named route",
"return",
"self",
".",
"named_routes",
"[",
"target",
"]",
".",
"path",
"(",
"args",
",",
"kw",
")",
"elif",
"isinstance",
"(",
"target",
",",
"Route",
")",
":",
"# Build path for a route instance, used by build_url('.')",
"for",
"route",
"in",
"self",
".",
"routes",
":",
"if",
"route",
"is",
"target",
":",
"return",
"route",
".",
"path",
"(",
"args",
",",
"kw",
")",
"raise",
"InvalidArgumentError",
"(",
"\"Route '%s' not found in this %s object.\"",
"%",
"(",
"target",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"else",
":",
"# Build path for resource by object id",
"target_id",
"=",
"id",
"(",
"target",
")",
"if",
"target_id",
"in",
"self",
".",
"_lookup",
":",
"return",
"self",
".",
"_lookup",
"[",
"target_id",
"]",
".",
"path",
"(",
"args",
",",
"kw",
")",
"raise",
"InvalidArgumentError",
"(",
"\"No Route found for target '%s' in this %s object.\"",
"%",
"(",
"target",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")"
]
| Build a URL path fragment for a resource or route.
Possible values for `target`:
A string that does not start with a '.' and does not contain ':'.
: Looks up the route of the same name on this mapper and returns its
path.
A string of the form 'a:b', 'a:b:c', etc.
: Follows the route to nested mappers by splitting off consecutive
segments. Returns the path of the route found by looking up the
final segment on the last mapper.
A `Route` object
: Returns the path for the route.
A resource that was added previously
: Looks up the first route that points to this resource and
returns its path. | [
"Build",
"a",
"URL",
"path",
"fragment",
"for",
"a",
"resource",
"or",
"route",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L629-L673 | train |
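A URL-building sketch against the lookup rules above, reusing the `mapper` and the 'hello' route from the Mapper.add example:

    mapper.path('hello', [], {'name': 'world'})         # by route name; expected '/hello/world'
    mapper.path(hello_resource, [], {'name': 'world'})  # by resource object, first matching route
    # mapper.path('site:hello', [], {'name': 'world'})  # 'outer:inner' descends into a nested mapper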
trendels/rhino | rhino/mapper.py | Mapper.wsgi | def wsgi(self, environ, start_response):
"""Implements the mapper's WSGI interface."""
request = Request(environ)
ctx = Context(request)
try:
try:
response = self(request, ctx)
ctx._run_callbacks('finalize', (request, response))
response = response.conditional_to(request)
except HTTPException as e:
response = e.response
except Exception:
self.handle_error(request, ctx)
response = InternalServerError().response
response.add_callback(lambda: ctx._run_callbacks('close'))
return response(environ, start_response)
finally:
ctx._run_callbacks('teardown', log_errors=True) | python | def wsgi(self, environ, start_response):
"""Implements the mapper's WSGI interface."""
request = Request(environ)
ctx = Context(request)
try:
try:
response = self(request, ctx)
ctx._run_callbacks('finalize', (request, response))
response = response.conditional_to(request)
except HTTPException as e:
response = e.response
except Exception:
self.handle_error(request, ctx)
response = InternalServerError().response
response.add_callback(lambda: ctx._run_callbacks('close'))
return response(environ, start_response)
finally:
ctx._run_callbacks('teardown', log_errors=True) | [
"def",
"wsgi",
"(",
"self",
",",
"environ",
",",
"start_response",
")",
":",
"request",
"=",
"Request",
"(",
"environ",
")",
"ctx",
"=",
"Context",
"(",
"request",
")",
"try",
":",
"try",
":",
"response",
"=",
"self",
"(",
"request",
",",
"ctx",
")",
"ctx",
".",
"_run_callbacks",
"(",
"'finalize'",
",",
"(",
"request",
",",
"response",
")",
")",
"response",
"=",
"response",
".",
"conditional_to",
"(",
"request",
")",
"except",
"HTTPException",
"as",
"e",
":",
"response",
"=",
"e",
".",
"response",
"except",
"Exception",
":",
"self",
".",
"handle_error",
"(",
"request",
",",
"ctx",
")",
"response",
"=",
"InternalServerError",
"(",
")",
".",
"response",
"response",
".",
"add_callback",
"(",
"lambda",
":",
"ctx",
".",
"_run_callbacks",
"(",
"'close'",
")",
")",
"return",
"response",
"(",
"environ",
",",
"start_response",
")",
"finally",
":",
"ctx",
".",
"_run_callbacks",
"(",
"'teardown'",
",",
"log_errors",
"=",
"True",
")"
]
| Implements the mapper's WSGI interface. | [
"Implements",
"the",
"mapper",
"s",
"WSGI",
"interface",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L675-L693 | train |
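Deployment sketch: `wsgi` is the WSGI entry point, so any WSGI server can host it directly (module and server names illustrative):

    application = mapper.wsgi
    # e.g.  gunicorn myapp:application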
trendels/rhino | rhino/mapper.py | Mapper.start_server | def start_server(self, host='localhost', port=9000, app=None):
"""Start a `wsgiref.simple_server` based server to run this mapper."""
from wsgiref.simple_server import make_server
if app is None:
app = self.wsgi
server = make_server(host, port, app)
server_addr = "%s:%s" % (server.server_name, server.server_port)
print "Server listening at http://%s/" % server_addr
server.serve_forever() | python | def start_server(self, host='localhost', port=9000, app=None):
"""Start a `wsgiref.simple_server` based server to run this mapper."""
from wsgiref.simple_server import make_server
if app is None:
app = self.wsgi
server = make_server(host, port, app)
server_addr = "%s:%s" % (server.server_name, server.server_port)
print "Server listening at http://%s/" % server_addr
server.serve_forever() | [
"def",
"start_server",
"(",
"self",
",",
"host",
"=",
"'localhost'",
",",
"port",
"=",
"9000",
",",
"app",
"=",
"None",
")",
":",
"from",
"wsgiref",
".",
"simple_server",
"import",
"make_server",
"if",
"app",
"is",
"None",
":",
"app",
"=",
"self",
".",
"wsgi",
"server",
"=",
"make_server",
"(",
"host",
",",
"port",
",",
"app",
")",
"server_addr",
"=",
"\"%s:%s\"",
"%",
"(",
"server",
".",
"server_name",
",",
"server",
".",
"server_port",
")",
"print",
"\"Server listening at http://%s/\"",
"%",
"server_addr",
"server",
".",
"serve_forever",
"(",
")"
]
| Start a `wsgiref.simple_server` based server to run this mapper. | [
"Start",
"a",
"wsgiref",
".",
"simple_server",
"based",
"server",
"to",
"run",
"this",
"mapper",
"."
]
| f1f0ef21b6080a2bd130b38b5bef163074c94aed | https://github.com/trendels/rhino/blob/f1f0ef21b6080a2bd130b38b5bef163074c94aed/rhino/mapper.py#L726-L734 | train |
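For quick local development, the built-in wsgiref server shown above is enough (host/port illustrative):

    mapper.start_server(host='0.0.0.0', port=8080)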
safarijv/sbo-sphinx | sbo_sphinx/apidoc.py | generate_docs | def generate_docs(app):
"""
Run sphinx-apidoc to generate Python API documentation for the project.
"""
config = app.config
config_dir = app.env.srcdir
source_root = os.path.join(config_dir, config.apidoc_source_root)
output_root = os.path.join(config_dir, config.apidoc_output_root)
execution_dir = os.path.join(config_dir, '..')
# Remove any files generated by earlier builds
cleanup(output_root)
command = ['sphinx-apidoc', '-f', '-o', output_root, source_root]
# Exclude anything else we were specifically asked to
for exclude in config.apidoc_exclude:
command.append(os.path.join(source_root, exclude))
process = Popen(command, cwd=execution_dir)
process.wait() | python | def generate_docs(app):
"""
Run sphinx-apidoc to generate Python API documentation for the project.
"""
config = app.config
config_dir = app.env.srcdir
source_root = os.path.join(config_dir, config.apidoc_source_root)
output_root = os.path.join(config_dir, config.apidoc_output_root)
execution_dir = os.path.join(config_dir, '..')
# Remove any files generated by earlier builds
cleanup(output_root)
command = ['sphinx-apidoc', '-f', '-o', output_root, source_root]
# Exclude anything else we were specifically asked to
for exclude in config.apidoc_exclude:
command.append(os.path.join(source_root, exclude))
process = Popen(command, cwd=execution_dir)
process.wait() | [
"def",
"generate_docs",
"(",
"app",
")",
":",
"config",
"=",
"app",
".",
"config",
"config_dir",
"=",
"app",
".",
"env",
".",
"srcdir",
"source_root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config_dir",
",",
"config",
".",
"apidoc_source_root",
")",
"output_root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config_dir",
",",
"config",
".",
"apidoc_output_root",
")",
"execution_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config_dir",
",",
"'..'",
")",
"# Remove any files generated by earlier builds",
"cleanup",
"(",
"output_root",
")",
"command",
"=",
"[",
"'sphinx-apidoc'",
",",
"'-f'",
",",
"'-o'",
",",
"output_root",
",",
"source_root",
"]",
"# Exclude anything else we were specifically asked to",
"for",
"exclude",
"in",
"config",
".",
"apidoc_exclude",
":",
"command",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"source_root",
",",
"exclude",
")",
")",
"process",
"=",
"Popen",
"(",
"command",
",",
"cwd",
"=",
"execution_dir",
")",
"process",
".",
"wait",
"(",
")"
]
| Run sphinx-apidoc to generate Python API documentation for the project. | [
"Run",
"sphinx",
"-",
"apidoc",
"to",
"generate",
"Python",
"API",
"documentation",
"for",
"the",
"project",
"."
]
| 7a8efb7c49488131c90c19ef1a1563f595630a36 | https://github.com/safarijv/sbo-sphinx/blob/7a8efb7c49488131c90c19ef1a1563f595630a36/sbo_sphinx/apidoc.py#L33-L51 | train |
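A sketch of the conf.py settings this extension reads (values illustrative; the extension module name is inferred from the file path and is an assumption):

    # conf.py
    extensions = ['sbo_sphinx.apidoc']
    apidoc_source_root = '../mypackage'        # package to document, relative to the conf dir
    apidoc_output_root = 'api'                 # where the generated reST files are written
    apidoc_exclude = ['tests', 'migrations']   # paths passed to sphinx-apidoc as excludes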
safarijv/sbo-sphinx | sbo_sphinx/apidoc.py | cleanup | def cleanup(output_root):
"""Remove any reST files which were generated by this extension"""
if os.path.exists(output_root):
if os.path.isdir(output_root):
rmtree(output_root)
else:
os.remove(output_root) | python | def cleanup(output_root):
"""Remove any reST files which were generated by this extension"""
if os.path.exists(output_root):
if os.path.isdir(output_root):
rmtree(output_root)
else:
os.remove(output_root) | [
"def",
"cleanup",
"(",
"output_root",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"output_root",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"output_root",
")",
":",
"rmtree",
"(",
"output_root",
")",
"else",
":",
"os",
".",
"remove",
"(",
"output_root",
")"
]
| Remove any reST files which were generated by this extension | [
"Remove",
"any",
"reST",
"files",
"which",
"were",
"generated",
"by",
"this",
"extension"
]
| 7a8efb7c49488131c90c19ef1a1563f595630a36 | https://github.com/safarijv/sbo-sphinx/blob/7a8efb7c49488131c90c19ef1a1563f595630a36/sbo_sphinx/apidoc.py#L54-L60 | train |
jreese/aioslack | aioslack/types.py | Auto.build | def build(cls: Type[T], data: Generic) -> T:
"""Build objects from dictionaries, recursively."""
fields = fields_dict(cls)
kwargs: Dict[str, Any] = {}
for key, value in data.items():
if key in fields:
if isinstance(value, Mapping):
t = fields[key].type
if issubclass(t, Auto):
value = t.build(value)
else:
value = Auto.generate(value, name=key.title())
kwargs[key] = value
else:
log.debug(f"got unknown attribute {key} for {cls.__name__}")
return cls(**kwargs) | python | def build(cls: Type[T], data: Generic) -> T:
"""Build objects from dictionaries, recursively."""
fields = fields_dict(cls)
kwargs: Dict[str, Any] = {}
for key, value in data.items():
if key in fields:
if isinstance(value, Mapping):
t = fields[key].type
if issubclass(t, Auto):
value = t.build(value)
else:
value = Auto.generate(value, name=key.title())
kwargs[key] = value
else:
log.debug(f"got unknown attribute {key} for {cls.__name__}")
return cls(**kwargs) | [
"def",
"build",
"(",
"cls",
":",
"Type",
"[",
"T",
"]",
",",
"data",
":",
"Generic",
")",
"->",
"T",
":",
"fields",
"=",
"fields_dict",
"(",
"cls",
")",
"kwargs",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"fields",
":",
"if",
"isinstance",
"(",
"value",
",",
"Mapping",
")",
":",
"t",
"=",
"fields",
"[",
"key",
"]",
".",
"type",
"if",
"issubclass",
"(",
"t",
",",
"Auto",
")",
":",
"value",
"=",
"t",
".",
"build",
"(",
"value",
")",
"else",
":",
"value",
"=",
"Auto",
".",
"generate",
"(",
"value",
",",
"name",
"=",
"key",
".",
"title",
"(",
")",
")",
"kwargs",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"log",
".",
"debug",
"(",
"f\"got unknown attribute {key} for {cls.__name__}\"",
")",
"return",
"cls",
"(",
"*",
"*",
"kwargs",
")"
]
| Build objects from dictionaries, recursively. | [
"Build",
"objects",
"from",
"dictionaries",
"recursively",
"."
]
| 5e705f557dde9e81903d84ffb2896ec0a074ad5c | https://github.com/jreese/aioslack/blob/5e705f557dde9e81903d84ffb2896ec0a074ad5c/aioslack/types.py#L36-L51 | train |
jreese/aioslack | aioslack/types.py | Auto.generate | def generate(
cls: Type[T], data: Generic, name: str = None, *, recursive: bool = True
) -> T:
"""Build dataclasses and objects from dictionaries, recursively."""
if name is None:
name = cls.__name__
kls = make_class(name, {k: ib(default=None) for k in data}, bases=(cls,))
data = {
k: (
cls.generate(v, k.title())
if recursive and isinstance(v, Mapping)
else v
)
for k, v in data.items()
}
return kls(**data) | python | def generate(
cls: Type[T], data: Generic, name: str = None, *, recursive: bool = True
) -> T:
"""Build dataclasses and objects from dictionaries, recursively."""
if name is None:
name = cls.__name__
kls = make_class(name, {k: ib(default=None) for k in data}, bases=(cls,))
data = {
k: (
cls.generate(v, k.title())
if recursive and isinstance(v, Mapping)
else v
)
for k, v in data.items()
}
return kls(**data) | [
"def",
"generate",
"(",
"cls",
":",
"Type",
"[",
"T",
"]",
",",
"data",
":",
"Generic",
",",
"name",
":",
"str",
"=",
"None",
",",
"*",
",",
"recursive",
":",
"bool",
"=",
"True",
")",
"->",
"T",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"cls",
".",
"__name__",
"kls",
"=",
"make_class",
"(",
"name",
",",
"{",
"k",
":",
"ib",
"(",
"default",
"=",
"None",
")",
"for",
"k",
"in",
"data",
"}",
",",
"bases",
"=",
"(",
"cls",
",",
")",
")",
"data",
"=",
"{",
"k",
":",
"(",
"cls",
".",
"generate",
"(",
"v",
",",
"k",
".",
"title",
"(",
")",
")",
"if",
"recursive",
"and",
"isinstance",
"(",
"v",
",",
"Mapping",
")",
"else",
"v",
")",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
"}",
"return",
"kls",
"(",
"*",
"*",
"data",
")"
]
| Build dataclasses and objects from dictionaries, recursively. | [
"Build",
"dataclasses",
"and",
"objects",
"from",
"dictionaries",
"recursively",
"."
]
| 5e705f557dde9e81903d84ffb2896ec0a074ad5c | https://github.com/jreese/aioslack/blob/5e705f557dde9e81903d84ffb2896ec0a074ad5c/aioslack/types.py#L54-L69 | train |
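A sketch of turning a nested payload into attrs-backed objects with the two constructors above (payload keys illustrative):

    from aioslack.types import Auto

    payload = {'user': {'id': 'U1', 'name': 'jreese'}, 'ts': '123.45'}
    event = Auto.generate(payload, name='Event')
    event.user.name   # 'jreese' -- nested mappings become generated classes recursively
    event.ts          # '123.45'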
adaptive-learning/proso-apps | proso_models/models.py | emit_answer_event | def emit_answer_event(sender, instance, **kwargs):
"""
Save answer event to log file.
"""
if not issubclass(sender, Answer) or not kwargs['created']:
return
logger = get_events_logger()
logger.emit('answer', {
"user_id": instance.user_id,
"is_correct": instance.item_asked_id == instance.item_answered_id,
"context_id": [instance.context_id] if instance.context_id else [],
"item_id": instance.item_id,
"response_time_ms": instance.response_time,
"params": {
"session_id": instance.session_id,
"guess": instance.guess,
"practice_set_id": instance.practice_set_id,
"config_id": instance.config_id,
}}
) | python | def emit_answer_event(sender, instance, **kwargs):
"""
Save answer event to log file.
"""
if not issubclass(sender, Answer) or not kwargs['created']:
return
logger = get_events_logger()
logger.emit('answer', {
"user_id": instance.user_id,
"is_correct": instance.item_asked_id == instance.item_answered_id,
"context_id": [instance.context_id] if instance.context_id else [],
"item_id": instance.item_id,
"response_time_ms": instance.response_time,
"params": {
"session_id": instance.session_id,
"guess": instance.guess,
"practice_set_id": instance.practice_set_id,
"config_id": instance.config_id,
}}
) | [
"def",
"emit_answer_event",
"(",
"sender",
",",
"instance",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"issubclass",
"(",
"sender",
",",
"Answer",
")",
"or",
"not",
"kwargs",
"[",
"'created'",
"]",
":",
"return",
"logger",
"=",
"get_events_logger",
"(",
")",
"logger",
".",
"emit",
"(",
"'answer'",
",",
"{",
"\"user_id\"",
":",
"instance",
".",
"user_id",
",",
"\"is_correct\"",
":",
"instance",
".",
"item_asked_id",
"==",
"instance",
".",
"item_answered_id",
",",
"\"context_id\"",
":",
"[",
"instance",
".",
"context_id",
"]",
"if",
"instance",
".",
"context_id",
"else",
"[",
"]",
",",
"\"item_id\"",
":",
"instance",
".",
"item_id",
",",
"\"response_time_ms\"",
":",
"instance",
".",
"response_time",
",",
"\"params\"",
":",
"{",
"\"session_id\"",
":",
"instance",
".",
"session_id",
",",
"\"guess\"",
":",
"instance",
".",
"guess",
",",
"\"practice_set_id\"",
":",
"instance",
".",
"practice_set_id",
",",
"\"config_id\"",
":",
"instance",
".",
"config_id",
",",
"}",
"}",
")"
]
| Save answer event to log file. | [
"Save",
"answer",
"event",
"to",
"log",
"file",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L1217-L1236 | train |
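The signature above matches a Django post_save receiver; wiring it up would look like this (the project may already connect it elsewhere):

    from django.db.models.signals import post_save

    post_save.connect(emit_answer_event)   # the issubclass/created checks above do the filtering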
adaptive-learning/proso-apps | proso_models/models.py | ItemManager.get_all_available_leaves | def get_all_available_leaves(self, language=None, forbidden_item_ids=None):
"""
Get all available leaves.
"""
return self.get_all_leaves(language=language, forbidden_item_ids=forbidden_item_ids) | python | def get_all_available_leaves(self, language=None, forbidden_item_ids=None):
"""
Get all available leaves.
"""
return self.get_all_leaves(language=language, forbidden_item_ids=forbidden_item_ids) | [
"def",
"get_all_available_leaves",
"(",
"self",
",",
"language",
"=",
"None",
",",
"forbidden_item_ids",
"=",
"None",
")",
":",
"return",
"self",
".",
"get_all_leaves",
"(",
"language",
"=",
"language",
",",
"forbidden_item_ids",
"=",
"forbidden_item_ids",
")"
]
| Get all available leaves. | [
"Get",
"all",
"available",
"leaves",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L330-L334 | train |
adaptive-learning/proso-apps | proso_models/models.py | ItemManager.get_children_graph | def get_children_graph(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get a subgraph of items reachable from the given set of items through
the 'child' relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (child items), root items are
referenced by None key
"""
if forbidden_item_ids is None:
forbidden_item_ids = set()
def _children(item_ids):
if item_ids is None:
items = Item.objects.filter(active=True).prefetch_related('children')
else:
item_ids = [ii for iis in item_ids.values() for ii in iis]
items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('children')
return {
item.id: sorted([
_item.id for _item in item.children.all()
if _item.active and _item.id not in forbidden_item_ids
])
for item in items if item.id not in forbidden_item_ids
}
if item_ids is None:
return self._reachable_graph(None, _children, language=language)
else:
graph = self.get_children_graph(None, language, forbidden_item_ids=forbidden_item_ids)
return self._subset_graph(graph, set(item_ids) - set(forbidden_item_ids)) | python | def get_children_graph(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get a subgraph of items reachable from the given set of items through
the 'child' relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (child items), root items are
referenced by None key
"""
if forbidden_item_ids is None:
forbidden_item_ids = set()
def _children(item_ids):
if item_ids is None:
items = Item.objects.filter(active=True).prefetch_related('children')
else:
item_ids = [ii for iis in item_ids.values() for ii in iis]
items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('children')
return {
item.id: sorted([
_item.id for _item in item.children.all()
if _item.active and _item.id not in forbidden_item_ids
])
for item in items if item.id not in forbidden_item_ids
}
if item_ids is None:
return self._reachable_graph(None, _children, language=language)
else:
graph = self.get_children_graph(None, language, forbidden_item_ids=forbidden_item_ids)
return self._subset_graph(graph, set(item_ids) - set(forbidden_item_ids)) | [
"def",
"get_children_graph",
"(",
"self",
",",
"item_ids",
"=",
"None",
",",
"language",
"=",
"None",
",",
"forbidden_item_ids",
"=",
"None",
")",
":",
"if",
"forbidden_item_ids",
"is",
"None",
":",
"forbidden_item_ids",
"=",
"set",
"(",
")",
"def",
"_children",
"(",
"item_ids",
")",
":",
"if",
"item_ids",
"is",
"None",
":",
"items",
"=",
"Item",
".",
"objects",
".",
"filter",
"(",
"active",
"=",
"True",
")",
".",
"prefetch_related",
"(",
"'children'",
")",
"else",
":",
"item_ids",
"=",
"[",
"ii",
"for",
"iis",
"in",
"item_ids",
".",
"values",
"(",
")",
"for",
"ii",
"in",
"iis",
"]",
"items",
"=",
"Item",
".",
"objects",
".",
"filter",
"(",
"id__in",
"=",
"item_ids",
",",
"active",
"=",
"True",
")",
".",
"prefetch_related",
"(",
"'children'",
")",
"return",
"{",
"item",
".",
"id",
":",
"sorted",
"(",
"[",
"_item",
".",
"id",
"for",
"_item",
"in",
"item",
".",
"children",
".",
"all",
"(",
")",
"if",
"_item",
".",
"active",
"and",
"_item",
".",
"id",
"not",
"in",
"forbidden_item_ids",
"]",
")",
"for",
"item",
"in",
"items",
"if",
"item",
".",
"id",
"not",
"in",
"forbidden_item_ids",
"}",
"if",
"item_ids",
"is",
"None",
":",
"return",
"self",
".",
"_reachable_graph",
"(",
"None",
",",
"_children",
",",
"language",
"=",
"language",
")",
"else",
":",
"graph",
"=",
"self",
".",
"get_children_graph",
"(",
"None",
",",
"language",
",",
"forbidden_item_ids",
"=",
"forbidden_item_ids",
")",
"return",
"self",
".",
"_subset_graph",
"(",
"graph",
",",
"set",
"(",
"item_ids",
")",
"-",
"set",
"(",
"forbidden_item_ids",
")",
")"
]
| Get a subgraph of items reachable from the given set of items through
the 'child' relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (child items), root items are
referenced by None key | [
"Get",
"a",
"subgraph",
"of",
"items",
"reachable",
"from",
"the",
"given",
"set",
"of",
"items",
"through",
"the",
"child",
"relation",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L436-L471 | train |
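The returned shape, sketched for a tiny hierarchy where item 1 has children 2 and 3, and 3 has child 4 (ids illustrative):

    Item.objects.get_children_graph(item_ids=[1])
    # expected: {None: [1], 1: [2, 3], 3: [4]}   (roots sit under the None key, per the docstring)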
adaptive-learning/proso-apps | proso_models/models.py | ItemManager.get_parents_graph | def get_parents_graph(self, item_ids, language=None):
"""
Get a subgraph of items reachable from the given set of items through
the 'parent' relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (parent items), root items are
referenced by None key
"""
def _parents(item_ids):
if item_ids is None:
items = Item.objects.filter(active=True).prefetch_related('parents')
else:
item_ids = [ii for iis in item_ids.values() for ii in iis]
items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents')
return {item.id: sorted([_item.id for _item in item.parents.all()]) for item in items}
if item_ids is None:
return self._reachable_graph(None, _parents, language=language)
else:
graph = self.get_parents_graph(None, language)
return self._subset_graph(graph, item_ids) | python | def get_parents_graph(self, item_ids, language=None):
"""
Get a subgraph of items reachable from the given set of items through
the 'parent' relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (parent items), root items are
referenced by None key
"""
def _parents(item_ids):
if item_ids is None:
items = Item.objects.filter(active=True).prefetch_related('parents')
else:
item_ids = [ii for iis in item_ids.values() for ii in iis]
items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents')
return {item.id: sorted([_item.id for _item in item.parents.all()]) for item in items}
if item_ids is None:
return self._reachable_graph(None, _parents, language=language)
else:
graph = self.get_parents_graph(None, language)
return self._subset_graph(graph, item_ids) | [
"def",
"get_parents_graph",
"(",
"self",
",",
"item_ids",
",",
"language",
"=",
"None",
")",
":",
"def",
"_parents",
"(",
"item_ids",
")",
":",
"if",
"item_ids",
"is",
"None",
":",
"items",
"=",
"Item",
".",
"objects",
".",
"filter",
"(",
"active",
"=",
"True",
")",
".",
"prefetch_related",
"(",
"'parents'",
")",
"else",
":",
"item_ids",
"=",
"[",
"ii",
"for",
"iis",
"in",
"item_ids",
".",
"values",
"(",
")",
"for",
"ii",
"in",
"iis",
"]",
"items",
"=",
"Item",
".",
"objects",
".",
"filter",
"(",
"id__in",
"=",
"item_ids",
",",
"active",
"=",
"True",
")",
".",
"prefetch_related",
"(",
"'parents'",
")",
"return",
"{",
"item",
".",
"id",
":",
"sorted",
"(",
"[",
"_item",
".",
"id",
"for",
"_item",
"in",
"item",
".",
"parents",
".",
"all",
"(",
")",
"]",
")",
"for",
"item",
"in",
"items",
"}",
"return",
"self",
".",
"_reachable_graph",
"(",
"item_ids",
",",
"_parents",
",",
"language",
"=",
"language",
")",
"if",
"item_ids",
"is",
"None",
":",
"return",
"self",
".",
"_reachable_graph",
"(",
"None",
",",
"_parents",
",",
"language",
"=",
"language",
")",
"else",
":",
"graph",
"=",
"self",
".",
"get_parents_graph",
"(",
"None",
",",
"language",
")",
"return",
"self",
".",
"_subset_graph",
"(",
"graph",
",",
"item_ids",
")"
]
| Get a subgraph of items reachable from the given set of items through
the 'parent' relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (parent items), root items are
referenced by None key | [
"Get",
"a",
"subgraph",
"of",
"items",
"reachable",
"from",
"the",
"given",
"set",
"of",
"items",
"through",
"the",
"parent",
"relation",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L478-L505 | train |
adaptive-learning/proso-apps | proso_models/models.py | ItemManager.get_graph | def get_graph(self, item_ids, language=None):
"""
Get a subgraph of items reachable from the given set of items through
any relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (related items), root items are
referenced by None key
"""
def _related(item_ids):
if item_ids is None:
items = Item.objects.filter(active=True).prefetch_related('parents', 'children')
else:
item_ids = [ii for iis in item_ids.values() for ii in iis]
items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents', 'children')
return {item.id: sorted([_item.id for rel in [item.parents.all(), item.children.all()] for _item in rel]) for item in items}
if item_ids is None:
return self._reachable_graph(None, _related, language=language)
else:
graph = self.get_graph(None, language)
return self._subset_graph(graph, item_ids) | python | def get_graph(self, item_ids, language=None):
"""
Get a subgraph of items reachable from the given set of items through
any relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (related items), root items are
referenced by None key
"""
def _related(item_ids):
if item_ids is None:
items = Item.objects.filter(active=True).prefetch_related('parents', 'children')
else:
item_ids = [ii for iis in item_ids.values() for ii in iis]
items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('parents', 'children')
return {item.id: sorted([_item.id for rel in [item.parents.all(), item.children.all()] for _item in rel]) for item in items}
if item_ids is None:
return self._reachable_graph(None, _related, language=language)
else:
graph = self.get_graph(None, language)
return self._subset_graph(graph, item_ids) | [
"def",
"get_graph",
"(",
"self",
",",
"item_ids",
",",
"language",
"=",
"None",
")",
":",
"def",
"_related",
"(",
"item_ids",
")",
":",
"if",
"item_ids",
"is",
"None",
":",
"items",
"=",
"Item",
".",
"objects",
".",
"filter",
"(",
"active",
"=",
"True",
")",
".",
"prefetch_related",
"(",
"'parents'",
",",
"'children'",
")",
"else",
":",
"item_ids",
"=",
"[",
"ii",
"for",
"iis",
"in",
"item_ids",
".",
"values",
"(",
")",
"for",
"ii",
"in",
"iis",
"]",
"items",
"=",
"Item",
".",
"objects",
".",
"filter",
"(",
"id__in",
"=",
"item_ids",
",",
"active",
"=",
"True",
")",
".",
"prefetch_related",
"(",
"'parents'",
",",
"'children'",
")",
"return",
"{",
"item",
".",
"id",
":",
"sorted",
"(",
"[",
"_item",
".",
"id",
"for",
"rel",
"in",
"[",
"item",
".",
"parents",
".",
"all",
"(",
")",
",",
"item",
".",
"children",
".",
"all",
"(",
")",
"]",
"for",
"_item",
"in",
"rel",
"]",
")",
"for",
"item",
"in",
"items",
"}",
"if",
"item_ids",
"is",
"None",
":",
"return",
"self",
".",
"_reachable_graph",
"(",
"None",
",",
"_related",
",",
"language",
"=",
"language",
")",
"else",
":",
"graph",
"=",
"self",
".",
"get_graph",
"(",
"None",
",",
"language",
")",
"return",
"self",
".",
"_subset_graph",
"(",
"graph",
",",
"item_ids",
")"
]
| Get a subgraph of items reachable from the given set of items through
any relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (related items), root items are
referenced by None key | [
"Get",
"a",
"subgraph",
"of",
"items",
"reachable",
"from",
"the",
"given",
"set",
"of",
"items",
"through",
"any",
"relation",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L507-L532 | train |
adaptive-learning/proso-apps | proso_models/models.py | ItemManager.translate_item_ids | def translate_item_ids(self, item_ids, language, is_nested=None):
"""
Translate a list of item ids to JSON objects which reference them.
Args:
item_ids (list[int]): item ids
language (str): language used for further filtering (some objects
for different languages share the same item)
is_nested (function): mapping from item ids to booleans, where the
boolean value indicates whether the item is nested
Returns:
dict: item id -> JSON object
"""
if is_nested is None:
def is_nested_fun(x):
return True
elif isinstance(is_nested, bool):
def is_nested_fun(x):
return is_nested
else:
is_nested_fun = is_nested
all_item_type_ids = ItemType.objects.get_all_item_type_ids()
groupped = proso.list.group_by(item_ids, by=lambda item_id: all_item_type_ids[item_id])
result = {}
for item_type_id, items in groupped.items():
with timeit('translating item type {}'.format(item_type_id)):
item_type = ItemType.objects.get_all_types()[item_type_id]
model = ItemType.objects.get_model(item_type_id)
kwargs = {'{}__in'.format(item_type['foreign_key']): items}
if 'language' in item_type:
kwargs[item_type['language']] = language
if any([not is_nested_fun(item_id) for item_id in items]) and hasattr(model.objects, 'prepare_related'):
objs = model.objects.prepare_related()
elif hasattr(model.objects, 'prepare'):
objs = model.objects.prepare()
else:
objs = model.objects
for obj in objs.filter(**kwargs):
item_id = getattr(obj, item_type['foreign_key'])
result[item_id] = obj.to_json(nested=is_nested_fun(item_id))
return result | python | def translate_item_ids(self, item_ids, language, is_nested=None):
"""
Translate a list of item ids to JSON objects which reference them.
Args:
item_ids (list[int]): item ids
language (str): language used for further filtering (some objects
for different languages share the same item)
is_nested (function): mapping from item ids to booleans, where the
boolean value indicates whether the item is nested
Returns:
dict: item id -> JSON object
"""
if is_nested is None:
def is_nested_fun(x):
return True
elif isinstance(is_nested, bool):
def is_nested_fun(x):
return is_nested
else:
is_nested_fun = is_nested
all_item_type_ids = ItemType.objects.get_all_item_type_ids()
groupped = proso.list.group_by(item_ids, by=lambda item_id: all_item_type_ids[item_id])
result = {}
for item_type_id, items in groupped.items():
with timeit('translating item type {}'.format(item_type_id)):
item_type = ItemType.objects.get_all_types()[item_type_id]
model = ItemType.objects.get_model(item_type_id)
kwargs = {'{}__in'.format(item_type['foreign_key']): items}
if 'language' in item_type:
kwargs[item_type['language']] = language
if any([not is_nested_fun(item_id) for item_id in items]) and hasattr(model.objects, 'prepare_related'):
objs = model.objects.prepare_related()
elif hasattr(model.objects, 'prepare'):
objs = model.objects.prepare()
else:
objs = model.objects
for obj in objs.filter(**kwargs):
item_id = getattr(obj, item_type['foreign_key'])
result[item_id] = obj.to_json(nested=is_nested_fun(item_id))
return result | [
"def",
"translate_item_ids",
"(",
"self",
",",
"item_ids",
",",
"language",
",",
"is_nested",
"=",
"None",
")",
":",
"if",
"is_nested",
"is",
"None",
":",
"def",
"is_nested_fun",
"(",
"x",
")",
":",
"return",
"True",
"elif",
"isinstance",
"(",
"is_nested",
",",
"bool",
")",
":",
"def",
"is_nested_fun",
"(",
"x",
")",
":",
"return",
"is_nested",
"else",
":",
"is_nested_fun",
"=",
"is_nested",
"all_item_type_ids",
"=",
"ItemType",
".",
"objects",
".",
"get_all_item_type_ids",
"(",
")",
"groupped",
"=",
"proso",
".",
"list",
".",
"group_by",
"(",
"item_ids",
",",
"by",
"=",
"lambda",
"item_id",
":",
"all_item_type_ids",
"[",
"item_id",
"]",
")",
"result",
"=",
"{",
"}",
"for",
"item_type_id",
",",
"items",
"in",
"groupped",
".",
"items",
"(",
")",
":",
"with",
"timeit",
"(",
"'translating item type {}'",
".",
"format",
"(",
"item_type_id",
")",
")",
":",
"item_type",
"=",
"ItemType",
".",
"objects",
".",
"get_all_types",
"(",
")",
"[",
"item_type_id",
"]",
"model",
"=",
"ItemType",
".",
"objects",
".",
"get_model",
"(",
"item_type_id",
")",
"kwargs",
"=",
"{",
"'{}__in'",
".",
"format",
"(",
"item_type",
"[",
"'foreign_key'",
"]",
")",
":",
"items",
"}",
"if",
"'language'",
"in",
"item_type",
":",
"kwargs",
"[",
"item_type",
"[",
"'language'",
"]",
"]",
"=",
"language",
"if",
"any",
"(",
"[",
"not",
"is_nested_fun",
"(",
"item_id",
")",
"for",
"item_id",
"in",
"items",
"]",
")",
"and",
"hasattr",
"(",
"model",
".",
"objects",
",",
"'prepare_related'",
")",
":",
"objs",
"=",
"model",
".",
"objects",
".",
"prepare_related",
"(",
")",
"elif",
"hasattr",
"(",
"model",
".",
"objects",
",",
"'prepare'",
")",
":",
"objs",
"=",
"model",
".",
"objects",
".",
"prepare",
"(",
")",
"else",
":",
"objs",
"=",
"model",
".",
"objects",
"for",
"obj",
"in",
"objs",
".",
"filter",
"(",
"*",
"*",
"kwargs",
")",
":",
"item_id",
"=",
"getattr",
"(",
"obj",
",",
"item_type",
"[",
"'foreign_key'",
"]",
")",
"result",
"[",
"item_id",
"]",
"=",
"obj",
".",
"to_json",
"(",
"nested",
"=",
"is_nested_fun",
"(",
"item_id",
")",
")",
"return",
"result"
]
| Translate a list of item ids to JSON objects which reference them.
Args:
item_ids (list[int]): item ids
language (str): language used for further filtering (some objects
for different languages share the same item)
is_nested (function): mapping from item ids to booleans, where the
boolean value indicates whether the item is nested
Returns:
dict: item id -> JSON object | [
"Translate",
"a",
"list",
"of",
"item",
"ids",
"to",
"JSON",
"objects",
"which",
"reference",
"them",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L606-L647 | train |
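A call sketch; ids and language are illustrative, and `is_nested` decides per item whether the abbreviated (nested) JSON form is produced:

    Item.objects.translate_item_ids([7, 8], 'en', is_nested=lambda item_id: item_id != 7)
    # expected: {7: <full JSON for item 7>, 8: <nested JSON for item 8>}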
adaptive-learning/proso-apps | proso_models/models.py | ItemManager.get_leaves | def get_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get mapping of items to their reachable leaves. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> set of items (reachable leaves)
"""
forbidden_item_ids = set() if forbidden_item_ids is None else set(forbidden_item_ids)
children = self.get_children_graph(item_ids, language=language, forbidden_item_ids=forbidden_item_ids)
counts = self.get_children_counts(active=None)
if item_ids is None:
# not leaves
item_ids = set(children.keys())
def _get_leaves(item_id):
leaves = set()
def __search(item_ids):
result = set(flatten([children.get(item_id, []) for item_id in item_ids]))
new_leaves = {item_id for item_id in result if item_id not in children.keys()}
leaves.update(new_leaves)
return result - new_leaves
fixed_point(
is_zero=lambda to_visit: len(to_visit) == 0,
minus=lambda to_visit, visited: to_visit - visited,
plus=lambda visited_x, visited_y: visited_x | visited_y,
f=__search,
x={item_id}
)
leaves = {leaf for leaf in leaves if counts[leaf] == 0}
if len(leaves) > 0:
return leaves
if counts[item_id] == 0 and item_id not in forbidden_item_ids:
return {item_id}
return set()
return {item_id: _get_leaves(item_id) for item_id in item_ids} | python | def get_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get mapping of items to their reachable leaves. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> set of items (reachable leaves)
"""
forbidden_item_ids = set() if forbidden_item_ids is None else set(forbidden_item_ids)
children = self.get_children_graph(item_ids, language=language, forbidden_item_ids=forbidden_item_ids)
counts = self.get_children_counts(active=None)
if item_ids is None:
# default to all items present in the children graph
item_ids = set(children.keys())
def _get_leaves(item_id):
leaves = set()
def __search(item_ids):
result = set(flatten([children.get(item_id, []) for item_id in item_ids]))
new_leaves = {item_id for item_id in result if item_id not in children.keys()}
leaves.update(new_leaves)
return result - new_leaves
fixed_point(
is_zero=lambda to_visit: len(to_visit) == 0,
minus=lambda to_visit, visited: to_visit - visited,
plus=lambda visited_x, visited_y: visited_x | visited_y,
f=__search,
x={item_id}
)
leaves = {leaf for leaf in leaves if counts[leaf] == 0}
if len(leaves) > 0:
return leaves
if counts[item_id] == 0 and item_id not in forbidden_item_ids:
return {item_id}
return set()
return {item_id: _get_leaves(item_id) for item_id in item_ids} | [
"def",
"get_leaves",
"(",
"self",
",",
"item_ids",
"=",
"None",
",",
"language",
"=",
"None",
",",
"forbidden_item_ids",
"=",
"None",
")",
":",
"forbidden_item_ids",
"=",
"set",
"(",
")",
"if",
"forbidden_item_ids",
"is",
"None",
"else",
"set",
"(",
"forbidden_item_ids",
")",
"children",
"=",
"self",
".",
"get_children_graph",
"(",
"item_ids",
",",
"language",
"=",
"language",
",",
"forbidden_item_ids",
"=",
"forbidden_item_ids",
")",
"counts",
"=",
"self",
".",
"get_children_counts",
"(",
"active",
"=",
"None",
")",
"if",
"item_ids",
"is",
"None",
":",
"# not leaves",
"item_ids",
"=",
"set",
"(",
"children",
".",
"keys",
"(",
")",
")",
"def",
"_get_leaves",
"(",
"item_id",
")",
":",
"leaves",
"=",
"set",
"(",
")",
"def",
"__search",
"(",
"item_ids",
")",
":",
"result",
"=",
"set",
"(",
"flatten",
"(",
"[",
"children",
".",
"get",
"(",
"item_id",
",",
"[",
"]",
")",
"for",
"item_id",
"in",
"item_ids",
"]",
")",
")",
"new_leaves",
"=",
"{",
"item_id",
"for",
"item_id",
"in",
"result",
"if",
"item_id",
"not",
"in",
"children",
".",
"keys",
"(",
")",
"}",
"leaves",
".",
"update",
"(",
"new_leaves",
")",
"return",
"result",
"-",
"new_leaves",
"fixed_point",
"(",
"is_zero",
"=",
"lambda",
"to_visit",
":",
"len",
"(",
"to_visit",
")",
"==",
"0",
",",
"minus",
"=",
"lambda",
"to_visit",
",",
"visited",
":",
"to_visit",
"-",
"visited",
",",
"plus",
"=",
"lambda",
"visited_x",
",",
"visited_y",
":",
"visited_x",
"|",
"visited_y",
",",
"f",
"=",
"__search",
",",
"x",
"=",
"{",
"item_id",
"}",
")",
"leaves",
"=",
"{",
"leaf",
"for",
"leaf",
"in",
"leaves",
"if",
"counts",
"[",
"leaf",
"]",
"==",
"0",
"}",
"if",
"len",
"(",
"leaves",
")",
">",
"0",
":",
"return",
"leaves",
"if",
"counts",
"[",
"item_id",
"]",
"==",
"0",
"and",
"item_id",
"not",
"in",
"forbidden_item_ids",
":",
"return",
"{",
"item_id",
"}",
"return",
"set",
"(",
")",
"return",
"{",
"item_id",
":",
"_get_leaves",
"(",
"item_id",
")",
"for",
"item_id",
"in",
"item_ids",
"}"
]
| Get mapping of items to their reachable leaves. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> set of items (reachable leaves) | [
"Get",
"mapping",
"of",
"items",
"to",
"their",
"reachable",
"leaves",
".",
"Leaves",
"having",
"inactive",
"relations",
"to",
"other",
"items",
"are",
"omitted",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L651-L694 | train |
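A leaf-expansion sketch for the same tiny hierarchy as above (1 -> 2, 3 and 3 -> 4; ids illustrative):

    Item.objects.get_leaves(item_ids=[1])
    # expected: {1: {2, 4}}   (the reachable items that have no children of their own)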
adaptive-learning/proso-apps | proso_models/models.py | ItemManager.get_all_leaves | def get_all_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get all leaves reachable from the given set of items. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
list: sorted leaf items which are reachable from the given set of items
"""
return sorted(set(flatten(self.get_leaves(item_ids, language=language, forbidden_item_ids=forbidden_item_ids).values()))) | python | def get_all_leaves(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get all leaves reachable from the given set of items. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
list: sorted leaf items which are reachable from the given set of items
"""
return sorted(set(flatten(self.get_leaves(item_ids, language=language, forbidden_item_ids=forbidden_item_ids).values()))) | [
"def",
"get_all_leaves",
"(",
"self",
",",
"item_ids",
"=",
"None",
",",
"language",
"=",
"None",
",",
"forbidden_item_ids",
"=",
"None",
")",
":",
"return",
"sorted",
"(",
"set",
"(",
"flatten",
"(",
"self",
".",
"get_leaves",
"(",
"item_ids",
",",
"language",
"=",
"language",
",",
"forbidden_item_ids",
"=",
"forbidden_item_ids",
")",
".",
"values",
"(",
")",
")",
")",
")"
]
| Get all leaves reachable from the given set of items. Leaves having
inactive relations to other items are omitted.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
list: sorted leaf items which are reachable from the given set of items | [
"Get",
"all",
"leaves",
"reachable",
"from",
"the",
"given",
"set",
"of",
"items",
".",
"Leaves",
"having",
"inactive",
"relations",
"to",
"other",
"items",
"are",
"omitted",
"."
]
| 8278c72e498d6ef8d392cc47b48473f4ec037142 | https://github.com/adaptive-learning/proso-apps/blob/8278c72e498d6ef8d392cc47b48473f4ec037142/proso_models/models.py#L697-L710 | train |