def set_name_lists(ethnicity=None):
"""Set three globally available lists of names."""
if not ethnicity: ethnicity = random.choice(get_ethnicities())
print("Loading names from: " + ethnicity)
filename = names_dir + ethnicity + '.json'
try:
with open(filename, 'r') as injson:
data = json.load(injson)
    except OSError:
return 'Unable to read from file: ' + filename
else:
names = [ tuple(name.split(',')) for name in data ]
random.shuffle(names)
global female_names
female_names = [name for name,gender,*desc in names if gender == 'girl']
global male_names
male_names = [name for name,gender,*desc in names if gender == 'boy']
global nb_names
nb_names = [name for name,gender,*desc in names if gender == 'boygirl'] |
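# Example: a standalone sketch of the parsing step above, with made-up rows
# shaped like the JSON this function expects ("name,gender[,extra...]" strings).
data = ["Mary,girl", "John,boy", "Alex,boygirl,androgynous"]
names = [tuple(name.split(',')) for name in data]
girls = [name for name, gender, *desc in names if gender == 'girl']
print(girls)  # -> ['Mary']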
def set_chromosomes(self, chromosomes=None):
"""This model uses the XY sex-determination system. Sex != gender.
Assign either XX or XY randomly with a 50/50 chance of each, unless
<chromosomes> are passed as an argument.
"""
if chromosomes and chromosomes in valid_chromosomes:
self.chromosomes = chromosomes
else:
self.chromosomes = random.choice([XX, XY]) |
def set_gender(self, gender=None):
"""This model recognizes that sex chromosomes don't always line up with
gender. Assign M, F, or NB according to the probabilities in p_gender.
"""
if gender and gender in genders:
self.gender = gender
else:
if not self.chromosomes: self.set_chromosomes()
self.gender = npchoice(genders, 1, p=p_gender[self.chromosomes])[0] |
def set_inherited_traits(self, egg_donor, sperm_donor):
"""Accept either strings or Gods as inputs."""
    if isinstance(egg_donor, str):
self.reproduce_asexually(egg_donor, sperm_donor)
else:
self.reproduce_sexually(egg_donor, sperm_donor) |
def reproduce_asexually(self, egg_word, sperm_word):
"""Produce two gametes, an egg and a sperm, from the input strings.
Combine them to produce a genome a la sexual reproduction.
"""
egg = self.generate_gamete(egg_word)
sperm = self.generate_gamete(sperm_word)
self.genome = list(set(egg + sperm)) # Eliminate duplicates
self.generation = 1
self.divinity = god |
def reproduce_sexually(self, egg_donor, sperm_donor):
"""Produce two gametes, an egg and a sperm, from input Gods. Combine
them to produce a genome a la sexual reproduction. Assign divinity
according to probabilities in p_divinity. The more divine the parents,
the more divine their offspring.
"""
egg_word = random.choice(egg_donor.genome)
egg = self.generate_gamete(egg_word)
sperm_word = random.choice(sperm_donor.genome)
sperm = self.generate_gamete(sperm_word)
self.genome = list(set(egg + sperm)) # Eliminate duplicates
self.parents = [egg_donor.name, sperm_donor.name]
self.generation = max(egg_donor.generation, sperm_donor.generation) + 1
sum_ = egg_donor.divinity + sperm_donor.divinity
self.divinity = int(npchoice(divinities, 1, p=p_divinity[sum_])[0]) |
def set_name(self):
"""Pick a random name from the lists loaded with the model. For Gods that
identify as neither M nor F, the model attempts to retrieve an androgynous
name. Note: not all of the scraped name lists contain androgynous names.
"""
if not self.gender: self.set_gender()
name = ''
if self.gender == female:
name = names.female_names.pop()
elif self.gender == male:
name = names.male_names.pop()
else:
try:
name = names.nb_names.pop()
        except IndexError:
# No androgynous names available
name = names.male_names.pop()
self.name = name |
def set_epithet(self):
"""Divine an appropriate epithet for this God. (See what I did there?)"""
if self.divinity == human:
obsession = random.choice(self.genome)
if self.gender == female:
self.epithet = 'ordinary woman'
elif self.gender == male:
self.epithet = 'ordinary man'
else:
self.epithet = 'ordinary human being'
self.epithet += ' who loves ' + obsession
return # Return early. The rest of the function deals with gods.
if self.gender == female:
title = 'Goddess'
elif self.gender == male:
title = 'God'
else:
title = 'Divine Being'
if self.divinity == demi_god:
title = 'Semi-' + title if self.gender == non_binary else 'Demi-' + title
num_domains = npchoice([1,2,3,4], 1, p=[0.05, 0.35, 0.55, 0.05])[0]
if num_domains == 1:
template = '%s of %s'
    elif num_domains == 2:
template = '%s of %s and %s'
elif num_domains == 3:
template = '%s of %s, %s, and %s' # Oxford comma, the most divine punctuation.
elif num_domains == 4:
template = '%s of %s, %s, %s, and %s'
self.domains = [d.title() for d in random.sample(self.genome, num_domains)]
# Put it all together
self.epithet = template % (title, *self.domains) |
def generate_gamete(self, egg_or_sperm_word):
"""Extract 23 'chromosomes' aka words from 'gene pool' aka list of tokens
by searching the list of tokens for words that are related to the given
egg_or_sperm_word.
"""
p_rate_of_mutation = [0.9, 0.1]
should_use_mutant_pool = (npchoice([0,1], 1, p=p_rate_of_mutation)[0] == 1)
if should_use_mutant_pool:
pool = tokens.secondary_tokens
else:
pool = tokens.primary_tokens
return get_matches(egg_or_sperm_word, pool, 23) |
def print_parents(self):
"""Print parents' names and epithets."""
if self.gender == female:
title = 'Daughter'
elif self.gender == male:
title = 'Son'
else:
title = 'Child'
p1 = self.parents[0]
p2 = self.parents[1]
template = '%s of %s, the %s, and %s, the %s.'
print(template % (title, p1.name, p1.epithet, p2.name, p2.epithet)) |
def instance(self, counter=None, pipeline_counter=None):
"""Returns all the information regarding a specific stage run
See the `Go stage instance documentation`__ for examples.
.. __: http://api.go.cd/current/#get-stage-instance
Args:
counter (int): The stage instance to fetch.
If falsey returns the latest stage instance from :meth:`history`.
pipeline_counter (int): The pipeline instance for which to fetch
the stage. If falsey returns the latest pipeline instance.
Returns:
Response: :class:`gocd.api.response.Response` object
"""
pipeline_counter = pipeline_counter or self.pipeline_counter
pipeline_instance = None
if not pipeline_counter:
pipeline_instance = self.server.pipeline(self.pipeline_name).instance()
self.pipeline_counter = int(pipeline_instance['counter'])
if not counter:
if pipeline_instance is None:
pipeline_instance = (
self.server
.pipeline(self.pipeline_name)
.instance(pipeline_counter)
)
for stages in pipeline_instance['stages']:
if stages['name'] == self.stage_name:
return self.instance(
counter=int(stages['counter']),
pipeline_counter=pipeline_counter
)
return self._get('/instance/{pipeline_counter:d}/{counter:d}'
.format(pipeline_counter=pipeline_counter, counter=counter)) |
def is_json(self):
"""
Returns:
bool: True if `content_type` is `application/json`
"""
return (self.content_type.startswith('application/json') or
re.match(r'application/vnd.go.cd.v(\d+)\+json', self.content_type)) |
def payload(self):
"""
Returns:
`str` when not json.
`dict` when json.
"""
if self.is_json:
if not self._body_parsed:
if hasattr(self._body, 'decode'):
body = self._body.decode('utf-8')
else:
body = self._body
self._body_parsed = json.loads(body)
return self._body_parsed
else:
return self._body |
def request(self, path, data=None, headers=None, method=None):
"""Performs a HTTP request to the Go server
Args:
path (str): The full path on the Go server to request.
This includes any query string attributes.
data (str, dict, bool, optional): If any data is present this
request will become a POST request.
headers (dict, optional): Headers to set for this particular
request
Raises:
HTTPError: when the HTTP request fails.
Returns:
file like object: The response from a
:func:`urllib2.urlopen` call
"""
if isinstance(data, str):
data = data.encode('utf-8')
response = urlopen(self._request(path, data=data, headers=headers, method=method))
self._set_session_cookie(response)
return response |
def add_logged_in_session(self, response=None):
"""Make the request appear to be coming from a browser
        This is to interact with older parts of Go that don't have a
        proper API call to be made. What will be done:
        1. If no response is passed in, a call to `go/api/pipelines.xml` is
           made to get a valid session
        2. `JSESSIONID` will be populated from this request
        3. A request to `go/pipelines` will be made so that the
           `authenticity_token` (CSRF) can be extracted. It will then
           silently be injected into `post_args` on any POST calls that
           don't start with `go/api` from this point.
Args:
response: a :class:`Response` object from a previously successful
API call. So we won't have to query `go/api/pipelines.xml`
unnecessarily.
Raises:
HTTPError: when the HTTP request fails.
AuthenticationFailed: when failing to get the `session_id`
or the `authenticity_token`.
"""
if not response:
response = self.get('go/api/pipelines.xml')
self._set_session_cookie(response)
if not self._session_id:
raise AuthenticationFailed('No session id extracted from request.')
response = self.get('go/pipelines')
match = re.search(
r'name="authenticity_token".+?value="([^"]+)',
response.read().decode('utf-8')
)
if match:
self._authenticity_token = match.group(1)
else:
raise AuthenticationFailed('Authenticity token not found on page') |
def stage(self, pipeline_name, stage_name, pipeline_counter=None):
"""Returns an instance of :class:`Stage`
Args:
pipeline_name (str): Name of the pipeline the stage belongs to
stage_name (str): Name of the stage to act on
pipeline_counter (int): The pipeline instance the stage is for.
Returns:
Stage: an instantiated :class:`Stage`.
"""
return Stage(self, pipeline_name, stage_name, pipeline_counter=pipeline_counter) |
def flatten(d):
"""Return a dict as a list of lists.
>>> flatten({"a": "b"})
[['a', 'b']]
>>> flatten({"a": [1, 2, 3]})
[['a', [1, 2, 3]]]
>>> flatten({"a": {"b": "c"}})
[['a', 'b', 'c']]
>>> flatten({"a": {"b": {"c": "e"}}})
[['a', 'b', 'c', 'e']]
>>> flatten({"a": {"b": "c", "d": "e"}})
[['a', 'b', 'c'], ['a', 'd', 'e']]
>>> flatten({"a": {"b": "c", "d": "e"}, "b": {"c": "d"}})
[['a', 'b', 'c'], ['a', 'd', 'e'], ['b', 'c', 'd']]
"""
if not isinstance(d, dict):
return [[d]]
returned = []
for key, value in d.items():
# Each key, value is treated as a row.
nested = flatten(value)
for nest in nested:
current_row = [key]
current_row.extend(nest)
returned.append(current_row)
return returned |
def instance(self, counter=None):
"""Returns all the information regarding a specific pipeline run
See the `Go pipeline instance documentation`__ for examples.
.. __: http://api.go.cd/current/#get-pipeline-instance
Args:
counter (int): The pipeline instance to fetch.
If falsey returns the latest pipeline instance from :meth:`history`.
Returns:
Response: :class:`gocd.api.response.Response` object
"""
if not counter:
history = self.history()
if not history:
return history
else:
return Response._from_json(history['pipelines'][0])
return self._get('/instance/{counter:d}'.format(counter=counter)) |
def schedule(self, variables=None, secure_variables=None, materials=None,
return_new_instance=False, backoff_time=1.0):
"""Schedule a pipeline run
Aliased as :meth:`run`, :meth:`schedule`, and :meth:`trigger`.
Args:
variables (dict, optional): Variables to set/override
secure_variables (dict, optional): Secure variables to set/override
materials (dict, optional): Material revisions to be used for
                this pipeline run. The exact format for this is a bit iffy;
                have a look at the official
`Go pipeline scheduling documentation`__ or inspect a call
from triggering manually in the UI.
return_new_instance (bool): Returns a :meth:`history` compatible
                response for the newly scheduled instance. This is primarily so
                users can easily get the new instance number. **Note:** This is done
in a very naive way, it just checks that the instance number is
higher than before the pipeline was triggered.
backoff_time (float): How long between each check for
:arg:`return_new_instance`.
.. __: http://api.go.cd/current/#scheduling-pipelines
Returns:
Response: :class:`gocd.api.response.Response` object
"""
scheduling_args = dict(
variables=variables,
secure_variables=secure_variables,
material_fingerprint=materials,
headers={"Confirm": True},
)
scheduling_args = dict((k, v) for k, v in scheduling_args.items() if v is not None)
# TODO: Replace this with whatever is the official way as soon as gocd#990 is fixed.
# https://github.com/gocd/gocd/issues/990
if return_new_instance:
pipelines = self.history()['pipelines']
if len(pipelines) == 0:
last_run = None
else:
last_run = pipelines[0]['counter']
response = self._post('/schedule', ok_status=202, **scheduling_args)
if not response:
return response
max_tries = 10
while max_tries > 0:
current = self.instance()
if not last_run and current:
return current
elif last_run and current['counter'] > last_run:
return current
else:
time.sleep(backoff_time)
max_tries -= 1
# I can't come up with a scenario in testing where this would happen, but it seems
# better than returning None.
return response
else:
return self._post('/schedule', ok_status=202, **scheduling_args) |
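# Hedged usage sketch: `go` is assumed to be a gocd.Server, and TARGET_ENV is
# a made-up pipeline variable. schedule() is aliased as run() and trigger().
pipeline = go.pipeline('deploy-app')
response = pipeline.schedule(variables={'TARGET_ENV': 'staging'},
                             return_new_instance=True)
if response:
    print('Scheduled run #{0}'.format(response['counter']))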
def artifact(self, counter, stage, job, stage_counter=1):
"""Helper to instantiate an :class:`gocd.api.artifact.Artifact` object
Args:
counter (int): The pipeline counter to get the artifact for
stage: Stage name
job: Job name
stage_counter: Defaults to 1
Returns:
Artifact: :class:`gocd.api.artifact.Artifact` object
"""
return Artifact(self.server, self.name, counter, stage, job, stage_counter) |
def console_output(self, instance=None):
"""Yields the output and metadata from all jobs in the pipeline
Args:
instance: The result of a :meth:`instance` call, if not supplied
the latest of the pipeline will be used.
Yields:
tuple: (metadata (dict), output (str)).
metadata contains:
- pipeline
- pipeline_counter
- stage
- stage_counter
- job
- job_result
"""
if instance is None:
instance = self.instance()
for stage in instance['stages']:
for job in stage['jobs']:
if job['result'] not in self.final_results:
continue
artifact = self.artifact(
instance['counter'],
stage['name'],
job['name'],
stage['counter']
)
output = artifact.get('cruise-output/console.log')
yield (
{
'pipeline': self.name,
'pipeline_counter': instance['counter'],
'stage': stage['name'],
'stage_counter': stage['counter'],
'job': job['name'],
'job_result': job['result'],
},
output.body
) |
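# Hedged usage sketch: stream the console log of every finished job in the
# latest run (`go` is assumed to be a gocd.Server and 'my-pipeline' to exist).
pipeline = go.pipeline('my-pipeline')
for metadata, output in pipeline.console_output():
    print('{pipeline}/{pipeline_counter}/{stage}/{job}: {job_result}'.format(**metadata))
    print(output)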
def stage(self, name, pipeline_counter=None):
"""Helper to instantiate a :class:`gocd.api.stage.Stage` object
Args:
name: The name of the stage
            pipeline_counter: The pipeline instance the stage is for
        Returns:
            Stage: an instantiated :class:`gocd.api.stage.Stage`
        """
return Stage(
self.server,
pipeline_name=self.name,
stage_name=name,
pipeline_counter=pipeline_counter,
) |
def edit(self, config, etag):
"""Update template config for specified template name.
.. __: https://api.go.cd/current/#edit-template-config
Returns:
Response: :class:`gocd.api.response.Response` object
"""
data = self._json_encode(config)
headers = self._default_headers()
if etag is not None:
headers["If-Match"] = etag
return self._request(self.name,
ok_status=None,
data=data,
headers=headers,
method="PUT") |
def create(self, config):
"""Create template config for specified template name.
.. __: https://api.go.cd/current/#create-template-config
Returns:
Response: :class:`gocd.api.response.Response` object
"""
assert config["name"] == self.name, "Given config is not for this template"
data = self._json_encode(config)
headers = self._default_headers()
return self._request("",
ok_status=None,
data=data,
headers=headers) |
def delete(self):
"""Delete template config for specified template name.
.. __: https://api.go.cd/current/#delete-a-template
Returns:
Response: :class:`gocd.api.response.Response` object
"""
headers = self._default_headers()
return self._request(self.name,
ok_status=None,
data=None,
headers=headers,
method="DELETE") |
def pipelines(self):
"""Returns a set of all pipelines from the last response
Returns:
            set: on success, all the pipelines available in the response;
                 on failure, an empty set
"""
if not self.response:
return set()
elif self._pipelines is None and self.response:
self._pipelines = set()
for group in self.response.payload:
for pipeline in group['pipelines']:
self._pipelines.add(pipeline['name'])
return self._pipelines |
def get_directory(self, path_to_directory, timeout=30, backoff=0.4, max_wait=4):
"""Gets an artifact directory by its path.
See the `Go artifact directory documentation`__ for example responses.
.. __: http://api.go.cd/current/#get-artifact-directory
.. note::
Getting a directory relies on Go creating a zip file of the
directory in question. Because of this Go will zip the file in
the background and return a 202 Accepted response. It's then up
to the client to check again later and get the final file.
To work with normal assumptions this :meth:`get_directory` will
retry itself up to ``timeout`` seconds to get a 200 response to
return. At that point it will then return the response as is, no
matter whether it's still 202 or 200. The retry is done with an
exponential backoff with a max value between retries. See the
``backoff`` and ``max_wait`` variables.
If you want to handle the retry logic yourself then use :meth:`get`
and add '.zip' as a suffix on the directory.
Args:
path_to_directory (str): The path to the directory to get.
It can be nested eg ``target/dist.zip``
timeout (int): How many seconds we will wait in total for a
successful response from Go when we're receiving 202
backoff (float): The initial value used for backoff, raises
exponentially until it reaches ``max_wait``
max_wait (int): The max time between retries
Returns:
Response: :class:`gocd.api.response.Response` object
A successful response is a zip-file.
"""
response = None
started_at = None
time_elapsed = 0
i = 0
while time_elapsed < timeout:
response = self._get('{0}.zip'.format(path_to_directory))
if response:
break
else:
if started_at is None:
started_at = time.time()
time.sleep(min(backoff * (2 ** i), max_wait))
i += 1
time_elapsed = time.time() - started_at
return response |
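# A minimal standalone sketch of the retry policy above: capped exponential
# backoff, yielding 0.4, 0.8, 1.6, 3.2, 4, 4, ... with the default arguments.
import itertools

def capped_backoff(backoff=0.4, max_wait=4):
    for i in itertools.count():
        yield min(backoff * (2 ** i), max_wait)

delays = capped_backoff()
print([next(delays) for _ in range(6)])  # [0.4, 0.8, 1.6, 3.2, 4, 4]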
def ask(self):
"""
Return the wait time in seconds required to retrieve the
item currently at the head of the queue.
Note that there is no guarantee that a call to `get()` will
succeed even if `ask()` returns 0. By the time the calling
thread reacts, other threads may have caused a different
item to be at the head of the queue.
"""
with self.mutex:
if not len(self.queue):
raise Empty
utcnow = dt.datetime.utcnow()
if self.queue[0][0] <= utcnow:
self.ready.notify()
return 0
return (self.queue[0][0] - utcnow).total_seconds() |
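# Hedged consumer sketch built around the race the docstring warns about:
# sleep for ask()'s answer, then attempt a non-blocking get() and retry if
# another consumer won the race. get(block=False) and the Empty class are
# assumptions mirroring the stdlib queue module.
import time
from queue import Empty

def consume_next(q):
    while True:
        wait = q.ask()  # raises Empty once the queue is drained
        time.sleep(wait)
        try:
            return q.get(block=False)
        except Empty:
            continue  # a different item is now at the head; ask again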
def p_transition(p):
"""
transition : START_KWD KEY NULL_KWD FLOAT
transition : KEY KEY NULL_KWD FLOAT
transition : KEY END_KWD NULL_KWD FLOAT
transition : START_KWD KEY KEY FLOAT
transition : KEY KEY KEY FLOAT
transition : KEY END_KWD KEY FLOAT
transition : START_KWD KEY NULL_KWD INTEGER
transition : KEY KEY NULL_KWD INTEGER
transition : KEY END_KWD NULL_KWD INTEGER
transition : START_KWD KEY KEY INTEGER
transition : KEY KEY KEY INTEGER
transition : KEY END_KWD KEY INTEGER
transition : START_KWD KEY NULL_KWD KEY
transition : KEY KEY NULL_KWD KEY
transition : KEY END_KWD NULL_KWD KEY
transition : START_KWD KEY KEY KEY
transition : KEY KEY KEY KEY
transition : KEY END_KWD KEY KEY
"""
p[3] = None if p[3] == 'NULL' else p[3]
if p[4] == 'error':
p[0] = MarionetteTransition(p[1], p[2], p[3], 0, True)
else:
p[0] = MarionetteTransition(p[1], p[2], p[3], p[4], False) |
def p_action_blocks(p):
"""
action_blocks : action_blocks action_block
"""
if isinstance(p[1], list):
if isinstance(p[1][0], list):
p[0] = p[1][0] + [p[2]]
else:
p[0] = p[1] + p[2]
else:
p[0] = [p[1], p[2]] |
def p_action_block(p):
"""
action_block : ACTION_KWD KEY COLON actions
"""
p[0] = []
for i in range(len(p[4])):
p[0] += [marionette_tg.action.MarionetteAction(p[2], p[4][i][0],
p[4][i][1],
p[4][i][2],
p[4][i][3],
p[4][i][4])] |
def p_action(p):
"""
action : CLIENT_KWD KEY DOT KEY LPAREN args RPAREN
action : SERVER_KWD KEY DOT KEY LPAREN args RPAREN
action : CLIENT_KWD KEY DOT KEY LPAREN args RPAREN IF_KWD REGEX_MATCH_INCOMING_KWD LPAREN p_string_arg RPAREN
action : SERVER_KWD KEY DOT KEY LPAREN args RPAREN IF_KWD REGEX_MATCH_INCOMING_KWD LPAREN p_string_arg RPAREN
"""
if len(p)==8:
p[0] = [p[1], p[2], p[4], p[6], None]
elif len(p)==13:
p[0] = [p[1], p[2], p[4], p[6], p[11]] |
def config_loader(app, **kwargs_config):
"""Configuration loader.
Adds support for loading templates from the Flask application's instance
folder (``<instance_folder>/templates``).
"""
    # This is the only place to customize the Flask application right after
# it has been created, but before all extensions etc are loaded.
local_templates_path = os.path.join(app.instance_path, 'templates')
if os.path.exists(local_templates_path):
# Let's customize the template loader to look into packages
# and application templates folders.
app.jinja_loader = ChoiceLoader([
FileSystemLoader(local_templates_path),
app.jinja_loader,
])
app.jinja_options = dict(
app.jinja_options,
cache_size=1000,
bytecode_cache=BytecodeCache(app)
)
invenio_config_loader(app, **kwargs_config) |
def app_class():
"""Create Flask application class.
Invenio-Files-REST needs to patch the Werkzeug form parsing in order to
support streaming large file uploads. This is done by subclassing the Flask
application class.
"""
try:
pkg_resources.get_distribution('invenio-files-rest')
from invenio_files_rest.app import Flask as FlaskBase
except pkg_resources.DistributionNotFound:
from flask import Flask as FlaskBase
# Add Host header validation via APP_ALLOWED_HOSTS configuration variable.
class Request(TrustedHostsMixin, FlaskBase.request_class):
pass
class Flask(FlaskBase):
request_class = Request
return Flask |
def init_app(self, app, **kwargs):
"""Initialize application object.
:param app: An instance of :class:`~flask.Flask`.
"""
# Init the configuration
self.init_config(app)
# Enable Rate limiter
self.limiter = Limiter(app, key_func=get_ipaddr)
# Enable secure HTTP headers
if app.config['APP_ENABLE_SECURE_HEADERS']:
self.talisman = Talisman(
app, **app.config.get('APP_DEFAULT_SECURE_HEADERS', {})
)
# Enable PING view
if app.config['APP_HEALTH_BLUEPRINT_ENABLED']:
blueprint = Blueprint('invenio_app_ping', __name__)
@blueprint.route('/ping')
def ping():
"""Load balancer ping view."""
return 'OK'
ping.talisman_view_options = {'force_https': False}
app.register_blueprint(blueprint)
requestid_header = app.config.get('APP_REQUESTID_HEADER')
if requestid_header:
@app.before_request
def set_request_id():
"""Extracts a request id from an HTTP header."""
request_id = request.headers.get(requestid_header)
if request_id:
# Capped at 200 to protect against malicious clients
# sending very large headers.
g.request_id = request_id[:200]
# If installed register the Flask-DebugToolbar extension
try:
from flask_debugtoolbar import DebugToolbarExtension
app.extensions['flask-debugtoolbar'] = DebugToolbarExtension(app)
except ImportError:
app.logger.debug('Flask-DebugToolbar extension not installed.')
# Register self
app.extensions['invenio-app'] = self |
def init_config(self, app):
"""Initialize configuration.
:param app: An instance of :class:`~flask.Flask`.
"""
config_apps = ['APP_', 'RATELIMIT_']
flask_talisman_debug_mode = ["'unsafe-inline'"]
for k in dir(config):
if any([k.startswith(prefix) for prefix in config_apps]):
app.config.setdefault(k, getattr(config, k))
if app.config['DEBUG']:
app.config.setdefault('APP_DEFAULT_SECURE_HEADERS', {})
headers = app.config['APP_DEFAULT_SECURE_HEADERS']
# ensure `content_security_policy` is not set to {}
if headers.get('content_security_policy') != {}:
headers.setdefault('content_security_policy', {})
csp = headers['content_security_policy']
# ensure `default-src` is not set to []
if csp.get('default-src') != []:
csp.setdefault('default-src', [])
# add default `content_security_policy` value when debug
csp['default-src'] += flask_talisman_debug_mode |
def remove_leading(needle, haystack):
"""Remove leading needle string (if exists).
>>> remove_leading('Test', 'TestThisAndThat')
'ThisAndThat'
>>> remove_leading('Test', 'ArbitraryName')
'ArbitraryName'
"""
if haystack[:len(needle)] == needle:
return haystack[len(needle):]
return haystack |
def remove_trailing(needle, haystack):
"""Remove trailing needle string (if exists).
>>> remove_trailing('Test', 'ThisAndThatTest')
'ThisAndThat'
>>> remove_trailing('Test', 'ArbitraryName')
'ArbitraryName'
"""
if haystack[-len(needle):] == needle:
return haystack[:-len(needle)]
return haystack |
def camel2word(string):
"""Covert name from CamelCase to "Normal case".
>>> camel2word('CamelCase')
'Camel case'
>>> camel2word('CaseWithSpec')
'Case with spec'
"""
def wordize(match):
return ' ' + match.group(1).lower()
return string[0] + re.sub(r'([A-Z])', wordize, string[1:]) |
def complete_english(string):
"""
>>> complete_english('dont do this')
"don't do this"
>>> complete_english('doesnt is matched as well')
"doesn't is matched as well"
"""
for x, y in [("dont", "don't"),
("doesnt", "doesn't"),
("wont", "won't"),
("wasnt", "wasn't")]:
string = string.replace(x, y)
return string |
def format_seconds(self, n_seconds):
"""Format a time in seconds."""
func = self.ok
if n_seconds >= 60:
n_minutes, n_seconds = divmod(n_seconds, 60)
return "%s minutes %s seconds" % (
func("%d" % n_minutes),
func("%.3f" % n_seconds))
else:
return "%s seconds" % (
func("%.3f" % n_seconds)) |
def ppdict(dict_to_print, br='\n', html=False, key_align='l', sort_keys=True,
key_preffix='', key_suffix='', value_prefix='', value_suffix='', left_margin=3, indent=2):
"""Indent representation of a dict"""
if dict_to_print:
if sort_keys:
dic = dict_to_print.copy()
keys = list(dic.keys())
keys.sort()
dict_to_print = OrderedDict()
for k in keys:
dict_to_print[k] = dic[k]
tmp = ['{']
        ks = ["'%s'" % x if isinstance(x, str) else x for x in dict_to_print.keys()]
        vs = ["'%s'" % x if isinstance(x, str) else x for x in dict_to_print.values()]
        max_key_len = max(len(str(x)) for x in ks)
        for i in range(len(ks)):
            if key_align == 'r':
                k = str(ks[i]).rjust(max_key_len)
            else:
                k = str(ks[i]).ljust(max_key_len)
v = vs[i]
tmp.append(' ' * indent + '{}{}{}:{}{}{},'.format(key_preffix, k, key_suffix,
value_prefix, v, value_suffix))
tmp[-1] = tmp[-1][:-1] # remove the ',' in the last item
tmp.append('}')
if left_margin:
tmp = [' ' * left_margin + x for x in tmp]
if html:
return '<code>{}</code>'.format(br.join(tmp).replace(' ', ' '))
else:
return br.join(tmp)
else:
return '{}' |
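# Example with the defaults (sorted keys, 3-space left margin, 2-space indent):
print(ppdict({'b': 2, 'a': 1}))
#    {
#      'a':1,
#      'b':2
#    }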
def eq_(result, expected, msg=None):
"""
Shadow of the Nose builtin which presents easier to read multiline output.
"""
params = {'expected': expected, 'result': result}
aka = """
--------------------------------- aka -----------------------------------------
Expected:
%(expected)r
Got:
%(result)r
""" % params
default_msg = """
Expected:
%(expected)s
Got:
%(result)s
""" % params
if (
(repr(result) != six.text_type(result)) or
(repr(expected) != six.text_type(expected))
):
default_msg += aka
assertion_msg = msg or default_msg
# This assert will bubble up to Nose's failure handling, which at some
# point calls explicit str() - which will UnicodeDecodeError on any non
# ASCII text.
# To work around this, we make sure Unicode strings become bytestrings
# beforehand, with explicit encode.
if isinstance(assertion_msg, six.text_type):
assertion_msg = assertion_msg.encode('utf-8')
assert result == expected, assertion_msg |
def _assert_contains(haystack, needle, invert, escape=False):
"""
Test for existence of ``needle`` regex within ``haystack``.
Say ``escape`` to escape the ``needle`` if you aren't really using the
regex feature & have special characters in it.
"""
myneedle = re.escape(needle) if escape else needle
matched = re.search(myneedle, haystack, re.M)
if (invert and matched) or (not invert and not matched):
raise AssertionError("'%s' %sfound in '%s'" % (
needle,
"" if invert else "not ",
haystack
)) |
def parse_config_file():
"""
Find the .splunk_logger config file in the current directory, or in the
user's home and parse it. The one in the current directory has precedence.
:return: A tuple with:
- project_id
- access_token
"""
    for filename in ('.splunk_logger', os.path.expanduser('~/.splunk_logger')):
        project_id, access_token, api_domain = _parse_config_file_impl(filename)
        if project_id is not None\
        and access_token is not None\
        and api_domain is not None:
            return project_id, access_token, api_domain
    # Only give up after both candidate files have been tried.
    return None, None, None |
def _parse_config_file_impl(filename):
"""
Format for the file is:
credentials:
project_id: ...
access_token: ...
api_domain: ...
:param filename: The filename to parse
:return: A tuple with:
- project_id
- access_token
- api_domain
"""
    try:
        with open(filename) as config_file:
            doc = yaml.safe_load(config_file.read())
        project_id = doc["credentials"]["project_id"]
        access_token = doc["credentials"]["access_token"]
        api_domain = doc["credentials"]["api_domain"]
        return project_id, access_token, api_domain
    except Exception:
        return None, None, None |
def dem_url_dia(dt_day='2015-06-22'):
"""Obtiene las urls de descarga de los datos de demanda energética de un día concreto."""
def _url_tipo_dato(str_dia, k):
url = SERVER + '/archives/{}/download_json?locale=es'.format(D_TIPOS_REQ_DEM[k])
        if isinstance(str_dia, str):
return url + '&date=' + str_dia
else:
return url + '&date=' + str_dia.date().isoformat()
urls = [_url_tipo_dato(dt_day, k) for k in D_TIPOS_REQ_DEM.keys()]
return urls |
def dem_procesa_datos_dia(key_day, response):
"""Procesa los datos descargados en JSON."""
dfs_import, df_import, dfs_maxmin, hay_errores = [], None, [], 0
for r in response:
tipo_datos, data = _extract_func_json_data(r)
if tipo_datos is not None:
if ('IND_MaxMin' in tipo_datos) and data:
df_import = _import_daily_max_min(data)
dfs_maxmin.append(df_import)
elif data:
df_import = _import_json_ts_data(data)
dfs_import.append(df_import)
if tipo_datos is None or df_import is None:
hay_errores += 1
if hay_errores == 4:
        # Nothing at all for this day; early exit, no retry:
print_redb('** No hay datos para el día {}!'.format(key_day))
return None, -2
    else:  # if hay_errores < 3:
        # TODO: build partial data!! (max-min with NaN's, etc.)
data_import = {}
if dfs_import:
data_import[KEYS_DATA_DEM[0]] = dfs_import[0].join(dfs_import[1])
if len(dfs_maxmin) == 2:
data_import[KEYS_DATA_DEM[1]] = dfs_maxmin[0].join(dfs_maxmin[1])
elif dfs_maxmin:
data_import[KEYS_DATA_DEM[1]] = dfs_maxmin[0]
if not data_import:
print_err('DÍA: {} -> # ERRORES: {}'.format(key_day, hay_errores))
return None, -2
return data_import, 0 |
def dem_data_dia(str_dia='2015-10-10', str_dia_fin=None):
"""Obtiene datos de demanda energética en un día concreto o un intervalo, accediendo directamente a la web."""
params = {'date_fmt': DATE_FMT, 'usar_multithread': False, 'num_retries': 1, "timeout": 10,
'func_procesa_data_dia': dem_procesa_datos_dia, 'func_url_data_dia': dem_url_dia,
'data_extra_request': {'json_req': False, 'headers': HEADERS}}
if str_dia_fin is not None:
params['usar_multithread'] = True
data, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia_fin, **params)
else:
data, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia, **params)
if not hay_errores:
return data
else:
print_err(str_import)
return None |
def flag_inner_classes(obj):
"""
    Mutates any attributes on ``obj`` which are classes, adding a link back to ``obj``.
Adds a convenience accessor which instantiates ``obj`` and then calls its
``setup`` method.
Recurses on those objects as well.
"""
for tup in class_members(obj):
tup[1]._parent = obj
tup[1]._parent_inst = None
tup[1].__getattr__ = my_getattr
flag_inner_classes(tup[1]) |
def autohide(obj):
"""
Automatically hide setup() and teardown() methods, recursively.
"""
# Members on obj
for name, item in six.iteritems(vars(obj)):
if callable(item) and name in ('setup', 'teardown'):
item = hide(item)
# Recurse into class members
for name, subclass in class_members(obj):
autohide(subclass) |
def trap(func):
"""
Replace sys.std(out|err) with a wrapper during execution, restored after.
In addition, a new combined-streams output (another wrapper) will appear at
``sys.stdall``. This stream will resemble what a user sees at a terminal,
i.e. both out/err streams intermingled.
"""
@wraps(func)
def wrapper(*args, **kwargs):
# Use another CarbonCopy even though we're not cc'ing; for our "write
# bytes, return strings on py3" behavior. Meh.
sys.stdall = CarbonCopy()
my_stdout, sys.stdout = sys.stdout, CarbonCopy(cc=sys.stdall)
my_stderr, sys.stderr = sys.stderr, CarbonCopy(cc=sys.stdall)
try:
return func(*args, **kwargs)
finally:
sys.stdout = my_stdout
sys.stderr = my_stderr
del sys.stdall
return wrapper |
def pvpc_url_dia(dt_day):
"""Obtiene la url de descarga de los datos de PVPC de un día concreto.
Anteriormente era: 'http://www.esios.ree.es/Solicitar?fileName=pvpcdesglosehorario_' + str_dia
+ '&fileType=xml&idioma=es', pero ahora es en JSON y requiere token_auth en headers.
"""
    if isinstance(dt_day, str):
return SERVER + '/archives/70/download_json?locale=es' + '&date=' + dt_day
else:
return SERVER + '/archives/70/download_json?locale=es' + '&date=' + dt_day.date().isoformat() |
def pvpc_calc_tcu_cp_feu_d(df, verbose=True, convert_kwh=True):
"""Procesa TCU, CP, FEU diario.
:param df:
:param verbose:
:param convert_kwh:
:return:
"""
if 'TCU' + TARIFAS[0] not in df.columns:
        # Convert from €/MWh to €/kWh:
if convert_kwh:
cols_mwh = [c + t for c in COLS_PVPC for t in TARIFAS if c != 'COF']
df[cols_mwh] = df[cols_mwh].applymap(lambda x: x / 1000.)
        # Build the TCU, CP and daily-price columns
gb_t = df.groupby(lambda x: TARIFAS[np.argmax([t in x for t in TARIFAS])], axis=1)
for k, g in gb_t:
if verbose:
print('TARIFA {}'.format(k))
print(g.head())
            # Compute TCU
df['TCU{}'.format(k)] = g[k] - g['TEU{}'.format(k)]
            # Compute CP
# cols_cp = [c + k for c in ['FOS', 'FOM', 'INT', 'PCAP', 'PMH', 'SAH']]
cols_cp = [c + k for c in COLS_PVPC if c not in ['', 'COF', 'TEU']]
df['CP{}'.format(k)] = g[cols_cp].sum(axis=1)
            # Computing PERD --> not possible this way, since the base values already include PERD
# dfs_pvpc[k]['PERD{}'.format(k)] = dfs_pvpc[k]['TCU{}'.format(k)] / dfs_pvpc[k]['CP{}'.format(k)]
# dfs_pvpc[k]['PERD{}'.format(k)] = dfs_pvpc[k]['INT{}'.format(k)] / 1.92
            # Compute the daily FEU
cols_k = ['TEU' + k, 'TCU' + k, 'COF' + k]
g = df[cols_k].groupby('TEU' + k)
pr = g.apply(lambda x: x['TCU' + k].dot(x['COF' + k]) / x['COF' + k].sum())
pr.name = 'PD_' + k
df = df.join(pr, on='TEU' + k, rsuffix='_r')
df['PD_' + k] += df['TEU' + k]
return df |
def pvpc_procesa_datos_dia(_, response, verbose=True):
"""Procesa la información JSON descargada y forma el dataframe de los datos de un día."""
try:
d_data = response['PVPC']
df = _process_json_pvpc_hourly_data(pd.DataFrame(d_data))
return df, 0
except Exception as e:
if verbose:
print('ERROR leyendo información de web: {}'.format(e))
return None, -2 |
def pvpc_data_dia(str_dia, str_dia_fin=None):
"""Obtiene datos de PVPC en un día concreto o un intervalo, accediendo directamente a la web."""
params = {'date_fmt': DATE_FMT, 'usar_multithread': False,
'func_procesa_data_dia': pvpc_procesa_datos_dia, 'func_url_data_dia': pvpc_url_dia,
'data_extra_request': {'json_req': True, 'headers': HEADERS}}
if str_dia_fin is not None:
params['usar_multithread'] = True
data, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia_fin, **params)
else:
data, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia, **params)
if not hay_errores:
return data
else:
return str_import |
def _compress(self, input_str):
"""
Compress the log message in order to send less bytes to the wire.
"""
        # io.BytesIO replaces the py2-only cStringIO for binary data.
        compressed_bits = io.BytesIO()
        with gzip.GzipFile(fileobj=compressed_bits, mode='wb') as f:
            f.write(input_str)
        return compressed_bits.getvalue() |
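# Round-trip sketch of the same idea with the py3 stdlib: compress into an
# in-memory buffer, then check it inflates back via gzip.decompress.
import gzip
import io

payload = b'some log message'
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as f:
    f.write(payload)
assert gzip.decompress(buf.getvalue()) == payload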
def get_data_coeficientes_perfilado_2017(force_download=False):
"""Extrae la información de las dos hojas del Excel proporcionado por REE
con los perfiles iniciales para 2017.
:param force_download: Descarga el fichero 'raw' del servidor, en vez de acudir a la copia local.
:return: perfiles_2017, coefs_alpha_beta_gamma
:rtype: tuple
"""
path_perfs = os.path.join(STORAGE_DIR, 'perfiles_consumo_2017.h5')
if force_download or not os.path.exists(path_perfs):
        # Profiling coefficients and reference demand (1st sheet)
cols_sheet1 = ['Mes', 'Día', 'Hora',
'Pa,0m,d,h', 'Pb,0m,d,h', 'Pc,0m,d,h', 'Pd,0m,d,h', 'Demanda de Referencia 2017 (MW)']
perfs_2017 = pd.read_excel(URL_PERFILES_2017, header=None, skiprows=[0, 1], names=cols_sheet1)
perfs_2017['ts'] = pd.DatetimeIndex(start='2017-01-01', freq='H', tz=TZ, end='2017-12-31 23:59')
perfs_2017 = perfs_2017.set_index('ts').drop(['Mes', 'Día', 'Hora'], axis=1)
        # Alpha, Beta, Gamma coefficients (2nd sheet):
coefs_alpha_beta_gamma = pd.read_excel(URL_PERFILES_2017, sheetname=1)
print('Escribiendo perfiles 2017 en disco, en {}'.format(path_perfs))
with pd.HDFStore(path_perfs, 'w') as st:
st.put('coefs', coefs_alpha_beta_gamma)
st.put('perfiles', perfs_2017)
print('HDFStore de tamaño {:.3f} KB'.format(os.path.getsize(path_perfs) / 1000))
else:
with pd.HDFStore(path_perfs, 'r') as st:
coefs_alpha_beta_gamma = st['coefs']
perfs_2017 = st['perfiles']
return perfs_2017, coefs_alpha_beta_gamma |
def get_data_perfiles_estimados_2017(force_download=False):
"""Extrae perfiles estimados para 2017 con el formato de los CSV's mensuales con los perfiles definitivos.
:param force_download: bool para forzar la descarga del excel de la web de REE.
:return: perfiles_2017
:rtype: pd.Dataframe
"""
global DATA_PERFILES_2017
if (DATA_PERFILES_2017 is None) or force_download:
perf_demref_2017, _ = get_data_coeficientes_perfilado_2017(force_download=force_download)
        # Convert the 2017 profiles dataframe to the final-profiles format (for uniformity):
cols_usar = ['Pa,0m,d,h', 'Pb,0m,d,h', 'Pc,0m,d,h', 'Pd,0m,d,h']
perfs_2017 = perf_demref_2017[cols_usar].copy()
perfs_2017.columns = ['COEF. PERFIL {}'.format(p) for p in 'ABCD']
DATA_PERFILES_2017 = perfs_2017
return perfs_2017
return DATA_PERFILES_2017 |
def main_cli():
"""
    Updates the local PVPC/DEMANDA database stored as a dataframe, creating a new
    one if it does not exist or if any problem arises. The recorded data is stored in HDF5.
"""
def _get_parser_args():
p = argparse.ArgumentParser(description='Gestor de DB de PVPC/DEMANDA (esios.ree.es)')
p.add_argument('-d', '--dem', action='store_true', help='Selecciona BD de demanda (BD de PVPC por defecto)')
p.add_argument('-i', '--info', action='store', nargs='*',
help="Muestra información de la BD seleccionada. "
"* Puede usar intervalos temporales y nombres de columnas, "
"como '-i gen noc 2017-01-24 2017-01-26'")
p.add_argument('-fu', '-FU', '--forceupdate', action='store_true',
help="Fuerza la reconstrucción total de la BD seleccionada")
p.add_argument('-u', '-U', '--update', action='store_true',
help="Actualiza la información de la BD seleccionada hasta el instante actual")
p.add_argument('-p', '--plot', action='store_true', help="Genera plots de la información filtrada de la BD")
p.add_argument('-v', '--verbose', action='store_true', help='Muestra información extra')
arguments = p.parse_args()
return arguments, p
def _parse_date(string, columns):
try:
ts = pd.Timestamp(string)
print_cyan('{} es timestamp: {:%c} --> {}'.format(string, ts, ts.date()))
columns.remove(string)
return ts.date().isoformat()
except ValueError:
pass
args, parser = _get_parser_args()
print_secc('ESIOS PVPC/DEMANDA')
if args.dem:
db_web = DatosREE(update=args.update, force_update=args.forceupdate, verbose=args.verbose)
else:
db_web = PVPC(update=args.update, force_update=args.forceupdate, verbose=args.verbose)
data = db_web.data['data']
if args.info is not None:
if len(args.info) > 0:
cols = args.info.copy()
dates = [d for d in [_parse_date(s, cols) for s in args.info] if d]
if len(dates) == 2:
data = data.loc[dates[0]:dates[1]]
elif len(dates) == 1:
data = data.loc[dates[0]]
if len(cols) > 0:
try:
data = data[[c.upper() for c in cols]]
except KeyError as e:
print_red('NO SE PUEDE FILTRAR LA COLUMNA (Exception: {})\nLAS COLUMNAS DISPONIBLES SON:\n{}'
.format(e, data.columns))
print_info(data)
else:
print_secc('LAST 24h in DB:')
print_info(data.iloc[-24:])
print_cyan(data.columns)
if args.plot:
if args.dem:
from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_hora
print_red('IMPLEMENTAR PLOTS DEM')
else:
from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_hora
if len(data) < 750:
pvpcplot_grid_hora(data)
# pvpcplot_tarifas_hora(data)
else:
print_red('La selección para plot es excesiva: {} samples de {} a {}\nSe hace plot de las últimas 24h'.
format(len(data), data.index[0], data.index[-1]))
pvpcplot_grid_hora(db_web.data['data'].iloc[-24:])
pvpcplot_tarifas_hora(db_web.data['data'].iloc[-24:]) |
def registerGoodClass(self, class_):
"""
Internal bookkeeping to handle nested classes
"""
# Class itself added to "good" list
self._valid_classes.append(class_)
# Recurse into any inner classes
for name, cls in class_members(class_):
if self.isValidClass(cls):
self.registerGoodClass(cls) |
def isValidClass(self, class_):
"""
Needs to be its own method so it can be called from both wantClass and
registerGoodClass.
"""
module = inspect.getmodule(class_)
valid = (
module in self._valid_modules
or (
hasattr(module, '__file__')
and module.__file__ in self._valid_named_modules
)
)
return valid and not private(class_) |
def procesa_data_dia(self, key_dia, datos_para_procesar):
"""Procesa los datos descargados correspondientes a un día `key_dia`."""
return pvpc_procesa_datos_dia(key_dia, datos_para_procesar, verbose=self.verbose) |
def get_resample_data(self):
"""Obtiene los dataframes de los datos de PVPC con resampling diario y mensual."""
if self.data is not None:
if self._pvpc_mean_daily is None:
self._pvpc_mean_daily = self.data['data'].resample('D').mean()
if self._pvpc_mean_monthly is None:
self._pvpc_mean_monthly = self.data['data'].resample('MS').mean()
return self._pvpc_mean_daily, self._pvpc_mean_monthly |
def last_entry(self, data_revisar=None, key_revisar=None):
"""
        Demand-specific override to filter energy-demand data (the data extends beyond the
        present time because of the forecast and scheduled power columns).
        :param data_revisar: (OPT) a specific dataframe may be passed in
        :param key_revisar: (OPT) normally 'dem'
:return: tmax, num_entradas
"""
if data_revisar is None and key_revisar is None:
data_revisar = self.data[self.masterkey][pd.notnull(self.data[self.masterkey]['dem'])]
super(DatosREE, self).printif('Últimos valores de generación y demanda:', 'info')
super(DatosREE, self).printif(data_revisar.tail(), 'info')
return super(DatosREE, self).last_entry(data_revisar, 'dem')
else:
return super(DatosREE, self).last_entry(data_revisar, key_revisar) |
def integridad_data(self, data_integr=None, key=None):
"""
        Specific override to check the timezone and frequency of the data, and to verify that the
        index of each dataframe in the database is date-based, unique (no duplicates) and increasing
:param data_integr:
:param key:
"""
if data_integr is None and key is None and all(k in self.data.keys() for k in KEYS_DATA_DEM):
assert(self.data[KEYS_DATA_DEM[0]].index.freq == FREQ_DAT_DEM
and self.data[KEYS_DATA_DEM[0]].index.tz == self.TZ)
if self.data[KEYS_DATA_DEM[1]] is not None:
assert(self.data[KEYS_DATA_DEM[1]].index.freq == 'D')
super(DatosREE, self).integridad_data(data_integr, key) |
def busca_errores_data(self):
"""
        Look for errors or inconsistencies in the acquired data
        :return: Dataframe with the errors found
"""
data_busqueda = self.append_delta_index(TS_DATA_DEM, data_delta=self.data[self.masterkey].copy())
idx_desconex = (((data_busqueda.index < 'now') & (data_busqueda.index >= self.DATE_INI)) &
((data_busqueda.delta_T > 1) | data_busqueda['dem'].isnull() |
data_busqueda['pre'].isnull() | data_busqueda['pro'].isnull()))
sosp = data_busqueda[idx_desconex].copy()
assert len(sosp) == 0
# if len(sosp) > 0:
# cols_show = ['bad_dem', 'bad_pre', 'bad_T', 'delta', 'delta_T', 'dem', 'pre', 'pro']
# cols_ss = cols_show[:3]
# how_r = {k: pd.Series.sum if k == 'delta' else 'sum' for k in cols_show}
# sosp[cols_show[0]] = sosp['dem'].isnull()
# sosp[cols_show[1]] = sosp['pre'].isnull()
# sosp[cols_show[2]] = sosp['delta_T'] > 1
# if verbose:
# print(sosp[cols_show].tz_localize(None).resample('D', how=how_r).dropna(how='all', subset=cols_ss))
# print(sosp[cols_show].tz_localize(None).resample('MS', how=how_r).dropna(how='all', subset=cols_ss))
# return sosp
return pd.DataFrame() |
def sanitize_path(path):
"""Performs sanitation of the path after validating
:param path: path to sanitize
:return: path
:raises:
- InvalidPath if the path doesn't start with a slash
"""
if path == '/': # Nothing to do, just return
return path
if path[:1] != '/':
raise InvalidPath('The path must start with a slash')
# Deduplicate slashes in path
path = re.sub(r'/+', '/', path)
# Strip trailing slashes and return
return path.rstrip('/') |
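# Behavior at a glance:
#   sanitize_path('/a//b///c/')  -> '/a/b/c'
#   sanitize_path('/')           -> '/'
#   sanitize_path('a/b')         -> raises InvalidPath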
def _validate_schema(obj):
"""Ensures the passed schema instance is compatible
:param obj: object to validate
:return: obj
:raises:
- IncompatibleSchema if the passed schema is of an incompatible type
"""
if obj is not None and not isinstance(obj, Schema):
raise IncompatibleSchema('Schema must be of type {0}'.format(Schema))
return obj |
def route(bp, *args, **kwargs):
"""Journey route decorator
Enables simple serialization, deserialization and validation of Flask routes with the help of Marshmallow.
:param bp: :class:`flask.Blueprint` object
:param args: args to pass along to `Blueprint.route`
:param kwargs:
- :strict_slashes: Enable / disable strict slashes (default False)
- :validate: Enable / disable body/query validation (default True)
- :_query: Unmarshal Query string into this schema
- :_body: Unmarshal JSON body into this schema
- :marshal_with: Serialize the output with this schema
:raises:
- ValidationError if the query parameters or JSON body fails validation
"""
kwargs['strict_slashes'] = kwargs.pop('strict_slashes', False)
body = _validate_schema(kwargs.pop('_body', None))
query = _validate_schema(kwargs.pop('_query', None))
output = _validate_schema(kwargs.pop('marshal_with', None))
validate = kwargs.pop('validate', True)
def decorator(f):
@bp.route(*args, **kwargs)
@wraps(f)
def wrapper(*inner_args, **inner_kwargs):
"""If a schema (_body and/or _query) was supplied to the route decorator, the deserialized
:class`marshmallow.Schema` object is injected into the decorated function's kwargs."""
try:
if query is not None:
query.strict = validate
url = furl(request.url)
inner_kwargs['_query'] = query.load(data=url.args)
if body is not None:
body.strict = validate
json_data = request.get_json()
if json_data is None:
# Set json_data to empty dict if body is empty, so it gets picked up by the validator
json_data = {}
inner_kwargs['_body'] = body.load(data=json_data)
except ValidationError as err:
return jsonify(err.messages), 422
if output:
data = output.dump(f(*inner_args, **inner_kwargs))
return jsonify(data[0])
return f(*inner_args, **inner_kwargs)
return f
return decorator |
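# Hedged usage sketch. The schema and endpoint names are made up, and it
# assumes marshmallow 2.x, where load()/dump() return result objects; that
# matches the `.data` and `data[0]` indexing in the decorator above.
from flask import Blueprint
from marshmallow import Schema, fields

bp = Blueprint('users', __name__)

class QuerySchema(Schema):
    page = fields.Int(missing=1)

class UserSchema(Schema):
    id = fields.Int()
    name = fields.Str()

@route(bp, '/users', methods=['GET'],
       _query=QuerySchema(), marshal_with=UserSchema(many=True))
def list_users(_query):
    page = _query.data['page']     # deserialized query string
    return fetch_users(page=page)  # fetch_users is a hypothetical helper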
def attach_bp(self, bp, description=''):
"""Attaches a flask.Blueprint to the bundle
:param bp: :class:`flask.Blueprint` object
:param description: Optional description string
:raises:
- InvalidBlueprint if the Blueprint is not of type `flask.Blueprint`
"""
if not isinstance(bp, Blueprint):
raise InvalidBlueprint('Blueprints attached to the bundle must be of type {0}'.format(Blueprint))
self.blueprints.append((bp, description)) |
def move_dot(self):
"""Returns the DottedRule that results from moving the dot."""
return self.__class__(self.production, self.pos + 1, self.lookahead) |
def first(self, symbols):
"""Computes the intermediate FIRST set using symbols."""
ret = set()
if EPSILON in symbols:
return set([EPSILON])
for symbol in symbols:
ret |= self._first[symbol] - set([EPSILON])
if EPSILON not in self._first[symbol]:
break
else:
ret.add(EPSILON)
return ret |
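# Standalone sketch of the same rule over plain dicts (EPSILON is just a
# sentinel here): later symbols are only consulted while every symbol before
# them can derive epsilon.
EPSILON = '<e>'
FIRST = {'A': {'a', EPSILON}, 'B': {'b'}}

def first_of(symbols):
    ret = set()
    for symbol in symbols:
        ret |= FIRST[symbol] - {EPSILON}
        if EPSILON not in FIRST[symbol]:
            break
    else:
        ret.add(EPSILON)
    return ret

print(first_of(['A', 'B']))  # {'a', 'b'}: A may vanish, so B is consulted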
def _compute_first(self):
"""Computes the FIRST set for every symbol in the grammar.
        Tentatively based on _compute_first in PLY.
"""
for terminal in self.terminals:
self._first[terminal].add(terminal)
self._first[END_OF_INPUT].add(END_OF_INPUT)
while True:
changed = False
for nonterminal, productions in self.nonterminals.items():
for production in productions:
new_first = self.first(production.rhs)
if new_first - self._first[nonterminal]:
self._first[nonterminal] |= new_first
changed = True
if not changed:
break |
def _compute_follow(self):
"""Computes the FOLLOW set for every non-terminal in the grammar.
        Tentatively based on _compute_follow in PLY.
"""
self._follow[self.start_symbol].add(END_OF_INPUT)
while True:
changed = False
for nonterminal, productions in self.nonterminals.items():
for production in productions:
for i, symbol in enumerate(production.rhs):
if symbol not in self.nonterminals:
continue
first = self.first(production.rhs[i + 1:])
new_follow = first - set([EPSILON])
if EPSILON in first or i == (len(production.rhs) - 1):
new_follow |= self._follow[nonterminal]
if new_follow - self._follow[symbol]:
self._follow[symbol] |= new_follow
changed = True
if not changed:
break |
def initial_closure(self):
"""Computes the initial closure using the START_foo production."""
first_rule = DottedRule(self.start, 0, END_OF_INPUT)
return self.closure([first_rule]) |
def goto(self, rules, symbol):
"""Computes the next closure for rules based on the symbol we got.
Args:
rules - an iterable of DottedRules
symbol - a string denoting the symbol we've just seen
Returns: frozenset of DottedRules
"""
return self.closure(
{rule.move_dot() for rule in rules
if not rule.at_end and rule.rhs[rule.pos] == symbol},
) |
def closure(self, rules):
"""Fills out the entire closure based on some initial dotted rules.
Args:
rules - an iterable of DottedRules
Returns: frozenset of DottedRules
"""
closure = set()
todo = set(rules)
while todo:
rule = todo.pop()
closure.add(rule)
# If the dot is at the end, there's no need to process it.
if rule.at_end:
continue
symbol = rule.rhs[rule.pos]
for production in self.nonterminals[symbol]:
for first in self.first(rule.rest):
if EPSILON in production.rhs:
# Move immediately to the end if the production
# goes to epsilon
new_rule = DottedRule(production, 1, first)
else:
new_rule = DottedRule(production, 0, first)
if new_rule not in closure:
todo.add(new_rule)
return frozenset(closure) |
def closures(self):
"""Computes all LR(1) closure sets for the grammar."""
initial = self.initial_closure()
closures = collections.OrderedDict()
goto = collections.defaultdict(dict)
todo = set([initial])
while todo:
closure = todo.pop()
closures[closure] = closure
symbols = {rule.rhs[rule.pos] for rule in closure
if not rule.at_end}
for symbol in symbols:
next_closure = self.goto(closure, symbol)
                if next_closure in closures:
                    # Equal frozensets are interchangeable; reuse the stored one.
                    next_closure = closures[next_closure]
                elif next_closure not in todo:
                    closures[next_closure] = next_closure
                    todo.add(next_closure)
goto[closure][symbol] = next_closure
return initial, closures, goto |
def init_app(self, app):
"""Initializes Journey extension
:param app: App passed from constructor or directly to init_app
:raises:
            - NoBundlesAttached if no bundles have been attached
"""
if len(self._attached_bundles) == 0:
raise NoBundlesAttached("At least one bundle must be attached before initializing Journey")
for bundle in self._attached_bundles:
processed_bundle = {
'path': bundle.path,
'description': bundle.description,
'blueprints': []
}
for (bp, description) in bundle.blueprints:
# Register the BP
blueprint = self._register_blueprint(app, bp, bundle.path,
self.get_bp_path(bp), description)
# Finally, attach the blueprints to its parent
processed_bundle['blueprints'].append(blueprint)
self._registered_bundles.append(processed_bundle) |
def routes_simple(self):
"""Returns simple info about registered blueprints
:return: Tuple containing endpoint, path and allowed methods for each route
"""
routes = []
for bundle in self._registered_bundles:
bundle_path = bundle['path']
for blueprint in bundle['blueprints']:
bp_path = blueprint['path']
for child in blueprint['routes']:
routes.append(
(
child['endpoint'],
bundle_path + bp_path + child['path'],
child['methods']
)
)
return routes |
def _bundle_exists(self, path):
"""Checks if a bundle exists at the provided path
:param path: Bundle path
:return: bool
"""
for attached_bundle in self._attached_bundles:
if path == attached_bundle.path:
return True
return False |
def attach_bundle(self, bundle):
"""Attaches a bundle object
:param bundle: :class:`flask_journey.BlueprintBundle` object
:raises:
- IncompatibleBundle if the bundle is not of type `BlueprintBundle`
- ConflictingPath if a bundle already exists at bundle.path
- MissingBlueprints if the bundle doesn't contain any blueprints
"""
if not isinstance(bundle, BlueprintBundle):
raise IncompatibleBundle('BlueprintBundle object passed to attach_bundle must be of type {0}'
.format(BlueprintBundle))
elif len(bundle.blueprints) == 0:
raise MissingBlueprints("Bundles must contain at least one flask.Blueprint")
elif self._bundle_exists(bundle.path):
raise ConflictingPath("Duplicate bundle path {0}".format(bundle.path))
elif self._journey_path == bundle.path == '/':
raise ConflictingPath("Bundle path and Journey path cannot both be {0}".format(bundle.path))
self._attached_bundles.append(bundle) |
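# Hedged end-to-end sketch assembled from the pieces shown here; the
# BlueprintBundle and Journey constructor signatures are assumptions.
from flask import Flask, Blueprint

app = Flask(__name__)
users_bp = Blueprint('users', __name__)

bundle = BlueprintBundle(path='/api')  # assumed constructor
bundle.attach_bp(users_bp, description='User endpoints')

journey = Journey()  # assumed constructor
journey.attach_bundle(bundle)
journey.init_app(app)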
def _register_blueprint(self, app, bp, bundle_path, child_path, description):
"""Register and return info about the registered blueprint
:param bp: :class:`flask.Blueprint` object
:param bundle_path: the URL prefix of the bundle
:param child_path: blueprint relative to the bundle path
:return: Dict with info about the blueprint
"""
base_path = sanitize_path(self._journey_path + bundle_path + child_path)
app.register_blueprint(bp, url_prefix=base_path)
return {
'name': bp.name,
'path': child_path,
'import_name': bp.import_name,
'description': description,
'routes': self.get_blueprint_routes(app, base_path)
} |
def get_blueprint_routes(app, base_path):
"""Returns detailed information about registered blueprint routes matching the `BlueprintBundle` path
:param app: App instance to obtain rules from
:param base_path: Base path to return detailed route info for
:return: List of route detail dicts
"""
routes = []
for child in app.url_map.iter_rules():
if child.rule.startswith(base_path):
relative_path = child.rule[len(base_path):]
routes.append({
'path': relative_path,
'endpoint': child.endpoint,
'methods': list(child.methods)
})
return routes |
def compute_precedence(terminals, productions, precedence_levels):
"""Computes the precedence of terminal and production.
The precedence of a terminal is it's level in the PRECEDENCE tuple. For
a production, the precedence is the right-most terminal (if it exists).
The default precedence is DEFAULT_PREC - (LEFT, 0).
Returns:
precedence - dict[terminal | production] = (assoc, level)
"""
precedence = collections.OrderedDict()
for terminal in terminals:
precedence[terminal] = DEFAULT_PREC
level_precs = range(len(precedence_levels), 0, -1)
for i, level in zip(level_precs, precedence_levels):
assoc = level[0]
for symbol in level[1:]:
precedence[symbol] = (assoc, i)
for production, prec_symbol in productions:
if prec_symbol is None:
prod_terminals = [symbol for symbol in production.rhs
if symbol in terminals] or [None]
precedence[production] = precedence.get(prod_terminals[-1],
DEFAULT_PREC)
else:
precedence[production] = precedence.get(prec_symbol,
DEFAULT_PREC)
return precedence |
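# Sketch of how precedence_levels map to (assoc, level) pairs: the zip with
# a descending range gives earlier entries the higher level number, so
# tighter-binding operators should come first. LEFT = 'left' is an assumption.
LEFT = 'left'
precedence_levels = (
    (LEFT, '*', '/'),
    (LEFT, '+', '-'),
)
for i, level in zip(range(len(precedence_levels), 0, -1), precedence_levels):
    print(level[1:], '->', (level[0], i))
# ('*', '/') -> ('left', 2)
# ('+', '-') -> ('left', 1)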
def make_tables(grammar, precedence):
"""Generates the ACTION and GOTO tables for the grammar.
Returns:
action - dict[state][lookahead] = (action, ...)
goto - dict[state][just_reduced] = new_state
"""
ACTION = {}
GOTO = {}
labels = {}
def get_label(closure):
if closure not in labels:
labels[closure] = len(labels)
return labels[closure]
def resolve_shift_reduce(lookahead, s_action, r_action):
s_assoc, s_level = precedence[lookahead]
r_assoc, r_level = precedence[r_action[1]]
if s_level < r_level:
return r_action
elif s_level == r_level and r_assoc == LEFT:
return r_action
else:
return s_action
initial, closures, goto = grammar.closures()
for closure in closures:
label = get_label(closure)
for rule in closure:
new_action, lookahead = None, rule.lookahead
if not rule.at_end:
symbol = rule.rhs[rule.pos]
is_terminal = symbol in grammar.terminals
has_goto = symbol in goto[closure]
if is_terminal and has_goto:
next_state = get_label(goto[closure][symbol])
new_action, lookahead = ('shift', next_state), symbol
elif rule.production == grammar.start and rule.at_end:
new_action = ('accept',)
elif rule.at_end:
new_action = ('reduce', rule.production)
if new_action is None:
continue
prev_action = ACTION.get((label, lookahead))
if prev_action is None or prev_action == new_action:
ACTION[label, lookahead] = new_action
else:
types = (prev_action[0], new_action[0])
if types == ('shift', 'reduce'):
chosen = resolve_shift_reduce(lookahead,
prev_action,
new_action)
elif types == ('reduce', 'shift'):
chosen = resolve_shift_reduce(lookahead,
new_action,
prev_action)
else:
raise TableConflictError(prev_action, new_action)
ACTION[label, lookahead] = chosen
for symbol in grammar.nonterminals:
if symbol in goto[closure]:
GOTO[label, symbol] = get_label(goto[closure][symbol])
return get_label(initial), ACTION, GOTO |
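The conflict policy is easiest to see in isolation. Below is a self-contained restatement of resolve_shift_reduce with a precedence dict of the shape compute_precedence returns; the production name is hypothetical:

LEFT = 'left'
precedence = {
    '+': (LEFT, 1),
    '*': (LEFT, 2),
    'expr -> expr + expr': (LEFT, 1),
}

def resolve(lookahead, s_action, r_action):
    s_assoc, s_level = precedence[lookahead]
    r_assoc, r_level = precedence[r_action[1]]
    if s_level < r_level:
        return r_action                      # the rule to reduce binds tighter
    elif s_level == r_level and r_assoc == LEFT:
        return r_action                      # left-assoc: group to the left
    else:
        return s_action                      # otherwise keep shifting

reduce_plus = ('reduce', 'expr -> expr + expr')
# '1 + 2 + 3': same level, left-assoc, so reduce wins -> left grouping
assert resolve('+', ('shift', 7), reduce_plus) == reduce_plus
# '1 + 2 * 3': '*' outranks the '+' production, so the shift wins
assert resolve('*', ('shift', 7), reduce_plus) == ('shift', 7)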
def KB_AgentProgram(KB):
"""A generic logical knowledge-based agent program. [Fig. 7.1]"""
steps = itertools.count()
def program(percept):
        t = next(steps)
KB.tell(make_percept_sentence(percept, t))
action = KB.ask(make_action_query(t))
KB.tell(make_action_sentence(action, t))
return action
    def make_percept_sentence(percept, t):
        return Expr("Percept")(percept, t)
    def make_action_query(t):
        return expr("ShouldDo(action, %d)" % t)
    def make_action_sentence(action, t):
        return Expr("Did")(action[expr('action')], t)
return program |
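Expr, expr, and the KB interface live elsewhere in this module. The self-contained sketch below swaps in a toy string-based KB purely to show the tell/ask/tell cycle at each time step:

import itertools

class EchoKB:
    # Toy stand-in for the KB interface (tell/ask); a real KB does inference.
    def __init__(self):
        self.sentences = []
    def tell(self, sentence):
        self.sentences.append(sentence)
    def ask(self, query):
        return 'Forward'  # a real KB would derive the action from its sentences

def KB_AgentProgram_sketch(KB):
    steps = itertools.count()
    def program(percept):
        t = next(steps)
        KB.tell('Percept(%s, %d)' % (percept, t))
        action = KB.ask('ShouldDo(action, %d)' % t)
        KB.tell('Did(%s, %d)' % (action, t))
        return action
    return program

agent = KB_AgentProgram_sketch(EchoKB())
print(agent('bump'))     # -> 'Forward', at t = 0
print(agent('glitter'))  # -> 'Forward', at t = 1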
def expr(s):
"""Create an Expr representing a logic expression by parsing the input
string. Symbols and numbers are automatically converted to Exprs.
In addition you can use alternative spellings of these operators:
'x ==> y' parses as (x >> y) # Implication
'x <== y' parses as (x << y) # Reverse implication
'x <=> y' parses as (x % y) # Logical equivalence
'x =/= y' parses as (x ^ y) # Logical disequality (xor)
    But BE CAREFUL: the precedence of implication is wrong here, because
    Python's '>>' binds tighter than '&'. expr('P & Q ==> R & S') is
    ((P & (Q >> R)) & S), so you must write expr('(P & Q) ==> (R & S)').
>>> expr('P <=> Q(1)')
(P <=> Q(1))
>>> expr('P & Q | ~R(x, F(x))')
((P & Q) | ~R(x, F(x)))
"""
if isinstance(s, Expr): return s
if isnumber(s): return Expr(s)
## Replace the alternative spellings of operators with canonical spellings
s = s.replace('==>', '>>').replace('<==', '<<')
s = s.replace('<=>', '%').replace('=/=', '^')
## Replace a symbol or number, such as 'P' with 'Expr("P")'
s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr("\1")', s)
## Now eval the string. (A security hole; do not use with an adversary.)
return eval(s, {'Expr':Expr}) |
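A self-contained trace of the string rewriting expr performs before eval; the printed string is exactly what eval receives (Expr itself is defined elsewhere):

import re

s = 'P & Q ==> R & S'
s = s.replace('==>', '>>').replace('<==', '<<')
s = s.replace('<=>', '%').replace('=/=', '^')
s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr("\1")', s)
print(s)  # Expr("P") & Expr("Q") >> Expr("R") & Expr("S")
# Python's '>>' binds tighter than '&', so eval groups this as
# (P & (Q >> R)) & S -- hence the docstring's warning to parenthesize.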
def variables(s):
"""Return a set of the variables in expression s.
>>> ppset(variables(F(x, A, y)))
set([x, y])
>>> ppset(variables(F(G(x), z)))
set([x, z])
>>> ppset(variables(expr('F(x, x) & G(x, y) & H(y, z) & R(A, z, z)')))
set([x, y, z])
"""
result = set([])
def walk(s):
if is_variable(s):
result.add(s)
else:
for arg in s.args:
walk(arg)
walk(s)
return result |
def is_definite_clause(s):
"""returns True for exprs s of the form A & B & ... & C ==> D,
where all literals are positive. In clause form, this is
~A | ~B | ... | ~C | D, where exactly one clause is positive.
>>> is_definite_clause(expr('Farmer(Mac)'))
True
>>> is_definite_clause(expr('~Farmer(Mac)'))
False
>>> is_definite_clause(expr('(Farmer(f) & Rabbit(r)) ==> Hates(f, r)'))
True
>>> is_definite_clause(expr('(Farmer(f) & ~Rabbit(r)) ==> Hates(f, r)'))
False
>>> is_definite_clause(expr('(Farmer(f) | Rabbit(r)) ==> Hates(f, r)'))
False
"""
if is_symbol(s.op):
return True
elif s.op == '>>':
antecedent, consequent = s.args
return (is_symbol(consequent.op)
and every(lambda arg: is_symbol(arg.op), conjuncts(antecedent)))
else:
return False |
def parse_definite_clause(s):
"Return the antecedents and the consequent of a definite clause."
assert is_definite_clause(s)
if is_symbol(s.op):
return [], s
else:
antecedent, consequent = s.args
return conjuncts(antecedent), consequent |
def tt_entails(kb, alpha):
"""Does kb entail the sentence alpha? Use truth tables. For propositional
kb's and sentences. [Fig. 7.10]
>>> tt_entails(expr('P & Q'), expr('Q'))
True
"""
assert not variables(alpha)
return tt_check_all(kb, alpha, prop_symbols(kb & alpha), {}) |
def tt_check_all(kb, alpha, symbols, model):
"Auxiliary routine to implement tt_entails."
if not symbols:
if pl_true(kb, model):
result = pl_true(alpha, model)
assert result in (True, False)
return result
else:
return True
else:
P, rest = symbols[0], symbols[1:]
return (tt_check_all(kb, alpha, rest, extend(model, P, True)) and
tt_check_all(kb, alpha, rest, extend(model, P, False))) |
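A miniature, self-contained version of the same enumeration idea, with sentences represented as plain Python predicates over a model dict (pl_true, prop_symbols, and extend are this module's Expr-based versions):

def mini_tt_entails(kb, alpha, symbols, model=None):
    # Enumerate every assignment to symbols; alpha must hold wherever kb does.
    model = dict(model or {})
    if not symbols:
        return alpha(model) if kb(model) else True  # vacuously true when kb fails
    P, rest = symbols[0], symbols[1:]
    return (mini_tt_entails(kb, alpha, rest, dict(model, **{P: True})) and
            mini_tt_entails(kb, alpha, rest, dict(model, **{P: False})))

kb = lambda m: m['P'] and m['Q']               # P & Q
alpha = lambda m: m['Q']                       # Q
print(mini_tt_entails(kb, alpha, ['P', 'Q']))  # True, matching the doctest above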
def prop_symbols(x):
"Return a list of all propositional symbols in x."
if not isinstance(x, Expr):
return []
elif is_prop_symbol(x.op):
return [x]
else:
return list(set(symbol for arg in x.args
for symbol in prop_symbols(arg))) |
def pl_true(exp, model={}):
"""Return True if the propositional logic expression is true in the model,
and False if it is false. If the model does not specify the value for
every proposition, this may return None to indicate 'not obvious';
this may happen even when the expression is tautological."""
op, args = exp.op, exp.args
if exp == TRUE:
return True
elif exp == FALSE:
return False
elif is_prop_symbol(op):
return model.get(exp)
elif op == '~':
p = pl_true(args[0], model)
if p is None: return None
else: return not p
elif op == '|':
result = False
for arg in args:
p = pl_true(arg, model)
if p is True: return True
if p is None: result = None
return result
elif op == '&':
result = True
for arg in args:
p = pl_true(arg, model)
if p is False: return False
if p is None: result = None
return result
p, q = args
if op == '>>':
return pl_true(~p | q, model)
elif op == '<<':
return pl_true(p | ~q, model)
pt = pl_true(p, model)
if pt is None: return None
qt = pl_true(q, model)
if qt is None: return None
if op == '<=>':
return pt == qt
elif op == '^':
return pt != qt
else:
        raise ValueError("illegal operator in logic expression: " + str(exp)) |
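The None-propagation above implements a three-valued logic. A tiny standalone illustration of the '|' branch, with plain Python values in place of Exprs:

def or3(p, q):
    # Disjunction over True/False/None, where None means 'not determined'.
    if p is True or q is True:
        return True
    if p is None or q is None:
        return None
    return False

print(or3(True, None))   # True  -- known true regardless of the unknown
print(or3(False, None))  # None  -- the result still depends on the unknown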
def to_cnf(s):
"""Convert a propositional logical sentence s to conjunctive normal form.
That is, to the form ((A | ~B | ...) & (B | C | ...) & ...) [p. 253]
>>> to_cnf("~(B|C)")
(~B & ~C)
>>> to_cnf("B <=> (P1|P2)")
((~P1 | B) & (~P2 | B) & (P1 | P2 | ~B))
>>> to_cnf("a | (b & c) | d")
((b | a | d) & (c | a | d))
>>> to_cnf("A & (B | (D & E))")
(A & (D | B) & (E | B))
>>> to_cnf("A | (B | (C | (D & E)))")
((D | A | B | C) & (E | A | B | C))
"""
if isinstance(s, str): s = expr(s)
s = eliminate_implications(s) # Steps 1, 2 from p. 253
s = move_not_inwards(s) # Step 3
return distribute_and_over_or(s) |
def eliminate_implications(s):
"""Change >>, <<, and <=> into &, |, and ~. That is, return an Expr
that is equivalent to s, but has only &, |, and ~ as logical operators.
>>> eliminate_implications(A >> (~B << C))
((~B | ~C) | ~A)
>>> eliminate_implications(A ^ B)
((A & ~B) | (~A & B))
"""
if not s.args or is_symbol(s.op): return s ## (Atoms are unchanged.)
    args = list(map(eliminate_implications, s.args))
a, b = args[0], args[-1]
if s.op == '>>':
return (b | ~a)
elif s.op == '<<':
return (a | ~b)
elif s.op == '<=>':
return (a | ~b) & (b | ~a)
elif s.op == '^':
assert len(args) == 2 ## TODO: relax this restriction
return (a & ~b) | (~a & b)
else:
assert s.op in ('&', '|', '~')
return Expr(s.op, *args) |
def move_not_inwards(s):
"""Rewrite sentence s by moving negation sign inward.
>>> move_not_inwards(~(A | B))
(~A & ~B)
>>> move_not_inwards(~(A & B))
(~A | ~B)
>>> move_not_inwards(~(~(A | ~B) | ~~C))
((A | ~B) & ~C)
"""
if s.op == '~':
NOT = lambda b: move_not_inwards(~b)
a = s.args[0]
if a.op == '~': return move_not_inwards(a.args[0]) # ~~A ==> A
        if a.op == '&': return associate('|', list(map(NOT, a.args)))
        if a.op == '|': return associate('&', list(map(NOT, a.args)))
return s
elif is_symbol(s.op) or not s.args:
return s
else:
return Expr(s.op, *map(move_not_inwards, s.args)) |
def distribute_and_over_or(s):
"""Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
>>> distribute_and_over_or((A & B) | C)
((A | C) & (B | C))
"""
if s.op == '|':
s = associate('|', s.args)
if s.op != '|':
return distribute_and_over_or(s)
if len(s.args) == 0:
return FALSE
if len(s.args) == 1:
return distribute_and_over_or(s.args[0])
conj = find_if((lambda d: d.op == '&'), s.args)
if not conj:
return s
others = [a for a in s.args if a is not conj]
rest = associate('|', others)
return associate('&', [distribute_and_over_or(c|rest)
for c in conj.args])
elif s.op == '&':
        return associate('&', list(map(distribute_and_over_or, s.args)))
else:
return s |