| sentence1 (string, lengths 52–3.87M) | sentence2 (string, lengths 1–47.2k) | label (1 class: value) |
|---|---|---|
def _normalize(mat: np.ndarray):
"""rescales a numpy array, so that min is 0 and max is 255"""
return ((mat - mat.min()) * (255 / mat.max())).astype(np.uint8) | rescales a numpy array, so that min is 0 and max is 255 | entailment |
def to_24bit_gray(mat: np.ndarray):
    """Return an RGB (3-channel) grayscale image for *mat*, with each
    channel holding the same values rescaled to the 0..255 range."""
    gray = _normalize(mat)
    return np.repeat(gray[:, :, np.newaxis], 3, axis=2)
def apply_color_map(name: str, mat: np.ndarray = None):
    """Return an RGB matrix colored with the matplotlib color map *name*.

    When *mat* is None, a one-argument function is returned instead so the
    mapping can be applied later (partial-application style)."""
    def apply_map(matrix):
        rgba = cm.get_cmap(name)(_normalize(matrix))
        return (rgba[:, :, :3] * 255).astype(np.uint8)
    if mat is None:
        return apply_map
    return apply_map(mat)
def mat_to_surface(mat: np.ndarray, transformer=to_24bit_gray):
    """Create a pygame.Surface from a 2d numpy array.

    By default a grey image with scaled colors is returned; any other
    transformation can be supplied via *transformer*.

    :param mat: the matrix to create the surface of.
    :type mat: np.ndarray
    :param transformer: function mapping the (transposed) matrix to a valid
        color matrix: 3 dimensions, the third being the color channels with
        values between 0 and 255. Pass None to use *mat* as-is.
    :type transformer: Callable[[np.ndarray], np.ndarray]
    """
    # pygame expects (width, height) ordering, hence the transpose
    transposed = mat.transpose()
    color_matrix = transposed if transformer is None else transformer(transposed)
    return pygame.pixelcopy.make_surface(color_matrix)
def random_id(length=16, charset=alphanum_chars, first_charset=alpha_chars, sep='', group=0):
    """Create a random id with the given length and charset.

    ## Parameters
    * length the number of characters in the id (separators not counted)
    * charset what character set to use (a list of characters)
    * first_charset what character set for the first character
    * sep='' what character to insert between groups
    * group=0 how long the groups are (default 0 means no groups)
    """
    allowed_first = list(set(charset).intersection(first_charset))
    if not allowed_first:
        # no overlap between the charsets: fall back to the full charset
        allowed_first = charset
    parts = [allowed_first[random.randrange(len(allowed_first))]]
    for position in range(1, length):
        if group > 0 and position % group == 0:
            parts.append(sep)
        parts.append(charset[random.randrange(len(charset))])
    return ''.join(parts)
def merge_dict(data, *args):
    """Merge any number of dictionaries into a new dict.

    Later dictionaries win on duplicate keys; none of the inputs are
    modified."""
    merged = dict(data)
    for extra in args:
        merged.update(extra)
    return merged
def make_url(url, *paths):
    """Join *url* and any number of path segments into a single URL string.

    Each segment is attached with exactly one '/' between it and the
    current url, regardless of slashes already present on either side.
    """
    for path in paths:
        # normalize the segment to exactly one leading slash
        segment = re.sub(r'^/?', '/', path)
        # BUG FIX: count=1 is required — since Python 3.7, re.sub also
        # replaces the zero-width match left after consuming a trailing
        # '/', which duplicated the segment (e.g. 'a.com/' + 'b' ->
        # 'a.com/b/b').
        url = re.sub(r'/?$', segment, url, count=1)
    return url
def aggregate_result(self, return_code, output, service_description='', specific_servers=None):
    '''
    Queue a check result on every (selected) server so it can later be
    e-mailed by send_results().

    :param return_code: nagios-style status code; indexes the module-level
        STATUSES table for its textual name.
    :param output: the check's output text.
    :param service_description: label of the service the result belongs to.
    :param specific_servers: iterable of server names to queue on; when
        None the result goes to every configured server.
    '''
    # IDIOM FIX: compare to None with "is", not "=="
    if specific_servers is None:
        specific_servers = self.servers
    else:
        specific_servers = set(self.servers).intersection(specific_servers)
    for server in specific_servers:
        # honor per-server "send_errors_only": only non-zero codes queue
        if not self.servers[server]['send_errors_only'] or return_code > 0:
            self.servers[server]['results'].append({'return_code': return_code,
                                                    'output': output,
                                                    'service_description': service_description,
                                                    'return_status': STATUSES[return_code][0],
                                                    'custom_fqdn': self.servers[server]['custom_fqdn']})
            LOG.info("[email][%s][%s]: Aggregate result: %r", service_description, server, self.servers[server]['results'][-1])
def send_results(self):
    '''
    Flush the queued check results of every server by e-mail.

    A single queued result becomes an empty-body message with a descriptive
    subject; multiple results become one message whose body lists each
    result and whose subject summarizes counts per status.
    '''
    for server in self.servers:
        if self.servers[server]['results']:
            if len(self.servers[server]['results']) == 1:
                # single result: everything fits in the subject line
                msg = MIMEText('')
                msg['Subject'] = '[%(custom_fqdn)s] [%(service_description)s] %(return_status)s: %(output)s' % self.servers[server]['results'][0]
            else:
                # multiple results: one line per result in the body,
                # per-status counts in the subject
                txt = ''
                summary = [0, 0, 0, 0]
                for results in self.servers[server]['results']:
                    txt += '[%(service_description)s] %(return_status)s: %(output)s\n' % results
                    summary[results['return_code']] += 1
                msg = MIMEText(txt)
                subject = '[%(custom_fqdn)s]' % self.servers[server]['results'][0]
                for i, status in enumerate(STATUSES):
                    subject += ' %s:%s' % (status[0], summary[i])
                msg['Subject'] = subject
            msg['From'] = self.servers[server]['from']
            msg['To'] = ', '.join(self.servers[server]['to'])
            # NOTE(review): the 'tls' flag selects implicit SSL
            # (SMTP_SSL), not STARTTLS — confirm that matches the config's
            # intent.
            if self.servers[server]['tls']:
                smtp_server = smtplib.SMTP_SSL(self.servers[server]['host'], self.servers[server]['port'])
            else:
                smtp_server = smtplib.SMTP(self.servers[server]['host'], self.servers[server]['port'])
            if self.servers[server]['login'] and len(self.servers[server]['login']) > 0:
                smtp_server.login(self.servers[server]['login'], self.servers[server]['password'])
            smtp_server.sendmail(self.servers[server]['from'], self.servers[server]['to'], msg.as_string())
            smtp_server.quit()
            LOG.info("[email][%s]: e-mail sent from: %s to: %s", server, self.servers[server]['from'], self.servers[server]['to'])
def main():
    """Entry point: build a demo configuration and run the app."""
    calls = {
        "hello_world": {
            "delay": 5,
            "priority": 1,
            "arguments": None,
        },
        "marco": {
            "delay": 1,
            "priority": 1,
        },
        "pollo": {
            "delay": 1,
            "priority": 1,
        },
    }
    config = {
        "api": {
            "services": [
                {
                    "name": "my_api",
                    "testkey": "testval",
                },
            ],
            "calls": calls,
        }
    }
    app = AppBuilder([MyAPI], Strategy(Print()), AppConf(config))
    app.run()
def make_request(self, data):
    """Serialize *data* through the mock item schema and wrap the payload
    in an outgoing Request for the current call."""
    payload = MockItemSchema().dump(data)
    return Request(callname=self.context.get("callname"), payload=payload)
def populate_data(self, data):
    """Serialize *data* through the mock item schema and wrap it in a
    Result for the current call."""
    serialized = MockItemSchema().dump(data)
    return Result(callname=self.context.get("callname"), result=serialized)
def key_press(keys):
    """Return a handler usable with EventListener.listen() that resolves
    to the pressed key as soon as a key contained in *keys* goes down."""
    def handler(event):
        if event.type == pygame.KEYDOWN and event.key in keys:
            return event.key
        return EventConsumerInfo.DONT_CARE
    return handler
def unicode_char(ignored_chars=None):
    """Return a handler that resolves to the unicode character of the next
    KEYDOWN event, skipping characters found in *ignored_chars*."""
    def handler(event):
        if event.type != pygame.KEYDOWN:
            return EventConsumerInfo.DONT_CARE
        if ignored_chars is not None and event.unicode in ignored_chars:
            return EventConsumerInfo.DONT_CARE
        return event.unicode
    return handler
def mouse_area(self, handler, group=0, ident=None):
    """Return the MouseProxy registered for *handler* in *group*, creating
    and caching a new one on first use. listen() consults every proxy of
    the currently active proxy group."""
    key = ident or id(handler)
    proxies = self.mouse_proxies[group]
    if key not in proxies:
        proxies[key] = MouseProxy(handler, ident)
    return proxies[key]
def listen(self, *temporary_handlers):
    """When listen() is called all queued pygame.Events will be passed to all
    registered listeners. There are two ways to register a listener:
    1. as a permanent listener, that is always executed for every event. These
    are registered by passing the handler-functions during construction
    2. as a temporary listener, that will only be executed during the current
    call to listen(). These are registered by passing the handler functions
    as arguments to listen()
    When a handler is called it can provoke three different reactions through
    its return value.
    1. It can return EventConsumerInfo.DONT_CARE in which case the EventListener
    will pass the event to the next handler in line, or go to the next event,
    if the last handler was called.
    2. It can return EventConsumerInfo.CONSUMED in which case the event will not
    be passed to following handlers, and the next event in line will be
    processed.
    3. It can return anything else (including None, which will be returned if no
    return value is specified) in this case the listen()-method will return
    the result of the handler.
    Therefore all permanent handlers should usually return
    EventConsumerInfo.DONT_CARE
    """
    # handler chain order: permanent handlers, then the listeners of the
    # active mouse-proxy group, then this call's temporary handlers
    funcs = tuple(itt.chain(self.permanent_handlers,
                            (proxy.listener for proxy in
                             self.mouse_proxies[self.proxy_group].values()),
                            temporary_handlers))
    for event in self._get_q():
        for func in funcs:
            ret = func(event)
            if ret == EventConsumerInfo.CONSUMED:
                # stop the handler chain, move on to the next event
                break
            if ret == EventConsumerInfo.DONT_CARE:
                # offer the event to the next handler
                continue
            else:
                # any other value ends listen() immediately
                return ret
def listen_until_return(self, *temporary_handlers, timeout=0):
    """Call listen() repeatedly until it yields something other than None,
    then return that result. With a non-zero *timeout*, give up after that
    many seconds and return None."""
    deadline = None if timeout == 0 else time.time() + timeout
    while deadline is None or time.time() < deadline:
        outcome = self.listen(*temporary_handlers)
        if outcome is not None:
            return outcome
def wait_for_n_keypresses(self, key, n=1):
    """Block until *key* has been pressed *n* times.

    :param key: the key to wait for, as defined by pygame (e.g.
        pygame.K_LEFT for the left arrow key)
    :type key: int
    :param n: number of presses required before the function returns
    :type n: int
    """
    sentinel = "key_consumed"

    def on_key(event):
        if event.type == pygame.KEYDOWN and event.key == key:
            return sentinel
        return EventConsumerInfo.DONT_CARE

    presses = 0
    while presses < n:
        if self.listen(on_key) == sentinel:
            presses += 1
def wait_for_keys(self, *keys, timeout=0):
    """Wait until one of *keys* is pressed, and return which one.

    :param keys: pygame keycodes, either as separate arguments or as a
        single iterable
    :type keys: iterable
    :param timeout: seconds to wait before giving up (0 = wait forever)
    :type timeout: float
    :returns: the keycode of the pressed key, or None on timeout
    :rtype: int
    """
    # allow wait_for_keys([K_a, K_b]) in addition to wait_for_keys(K_a, K_b)
    if len(keys) == 1 and _is_iterable(keys[0]):
        keys = keys[0]
    handler = Handler.key_press(keys)
    return self.listen_until_return(handler, timeout=timeout)
def wait_for_keys_modified(self, *keys, modifiers_to_check=_mod_keys,
                           timeout=0):
    """Like wait_for_keys, but return a frozenset containing the pressed
    key plus the modifier keys that were down.

    :param modifiers_to_check: modifiers to test for being pressed
    :type modifiers_to_check: Iterable[int]"""
    # modifier state is sampled before blocking on the key press
    active_mods = pygame.key.get_mods()
    pressed = frozenset([self.wait_for_keys(*keys, timeout=timeout)])
    modifiers = EventListener._contained_modifiers(active_mods, modifiers_to_check)
    return pressed | modifiers
def wait_for_unicode_char(self, ignored_chars=None, timeout=0):
    """Wait for a key press and return the resulting character as a str.

    Modifier keys and keyboard layout are already applied by pygame. If
    *timeout* is non-zero and no key is pressed within that many seconds,
    None is returned. Characters contained in *ignored_chars* are skipped;
    any object with a ``__contains__`` method works (str, set, list, ...).
    """
    handler = Handler.unicode_char(ignored_chars)
    return self.listen_until_return(handler, timeout=timeout)
def _find_all_first_files(self, item):
"""
Does not support the full range of ways rar can split
as it'd require reading the file to ensure you are using the
correct way.
"""
for listed_item in item.list():
new_style = re.findall(r'(?i)\.part(\d+)\.rar^', listed_item.id)
if new_style:
if int(new_style[0]) == 1:
yield 'new', listed_item
elif listed_item.id.lower().endswith('.rar'):
yield 'old', listed_item | Does not support the full range of ways rar can split
as it'd require reading the file to ensure you are using the
correct way. | entailment |
def find_datafile(name, search_path, codecs=None):
    """
    find all matching data files in search_path
    search_path: path of directories to load from
    codecs: allow to override from list of installed; when omitted,
        get_codecs() is evaluated at call time
    returns array of tuples (codec_object, filename)
    """
    # BUG FIX: the default was previously `codecs=get_codecs()`, which is
    # evaluated once at import time and never reflects codecs installed
    # afterwards; resolve it lazily instead.
    if codecs is None:
        codecs = get_codecs()
    return munge.find_datafile(name, search_path, codecs)
def load_datafile(name, search_path, codecs=None, **kwargs):
    """
    find datafile and load them from codec
    TODO only does the first one
    codecs: allow to override from list of installed; when omitted,
        get_codecs() is evaluated at call time
    kwargs:
    default = if passed will return that on failure instead of throwing
    """
    # BUG FIX: the default was previously `codecs=get_codecs()`, evaluated
    # once at import time; resolve it lazily so later-installed codecs are
    # picked up.
    if codecs is None:
        codecs = get_codecs()
    return munge.load_datafile(name, search_path, codecs, **kwargs)
def cred_init(
        self,
        *,
        secrets_dir: str,
        log: Logger,
        bot_name: str,
) -> None:
    """
    Initialize what requires credentials/secret files.

    Reads the four Twitter OAuth secrets (each stored in its own flat file
    under *secrets_dir*), optionally the owner's handle, and builds an
    authenticated tweepy API client.

    :param secrets_dir: dir to expect credentials in and store logs/history in.
    :param log: logger to use for log output.
    :param bot_name: name of this bot, used for various kinds of labelling.
    :returns: none.
    """
    super().__init__(secrets_dir=secrets_dir, log=log, bot_name=bot_name)
    # each credential lives in its own file; strip trailing newlines
    self.ldebug("Retrieving CONSUMER_KEY...")
    with open(path.join(self.secrets_dir, "CONSUMER_KEY")) as f:
        CONSUMER_KEY = f.read().strip()
    self.ldebug("Retrieving CONSUMER_SECRET...")
    with open(path.join(self.secrets_dir, "CONSUMER_SECRET")) as f:
        CONSUMER_SECRET = f.read().strip()
    self.ldebug("Retrieving ACCESS_TOKEN...")
    with open(path.join(self.secrets_dir, "ACCESS_TOKEN")) as f:
        ACCESS_TOKEN = f.read().strip()
    self.ldebug("Retrieving ACCESS_SECRET...")
    with open(path.join(self.secrets_dir, "ACCESS_SECRET")) as f:
        ACCESS_SECRET = f.read().strip()
    # OWNER_HANDLE is optional — without it, error DMs cannot be sent
    self.ldebug("Looking for OWNER_HANDLE...")
    owner_handle_path = path.join(self.secrets_dir, "OWNER_HANDLE")
    if path.isfile(owner_handle_path):
        with open(owner_handle_path) as f:
            self.owner_handle = f.read().strip()
    else:
        self.ldebug("Couldn't find OWNER_HANDLE, unable to DM...")
        self.owner_handle = ""
    self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    self.auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
    self.api = tweepy.API(self.auth)
def send(
        self,
        *,
        text: str,
) -> List[OutputRecord]:
    """
    Send birdsite message.

    :param text: text to send in post.
    :returns: single-element list holding either the record of the new
        post, or an error record.
    """
    try:
        status = self.api.update_status(text)
        self.ldebug(f"Status object from tweet: {status}.")
        record = TweetRecord(record_data={"tweet_id": status._json["id"], "text": text})
        return [record]
    except tweepy.TweepError as e:
        error_message = (f"Bot {self.bot_name} encountered an error when "
                         f"sending post {text} without media:\n{e}\n")
        return [self.handle_error(message=error_message, error=e)]
def send_with_media(
        self,
        *,
        text: str,
        files: List[str],
        captions: List[str]=[]
) -> List[OutputRecord]:
    """
    Upload media to birdsite, and send status and media, and captions if
    present.

    :param text: tweet text.
    :param files: list of files to upload with post.
    :param captions: list of captions to include as alt-text with files.
        NOTE(review): mutable default argument — _handle_caption_upload
        extends the list it receives, so padding can accumulate in the
        shared default across calls; confirm and switch to None.
    :returns: list of output records, each corresponding to either a
        single post, or an error.
    """
    # upload media first; bail out with an error record if any upload fails
    media_ids = None
    try:
        self.ldebug(f"Uploading files {files}.")
        media_ids = [self.api.media_upload(file).media_id_string for file in files]
    except tweepy.TweepError as e:
        return [self.handle_error(
            message=f"Bot {self.bot_name} encountered an error when uploading {files}:\n{e}\n",
            error=e)]
    # apply captions, if present
    self._handle_caption_upload(media_ids=media_ids, captions=captions)
    # send status
    try:
        status = self.api.update_status(status=text, media_ids=media_ids)
        self.ldebug(f"Status object from tweet: {status}.")
        return [TweetRecord(record_data={
            "tweet_id": status._json["id"],
            "text": text,
            "media_ids": media_ids,
            "captions": captions,
            "files": files
        })]
    except tweepy.TweepError as e:
        return [self.handle_error(
            message=(f"Bot {self.bot_name} encountered an error when "
                     f"sending post {text} with media ids {media_ids}:\n{e}\n"),
            error=e)]
def perform_batch_reply(
        self,
        *,
        callback: Callable[..., str],
        lookback_limit: int,
        target_handle: str,
) -> List[OutputRecord]:
    """
    Performs batch reply on target account. Looks up the recent messages
    of the target user, applies the callback, and replies with what the
    callback generates.

    :param callback: a callback taking a message id, message contents,
        and optional extra keys, and returning a message string.
    :param target_handle: the handle of the target account.
    :param lookback_limit: a lookback limit of how many messages to consider.
    :returns: list of output records, each corresponding to either a
        single post, or an error.
    """
    self.log.info(f"Attempting to batch reply to birdsite user {target_handle}")
    # accept handles with or without a leading '@'
    if "@" in target_handle:
        base_target_handle = target_handle[1:]
    else:
        base_target_handle = target_handle
    records: List[OutputRecord] = []
    statuses = self.api.user_timeline(screen_name=base_target_handle, count=lookback_limit)
    self.log.debug(f"Retrieved {len(statuses)} statuses.")
    for i, status in enumerate(statuses):
        self.log.debug(f"Processing status {i} of {len(statuses)}")
        status_id = status.id
        # find possible replies we've made.
        # the 10 * lookback_limit is a guess,
        # might not be enough and I'm not sure we can guarantee it is.
        our_statuses = self.api.user_timeline(since_id=status_id,
                                              count=lookback_limit * 10)
        in_reply_to_ids = list(map(lambda x: x.in_reply_to_status_id, our_statuses))
        if status_id not in in_reply_to_ids:
            # the twitter API and tweepy will attempt to give us the truncated text of the
            # message if we don't do this roundabout thing.
            encoded_status_text = self.api.get_status(status_id,
                                                      tweet_mode="extended")._json["full_text"]
            status_text = html.unescape(encoded_status_text)
            message = callback(message_id=status_id, message=status_text, extra_keys={})
            full_message = f"@{base_target_handle} {message}"
            self.log.info(f"Trying to reply with {message} to status {status_id} "
                          f"from {target_handle}.")
            try:
                new_status = self.api.update_status(status=full_message,
                                                    in_reply_to_status_id=status_id)
                records.append(TweetRecord(record_data={
                    "tweet_id": new_status.id,
                    "in_reply_to": f"@{base_target_handle}",
                    "in_reply_to_id": status_id,
                    "text": full_message,
                }))
            except tweepy.TweepError as e:
                records.append(self.handle_error(
                    message=(f"Bot {self.bot_name} encountered an error when "
                             f"trying to reply to {status_id} with {message}:\n{e}\n"),
                    error=e))
        else:
            self.log.info(f"Not replying to status {status_id} from {target_handle} "
                          f"- we already replied.")
    return records
def send_dm_sos(self, message: str) -> None:
    """
    Send DM to owner if something happens.

    :param message: message to send to owner.
    :returns: None.
    """
    if not self.owner_handle:
        self.lerror("Can't send DM SOS, no owner handle.")
        return
    try:
        # twitter changed the DM API and tweepy (as of 2019-03-08)
        # has not adapted.
        # fixing with
        # https://github.com/tweepy/tweepy/issues/1081#issuecomment-423486837
        owner_id = self.api.get_user(screen_name=self.owner_handle).id
        payload = {
            "event": {
                "type": "message_create",
                "message_create": {
                    "target": {
                        "recipient_id": f"{owner_id}",
                    },
                    "message_data": {
                        "text": message
                    }
                }
            }
        }
        self._send_direct_message_new(payload)
    except tweepy.TweepError as de:
        self.lerror(f"Error trying to send DM about error!: {de}")
def handle_error(
        self,
        *,
        message: str,
        error: tweepy.TweepError,
) -> OutputRecord:
    """
    Handle error while trying to do something.

    :param message: message to send in DM regarding error.
    :param error: tweepy error object.
    :returns: OutputRecord containing an error.
    """
    self.lerror(f"Got an error! {error}")
    # Handle errors if we know how.
    try:
        code = error[0]["code"]
        if code in self.handled_errors:
            # NOTE(review): this only looks up the handler and discards
            # it — presumably it should be called; confirm intent.
            self.handled_errors[code]
        else:
            self.send_dm_sos(message)
    except Exception:
        # errors without the indexable [0]["code"] shape end up here;
        # fall back to DMing the owner
        self.send_dm_sos(message)
    return TweetRecord(error=error)
def _handle_caption_upload(
self,
*,
media_ids: List[str],
captions: Optional[List[str]],
) -> None:
"""
Handle uploading all captions.
:param media_ids: media ids of uploads to attach captions to.
:param captions: captions to be attached to those media ids.
:returns: None.
"""
if captions is None:
captions = []
if len(media_ids) > len(captions):
captions.extend([self.default_caption_message] * (len(media_ids) - len(captions)))
for i, media_id in enumerate(media_ids):
caption = captions[i]
self._upload_caption(media_id=media_id, caption=caption) | Handle uploading all captions.
:param media_ids: media ids of uploads to attach captions to.
:param captions: captions to be attached to those media ids.
:returns: None. | entailment |
def _send_direct_message_new(self, messageobject: Dict[str, Dict]) -> Any:
    """
    Send a direct message through the events-based twitter DM endpoint,
    which tweepy did not wrap natively at the time of writing.

    :param messageobject: fully-formed message_create event payload.
    :returns: whatever tweepy's bound API call returns.
    :reference: https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/new-event.html
    """
    headers, post_data = _buildmessageobject(messageobject)
    newdm_path = "/direct_messages/events/new.json"
    # bind_api builds a one-off endpoint binding against the existing
    # authenticated api object, then we invoke it immediately
    return tweepy.binder.bind_api(
        api=self.api,
        path=newdm_path,
        method="POST",
        require_auth=True,
    )(post_data=post_data, headers=headers)
def main():
    """Takes crash data via stdin and generates a Socorro signature"""
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument(
        '-v', '--verbose', help='increase output verbosity', action='store_true'
    )
    opts = parser.parse_args()
    generator = SignatureGenerator(debug=opts.verbose)
    crash_data = json.loads(sys.stdin.read())
    signature = generator.generate(crash_data)
    print(json.dumps(signature, indent=2))
def add_config(self, config):
    """
    Update internal configuration dict with *config* and recheck.

    :param config: mapping of option name -> value; attributes listed in
        self.__fixed_attrs may not be overridden here.
    :raises Exception: if a fixed attribute appears in *config*, or if
        LD_LIBRARY_PATH is set (unless skip_env_check is given).
    """
    for attr in self.__fixed_attrs:
        if attr in config:
            # BUG FIX: the message was previously passed as two Exception
            # args ("...", attr) and never %-formatted.
            raise Exception("cannot set '%s' outside of init" % attr)
    # pre checkout
    stages = config.get('stages', None)
    if stages:
        self.stages = stages
    # maybe pre checkout
    # validate options
    self.__dry_run = config.get('dry_run', False)
    # IDIOM: method call instead of str.lower(...) on the result
    self.system = platform.system().lower()
    self.__start = config.get('start', None)
    self.__end = config.get('end', None)
    self.__only = config.get('only', None)
    self.__build_docs = config.get('build_docs', False)
    self.__chatty = config.get('chatty', False)
    self.__clean = config.get('clean', False)
    self.__devel = config.get('devel', False)
    self.__debug = config.get('debug', False)
    self.__skip_libcheck = config.get('skip_libcheck', False)
    self.__debuginfo = config.get('debuginfo', False)
    self.__release = config.get('release', False)
    self.__skip_unit = config.get('skip_unit', False)
    self.__static = config.get('static', False)
    self.__make_dash_j = int(config.get('j', 0))
    self.__target_only = config.get('target_only', None)
    bits = config.get('bits', None)
    if bits:
        self.bits = int(bits)
    else:
        self.bits = self.sys_bits
    self.compiler = config.get('compiler', None)
    self.test_config = config.get('test_config', '-')
    if not self.test_config:
        self.test_config = '-'
    self.use_ccache = config.get('use_ccache', False)
    self.tmpl_engine = config.get('tmpl_engine', 'jinja2')
    self.__write_codec = config.get('write_codec', None)
    self.__codec = None
    # TODO move out of init
    if not config.get('skip_env_check', False):
        if "LD_LIBRARY_PATH" in os.environ:
            raise Exception("environment variable LD_LIBRARY_PATH is set")
    self.check_config()
def check_config(self):
    """
    called after config was modified to sanity check
    raises on error
    """
    # sanity checks - no config access past here
    if not getattr(self, 'stages', None):
        raise NotImplementedError("member variable 'stages' must be defined")
    # start at stage
    if self.__start:
        self.__stage_start = self.find_stage(self.__start)
    else:
        self.__stage_start = 0
    # end at stage (exclusive, hence +1)
    if self.__end:
        self.__stage_end = self.find_stage(self.__end) + 1
        self.opt_end = self.__end
    else:
        self.__stage_end = len(self.stages)
    # only stage: mutually exclusive with start/end
    if self.__only:
        if self.__start or self.__end:
            raise Exception(
                "stage option 'only' cannot be used with start or end")
        self.__stage_start = self.find_stage(self.__only)
        self.__stage_end = self.__stage_start + 1
    if self.__devel:
        self.__devel = True
        # force deploy skip
        if self.__stage_end >= len(self.stages):
            self.status_msg("removing deploy stage for development build")
            # XXX self.__stage_end = self.__stage_end - 1
    if self.stage_start >= self.stage_end:
        raise Exception("start and end produce no stages")
    if self.bits not in [32, 64]:
        raise Exception(
            "can't do a %d bit build: unknown build process" % self.bits)
    if self.bits == 64 and not self.is_64b:
        raise Exception(
            "this machine is not 64 bit, cannot perform 64 bit build")
    # compiler selection is fixed per platform
    if self.system == 'windows':
        self.compilertag = 'vc10'
    elif self.system == 'linux':
        self.compilertag = 'gcc44'
    else:
        raise RuntimeError("can't decide compilertag on " + self.system)
    self.build_suffix = ''
    if not self.is_unixy:
        # MSVC runtime selection: MT(d) static, MD(d) dynamic; 'd' = debug
        if self.__static:
            runtime = 'MT'
        else:
            runtime = 'MD'
        if self.__release:
            self.configuration_name = 'Release'
        else:
            runtime += 'd'
            self.configuration_name = 'Debug'
        self.build_suffix = '-' + runtime
        self.runtime = runtime
    else:
        # sentinel values: these must never be used on unix-like systems
        self.configuration_name = 'CFNAME_INVALID_ON_LINUX'
        self.runtime = 'RUNTIME_INVALID_ON_LINUX'
    if self.test_config != '-':
        self.test_config = os.path.abspath(self.test_config)
    # split version into major.minor.patch[.build]
    if self.version:
        ver = self.version.split('.')
        self.version_major = int(ver[0])
        self.version_minor = int(ver[1])
        self.version_patch = int(ver[2])
        if(len(ver) == 4):
            self.version_build = int(ver[3])
def check_definition(self):
    """
    Sanity-check state after the Definition was loaded; raises on error.
    """
    if not self.write_codec:
        # fall back to the extension declared by the definition
        self.__write_codec = self.defined.data_ext
    # TODO need to add back a class scope target limited for subprojects with sub target sets
    available = self.get_defined_targets()
    if not self.__target_only:
        self.targets = available
    else:
        if self.__target_only not in available:
            raise RuntimeError("invalid target '%s'" % self.__target_only)
        self.targets = [self.__target_only]
def find_datafile(self, name, search_path=None):
    """
    Find all matching data files in *search_path* (defaults to
    self.define_dir); returns array of tuples (codec_object, filename).
    """
    search_path = search_path or self.define_dir
    return codec.find_datafile(name, search_path)
def load_datafile(self, name, search_path=None, **kwargs):
    """
    Find datafile *name* and load it through its codec; extra kwargs are
    forwarded to codec.load_datafile.
    """
    search_path = search_path or self.define_dir
    self.debug_msg('loading datafile %s from %s' % (name, str(search_path)))
    return codec.load_datafile(name, search_path, **kwargs)
def run(self):
    """
    Run all configured deployment stages in order.

    Iterates over ``self.stages[self.stage_start:self.stage_end]``,
    invoking each stage as a method on self. Announces begin/done/failure
    via alact() when self.chatty is set.

    :returns: 0 on success
    :raises Exception: if no release_environment is configured, or
        whatever a failing stage raises
    """
    self.sanity_check()
    # TODO - check for devel
    # if not self.version:
    #     raise Exception("no version")
    # XXX check attr exist
    if not self.release_environment:
        raise Exception("no instance name")
    time_start = time.time()
    cwd = os.getcwd()
    who = getpass.getuser()
    self._make_outdirs()
    # Build a human-readable suffix describing unusual run modes
    # (shortened push, devel build, caller-provided notices).
    append_notices = ""
    if hasattr(self, 'opt_end'):
        append_notices = ". shortened push, only to %s stage" % self.opt_end
    if self.is_devel:
        append_notices += ". devel build"
    if hasattr(self, 'append_notices'):
        append_notices += self.append_notices
    line = "%s %s %s by %s%s" % (
        sys.argv[0], self.version, self.release_environment, who, append_notices)
    b = 'deploy begin %s' % line
    e = 'deploy done %s' % line
    if self.chatty:
        self.alact(b)
    ok = False
    stage_passed = None
    try:
        for stage in self.stages[self.stage_start:self.stage_end]:
            self.debug_msg("stage %s starting" % (stage,))
            # Stage names double as method names on self.
            getattr(self, stage)()
            # Restore the working directory in case the stage chdir'd away.
            self.chdir(cwd)
            stage_passed = stage
            self.debug_msg("stage %s complete" % (stage,))
        ok = True
    finally:
        # On failure (exception propagating out of a stage) report how far
        # we got before re-raising.
        if not ok:
            if self.chatty:
                if not stage_passed:
                    self.alact(
                        'deploy failed %s. completed no stages' % line)
                else:
                    self.alact('deploy failed %s. completed %s' %
                               (line, stage_passed))
    self.status_msg('[OK]')
    if self.chatty:
        self.alact('%s in %0.3f sec' % (e, time.time() - time_start))
    return 0
def timestamp(dt):
    """
    Return the POSIX timestamp of *dt* as a float.

    Naive datetimes are interpreted in local time via time.mktime; aware
    datetimes are measured against the UTC epoch.

    >>> timestamp(datetime.datetime.now()) > 1494638812
    True
    >>> timestamp(datetime.datetime.now()) % 1 > 0
    True
    """
    if dt.tzinfo is not None:
        return (dt - _EPOCH).total_seconds()
    # timetuple() carries isdst=-1, letting mktime resolve DST itself.
    whole_seconds = time.mktime(dt.timetuple())
    return whole_seconds + dt.microsecond / 1e6
def rate_limited(max_per_hour: int, *args: Any) -> Callable[..., Any]:
    """Delegate to util.rate_limited to rate limit a function."""
    limiter = util.rate_limited(max_per_hour, *args)
    return limiter
def _repair(record: Dict[str, Any]) -> Dict[str, Any]:
"""Repair a corrupted IterationRecord with a specific known issue."""
output_records = record.get("output_records")
if record.get("_type", None) == "IterationRecord" and output_records is not None:
birdsite_record = output_records.get("birdsite")
# check for the bug
if isinstance(birdsite_record, dict) and birdsite_record.get("_type") == "IterationRecord":
# get to the bottom of the corrupted record
failed = False
while birdsite_record.get("_type") == "IterationRecord":
sub_record = birdsite_record.get("output_records")
if sub_record is None:
failed = True
break
birdsite_record = sub_record.get("birdsite")
if birdsite_record is None:
failed = True
break
if failed:
return record
# add type
birdsite_record["_type"] = TweetRecord.__name__
# lift extra keys, just in case
if "extra_keys" in birdsite_record:
record_extra_values = record.get("extra_keys", {})
for key, value in birdsite_record["extra_keys"].items():
if key not in record_extra_values:
record_extra_values[key] = value
record["extra_keys"] = record_extra_values
del birdsite_record["extra_keys"]
output_records["birdsite"] = birdsite_record
# pull that correct record up to the top level, fixing corruption
record["output_records"] = output_records
return record | Repair a corrupted IterationRecord with a specific known issue. | entailment |
def from_dict(cls, obj_dict: Dict[str, Any]) -> "IterationRecord":
    """Rebuild an IterationRecord from its dict representation."""
    restored = cls()
    restored.__dict__.update(obj_dict)
    return restored
def send(
    self,
    *args: str,
    text: str=None,
) -> IterationRecord:
    """
    Post a text-only message to every active output.

    :param args: positional fallback; args[0] is used as the message text
        when the text keyword is not supplied
    :param text: text to send as message in post (preferred over the
        positional form)
    :returns: new record of iteration
    :raises BotSkeletonException: if no text was provided either way
    """
    if text is None:
        if not args:
            raise BotSkeletonException(("Please provide text either as a positional arg or "
                                        "as a keyword arg (text=TEXT)"))
        message = args[0]
    else:
        message = text
    # TODO there could be some annotation stuff here.
    record = IterationRecord(extra_keys=self.extra_keys)
    for name, output in self.outputs.items():
        if not output["active"]:
            self.log.info(f"Output {name} is inactive. Not sending.")
            continue
        self.log.info(f"Output {name} is active, calling send on it.")
        handler: Any = output["obj"]
        record.output_records[name] = handler.send(text=message)
    self.history.append(record)
    self.update_history()
    return record
def send_with_one_media(
    self,
    *args: str,
    text: str=None,
    file: str=None,
    caption: str=None,
) -> IterationRecord:
    """
    Post a message with one media item to every active output.

    Filenames are handed to each output so it can manage its own upload.

    :param args: positional fallbacks for (text, file, caption); keyword
        arguments take precedence
    :param text: text to send as message in post
    :param file: file to be uploaded in post
    :param caption: caption to be uploaded alongside file (optional)
    :returns: new record of iteration
    :raises TypeError: if text or file is missing in both forms
    """
    message = text
    if message is None:
        if len(args) < 1:
            raise TypeError(("Please provide either positional argument "
                             "TEXT, or keyword argument text=TEXT"))
        message = args[0]
    upload = file
    if upload is None:
        if len(args) < 2:
            raise TypeError(("Please provide either positional argument "
                             "FILE, or keyword argument file=FILE"))
        upload = args[1]
    # The caption is genuinely optional, so no error when it is absent.
    chosen_caption = caption
    if chosen_caption is None and len(args) >= 3:
        chosen_caption = args[2]
    # TODO more error checking like this.
    captions: List[str] = [] if not chosen_caption else [chosen_caption]
    record = IterationRecord(extra_keys=self.extra_keys)
    for name, output in self.outputs.items():
        if not output["active"]:
            self.log.info(f"Output {name} is inactive. Not sending with media.")
            continue
        self.log.info(f"Output {name} is active, calling media send on it.")
        handler: Any = output["obj"]
        record.output_records[name] = handler.send_with_media(
            text=message, files=[upload], captions=captions)
    self.history.append(record)
    self.update_history()
    return record
def send_with_many_media(
    self,
    *args: str,
    text: str=None,
    files: List[str]=None,
    captions: List[str]=None,
) -> IterationRecord:
    """
    Post a message with several media items to every active output.

    Filenames are handed to each output so it can manage its own uploads.

    :param args: positional fallbacks: args[0] is the text, args[1:] the
        files; keyword arguments take precedence
    :param text: text to send as message in post
    :param files: files to be uploaded in post
    :param captions: captions to be uploaded alongside files (optional;
        was never permitted positionally)
    :returns: new record of iteration
    :raises TypeError: if text or files is missing in both forms
    """
    if text is None:
        if len(args) < 1:
            raise TypeError(("Please provide either required positional argument "
                             "TEXT, or keyword argument text=TEXT"))
        final_text = args[0]
    else:
        final_text = text
    if files is None:
        if len(args) < 2:
            raise TypeError(("Please provide either positional argument "
                             "FILES, or keyword argument files=FILES"))
        final_files = list(args[1:])
    else:
        final_files = files
    # Use a None sentinel instead of a mutable [] default so a single list
    # object is not shared (and potentially mutated downstream) across
    # calls; omitting captions still yields a fresh empty list as before.
    final_captions = [] if captions is None else captions
    record = IterationRecord(extra_keys=self.extra_keys)
    for key, output in self.outputs.items():
        if output["active"]:
            self.log.info(f"Output {key} is active, calling media send on it.")
            entry: Any = output["obj"]
            output_result = entry.send_with_media(text=final_text,
                                                  files=final_files,
                                                  captions=final_captions)
            record.output_records[key] = output_result
        else:
            self.log.info(f"Output {key} is inactive. Not sending with media.")
    self.history.append(record)
    self.update_history()
    return record
def perform_batch_reply(
    self,
    *,
    callback: Callable[..., str]=None,
    target_handles: Dict[str, str]=None,
    lookback_limit: int=20,
    per_service_lookback_limit: Dict[str, int]=None,
) -> IterationRecord:
    """
    Perform a batch reply against target accounts.

    For each active output with a configured target handle, looks up that
    target's recent messages, applies *callback* to each, and replies with
    what the callback generates.

    :param callback: callable taking a message id, message contents, and
        optional extra keys, and returning a reply string
    :param target_handles: map of service name to target handle
        (currently only one per service)
    :param lookback_limit: how many recent messages to consider (optional)
    :param per_service_lookback_limit: per-service lookback overrides;
        takes preference over lookback_limit (optional)
    :returns: new record of iteration
    :raises BotSkeletonException: if required arguments are missing or the
        lookback limit is out of range
    """
    if callback is None:
        raise BotSkeletonException("Callback must be provided.")
    if target_handles is None:
        raise BotSkeletonException("Targets must be provided.")
    if lookback_limit > self.lookback_limit:
        raise BotSkeletonException(
            f"Lookback_limit cannot exceed {self.lookback_limit}, " +
            f"but it was {lookback_limit}"
        )
    # Work from a per-service lookback dict for convenience, filling any
    # gaps from the global lookback_limit as we go.
    lookbacks = per_service_lookback_limit
    if lookbacks is None:
        lookbacks = {}
    record = IterationRecord(extra_keys=self.extra_keys)
    for key, output in self.outputs.items():
        lookbacks.setdefault(key, lookback_limit)
        if target_handles.get(key, None) is None:
            self.log.info(f"No target for output {key}, skipping this output.")
            continue
        if not output.get("active", False):
            self.log.info(f"Output {key} is inactive. Not calling batch reply.")
            continue
        self.log.info(f"Output {key} is active, calling batch reply on it.")
        handler: Any = output["obj"]
        record.output_records[key] = handler.perform_batch_reply(
            callback=callback,
            target_handle=target_handles[key],
            lookback_limit=lookbacks[key],
        )
    self.history.append(record)
    self.update_history()
    return record
def nap(self) -> None:
    """
    Sleep for the duration of self.delay, showing a progress bar.

    :returns: None
    """
    self.log.info(f"Sleeping for {self.delay} seconds.")
    one_second_ticks = range(self.delay)
    for _tick in progress.bar(one_second_ticks):
        time.sleep(1)
def store_extra_info(self, key: str, value: Any) -> None:
    """
    Record one extra key/value pair in the messaging storage.

    :param key: key of dictionary entry to add
    :param value: value of dictionary entry to add
    :returns: None
    """
    self.extra_keys.update({key: value})
def store_extra_keys(self, d: Dict[str, Any]) -> None:
    """
    Merge several extra values into the messaging storage.

    Entries in *d* win over existing ones with the same key; the merge
    result replaces self.extra_keys with a fresh dict.

    :param d: dictionary entry to merge with current self.extra_keys
    :returns: None
    """
    merged = dict(self.extra_keys)
    merged.update(d)
    self.extra_keys = merged
def update_history(self) -> None:
    """
    Persist the in-memory messaging history to self.history_filename.

    Serializes each history record (and its per-output sub-records) to
    JSON. Each record's __dict__ is shallow-copied before its
    output_records are replaced with their dict form, so the live records
    are left untouched (the previous implementation aliased __dict__ and
    clobbered the in-memory records' output_records with plain dicts).

    :returns: None
    """
    self.log.debug(f"Saving history. History is: \n{self.history}")
    jsons = []
    for item in self.history:
        # Shallow copy: converting output_records for JSON must not
        # mutate the live record object.
        json_item = dict(item.__dict__)
        json_item["output_records"] = self._parse_output_records(item)
        jsons.append(json_item)
    # Mode "w" creates the file when missing and truncates it otherwise,
    # so no separate touch is needed.
    with open(self.history_filename, "w") as f:
        json.dump(jsons, f, default=lambda x: x.__dict__.copy(), sort_keys=True, indent=4)
        f.write("\n")
def load_history(self) -> List["IterationRecord"]:
    """
    Load messaging history from disk to self.

    Handles two on-disk formats: the current list of IterationRecord
    dicts (repairing known corruption via _repair), and legacy histories
    whose entries are bare TweetRecord dicts (migrated into
    IterationRecords). On undecodable JSON the file is backed up to
    ``<history_filename>.bak`` and an empty history is returned.

    :returns: List of iteration records comprising history.
    """
    if path.isfile(self.history_filename):
        with open(self.history_filename, "r") as f:
            try:
                dicts = json.load(f)
            except json.decoder.JSONDecodeError as e:
                self.log.error(f"Got error \n{e}\n decoding JSON history, overwriting it.\n"
                               f"Former history available in {self.history_filename}.bak")
                copyfile(self.history_filename, f"{self.history_filename}.bak")
                return []
        history: List[IterationRecord] = []
        for hdict_pre in dicts:
            if "_type" in hdict_pre and hdict_pre["_type"] == IterationRecord.__name__:
                # repair any corrupted entries
                hdict = _repair(hdict_pre)
                record = IterationRecord.from_dict(hdict)
                history.append(record)
            # Be sure to handle legacy tweetrecord-only histories.
            # Assume anything without our new _type (which should have been there from the
            # start, whoops) is a legacy history.
            else:
                item = IterationRecord()
                # Lift extra keys up to upper record (if they exist).
                # NOTE: pop BEFORE from_dict so they don't also end up on
                # the nested TweetRecord.
                extra_keys = hdict_pre.pop("extra_keys", {})
                item.extra_keys = extra_keys
                hdict_obj = TweetRecord.from_dict(hdict_pre)
                # Lift timestamp up to upper record.
                item.timestamp = hdict_obj.timestamp
                item.output_records["birdsite"] = hdict_obj
                history.append(item)
        self.log.debug(f"Loaded history:\n {history}")
        return history
    else:
        return []
def _setup_all_outputs(self) -> None:
    """Set up all output methods, providing credentials and anything else they need.

    An output is considered configured iff a ``credentials_<name>``
    directory exists under the secrets dir (with a historical special
    case letting birdsite keep its credentials directly in the secrets
    dir).
    """
    for key in self.outputs.keys():
        credentials_dir = path.join(self.secrets_dir, f"credentials_{key}")
        # special-case birdsite for historical reasons.
        if key == "birdsite" and not path.isdir(credentials_dir) \
                and path.isfile(path.join(self.secrets_dir, "CONSUMER_KEY")):
            credentials_dir = self.secrets_dir
        if not path.isdir(credentials_dir):
            continue
        output_skeleton = self.outputs[key]
        output_skeleton["active"] = True
        handler: Any = output_skeleton["obj"]
        handler.cred_init(secrets_dir=credentials_dir, log=self.log, bot_name=self.bot_name)
        output_skeleton["obj"] = handler
        self.outputs[key] = output_skeleton
def _parse_output_records(self, item: IterationRecord) -> Dict[str, Any]:
    """Convert an item's output records into JSON-ready dicts.

    Dicts and lists pass through unchanged; any other object is replaced
    by its __dict__.
    """
    return {
        key: sub_item if isinstance(sub_item, (dict, list)) else sub_item.__dict__
        for key, sub_item in item.output_records.items()
    }
def make_dir(fname):
    """
    Create the directory of a fully qualified file name if it does not exist.

    Equivalent to these Bash shell commands:

    .. code-block:: bash

        $ fname="${HOME}/mydir/myfile.txt"
        $ dir=$(dirname "${fname}")
        $ mkdir -p "${dir}"

    :param fname: Fully qualified file name
    :type fname: string
    """
    file_path = os.path.dirname(os.path.abspath(fname))
    try:
        # EAFP: attempt creation instead of testing first, so concurrent
        # callers cannot race between an existence check and makedirs.
        os.makedirs(file_path)
    except OSError:
        # Re-raise unless the directory already exists (created earlier
        # or by a concurrent process).
        if not os.path.isdir(file_path):
            raise
def normalize_windows_fname(fname, _force=False):
    r"""
    Fix potential problems with a Microsoft Windows file name.

    Superfluous backslashes are removed and unintended escape sequences are
    converted to their equivalent (presumably correct and intended)
    representation, for example :code:`r'\\\\x07pps'` is transformed to
    :code:`r'\\\\\\\\apps'`. A file name is considered a network share if the
    file does not include a drive letter and it starts with a double backslash
    (:code:`'\\\\\\\\'`)

    :param fname: File name
    :type fname: string

    :param _force: Apply the normalization even off-Windows (testing hook)

    :rtype: string
    """
    if (not _force) and (platform.system().lower() != "windows"):  # pragma: no cover
        return fname
    # Map control characters produced by unintended escape sequences
    # (e.g. "\a" in a non-raw literal) back to their escaped spelling.
    escape_map = {
        "\x07": r"\\a",
        "\x08": r"\\b",
        "\x0C": r"\\f",
        "\x0A": r"\\n",
        "\x0D": r"\\r",
        "\x09": r"\\t",
        "\x0B": r"\\v",
    }
    ret = "".join(escape_map.get(char, char) for char in os.path.normpath(fname))
    # Record share-ness from the *original* name before collapsing.
    network_share = fname.startswith(r"\\")
    # Collapse runs of doubled double-backslashes until a fixed point.
    prev = None
    while prev != ret:
        prev, ret = ret, ret.replace(r"\\\\", r"\\")
    # Put back the leading double backslash for network shares.
    if network_share:
        ret = r"\\" + ret.lstrip(r"\\")
    return ret
def _homogenize_linesep(line):
"""Enforce line separators to be the right one depending on platform."""
token = str(uuid.uuid4())
line = line.replace(os.linesep, token).replace("\n", "").replace("\r", "")
return line.replace(token, os.linesep) | Enforce line separators to be the right one depending on platform. | entailment |
def _proc_token(spec, mlines):
"""Process line range tokens."""
spec = spec.strip().replace(" ", "")
regexp = re.compile(r".*[^0123456789\-,]+.*")
tokens = spec.split(",")
cond = any([not item for item in tokens])
if ("--" in spec) or ("-," in spec) or (",-" in spec) or cond or regexp.match(spec):
raise RuntimeError("Argument `lrange` is not valid")
lines = []
for token in tokens:
if token.count("-") > 1:
raise RuntimeError("Argument `lrange` is not valid")
if "-" in token:
subtokens = token.split("-")
lmin, lmax = (
int(subtokens[0]),
int(subtokens[1]) if subtokens[1] else mlines,
)
for num in range(lmin, lmax + 1):
lines.append(num)
else:
lines.append(int(token))
if lines != sorted(lines):
raise RuntimeError("Argument `lrange` is not valid")
return lines | Process line range tokens. | entailment |
def incfile(fname, fpointer, lrange=None, sdir=None):
    r"""
    Return a Python source file formatted in reStructuredText.

    .. role:: bash(code)
        :language: bash

    :param fname: File name, relative to environment variable
                  :bash:`PKG_DOC_DIR`
    :type fname: string

    :param fpointer: Output function pointer. Normally is :code:`cog.out` but
                     other functions can be used for debugging
    :type fpointer: function object

    :param lrange: Line range to include, similar to Sphinx
                   `literalinclude <http://www.sphinx-doc.org/en/master/usage
                   /restructuredtext/directives.html
                   #directive-literalinclude>`_ directive
    :type lrange: string

    :param sdir: Source file directory. If None the :bash:`PKG_DOC_DIR`
                 environment variable is used if it is defined, otherwise
                 the directory where the module is located is used
    :type sdir: string
    """
    # pylint: disable=R0914
    # Resolve the directory holding the source file.
    if not sdir:
        sdir = os.environ.get("PKG_DOC_DIR", os.path.abspath(os.path.dirname(__file__)))
    with open(os.path.join(sdir, fname), "r") as fobj:
        lines = fobj.readlines()
    # Eliminate spurious carriage returns in Microsoft Windows
    lines = [_homogenize_linesep(line) for line in lines]
    # Determine which (1-based) line numbers to emit.
    if lrange:
        selected = set(_proc_token(lrange, len(lines)))
    else:
        selected = set(range(1, len(lines) + 1))
    # Produce output
    fpointer(".. code-block:: python" + os.linesep)
    fpointer(os.linesep)
    for num, line in enumerate(lines, start=1):
        if num in selected:
            if line.strip():
                fpointer("    " + line.replace("\t", "    ").rstrip() + os.linesep)
            else:
                fpointer(os.linesep)
    fpointer(os.linesep)
def ste(command, nindent, mdir, fpointer, env=None):
    """
    Print STDOUT of a shell command formatted in reStructuredText.

    This is a simplified version of :py:func:`pmisc.term_echo`.

    :param command: Shell command (relative to **mdir** if **env** is not
                    given)
    :type command: string

    :param nindent: Indentation level
    :type nindent: integer

    :param mdir: Module directory, used if **env** is not given
    :type mdir: string

    :param fpointer: Output function pointer. Normally is :code:`cog.out`
                     but :code:`print` or other functions can be used for
                     debugging
    :type fpointer: function object

    :param env: Environment dictionary. If not provided, the environment
                dictionary is the key "PKG_BIN_DIR" with the value of the
                **mdir**
    :type env: dictionary
    """
    if env is None:
        # Anchor the command at the (to-be-substituted) PKG_BIN_DIR variable
        # and supply the matching one-entry environment dictionary.
        placeholder = LDELIM + "PKG_BIN_DIR" + RDELIM
        command = placeholder + os.path.sep + command
        env = {"PKG_BIN_DIR": mdir}
    term_echo(command, nindent, env, fpointer)
def term_echo(command, nindent=0, env=None, fpointer=None, cols=60):
    """
    Print STDOUT of a shell command formatted in reStructuredText.

    .. role:: bash(code)
        :language: bash

    :param command: Shell command
    :type command: string

    :param nindent: Indentation level
    :type nindent: integer

    :param env: Environment variable replacement dictionary. The
                command is pre-processed and any environment variable
                represented in the full notation (:bash:`${...}` in Linux and
                OS X or :bash:`%...%` in Windows) is replaced. The dictionary
                key is the environment variable name and the dictionary value
                is the replacement value. For example, if **command** is
                :code:`'${PYTHON_CMD} -m "x=5"'` and **env** is
                :code:`{'PYTHON_CMD':'python3'}` the actual command issued
                is :code:`'python3 -m "x=5"'`
    :type env: dictionary

    :param fpointer: Output function pointer. Normally is :code:`cog.out` but
                     :code:`print` or other functions can be used for
                     debugging
    :type fpointer: function object

    :param cols: Number of columns of output
    :type cols: integer
    """
    # pylint: disable=R0204
    # Set argparse width so that output does not need horizontal scroll
    # bar in narrow windows or displays
    os.environ["COLUMNS"] = str(cols)
    command_int = command
    if env:
        for var, repl in env.items():
            # Replace both the quoted and the bare form of each
            # ${VAR}/%VAR% occurrence (LDELIM/RDELIM are platform-set).
            command_int = command_int.replace('"' + LDELIM + var + RDELIM + '"', repl)
            command_int = command_int.replace(LDELIM + var + RDELIM, repl)
    tokens = command_int.split(" ")
    # Add Python interpreter executable for Python scripts on Windows since
    # the shebang does not work
    if (platform.system().lower() == "windows") and (
        tokens[0].endswith(".py")
    ):  # pragma: no cover
        tokens = [sys.executable] + tokens
    # Run the command, merging stderr into stdout so errors appear in the
    # captured transcript.
    proc = subprocess.Popen(tokens, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout = proc.communicate()[0]
    if sys.hexversion >= 0x03000000:  # pragma: no cover
        # Python 3: subprocess output is bytes; decode before text handling.
        stdout = stdout.decode("utf-8")
    stdout = stdout.split("\n")
    indent = nindent * " "
    fpointer(os.linesep)
    fpointer("{0}.. code-block:: console{1}".format(indent, os.linesep))
    fpointer(os.linesep)
    # Echo the *original* (unsubstituted) command as the prompt line.
    fpointer("{0} $ {1}{2}".format(indent, command, os.linesep))
    for line in stdout:
        line = _homogenize_linesep(line)
        if line.strip():
            fpointer(indent + " " + line.replace("\t", " ") + os.linesep)
        else:
            fpointer(os.linesep)
def flo(string):
    '''Return the string given by param formatted with the callers locals.'''
    frame = inspect.currentframe()
    try:
        caller_locals = frame.f_back.f_locals
    finally:
        # Break the reference cycle created by holding a frame object.
        del frame
    return string.format(**caller_locals)
def _wrap_with(color_code):
    '''Color wrapper.

    Example:

    >>> blue = _wrap_with('34')
    >>> print(blue('text'))
    \033[34mtext\033[0m
    '''
    def inner(text, bold=False):
        '''Wrap *text* in the configured ANSI color escape sequence.'''
        code = '1;' + color_code if bold else color_code
        return '\033[' + code + 'm' + text + '\033[0m'
    return inner
def clean(deltox=False):
    '''Delete temporary files not under version control.

    Args:
        deltox: If True, delete virtual environments used by tox
    '''
    basedir = dirname(__file__)

    print(cyan('delete temp files and dirs for packaging'))
    local(flo(
        'rm -rf '
        '{basedir}/.eggs/ '
        '{basedir}/utlz.egg-info/ '
        '{basedir}/dist '
        '{basedir}/README '
        '{basedir}/build/ '
    ))

    print(cyan('\ndelete temp files and dirs for editing'))
    local(flo(
        'rm -rf '
        '{basedir}/.cache '
        '{basedir}/.ropeproject '
    ))

    print(cyan('\ndelete bytecode compiled versions of the python src'))
    # cf. http://stackoverflow.com/a/30659970
    # NOTE: only the path prefix goes through flo(); the find(1) expression
    # below contains literal braces ("{}") that must not be format()-ed.
    local(flo('find {basedir}/utlz {basedir}/tests ') +
          '\( -name \*pyc -o -name \*.pyo -o -name __pycache__ '
          '-o -name \*.so -o -name \*.o -o -name \*.c \) '
          '-prune '
          '-exec rm -rf {} +')

    if deltox:
        print(cyan('\ndelete tox virual environments'))
        local(flo('cd {basedir} && rm -rf .tox/'))
def pythons():
    '''Install latest pythons with pyenv.

    The python version will be activated in the projects base dir.
    Will skip already installed latest python versions.
    '''
    if not _pyenv_exists():
        print('\npyenv is not installed. You can install it with fabsetup '
              '(https://github.com/theno/fabsetup):\n\n  ' +
              cyan('mkdir ~/repos && cd ~/repos\n  '
                   'git clone https://github.com/theno/fabsetup.git\n  '
                   'cd fabsetup && fab setup.pyenv -H localhost'))
        return 1
    latest_pythons = _determine_latest_pythons()

    print(cyan('\n## install latest python versions'))
    for version in latest_pythons:
        # --skip-existing makes the install idempotent.
        local(flo('pyenv install --skip-existing {version}'))

    print(cyan('\n## activate pythons'))
    basedir = dirname(__file__)
    latest_pythons_str = ' '.join(latest_pythons)
    # 'pyenv local' pins the versions for this project dir ('system' first).
    local(flo('cd {basedir}  &&  pyenv local system {latest_pythons_str}'))

    highest_python = latest_pythons[-1]
    print(cyan(flo(
        '\n## prepare Python-{highest_python} for testing and packaging')))
    packages_for_testing = 'pytest tox'
    packages_for_packaging = 'pypandoc twine'
    local(flo('~/.pyenv/versions/{highest_python}/bin/pip  install --upgrade '
              'pip {packages_for_testing} {packages_for_packaging}'))
def tox(args=''):
    '''Run tox.

    Build package and run unit tests against several pythons.

    Args:
        args: Optional arguments passed to tox.

    Example:
        fab tox:'-e py36 -r'

    NOTE(review): ``flo()`` appears to format its string from the caller's
    local variables -- do not rename locals here without checking ``flo``.
    '''
    basedir = dirname(__file__)
    latest_pythons = _determine_latest_pythons()
    # e.g. highest_minor_python: '3.6'
    highest_minor_python = _highest_minor(latest_pythons)
    # Run tox with the highest available minor python version;
    # _local_needs_pythons presumably ensures that version is installed.
    _local_needs_pythons(flo('cd {basedir} && '
                             'python{highest_minor_python} -m tox {args}'))
Build package and run unit tests against several pythons.
Args:
args: Optional arguments passed to tox.
Example:
fab tox:'-e py36 -r' | entailment |
def pypi():
    '''Build package and upload to pypi.

    Asks for confirmation that the version number in setup.py was bumped,
    then cleans the working tree, builds an sdist with the highest installed
    Python, and uploads it with twine.

    NOTE(review): ``flo()`` appears to format its string from the caller's
    local variables -- do not rename locals here without checking ``flo``.
    '''
    if query_yes_no('version updated in setup.py?'):
        print(cyan('\n## clean-up\n'))
        execute(clean)
        basedir = dirname(__file__)
        latest_pythons = _determine_latest_pythons()
        # e.g. highest_minor: '3.6'
        highest_minor = _highest_minor(latest_pythons)
        python = flo('python{highest_minor}')
        print(cyan('\n## build package'))
        _local_needs_pythons(flo('cd {basedir} && {python} setup.py sdist'))
        print(cyan('\n## upload package'))
        local(flo('cd {basedir} && {python} -m twine upload dist/*'))
def chk_col_numbers(line_num, num_cols, tax_id_col, id_col, symbol_col):
    """
    Validate that every input column index falls inside the row's range.

    Raises an Exception that includes the (1-based) input line number.
    (Relying on Python's built-in IndexError instead would lose the line
    number information, which helps users locate the offending line.)
    """
    checks = (
        (tax_id_col, 'tax_id_col'),
        (id_col, 'discontinued_id_col'),
        (symbol_col, 'discontinued_symbol_col'),
    )
    for col, col_name in checks:
        if col >= num_cols:
            raise Exception(
                'Input file line #%d: column number of %s is out of range' %
                (line_num, col_name))
(Instead of defining this function, we could depend on Python's built-in
IndexError exception for this issue, but the IndexError exception wouldn't
include line number information, which is helpful for users to find exactly
which line is the culprit.) | entailment |
def import_gene_history(file_handle, tax_id, tax_id_col, id_col, symbol_col):
    """
    Read an input gene history file and mark discontinued genes obsolete.

    Note that the arguments tax_id_col, id_col and symbol_col have been
    converted into 0-based column indexes by the caller.

    Args:
        file_handle: open handle on the tab-separated gene history file.
        tax_id: taxonomy id whose rows should be imported; must exist in
            the Organism table.
        tax_id_col, id_col, symbol_col: 0-based column indexes.

    Raises:
        Exception: for a blank tax_id, a tax_id missing from Organism, a
            negative column index, or an out-of-range column index
            (reported with its line number by chk_col_numbers).
    """
    # Make sure that tax_id is not "" or " "
    if not tax_id or tax_id.isspace():
        raise Exception("Input tax_id is blank")
    # Make sure that tax_id exists in Organism table in the database.
    try:
        organism = Organism.objects.get(taxonomy_id=tax_id)
    except Organism.DoesNotExist:
        raise Exception('Input tax_id %s does NOT exist in Organism table. '
                        'Please add it into Organism table first.' % tax_id)
    # 0 is a valid 0-based index, so only reject negative values.
    # (Message fixed: the check allows 0, i.e. "non-negative", not "positive".)
    if tax_id_col < 0 or id_col < 0 or symbol_col < 0:
        raise Exception(
            'tax_id_col, id_col and symbol_col must be non-negative integers')
    for line_index, line in enumerate(file_handle):
        if line.startswith('#'):  # Skip comment lines.
            continue
        fields = line.rstrip().split('\t')
        # Check input column numbers (line numbers are reported 1-based).
        chk_col_numbers(line_index + 1, len(fields), tax_id_col, id_col,
                        symbol_col)
        # Skip lines whose tax_id's do not match input tax_id.
        if tax_id != fields[tax_id_col]:
            continue
        entrez_id = fields[id_col]
        # If the gene already exists in database, set its "obsolete" attribute
        # to True; otherwise create a new obsolete Gene record in database.
        try:
            gene = Gene.objects.get(entrezid=entrez_id)
            if not gene.obsolete:
                gene.obsolete = True
                gene.save()
        except Gene.DoesNotExist:
            Gene.objects.create(entrezid=entrez_id, organism=organism,
                                systematic_name=fields[symbol_col],
                                obsolete=True)
Note that the arguments tax_id_col, id_col and symbol_col have been
converted into 0-based column indexes. | entailment |
def cred_init(
        self,
        *,
        secrets_dir: str,
        log: Logger,
        bot_name: str="",
) -> None:
    """Set up everything that depends on credentials/secret files."""
    super().__init__(secrets_dir=secrets_dir, log=log, bot_name=bot_name)

    self.ldebug("Retrieving ACCESS_TOKEN ...")
    with open(path.join(self.secrets_dir, "ACCESS_TOKEN")) as secret:
        access_token = secret.read().strip()

    # Instance base url optional.
    self.ldebug("Looking for INSTANCE_BASE_URL ...")
    url_file = path.join(self.secrets_dir, "INSTANCE_BASE_URL")
    if not path.isfile(url_file):
        self.ldebug("Couldn't find INSTANCE_BASE_URL, defaulting to mastodon.social.")
        self.instance_base_url = "https://mastodon.social"
    else:
        with open(url_file) as handle:
            self.instance_base_url = handle.read().strip()

    self.api = mastodon.Mastodon(access_token=access_token,
                                 api_base_url=self.instance_base_url)

    # Used later to strip HTML tags out of status contents.
    self.html_re = re.compile("<.*?>")
def send(
        self,
        *,
        text: str,
) -> List[OutputRecord]:
    """
    Post a plain (media-free) status to mastodon.

    :param text: text to send in post.
    :returns: list of output records, each corresponding to either a
        single post, or an error.
    """
    try:
        posted = self.api.status_post(status=text)
    except mastodon.MastodonError as e:
        return [self.handle_error((f"Bot {self.bot_name} encountered an error when "
                                   f"sending post {text} without media:\n{e}\n"),
                                  e)]
    return [TootRecord(record_data={
        "toot_id": posted["id"],
        "text": text
    })]
:param text: text to send in post.
:returns: list of output records,
each corresponding to either a single post,
or an error. | entailment |
def send_with_media(
        self,
        *,
        text: str,
        files: List[str],
        captions: List[str] = None,
) -> List[OutputRecord]:
    """
    Upload media to mastodon, then send a status referencing that media
    (with alt-text captions if present).

    :param text: post text.
    :param files: list of files to upload with post.
    :param captions: list of captions to include as alt-text with files.
        Defaults to None (treated as an empty list).  The previous mutable
        default ``[]`` was shared between calls, so padding it with default
        captions leaked state across invocations.
    :returns: list of output records, each corresponding to either a
        single post, or an error.
    """
    try:
        self.ldebug(f"Uploading files {files}.")
        if captions is None:
            captions = []
        # Pad captions so that every file gets at least the default alt-text.
        if len(files) > len(captions):
            captions.extend([self.default_caption_message] * (len(files) - len(captions)))
        media_dicts = []
        for i, file in enumerate(files):
            caption = captions[i]
            media_dicts.append(self.api.media_post(file, description=caption))
        self.ldebug(f"Media ids {media_dicts}")
    except mastodon.MastodonError as e:
        return [self.handle_error(
            f"Bot {self.bot_name} encountered an error when uploading {files}:\n{e}\n", e
        )]
    try:
        status = self.api.status_post(status=text, media_ids=media_dicts)
        self.ldebug(f"Status object from toot: {status}.")
        return [TootRecord(record_data={
            "toot_id": status["id"],
            "text": text,
            "media_ids": media_dicts,
            "captions": captions
        })]
    except mastodon.MastodonError as e:
        return [self.handle_error((f"Bot {self.bot_name} encountered an error when "
                                   f"sending post {text} with media dicts {media_dicts}:"
                                   f"\n{e}\n"),
                                  e)]
and send status and media,
and captions if present.
:param text: post text.
:param files: list of files to upload with post.
:param captions: list of captions to include as alt-text with files.
:returns: list of output records,
each corresponding to either a single post,
or an error. | entailment |
def perform_batch_reply(
        self,
        *,
        callback: Callable[..., str],
        lookback_limit: int,
        target_handle: str,
) -> List[OutputRecord]:
    """
    Performs batch reply on target account.

    Looks up the recent messages of the target user, applies the callback,
    and replies with what the callback generates.  Statuses we have already
    replied to are skipped.

    :param callback: a callback taking a message id, message contents,
        and optional extra keys, and returning a message string.
    :param target_handle: the handle of the target account
        (``@user`` or ``@user@domain``).
    :param lookback_limit: a lookback limit of how many messages to consider.
    :returns: list of output records, each corresponding to either a single
        post, or an error.
    """
    self.log.info(f"Attempting to batch reply to mastodon user {target_handle}")
    # target handle should be able to be provided either as @user or @user@domain
    # note that this produces an empty first chunk
    handle_chunks = target_handle.split("@")
    target_base_handle = handle_chunks[1]
    records: List[OutputRecord] = []
    our_id = self.api.account_verify_credentials()["id"]
    # be careful here - we're using a search to do this,
    # and if we're not careful we'll pull up people just mentioning the target.
    possible_accounts = self.api.account_search(target_handle, following=True)
    their_id = None
    for account in possible_accounts:
        # Exact username match guards against mere mentions in the results.
        if account["username"] == target_base_handle:
            their_id = account["id"]
            break
    if their_id is None:
        return [self.handle_error(f"Could not find target handle {target_handle}!", None)]
    statuses = self.api.account_statuses(their_id, limit=lookback_limit)
    for status in statuses:
        status_id = status.id
        # find possible replies we've made.
        our_statuses = self.api.account_statuses(our_id, since_id=status_id)
        in_reply_to_ids = list(map(lambda x: x.in_reply_to_id, our_statuses))
        if status_id not in in_reply_to_ids:
            # Strip the HTML the API wraps status text in, then unescape
            # entities to recover the plain text.
            encoded_status_text = re.sub(self.html_re, "", status.content)
            status_text = html.unescape(encoded_status_text)
            message = callback(message_id=status_id, message=status_text, extra_keys={})
            self.log.info(f"Replying {message} to status {status_id} from {target_handle}.")
            try:
                new_status = self.api.status_post(status=message, in_reply_to_id=status_id)
                records.append(TootRecord(record_data={
                    "toot_id": new_status.id,
                    "in_reply_to": target_handle,
                    "in_reply_to_id": status_id,
                    "text": message,
                }))
            except mastodon.MastodonError as e:
                records.append(
                    self.handle_error((f"Bot {self.bot_name} encountered an error when "
                                       f"sending post {message} during a batch reply "
                                       f":\n{e}\n"),
                                      e))
        else:
            self.log.info(f"Not replying to status {status_id} from {target_handle} "
                          f"- we already replied.")
    return records
Looks up the recent messages of the target user,
applies the callback,
and replies with
what the callback generates.
:param callback: a callback taking a message id,
message contents,
and optional extra keys,
and returning a message string.
:param target: the id of the target account.
:param lookback_limit: a lookback limit of how many messages to consider.
:returns: list of output records,
each corresponding to either a single post,
or an error. | entailment |
def handle_error(self, message: str, e: mastodon.MastodonError) -> OutputRecord:
    """Handle error while trying to do something.

    Logs the error and wraps it in a TootRecord.

    NOTE(review): ``message`` is accepted but never used in this body --
    callers appear to pre-format it; confirm whether it should be logged.
    NOTE(review): ``self.handled_errors[code]`` below is looked up but its
    value is discarded -- presumably it holds a handler that should be
    called; verify intent before changing.
    """
    self.lerror(f"Got an error! {e}")
    # Handle errors if we know how.
    try:
        # Assumes the MastodonError is indexable and carries a "code"
        # entry -- TODO confirm against mastodon.py's error structure.
        code = e[0]["code"]
        if code in self.handled_errors:
            self.handled_errors[code]
        else:
            pass
    except Exception:
        pass
    return TootRecord(error=e)
def _read_header(self):
'''
Little-endian
|... 4 bytes unsigned int ...|... 4 bytes unsigned int ...|
| frames count | dimensions count |
'''
self._fh.seek(0)
buf = self._fh.read(4*2)
fc, dc = struct.unpack("<II", buf)
return fc, dc | Little-endian
|... 4 bytes unsigned int ...|... 4 bytes unsigned int ...|
| frames count | dimensions count | | entailment |
def listen(self, you):
    """
    Register *you* to be called back when the value is modified.

    Parameters
    ----------
    you : object
        An instance having ``__call__`` attribute.
    """
    # Remember the listener locally and forward the registration to the
    # underlying raw object.
    self._listeners.append(you)
    self.raw.talk_to(you)
Parameters
----------
you : object
An instance having ``__call__`` attribute. | entailment |
def _get_term_by_id(self, id):
'''Simple utility function to load a term.
'''
url = (self.url + '/%s.json') % id
r = self.session.get(url)
return r.json() | Simple utility function to load a term. | entailment |
def get_top_display(self, **kwargs):
    '''
    Returns all concepts or collections that form the top-level of a display
    hierarchy.

    As opposed to the :meth:`get_top_concepts`, this method can possibly
    return both concepts and collections.

    :rtype: Returns a list of concepts and collections. For each an
        id is present and a label. The label is determined by looking at
        the `**kwargs` parameter, the default language of the provider
        and falls back to `en` if nothing is present.
    '''
    language = self._get_language(**kwargs)
    url = self.url + '/lijst.json'
    # 'HR' appears to select display-hierarchy roots -- TODO confirm
    # against the remote service's documentation.
    args = {'type[]': ['HR']}
    r = self.session.get(url, params=args)
    result = r.json()
    items = result
    # Assumes the service returns at least one hierarchy and that the
    # first entry is the relevant root -- verify with the remote API.
    top = self.get_by_id(items[0]['id'])
    res = []

    def expand_coll(res, coll):
        # Resolve every member id of the root collection into an
        # {id, label} entry.
        for nid in coll.members:
            c = self.get_by_id(nid)
            res.append({
                'id': c.id,
                'label': c.label(language)
            })
        return res

    return expand_coll(res, top)
hierarchy.
As opposed to the :meth:`get_top_concepts`, this method can possibly
return both concepts and collections.
:rtype: Returns a list of concepts and collections. For each an
id is present and a label. The label is determined by looking at
the `**kwargs` parameter, the default language of the provider
and falls back to `en` if nothing is present. | entailment |
def get_children_display(self, id, **kwargs):
    '''
    Return a list of concepts or collections that should be displayed
    under this concept or collection.

    :param id: A concept or collection id.
    :rtype: A list of concepts and collections. For each an
        id is present and a label. The label is determined by looking at
        the `**kwargs` parameter, the default language of the provider
        and falls back to `en` if nothing is present. If the id does not
        exist, return `False`.
    '''
    language = self._get_language(**kwargs)
    item = self.get_by_id(id)
    # Collections display their members; concepts display their narrower
    # concepts.
    child_ids = item.members if isinstance(item, Collection) else item.narrower
    children = []
    for child_id in child_ids:
        child = self.get_by_id(child_id)
        children.append({
            'id': child.id,
            'label': child.label(language)
        })
    return children
under this concept or collection.
:param id: A concept or collection id.
:rtype: A list of concepts and collections. For each an
id is present and a label. The label is determined by looking at
the `**kwargs` parameter, the default language of the provider
and falls back to `en` if nothing is present. If the id does not
exist, return `False`. | entailment |
def translate_genes(id_list=None, from_id=None, to_id=None, organism=None):
    """
    Translate gene identifiers between naming systems.

    Pass a list of identifiers (id_list), the name of the database ('Entrez',
    'Symbol', 'Standard name', 'Systematic name' or a loaded crossreference
    database) that you wish to translate from, and the name of the database
    that you wish to translate to.

    Returns a dict mapping each input identifier to a list of translated
    identifiers, plus a 'not_found' key listing the inputs that could not
    be translated.

    NOTE(review): the loop variable ``to_id`` near the end shadows the
    parameter of the same name; harmless today because the parameter is no
    longer read at that point, but fragile.
    """
    ids = set(id_list)
    # Initialize set of identifiers not found by this translate_genes method.
    not_found = set()
    from_ids = None  # Get the map of from_ids to the gene pks
    # Optionally restrict the source lookup to a single organism.
    if organism is not None:
        gene_objects_manager = Gene.objects.filter(
            organism__scientific_name=organism)
    else:
        gene_objects_manager = Gene.objects
    if (from_id == 'Entrez'):
        # Entrez ids are integers; anything unparseable counts as not found.
        int_list = []
        for x in ids:
            try:
                int_list.append(int(x))
            except(ValueError):
                not_found.add(x)
        ids = set(int_list)
        from_ids = gene_objects_manager.filter(entrezid__in=ids).values_list(
            'entrezid', 'id')
    elif (from_id == 'Systematic name'):
        from_ids = gene_objects_manager.filter(
            systematic_name__in=ids).values_list('systematic_name', 'id')
    elif (from_id == 'Standard name'):
        from_ids = gene_objects_manager.filter(
            standard_name__in=ids).values_list('standard_name', 'id')
    elif (from_id == 'Symbol'):
        # If standard_name exists, symbol will be standard_name; otherwise
        # symbol will be systematic_name.
        from_ids = gene_objects_manager.annotate(
            symbol=Coalesce('standard_name', 'systematic_name')).filter(
                symbol__in=ids).values_list('symbol', 'id')
    else:  # a crossreference db?
        xrdb = CrossRefDB.objects.get(name=from_id)
        from_ids = CrossRef.objects.filter(crossrefdb=xrdb).values_list(
            'xrid', 'gene__id')
    # Dictionary that maps from type ID passed by user to gene__id.
    from_id_map = {}
    gene_ids = []
    for item in from_ids:
        from_id_map[item[0]] = item[1]
        gene_ids.append(item[1])
    # Now let's figure out what we need to go to:
    to_ids = None
    if (to_id == 'Entrez'):
        to_ids = Gene.objects.filter(id__in=gene_ids).values_list(
            'id', 'entrezid')
    elif (to_id == 'Systematic name'):
        to_ids = Gene.objects.filter(id__in=gene_ids).values_list(
            'id', 'systematic_name')
    elif (to_id == 'Standard name'):
        to_ids = Gene.objects.filter(id__in=gene_ids).values_list(
            'id', 'standard_name')
    elif (to_id == 'Symbol'):
        # If standard_name exists, symbol will be standard_name; otherwise
        # symbol will be systematic_name.
        to_ids = Gene.objects.annotate(
            symbol=Coalesce('standard_name', 'systematic_name')).filter(
                id__in=gene_ids).values_list('id', 'symbol')
    else:  # A crossreference db?
        xrdb = CrossRefDB.objects.get(name=to_id)
        to_ids = CrossRef.objects.filter(crossrefdb=xrdb).values_list(
            'gene__id', 'xrid')
    # A gene pk can map to several target ids, so collect lists.
    to_id_map = {}
    for item in to_ids:
        if not item[0] in to_id_map:
            to_id_map[item[0]] = [item[1], ]
        else:
            to_id_map[item[0]].append(item[1])
    # Stitch the two maps together: input id -> gene pk -> target id list.
    from_to = {}
    for item in ids:
        try:
            gene_id = from_id_map[item]
        except KeyError:
            not_found.add(item)
            continue
        # NOTE(review): this lookup can raise KeyError when a gene has no
        # entry in the target system (e.g. missing crossref) -- confirm
        # whether that case can occur for callers.
        to_id = to_id_map[gene_id]
        from_to[item] = to_id
    from_to['not_found'] = list(not_found)
    return from_to
'Symbol', 'Standard name', 'Systematic name' or a loaded crossreference
database) that you wish to translate from, and the name of the database
that you wish to translate to. | entailment |
def _inner_func_anot(func):
    """Decorator that must be applied to all inner functions returning
    contexts.  Every pygame.Surface among the positional arguments is
    wrapped in a Surface before ``func`` is invoked."""
    @wraps(func)
    def wrapped(*args):
        wrapped_args = _lmap(_wrap_surface, args)
        return func(*wrapped_args)
    return wrapped
Wraps all instances of pygame.Surface in the input in Surface | entailment |
def Cross(width=3, color=0):
    """Draws a cross centered in the target area.

    :param width: width of the lines of the cross in pixels
    :type width: int
    :param color: color of the lines of the cross
    :type color: pygame.Color
    """
    horizontal = Line("h", width, color)
    vertical = Line("v", width, color)
    return Overlay(horizontal, vertical)
:param width: width of the lines of the cross in pixels
:type width: int
:param color: color of the lines of the cross
:type color: pygame.Color | entailment |
def compose(target, root=None):
    """Top level function to create a surface.

    :param target: the pygame.Surface to blit on. Or a (width, height) tuple
        in which case a new surface will be created
    :type target: pygame.Surface or tuple
    :param root: optional root layout/context factory; when given, the
        returned function's children are attached under it, otherwise
        exactly one child must be supplied and becomes the root.
    :returns: a function taking child contexts and returning the drawn
        pygame.Surface.
    :raises ValueError: if a Surface is passed as root (a Surface can only
        be a leaf child).
    """
    if type(root) == Surface:
        raise ValueError("A Surface may not be used as root, please add "
            +"it as a single child i.e. compose(...)(Surface(...))")
    @_inner_func_anot
    def inner_compose(*children):
        # Build the context tree: either under the provided root, or the
        # single child *is* the tree.
        if root:
            root_context = root(*children)
        else:
            assert len(children) == 1
            root_context = children[0]
        # Reuse the given surface, or create one of the requested size.
        if type(target) == pygame.Surface:
            surface = target
            size = target.get_size()
        else:
            size = target
            surface = pygame.Surface(size)
        root_context._draw(surface, pygame.Rect(0, 0, *size))
        return surface
    return inner_compose
:param target: the pygame.Surface to blit on. Or a (width, height) tuple
in which case a new surface will be created
:type target: - | entailment |
def Font(name=None, source="sys", italic=False, bold=False, size=20):
    """Unifies loading of fonts.

    :param name: name of system-font or filepath, if None is passed the
        default system-font is loaded
    :type name: str
    :param source: "sys" for system font, or "file" to load a file
    :type source: str
    """
    assert source in ["sys", "file"]
    if not name:
        # No name given: fall back to the default system font regardless
        # of the requested source.
        return pygame.font.SysFont(pygame.font.get_default_font(),
                                   size, bold=bold, italic=italic)
    if source == "file":
        font = pygame.font.Font(name, size)
        font.set_italic(italic)
        font.set_bold(bold)
        return font
    return pygame.font.SysFont(name, size, bold=bold, italic=italic)
:param name: name of system-font or filepath, if None is passed the default
system-font is loaded
:type name: str
:param source: "sys" for system font, or "file" to load a file
:type source: str | entailment |
def Text(text, font, color=pygame.Color(0, 0, 0), antialias=False, align="center"):
    """Renders a text. Supports multiline text, the background will be transparent.

    :param text: the (possibly multiline) text; each line is stripped of
        surrounding whitespace before rendering
    :type text: str
    :param font: font object used for rendering (see :func:`Font`)
    :param color: text color
    :type color: pygame.Color
    :param antialias: whether to render with antialiasing
    :type antialias: bool
    :param align: text-alignment must be "center", "left", or "right"
    :type align: str
    :return: surface containing the rendered text
    :rtype: pygame.Surface
    """
    assert align in ["center", "left", "right"]
    # A margin of weight 1 on both sides centers a line; dropping one side
    # pushes the line to the left or right.
    margin_l, margin_r = 1, 1
    if align == "left": margin_l = 0
    elif align == "right": margin_r = 0
    margin = Margin(margin_l, margin_r)
    # Pick a background color guaranteed to differ from the text color so
    # it can be keyed out (made transparent) afterwards.
    color_key = pygame.Color(0, 0, 1) if pygame.Color(0, 0, 1) != color else 0x000002
    # Render each stripped line separately ...
    text_surfaces = _lmap(lambda text: _text(text, font=font,
                                             color=color, antialias=antialias),
                          map(methodcaller("strip"), text.split("\n")))
    w = max(surf.get_rect().w for surf in text_surfaces)
    h = sum(surf.get_rect().h for surf in text_surfaces)
    # ... then stack them vertically on the color-keyed background.
    surf = compose((w, h), Fill(color_key))(LinLayout("v")(
        *_lmap(lambda s: Surface(margin)(s), text_surfaces)))
    surf.set_colorkey(color_key)
    return surf.convert_alpha()
:param align: text-alignment must be "center", "left", or "righ"
:type align: str
:return: the input text
:rtype: pygame.Surface | entailment |
def from_scale(scale_w, scale_h=None):
    """Creates a padding from the space left over after scaling the content.

    E.g. Padding.from_scale(0.5) would produce Padding(0.25, 0.25, 0.25, 0.25)
    and Padding.from_scale(0.5, 1) would produce Padding(0.25, 0.25, 0, 0)
    because the content would not be scaled vertically (scale_h=1), so no
    vertical padding remains.

    If scale_h is not specified (or falsy) scale_h=scale_w is used.

    :param scale_w: horizontal scaling factor
    :type scale_w: float
    :param scale_h: vertical scaling factor
    :type scale_h: float
    """
    # NOTE: a falsy scale_h (including 0) falls back to scale_w,
    # mirroring the historical behavior.
    if not scale_h:
        scale_h = scale_w
    pad_w = (1 - scale_w) * 0.5
    pad_h = (1 - scale_h) * 0.5
    return Padding(pad_w, pad_w, pad_h, pad_h)
E.g. Padding.from_scale(0.5) would produce Padding(0.25, 0.25, 0.25, 0.25) and
Padding.from_scale(0.5, 1) would produce Padding(0.25, 0.25, 0, 0)
because the content would not be scaled (since scale_h=1) and therefore
there would be no vertical padding.
If scale_h is not specified scale_h=scale_w is used as default
:param scale_w: horizontal scaling factors
:type scale_w: float
:param scale_h: vertical scaling factor
:type scale_h: float | entailment |
def radicals(self, levels=None):
    """
    Yield Radical objects, optionally restricted to the given levels.

    :param levels string: An optional single level or comma-delimited
        list of levels, e.g. ``1`` or ``1,2,5,9``.

    http://www.wanikani.com/api/v1.2#radicals-list
    """
    url = WANIKANI_BASE.format(self.api_key, 'radicals')
    if levels:
        url = url + '/{0}'.format(levels)
    response = self.get(url)
    for entry in response['requested_information']:
        yield Radical(entry)
comma-delimited list of levels is available, as seen in the example
as 1. An example of a comma-delimited list of levels is 1,2,5,9.
http://www.wanikani.com/api/v1.2#radicals-list | entailment |
def kanji(self, levels=None):
    """
    Yield Kanji objects, optionally restricted to the given levels.

    :param levels: An optional single level or comma-delimited list of
        levels, e.g. ``1`` or ``1,2,5,9``.
    :type levels: str or None

    http://www.wanikani.com/api/v1.2#kanji-list
    """
    url = WANIKANI_BASE.format(self.api_key, 'kanji')
    if levels:
        url = url + '/{0}'.format(levels)
    response = self.get(url)
    for entry in response['requested_information']:
        yield Kanji(entry)
def vocabulary(self, levels=None):
    """
    Yield Vocabulary objects, optionally restricted to the given levels.

    :param levels: An optional single level or comma-delimited list of
        levels, e.g. ``1`` or ``1,2,5,9``.
    :type levels: str or None

    http://www.wanikani.com/api/v1.2#vocabulary-list
    """
    url = WANIKANI_BASE.format(self.api_key, 'vocabulary')
    if levels:
        url += '/{0}'.format(levels)
    data = self.get(url)
    info = data['requested_information']
    # Without a level filter the API nests the list under a 'general' key.
    items = info['general'] if 'general' in info else info
    for item in items:
        yield Vocabulary(item)
def ishex(obj):
    """
    Test if the argument is a string representing a valid hexadecimal digit.

    :param obj: Object
    :type obj: any
    :rtype: boolean
    """
    if not isinstance(obj, str):
        return False
    return len(obj) == 1 and obj in string.hexdigits
def isnumber(obj):
    """
    Test if the argument is a number (complex, float or integer).

    Booleans and None are explicitly rejected even though bool is a
    subclass of int.

    :param obj: Object
    :type obj: any
    :rtype: boolean
    """
    if obj is None or isinstance(obj, bool):
        return False
    return isinstance(obj, (int, float, complex))
def isreal(obj):
    """
    Test if the argument is a real number (float or integer).

    Booleans, None and complex numbers are explicitly rejected.

    :param obj: Object
    :type obj: any
    :rtype: boolean
    """
    if obj is None or isinstance(obj, bool):
        return False
    return isinstance(obj, (int, float))
def create_api_context(self, cls):
    """Create and return an API context for the given API class."""
    schema = self.api_context_schema()
    return schema.load({
        "name": cls.name,
        "cls": cls,
        "inst": [],
        "conf": self.conf.get_api_service(cls.name),
        "calls": self.conf.get_api_calls(),
        "shared": {},  # Used per-API to monitor state
        "log_level": self.conf.get_log_level(),
        "callback": self.receive,
    })
def receive(self, data, api_context):
    """Pass an API result down the pipeline."""
    self.log.debug(f"Putting data on the pipeline: {data}")
    payload = {
        "api_contexts": self.api_contexts,
        "api_context": api_context,
        "strategy": dict(),  # Shared strategy data
        "result": data,
        "log_level": api_context["log_level"],
    }
    loaded = self.strategy_context_schema().load(payload)
    self.strat.execute(loaded.data)
def shutdown(self, signum, frame):  # pylint: disable=unused-argument
    """Shut it down (idempotent signal handler)."""
    if self.exit:
        return
    self.exit = True
    self.log.debug(f"SIGTRAP!{signum};{frame}")
    self.api.shutdown()
    self.strat.shutdown()
def course(self):
    """
    Course this node belongs to (the root of the parent chain).
    """
    node = self.parent
    while node.parent:
        node = node.parent
    return node
def path(self):
    """
    Path of this node on Studip, e.g. ``Course/folder/folder/document``.
    Respects the renaming policies defined in the namemap.
    """
    parent = self.parent
    if parent is None:
        return self.title
    return join(parent.path, self.title)
def title(self):
    """
    Title of this node.

    An entry in the configuration namemap (keyed by this node's id) takes
    precedence over the default value from stud.ip; the result is passed
    through secure_filename.

    (Fix: the original evaluated ``c.namemap_lookup(self.id)`` twice in a
    conditional expression; the lookup is now performed only once.)
    """
    mapped = c.namemap_lookup(self.id)
    if mapped is None:
        mapped = self._title
    return secure_filename(mapped)
value from stud.ip is used. | entailment |
def deep_documents(self):
    """
    All documents found in the subtrees of this node (depth-first order).
    """
    docs = []
    for child in self.contents:
        if isinstance(child, Document):
            docs.append(child)
        else:
            docs.extend(child.deep_documents)
    return docs
def title(self):
    """
    Title of the course.

    When the configuration namemap has no entry for this course id, a new
    one is created as ``<stud.ip name> <semester title>`` and stored in the
    namemap; the result is passed through secure_filename.
    """
    cached = c.namemap_lookup(self.id)
    if cached is not None:
        return secure_filename(cached)
    fresh = self._title + " " + client.get_semester_title(self)
    c.namemap_set(self.id, fresh)
    return secure_filename(fresh)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.