sentence1 | sentence2 | label |
---|---|---|
def new_driver(browser_name, *args, **kwargs):
"""Instantiates a new WebDriver instance, determining class by environment variables
"""
if browser_name == FIREFOX:
return webdriver.Firefox(*args, **kwargs)
# elif options['local'] and options['browser_name'] == CHROME:
# return webdriver.Chrome(*args, **kwargs)
#
# elif options['local'] and options['browser_name'] == IE:
# return webdriver.Ie(*args, **kwargs)
#
# elif options['local'] and options['browser_name'] == OPERA:
# return webdriver.Opera(*args, **kwargs)
elif browser_name == PHANTOMJS:
executable_path = os.path.join(os.path.dirname(__file__), 'phantomjs/executable/phantomjs_64bit')
driver = webdriver.PhantomJS(executable_path=executable_path, **kwargs)
driver.set_window_size(1280, 800) # Set a default because phantom needs it
return driver
else: # remote
driver = webdriver.Remote(*args, **kwargs)
return driver | Instantiates a new WebDriver instance, determining class by environment variables | entailment |
def build_follow_url(host=None, **params):
"""
Build a URL for the /follow page
"""
# template = '?{params}'
config = ExampleConfig()
template = '/follow?{params}'
if not host:
host = config.get('example_web_hostname')
return ExampleUrlBuilder.build(
template=template,
host=host,
params=ExampleUrlBuilder.encode_params(**params)
) | Build a URL for the /follow page | entailment |
def encode_collection(collection, encoding='utf-8'):
"""Encodes all the string keys and values in a collection with specified encoding"""
if isinstance(collection, dict):
return dict((encode_collection(key, encoding), encode_collection(value, encoding)) for key, value in collection.iteritems())
elif isinstance(collection, list):
return [encode_collection(element, encoding) for element in collection]
elif isinstance(collection, unicode):
return collection.encode(encoding)
else:
return collection | Encodes all the string keys and values in a collection with specified encoding | entailment |
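A minimal standalone sketch of the same recursive-encoding idea, written to run on both Python 2 and 3 (the original targets Python 2, where `unicode` and `iteritems` exist); the sample data is invented for illustration.

```python
import sys

if sys.version_info[0] == 2:
    text_type = unicode  # noqa: F821 - only defined on Python 2
else:
    text_type = str

def encode_all(value, encoding='utf-8'):
    # Recursively encode text keys/values in dicts and lists; leave everything else untouched.
    if isinstance(value, dict):
        return dict((encode_all(k, encoding), encode_all(v, encoding)) for k, v in value.items())
    if isinstance(value, list):
        return [encode_all(item, encoding) for item in value]
    if isinstance(value, text_type):
        return value.encode(encoding)
    return value

print(encode_all({u'word': [u'w\xe9rd', 42]}))
```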
def get_delimited_string_from_list(_list, delimiter=', ', wrap_values_with_char=None, wrap_strings_with_char=None):
"""Given a list, returns a string representation of that list with specified delimiter and optional string chars
_list -- the list or tuple to stringify
delimiter -- the character to separate all values
wrap_values_with_char -- if specified, will wrap all values in list with this character in the representation
wrap_strings_with_char -- if specified, will wrap only values of type str with this character in the representation
"""
if wrap_values_with_char is not None:
return delimiter.join('{wrapper}{val}{wrapper}'.format(
val=v,
wrapper=wrap_values_with_char
) for v in _list)
elif wrap_strings_with_char is not None:
return delimiter.join(str(v) if not isinstance(v, str) else '{wrapper}{val}{wrapper}'.format(
val=v,
wrapper=wrap_strings_with_char
) for v in _list)
else:
return delimiter.join(str(v) for v in _list) | Given a list, returns a string representation of that list with specified delimiter and optional string chars
_list -- the list or tuple to stringify
delimiter -- the character to separate all values
wrap_values_with_char -- if specified, will wrap all values in list with this character in the representation
wrap_strings_with_char -- if specified, will wrap only values of type str with this character in the representation | entailment |
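A quick standalone illustration of the wrapping-and-joining behaviour described above; the values and wrapper character are arbitrary.

```python
values = ['a', 1, 'b']

# Wrap every value, regardless of type (wrap_values_with_char).
print(', '.join('"{0}"'.format(v) for v in values))                                    # "a", "1", "b"

# Wrap only the string values (wrap_strings_with_char).
print(', '.join('"{0}"'.format(v) if isinstance(v, str) else str(v) for v in values))  # "a", 1, "b"
```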
def execute_script(self, string, args=None):
"""
Execute script passed in to function
@type string: str
@param string: Script to execute
@type args: dict
@param args: Dictionary representing command line args
@return: The value returned by the executed script
"""
result = None
try:
result = self.driver_wrapper.driver.execute_script(string, args)
return result
except WebDriverException:
if result is not None:
message = 'Returned: ' + str(result)
else:
message = "No message. Check your Javascript source: {}".format(string)
raise WebDriverJavascriptException.WebDriverJavascriptException(self.driver_wrapper, message) | Execute script passed in to function
@type string: str
@param string: Script to execute
@type args: dict
@param args: Dictionary representing command line args
@return: The value returned by the executed script | entailment |
def execute_template(self, template_name, variables, args=None):
"""
Execute script from a template
@type template_name: str
@param template_name: Script template to implement
@type args: dict
@param args: Dictionary representing command line args
@rtype: bool
@return: Success or failure
"""
js_text = self.build_js_from_template(template_name, variables)
try:
self.execute_script(js_text, args)
except WebDriverException:
return False
return True | Execute script from a template
@type template_name: str
@param template_name: Script template to implement
@type args: dict
@param args: Dictionary representing command line args
@rtype: bool
@return: Success or failure | entailment |
def execute_template_and_return_result(self, template_name, variables, args=None):
"""
Execute script from a template and return result
@type template_name: str
@param template_name: Script template to implement
@type variables: dict
@param variables: Dictionary representing template construction args
@type args: dict
@param args: Dictionary representing command line args
@return: The value returned by the executed script
"""
js_text = self.build_js_from_template(template_name, variables)
return self.execute_script(js_text, args) | Execute script from a template and return result
@type template_name: str
@param template_name: Script template to implement
@type variables: dict
@param variables: Dictionary representing template construction args
@type args: dict
@param args: Dictionary representing command line args
@return: The value returned by the executed script | entailment |
def build_js_from_template(self, template_file, variables):
"""
Build a JS script from a template and args
@type template_file: str
@param template_file: Script template to implement; can be the name of a built-in script or full filepath to
a js file that contains the script. E.g. 'clickElementTemplate.js',
'clickElementTemplate', and '/path/to/custom/template/script.js' are all acceptable
@type variables: dict
@param variables: Dictionary representing template construction args
@rtype: str
@return: The rendered JavaScript text
"""
template_variable_character = '%'
# raise an exception if user passed non-dictionary variables
if not isinstance(variables, dict):
raise TypeError('You must use a dictionary to populate variables in a javascript template')
# This filename is not a full file, attempt to locate the file in built-in templates
if not os.path.isfile(template_file):
# append the .js extension if not included
if '.js' not in template_file:
template_file += '.js'
# find the template and read the text into a string variable
templates_dir = os.path.join(os.path.dirname(__file__), 'jsTemplates')
template_full_path = os.path.join(templates_dir, template_file)
# The filename specified should be the full path
else:
template_full_path = template_file
# Ensure that the file exists
if not os.path.isfile(template_full_path):
raise ValueError('File "{}" was not found; you must specify the name of a built-in javascript template '
'or the full filepath of a custom template'.format(template_full_path))
try:
js_text = open(template_full_path).read()
except IOError:
raise IOError('The template was not found or did not have read permissions: {}'.format(template_full_path))
# replace all variables that match the keys in 'variables' dict
for key in variables.keys():
# double escape single and double quotes after variable replacement
if hasattr(variables[key], 'replace'):
variables[key] = variables[key].replace("'", "\\'")
variables[key] = variables[key].replace('"', '\\"')
else: # variable is not a string
variables[key] = str(variables[key])
js_text = js_text.replace(template_variable_character + key, variables[key])
return js_text | Build a JS script from a template and args
@type template_file: str
@param template_file: Script template to implement; can be the name of a built-in script or full filepath to
a js file that contains the script. E.g. 'clickElementTemplate.js',
'clickElementTemplate', and '/path/to/custom/template/script.js' are all acceptable
@type variables: dict
@param variables: Dictionary representing template construction args
@rtype: str
@return: The rendered JavaScript text | entailment |
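To make the `%variable` substitution concrete, here is a standalone sketch of the same replace-and-escape loop; the template text and variable names are invented for illustration (real templates live in the jsTemplates directory).

```python
template_text = "document.querySelector('%selector').textContent = '%text';"
variables = {'selector': '#title', 'text': "O'Neill"}

js_text = template_text
for key, value in variables.items():
    if hasattr(value, 'replace'):
        # Escape quotes so the substituted value stays inside its JS string literal.
        value = value.replace("'", "\\'").replace('"', '\\"')
    else:
        value = str(value)
    js_text = js_text.replace('%' + key, value)

print(js_text)
```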
def build(template='/', host=None, scheme=None, port=None, **template_vars):
"""Builds a url with a string template and template variables; relative path if host is None, abs otherwise:
template format: "/staticendpoint/{dynamic_endpoint}?{params}"
"""
# TODO: refactor to build_absolute and build_relative instead of handling based on params
parsed_host = urlparse.urlparse(host if host is not None else '')
host_has_scheme = bool(parsed_host.scheme)
if host_has_scheme:
host = parsed_host.netloc
# Prioritize scheme parameter, but if not specified, use scheme implied from host
scheme = parsed_host.scheme if scheme is None else scheme
port = port or parsed_host.port # Default to port override
unparsed_path = urlparse.urlparse(template.format(**template_vars)).geturl()
# If a host was specified, try to return a full url
if host:
if not scheme:
raise ValueError('No scheme supplied and scheme could not be inferred from the host: {}'.format(host))
if port:
host_no_port = host.partition(':')[0] # Extract the host with no port supplied
host = '{host_no_port}:{port}'.format(host_no_port=host_no_port, port=port)
constructed_url = '//' + host + unparsed_path
url = urlparse.urlparse(constructed_url, scheme=scheme).geturl()
else:
url = unparsed_path
# Remove trailing parameter characters
url = url[:-1] if url[-1] == '?' else url
url = url[:-1] if url[-1] == '&' else url
return url | Builds a url with a string template and template variables; relative path if host is None, abs otherwise:
template format: "/staticendpoint/{dynamic_endpoint}?{params}" | entailment |
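The absolute-URL branch boils down to joining '//host[:port]' with the formatted path and letting urlparse attach the scheme. A tiny standalone sketch of that step; the host, port and path are made up, and the import is written to work on Python 2 and 3.

```python
try:
    import urllib.parse as urlparse  # Python 3
except ImportError:
    import urlparse                  # Python 2

host, port, path = 'example.com', 8080, '/follow?user=42'
constructed = '//{0}:{1}{2}'.format(host, port, path)
print(urlparse.urlparse(constructed, scheme='https').geturl())  # https://example.com:8080/follow?user=42
```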
def poll(function, step=0.5, timeout=3, ignore_exceptions=(), exception_message='', message_builder=None,
args=(), kwargs=None, ontimeout=()):
"""Calls the function until bool(return value) is truthy
@param step: Wait time between each function call
@param timeout: Max amount of time that will elapse. If the function is in progress when timeout has passed, the
function will be allowed to complete.
@type ignore_exceptions: tuple
@param ignore_exceptions: A tuple of exceptions that will be ignored if they are raised
@param exception_message: The message that will be raised as an AssertionError if the function never
returns bool(True)
@param ontimeout: On timeout, execute the functions in order, but do not fail if execution fails
@return: True
"""
# Validate usage
try:
iter(ontimeout)
except TypeError:
raise ValueError('Please specify an iterable of callable functions for ontimeout')
kwargs = kwargs or dict()
end_time = time.time() + timeout
while True:
try:
value = function(*args, **kwargs)
if bool(value):
return value
except ignore_exceptions:
pass
time.sleep(step)
if time.time() > end_time:
break
# Execute the callbacks
for fn in ontimeout:
try:
fn()
except:
continue
if message_builder:
exception_message = message_builder(*args, **kwargs)
raise AssertionError(exception_message) | Calls the function until bool(return value) is truthy
@param step: Wait time between each function call
@param timeout: Max amount of time that will elapse. If the function is in progress when timeout has passed, the
function will be allowed to complete.
@type ignore_exceptions: tuple
@param ignore_exceptions: A tuple of exceptions that will be ignored if they are raised
@param exception_message: The message that will be raised as an AssertionError if the function never
returns bool(True)
@param ontimeout: On timeout, execute the functions in order, but do not fail if execution fails
@return: True | entailment |
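A minimal standalone version of the same poll-until-truthy pattern, without the exception handling and timeout callbacks; the condition used here is only an illustration.

```python
import time

def wait_until(condition, step=0.5, timeout=3):
    # Call condition() repeatedly until it returns something truthy or the timeout elapses.
    end_time = time.time() + timeout
    while True:
        value = condition()
        if value:
            return value
        time.sleep(step)
        if time.time() > end_time:
            raise AssertionError('condition never became truthy')

start = time.time()
print(wait_until(lambda: time.time() - start > 1))  # becomes True after roughly one second
```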
def build(self, **variables):
"""Formats the locator with specified parameters"""
return Locator(self.by, self.locator.format(**variables), self.description) | Formats the locator with specified parameters | entailment |
def random_words_string(count=1, maxchars=None, sep=''):
"""Gets a
"""
nouns = sep.join([random_word() for x in xrange(0, count)])
if maxchars is not None and len(nouns) > maxchars:
nouns = nouns[0:maxchars-1]
return nouns | Gets a string of random words joined by sep, optionally truncated to maxchars. | entailment |
def is_subdomain(domain, reference):
"""Tests if a hostname is a subdomain of a reference hostname
e.g. www.domain.com is subdomain of reference
@param domain: Domain to test if it is a subdomain
@param reference: Reference "parent" domain
"""
index_of_reference = domain.find(reference)
if index_of_reference > 0 and domain[index_of_reference:] == reference:
return True
return False | Tests if a hostname is a subdomain of a reference hostname
e.g. www.domain.com is subdomain of reference
@param domain: Domain to test if it is a subdomain
@param reference: Reference "parent" domain | entailment |
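Note that the `find()`-based check above also accepts hosts that merely end with the reference string (for example 'otherdomain.com' against 'domain.com'). For comparison, a stricter standalone variant that requires a dot boundary:

```python
def looks_like_subdomain(domain, reference):
    # True only when domain ends with ".<reference>" and is not the reference itself.
    return domain != reference and domain.endswith('.' + reference.lstrip('.'))

print(looks_like_subdomain('www.domain.com', 'domain.com'))   # True
print(looks_like_subdomain('domain.com', 'domain.com'))       # False
print(looks_like_subdomain('otherdomain.com', 'domain.com'))  # False
```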
def dump_requestdriver_cookies_into_webdriver(requestdriver, webdriverwrapper, handle_sub_domain=True):
"""Adds all cookies in the RequestDriver session to Webdriver
@type requestdriver: RequestDriver
@param requestdriver: RequestDriver with cookies
@type webdriverwrapper: WebDriverWrapper
@param webdriverwrapper: WebDriverWrapper to receive cookies
@param handle_sub_domain: If True, will check driver url and change cookies with subdomains of that domain to match
the current driver domain in order to avoid cross-domain cookie errors
@rtype: None
@return: None
"""
driver_hostname = urlparse(webdriverwrapper.current_url()).netloc
for cookie in requestdriver.session.cookies:
# Check if there will be a cross-domain violation and handle if necessary
cookiedomain = cookie.domain
if handle_sub_domain:
if is_subdomain(cookiedomain, driver_hostname):
# Cookies of requestdriver are subdomain cookies of webdriver; make them the base domain
cookiedomain = driver_hostname
try:
webdriverwrapper.add_cookie({
'name': cookie.name,
'value': cookie.value,
'domain': cookiedomain,
'path': cookie.path
})
except WebDriverException, e:
raise WebDriverException(
msg='Cannot set cookie "{name}" with domain "{domain}" on url "{url}" {override}: {message}'.format(
name=cookie.name,
domain=cookiedomain,
url=webdriverwrapper.current_url(),
override='(Note that subdomain override is set!)' if handle_sub_domain else '',
message=e.message),
screen=e.screen,
stacktrace=e.stacktrace
) | Adds all cookies in the RequestDriver session to Webdriver
@type requestdriver: RequestDriver
@param requestdriver: RequestDriver with cookies
@type webdriverwrapper: WebDriverWrapper
@param webdriverwrapper: WebDriverWrapper to receive cookies
@param handle_sub_domain: If True, will check driver url and change cookies with subdomains of that domain to match
the current driver domain in order to avoid cross-domain cookie errors
@rtype: None
@return: None | entailment |
def dump_webdriver_cookies_into_requestdriver(requestdriver, webdriverwrapper):
"""Adds all cookies in the Webdriver session to requestdriver
@type requestdriver: RequestDriver
@param requestdriver: RequestDriver with cookies
@type webdriverwrapper: WebDriverWrapper
@param webdriverwrapper: WebDriverWrapper to receive cookies
@rtype: None
@return: None
"""
for cookie in webdriverwrapper.get_cookies():
# Webdriver uses "expiry"; requests uses "expires", adjust for this
expires = cookie.pop('expiry', None)
cookie.update({'expires': expires})
requestdriver.session.cookies.set(**cookie) | Adds all cookies in the Webdriver session to requestdriver
@type requestdriver: RequestDriver
@param requestdriver: RequestDriver with cookies
@type webdriverwrapper: WebDriverWrapper
@param webdriverwrapper: WebDriverWrapper to receive cookies
@rtype: None
@return: None | entailment |
def get_firefox_binary():
"""Gets the firefox binary
@rtype: FirefoxBinary
"""
browser_config = BrowserConfig()
constants_config = ConstantsConfig()
log_dir = os.path.join(constants_config.get('logs_dir'), 'firefox')
create_directory(log_dir)
log_path = os.path.join(log_dir, '{}_{}.log'.format(datetime.datetime.now().isoformat('_'), words.random_word()))
log_file = open(log_path, 'w')
log('Firefox log file: {}'.format(log_path))
binary = FirefoxBinary(log_file=log_file)
return binary | Gets the firefox binary
@rtype: FirefoxBinary | entailment |
def _log_fail_callback(driver, *args, **kwargs):
"""Raises an assertion error if the page has severe console errors
@param driver: ShapewaysDriver
@return: None
"""
try:
logs = driver.get_browser_log(levels=[BROWSER_LOG_LEVEL_SEVERE])
failure_message = 'There were severe console errors on this page: {}'.format(logs)
failure_message = failure_message.replace('{', '{{').replace('}', '}}') # Escape braces for error message
driver.assertion.assert_false(
logs,
failure_message=failure_message
)
except (urllib2.URLError, socket.error, WebDriverException):
# The session has ended, don't check the logs
pass | Raises an assertion error if the page has severe console errors
@param driver: ShapewaysDriver
@return: None | entailment |
def clone_and_update(self, **kwargs):
"""Clones the object and updates the clone with the args
@param kwargs: Keyword arguments to set
@return: The cloned copy with updated values
"""
cloned = self.clone()
cloned.update(**kwargs)
return cloned | Clones the object and updates the clone with the args
@param kwargs: Keyword arguments to set
@return: The cloned copy with updated values | entailment |
def message(self):
"""
Render the body of the message to a string.
"""
template_name = self.template_name() if \
callable(self.template_name) \
else self.template_name
return loader.render_to_string(
template_name, self.get_context(), request=self.request
) | Render the body of the message to a string. | entailment |
def subject(self):
"""
Render the subject of the message to a string.
"""
template_name = self.subject_template_name() if \
callable(self.subject_template_name) \
else self.subject_template_name
subject = loader.render_to_string(
template_name, self.get_context(), request=self.request
)
return ''.join(subject.splitlines()) | Render the subject of the message to a string. | entailment |
def get_context(self):
"""
Return the context used to render the templates for the email
subject and body.
By default, this context includes:
* All of the validated values in the form, as variables of the
same names as their fields.
* The current ``Site`` object, as the variable ``site``.
* Any additional variables added by context processors (this
will be a ``RequestContext``).
"""
if not self.is_valid():
raise ValueError(
"Cannot generate Context from invalid contact form"
)
return dict(self.cleaned_data, site=get_current_site(self.request)) | Return the context used to render the templates for the email
subject and body.
By default, this context includes:
* All of the validated values in the form, as variables of the
same names as their fields.
* The current ``Site`` object, as the variable ``site``.
* Any additional variables added by context processors (this
will be a ``RequestContext``). | entailment |
def get_message_dict(self):
"""
Generate the various parts of the message and return them in a
dictionary, suitable for passing directly as keyword arguments
to ``django.core.mail.send_mail()``.
By default, the following values are returned:
* ``from_email``
* ``message``
* ``recipient_list``
* ``subject``
"""
if not self.is_valid():
raise ValueError(
"Message cannot be sent from invalid contact form"
)
message_dict = {}
for message_part in ('from_email', 'message',
'recipient_list', 'subject'):
attr = getattr(self, message_part)
message_dict[message_part] = attr() if callable(attr) else attr
return message_dict | Generate the various parts of the message and return them in a
dictionary, suitable for passing directly as keyword arguments
to ``django.core.mail.send_mail()``.
By default, the following values are returned:
* ``from_email``
* ``message``
* ``recipient_list``
* ``subject`` | entailment |
def initialise_shopify_session():
"""
Initialise the Shopify session with the Shopify App's API credentials.
"""
if not settings.SHOPIFY_APP_API_KEY or not settings.SHOPIFY_APP_API_SECRET:
raise ImproperlyConfigured("SHOPIFY_APP_API_KEY and SHOPIFY_APP_API_SECRET must be set in settings")
shopify.Session.setup(api_key=settings.SHOPIFY_APP_API_KEY, secret=settings.SHOPIFY_APP_API_SECRET) | Initialise the Shopify session with the Shopify App's API credentials. | entailment |
def anonymous_required(function=None, redirect_url=None):
"""
Decorator requiring the current user to be anonymous (not logged in).
"""
if not redirect_url:
redirect_url = settings.LOGIN_REDIRECT_URL
actual_decorator = user_passes_test(
is_anonymous,
login_url=redirect_url,
redirect_field_name=None
)
if function:
return actual_decorator(function)
return actual_decorator | Decorator requiring the current user to be anonymous (not logged in). | entailment |
def login_required(f, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
"""
Decorator that wraps django.contrib.auth.decorators.login_required, but supports extracting Shopify's authentication
query parameters (`shop`, `timestamp`, `signature` and `hmac`) and passing them on to the login URL (instead of just
wrapping them up and encoding them in to the `next` parameter).
This is useful for ensuring that users are automatically logged on when they first access a page through the Shopify
Admin, which passes these parameters with every page request to an embedded app.
"""
@wraps(f)
def wrapper(request, *args, **kwargs):
if is_authenticated(request.user):
return f(request, *args, **kwargs)
# Extract the Shopify-specific authentication parameters from the current request.
shopify_params = {
k: request.GET[k]
for k in ['shop', 'timestamp', 'signature', 'hmac']
if k in request.GET
}
# Get the login URL.
resolved_login_url = force_str(resolve_url(login_url or settings.LOGIN_URL))
# Add the Shopify authentication parameters to the login URL.
updated_login_url = add_query_parameters_to_url(resolved_login_url, shopify_params)
django_login_required_decorator = django_login_required(redirect_field_name=redirect_field_name,
login_url=updated_login_url)
return django_login_required_decorator(f)(request, *args, **kwargs)
return wrapper | Decorator that wraps django.contrib.auth.decorators.login_required, but supports extracting Shopify's authentication
query parameters (`shop`, `timestamp`, `signature` and `hmac`) and passing them on to the login URL (instead of just
wrapping them up and encoding them in to the `next` parameter).
This is useful for ensuring that users are automatically logged on when they first access a page through the Shopify
Admin, which passes these parameters with every page request to an embedded app. | entailment |
def create_user(self, myshopify_domain, password=None):
"""
Creates and saves a ShopUser with the given domain and password.
"""
if not myshopify_domain:
raise ValueError('ShopUsers must have a myshopify domain')
user = self.model(myshopify_domain=myshopify_domain)
# Never want to be able to log on externally.
# Authentication will be taken care of by Shopify OAuth.
user.set_unusable_password()
user.save(using=self._db)
return user | Creates and saves a ShopUser with the given domain and password. | entailment |
def add_query_parameters_to_url(url, query_parameters):
"""
Merge a dictionary of query parameters into the given URL.
Ensures all parameters are sorted in dictionary order when returning the URL.
"""
# Parse the given URL into parts.
url_parts = urllib.parse.urlparse(url)
# Parse existing parameters and add new parameters.
qs_args = urllib.parse.parse_qs(url_parts[4])
qs_args.update(query_parameters)
# Sort parameters to ensure consistent order.
sorted_qs_args = OrderedDict()
for k in sorted(qs_args.keys()):
sorted_qs_args[k] = qs_args[k]
# Encode the new parameters and return the updated URL.
new_qs = urllib.parse.urlencode(sorted_qs_args, True)
return urllib.parse.urlunparse(list(url_parts[0:4]) + [new_qs] + list(url_parts[5:])) | Merge a dictionary of query parameters into the given URL.
Ensures all parameters are sorted in dictionary order when returning the URL. | entailment |
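For reference, a compact standalone sketch of the same parse/update/sort/re-encode round trip using only the standard library (Python 3 `urllib.parse`, which the helper above also uses); the URL and parameters are hypothetical.

```python
from collections import OrderedDict
import urllib.parse

def with_extra_params(url, extra):
    parts = urllib.parse.urlparse(url)
    params = urllib.parse.parse_qs(parts.query)
    params.update(extra)
    ordered = OrderedDict((k, params[k]) for k in sorted(params))  # dictionary order of keys
    return urllib.parse.urlunparse(parts._replace(query=urllib.parse.urlencode(ordered, True)))

print(with_extra_params('https://example.com/login?shop=x', {'hmac': ['abc'], 'timestamp': ['1']}))
# https://example.com/login?hmac=abc&shop=x&timestamp=1
```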
def bind(self, event_name, callback, *args, **kwargs):
"""Bind an event to a callback
:param event_name: The name of the event to bind to.
:type event_name: str
:param callback: The callback to notify of this event.
"""
self.event_callbacks[event_name].append((callback, args, kwargs)) | Bind an event to a callback
:param event_name: The name of the event to bind to.
:type event_name: str
:param callback: The callback to notify of this event. | entailment |
def send_event(self, event_name, data, channel_name=None):
"""Send an event to the Pusher server.
:param str event_name:
:param Any data:
:param str channel_name:
"""
event = {'event': event_name, 'data': data}
if channel_name:
event['channel'] = channel_name
self.logger.info("Connection: Sending event - %s" % event)
try:
self.socket.send(json.dumps(event))
except Exception as e:
self.logger.error("Failed send event: %s" % e) | Send an event to the Pusher server.
:param str event_name:
:param Any data:
:param str channel_name: | entailment |
def trigger(self, event_name, data):
"""Trigger an event on this channel. Only available for private or
presence channels
:param event_name: The name of the event. Must begin with 'client-'
:type event_name: str
:param data: The data to send with the event.
"""
if self.connection:
if event_name.startswith("client-"):
if self.name.startswith("private-") or self.name.startswith("presence-"):
self.connection.send_event(event_name, data, channel_name=self.name) | Trigger an event on this channel. Only available for private or
presence channels
:param event_name: The name of the event. Must begin with 'client-'
:type event_name: str
:param data: The data to send with the event. | entailment |
def subscribe(self, channel_name, auth=None):
"""Subscribe to a channel.
:param str channel_name: The name of the channel to subscribe to.
:param str auth: The token to use if authenticated externally.
:rtype: pysher.Channel
"""
data = {'channel': channel_name}
if auth is None:
if channel_name.startswith('presence-'):
data['auth'] = self._generate_presence_token(channel_name)
data['channel_data'] = json.dumps(self.user_data)
elif channel_name.startswith('private-'):
data['auth'] = self._generate_auth_token(channel_name)
else:
data['auth'] = auth
self.connection.send_event('pusher:subscribe', data)
self.channels[channel_name] = Channel(channel_name, self.connection)
return self.channels[channel_name] | Subscribe to a channel.
:param str channel_name: The name of the channel to subscribe to.
:param str auth: The token to use if authenticated externally.
:rtype: pysher.Channel | entailment |
def unsubscribe(self, channel_name):
"""Unsubscribe from a channel
:param str channel_name: The name of the channel to unsubscribe from.
"""
if channel_name in self.channels:
self.connection.send_event(
'pusher:unsubscribe', {
'channel': channel_name,
}
)
del self.channels[channel_name] | Unsubscribe from a channel
:param str channel_name: The name of the channel to unsubscribe from. | entailment |
def _connection_handler(self, event_name, data, channel_name):
"""Handle incoming data.
:param str event_name: Name of the event.
:param Any data: Data received.
:param str channel_name: Name of the channel this event and data belongs to.
"""
if channel_name in self.channels:
self.channels[channel_name]._handle_event(event_name, data) | Handle incoming data.
:param str event_name: Name of the event.
:param Any data: Data received.
:param str channel_name: Name of the channel this event and data belongs to. | entailment |
def _reconnect_handler(self):
"""Handle a reconnect."""
for channel_name, channel in self.channels.items():
data = {'channel': channel_name}
if channel.auth:
data['auth'] = channel.auth
self.connection.send_event('pusher:subscribe', data) | Handle a reconnect. | entailment |
def _generate_auth_token(self, channel_name):
"""Generate a token for authentication with the given channel.
:param str channel_name: Name of the channel to generate a signature for.
:rtype: str
"""
subject = "{}:{}".format(self.connection.socket_id, channel_name)
h = hmac.new(self.secret_as_bytes, subject.encode('utf-8'), hashlib.sha256)
auth_key = "{}:{}".format(self.key, h.hexdigest())
return auth_key | Generate a token for authentication with the given channel.
:param str channel_name: Name of the channel to generate a signature for.
:rtype: str | entailment |
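The token is simply an HMAC-SHA256 of "<socket_id>:<channel_name>" keyed with the app secret and prefixed by the app key. A standalone sketch with made-up credentials:

```python
import hashlib
import hmac

socket_id = '1234.5678'          # hypothetical socket id
channel_name = 'private-orders'  # hypothetical channel name
key, secret = 'app-key', b'app-secret'

subject = '{0}:{1}'.format(socket_id, channel_name)
digest = hmac.new(secret, subject.encode('utf-8'), hashlib.sha256).hexdigest()
print('{0}:{1}'.format(key, digest))
```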
def _generate_presence_token(self, channel_name):
"""Generate a presence token.
:param str channel_name: Name of the channel to generate a signature for.
:rtype: str
"""
subject = "{}:{}:{}".format(self.connection.socket_id, channel_name, json.dumps(self.user_data))
h = hmac.new(self.secret_as_bytes, subject.encode('utf-8'), hashlib.sha256)
auth_key = "{}:{}".format(self.key, h.hexdigest())
return auth_key | Generate a presence token.
:param str channel_name: Name of the channel to generate a signature for.
:rtype: str | entailment |
def pos(self, element = None):
''' Tries to decide about the part of speech. '''
tags = []
if element:
if element.startswith(('de ', 'het ', 'het/de', 'de/het')) and not re.search('\[[\w|\s][\w|\s]+\]', element.split('\r\n')[0], re.U):
tags.append('NN')
if re.search('[\w|\s|/]+ \| [\w|\s|/]+ - [\w|\s|/]+', element, re.U):
tags.append('VB')
if re.search('[\w|\s]+ \| [\w|\s]+', element, re.U):
tags.append('JJ')
return tags
else:
for element in self.elements:
if self.word in unicode(element):
tag = self.pos(element)
if tag:
return tag | Tries to decide about the part of speech. | entailment |
def articles(self):
''' Tries to scrape the correct articles for singular and plural from uitmuntend.nl. '''
result = [None, None]
element = self._first('NN')
if element:
element = element.split('\r\n')[0]
if ' | ' in element:
# This means there is a plural
singular, plural = element.split(' | ')
singular, plural = singular.strip(), plural.strip()
else:
# This means there is no plural
singular, plural = element.strip(), ''
result[1] = ''
if singular:
result[0] = singular.split(' ')[0].split('/')
if plural:
result[1] = plural.split(' ')[0].split('/')
return result | Tries to scrape the correct articles for singular and plural from uitmuntend.nl. | entailment |
def plural(self):
''' Tries to scrape the plural version from uitmuntend.nl. '''
element = self._first('NN')
if element:
element = element.split('\r\n')[0]
if ' | ' in element:
# This means there is a plural
singular, plural = element.split(' | ')
return [plural.split(' ')[1]]
else:
# This means there is no plural
return ['']
return [None] | Tries to scrape the plural version from uitmuntend.nl. | entailment |
def download(url, filename, overwrite = False):
''' Downloads a file via HTTP. '''
from requests import get
from os.path import exists
debug('Downloading ' + unicode(url) + '...')
data = get(url)
if data.status_code == 200:
if not exists(filename) or overwrite:
f = open(filename, 'wb')
f.write(data.content)
f.close()
return True
return False | Downloads a file via HTTP. | entailment |
def warning(message):
''' Prints a message if warning mode is enabled. '''
import lltk.config as config
if config['warnings']:
try:
from termcolor import colored
except ImportError:
def colored(message, color):
return message
print colored('@LLTK-WARNING: ' + message, 'red') | Prints a message if warning mode is enabled. | entailment |
def trace(f, *args, **kwargs):
''' Decorator used to trace function calls for debugging purposes. '''
print 'Calling %s() with args %s, %s ' % (f.__name__, args, kwargs)
return f(*args,**kwargs) | Decorator used to trace function calls for debugging purposes. | entailment |
def articles(self):
''' Tries to scrape the correct articles for singular and plural from vandale.nl. '''
result = [None, None]
element = self._first('NN')
if element:
if re.search('(de|het/?de|het);', element, re.U):
result[0] = re.findall('(de|het/?de|het);', element, re.U)[0].split('/')
if re.search('meervoud: (\w+)', element, re.U):
# It's a noun with a plural form
result[1] = ['de']
else:
# It's a noun without a plural form
result[1] = ['']
return result | Tries to scrape the correct articles for singular and plural from vandale.nl. | entailment |
def plural(self):
''' Tries to scrape the plural version from vandale.nl. '''
element = self._first('NN')
if element:
if re.search('meervoud: ([\w|\s|\'|\-|,]+)', element, re.U):
results = re.search('meervoud: ([\w|\s|\'|\-|,]+)', element, re.U).groups()[0].split(', ')
results = [x.replace('ook ', '').strip() for x in results]
return results
else:
# There is no plural form
return ['']
return [None] | Tries to scrape the plural version from vandale.nl. | entailment |
def miniaturize(self):
''' Tries to scrape the miniaturized version from vandale.nl. '''
element = self._first('NN')
if element:
if re.search('verkleinwoord: (\w+)', element, re.U):
return re.findall('verkleinwoord: (\w+)', element, re.U)
else:
return ['']
return [None] | Tries to scrape the miniaturized version from vandale.nl. | entailment |
def _normalize(self, string):
''' Returns a sanitized string. '''
string = super(VerbixDe, self)._normalize(string)
string = string.replace('sie; Sie', 'sie')
string = string.strip()
return string | Returns a sanitized string. | entailment |
def pos(self):
''' Tries to decide about the part of speech. '''
tags = []
if self.tree.xpath('//div[@class="grad733100"]/h2[@class="inline"]//text()'):
info = self.tree.xpath('//div[@class="grad733100"]/h2[@class="inline"]')[0].text_content()
info = info.strip('I ')
if info.startswith(('de', 'het')):
tags.append('NN')
if not info.startswith(('de', 'het')) and info.endswith('en'):
tags.append('VB')
if not info.startswith(('de', 'het')) and not info.endswith('en'):
tags.append('JJ')
return tags | Tries to decide about the part of speech. | entailment |
def _normalize(self, string):
''' Returns a sanitized string. '''
string = super(VerbixFr, self)._normalize(string)
string = string.replace('il; elle', 'il/elle')
string = string.replace('ils; elles', 'ils/elles')
string = string.strip()
return string | Returns a sanitized string. | entailment |
def pos(self, element = None):
''' Tries to decide about the part of speech. '''
tags = []
if element:
if re.search('[\w|\s]+ [m|f]\.', element, re.U):
tags.append('NN')
if '[VERB]' in element:
tags.append('VB')
if 'adj.' in element and re.search('([\w|\s]+, [\w|\s]+)', element, re.U):
tags.append('JJ')
else:
for element in self.elements:
if element.startswith(self.word):
tags += self.pos(element)
return list(set(tags)) | Tries to decide about the part of speech. | entailment |
def gender(self):
''' Tries to scrape the gender for a given noun from leo.org. '''
element = self._first('NN')
if element:
if re.search('([m|f|n)])\.', element, re.U):
genus = re.findall('([m|f|n)])\.', element, re.U)[0]
return genus | Tries to scrape the gender for a given noun from leo.org. | entailment |
def isempty(result):
''' Finds out if a scraping result should be considered empty. '''
if isinstance(result, list):
for element in result:
if isinstance(element, list):
if not isempty(element):
return False
else:
if element is not None:
return False
else:
if result is not None:
return False
return True | Finds out if a scraping result should be considered empty. | entailment |
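A compact standalone paraphrase of the same recursive test (only lists are handled here, for brevity); the sample results are dummies.

```python
def all_none(result):
    # A result is "empty" when it is None or contains only Nones, at any depth.
    if isinstance(result, list):
        return all(all_none(element) for element in result)
    return result is None

print(all_none([None, [None, None]]))  # True
print(all_none([None, ['de']]))        # False
```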
def method2pos(method):
''' Returns a list of valid POS-tags for a given method. '''
if method in ('articles', 'plural', 'miniaturize', 'gender'):
pos = ['NN']
elif method in ('conjugate',):
pos = ['VB']
elif method in ('comparative', 'superlative'):
pos = ['JJ']
else:
pos = ['*']
return pos | Returns a list of valid POS-tags for a given method. | entailment |
def register(scraper):
''' Registers a scraper to make it available for the generic scraping interface. '''
global scrapers
language = scraper('').language
if not language:
raise Exception('No language specified for your scraper.')
if scrapers.has_key(language):
scrapers[language].append(scraper)
else:
scrapers[language] = [scraper] | Registers a scraper to make it available for the generic scraping interface. | entailment |
def discover(language):
''' Discovers all registered scrapers to be used for the generic scraping interface. '''
debug('Discovering scrapers for \'%s\'...' % (language,))
global scrapers, discovered
for language in scrapers.iterkeys():
discovered[language] = {}
for scraper in scrapers[language]:
blacklist = ['download', 'isdownloaded', 'getelements']
methods = [method for method in dir(scraper) if method not in blacklist and not method.startswith('_') and callable(getattr(scraper, method))]
for method in methods:
if discovered[language].has_key(method):
discovered[language][method].append(scraper)
else:
discovered[language][method] = [scraper]
debug('%d scrapers with %d methods (overall) registered for \'%s\'.' % (len(scrapers[language]), len(discovered[language].keys()), language)) | Discovers all registered scrapers to be used for the generic scraping interface. | entailment |
def scrape(language, method, word, *args, **kwargs):
''' Uses custom scrapers and calls provided method. '''
scraper = Scrape(language, word)
if hasattr(scraper, method):
function = getattr(scraper, method)
if callable(function):
return function(*args, **kwargs)
else:
raise NotImplementedError('The method ' + method + '() is not implemented so far.') | Uses custom scrapers and calls provided method. | entailment |
def iterscrapers(self, method, mode = None):
''' Iterates over all available scrapers. '''
global discovered
if discovered.has_key(self.language) and discovered[self.language].has_key(method):
for Scraper in discovered[self.language][method]:
yield Scraper | Iterates over all available scrapers. | entailment |
def merge(self, elements):
''' Merges all scraping results to a list sorted by frequency of occurrence. '''
from collections import Counter
from lltk.utils import list2tuple, tuple2list
# The list2tuple conversion is necessary because mutable objects (e.g. lists) are not hashable
merged = tuple2list([value for value, count in Counter(list2tuple(list(elements))).most_common()])
return merged | Merges all scraping results to a list sorted by frequency of occurrence. | entailment |
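The frequency-ordered merge is essentially `Counter.most_common()` over hashable results; a standalone sketch with dummy scraper output (tuples stand in for the list2tuple conversion mentioned in the comment):

```python
from collections import Counter

results = [('de',), ('het',), ('de',), ('de',)]
merged = [value for value, count in Counter(results).most_common()]
print(merged)  # [('de',), ('het',)] - most frequent answer first
```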
def clean(self, elements):
''' Removes empty or incomplete answers. '''
cleanelements = []
for i in xrange(len(elements)):
if isempty(elements[i]):
return []
next = elements[i]
if isinstance(elements[i], (list, tuple)):
next = self.clean(elements[i])
if next:
cleanelements.append(elements[i])
return cleanelements | Removes empty or incomplete answers. | entailment |
def _needs_download(self, f):
''' Decorator used to make sure that the downloading happens prior to running the task. '''
@wraps(f)
def wrapper(self, *args, **kwargs):
if not self.isdownloaded():
self.download()
return f(self, *args, **kwargs)
return wrapper | Decorator used to make sure that the downloading happens prior to running the task. | entailment |
def download(self):
''' Downloads HTML from url. '''
self.page = requests.get(self.url)
self.tree = html.fromstring(self.page.text) | Downloads HTML from url. | entailment |
def _needs_elements(self, f):
''' Decorator used to make sure that there are elements prior to running the task. '''
@wraps(f)
def wrapper(self, *args, **kwargs):
if self.elements == None:
self.getelements()
return f(self, *args, **kwargs)
return wrapper | Decorator used to make sure that there are elements prior to running the task. | entailment |
def _first(self, tag):
''' Returns the first element with required POS-tag. '''
self.getelements()
for element in self.elements:
if tag in self.pos(element):
return element
return None | Returns the first element with required POS-tag. | entailment |
def _normalize(self, string):
''' Returns a sanitized string. '''
string = string.replace(u'\xb7', '')
string = string.replace(u'\u0331', '')
string = string.replace(u'\u0323', '')
string = string.strip(' \n\rI.')
return string | Returns a sanitized string. | entailment |
def pos(self, element = None):
''' Tries to decide about the part of speech. '''
tags = []
if element:
if element.startswith(('der', 'die', 'das')):
tags.append('NN')
if ' VERB' in element:
tags.append('VB')
if ' ADJ' in element:
tags.append('JJ')
else:
for element in self.elements:
if self.word in unicode(element):
return self.pos(element)
return tags | Tries to decide about the part of speech. | entailment |
def articles(self):
''' Tries to scrape the correct articles for singular and plural from de.pons.eu. '''
result = [None, None]
element = self._first('NN')
if element:
result[0] = [element.split(' ')[0].replace('(die)', '').strip()]
if 'kein Plur' in element:
# There is no plural
result[1] = ['']
else:
# If a plural form exists, there is only one possibility
result[1] = ['die']
return result | Tries to scrape the correct articles for singular and plural from de.pons.eu. | entailment |
def plural(self):
''' Tries to scrape the plural version from pons.eu. '''
element = self._first('NN')
if element:
if 'kein Plur' in element:
# There is no plural
return ['']
if re.search(', ([\w|\s|/]+)>', element, re.U):
# Plural form is provided
return re.findall(', ([\w|\s|/]+)>', element, re.U)[0].split('/')
if re.search(', -(\w+)>', element, re.U):
# Suffix is provided
suffix = re.findall(', -(\w+)>', element, re.U)[0]
return [self.word + suffix]
if element.endswith('->'):
# Plural is the same as singular
return [self.word]
return [None] | Tries to scrape the plural version from pons.eu. | entailment |
def reference(language, word):
''' Returns the articles (singular and plural) combined with singular and plural for a given noun. '''
sg, pl, art = word, '/'.join(plural(language, word) or ['-']), [[''], ['']]
art[0], art[1] = articles(language, word) or (['-'], ['-'])
result = ['%s %s' % ('/'.join(art[0]), sg), '%s %s' % ('/'.join(art[1]), pl)]
result = [None if x == '- -' else x for x in result]
return result | Returns the articles (singular and plural) combined with singular and plural for a given noun. | entailment |
def translate(src, dest, word):
''' Translates a word using Google Translate. '''
results = []
try:
from textblob import TextBlob
results.append(TextBlob(word).translate(from_lang = src, to = dest).string)
except ImportError:
pass
if not results:
return [None]
return results | Translates a word using Google Translate. | entailment |
def audiosamples(language, word, key = ''):
''' Returns a list of URLs to suitable audiosamples for a given word. '''
from lltk.audiosamples import forvo, google
urls = []
urls += forvo(language, word, key)
urls += google(language, word)
return urls | Returns a list of URLs to suitable audiosamples for a given word. | entailment |
def images(language, word, n = 20, *args, **kwargs):
''' Returns a list of URLs to suitable images for a given word.'''
from lltk.images import google
return google(language, word, n, *args, **kwargs) | Returns a list of URLs to suitable images for a given word. | entailment |
def articles(word):
''' Returns the articles (singular and plural) for a given noun. '''
from pattern.it import article
result = [[None], [None]]
genus = gender(word) or 'f'
result[0] = [article(word, function = 'definite', gender = genus)]
result[1] = [article(plural(word)[0], function = 'definite', gender = (genus, 'p'))]
return result | Returns the articles (singular and plural) for a given noun. | entailment |
def google(language, word, n = 8, *args, **kwargs):
''' Downloads suitable images for a given word from Google Images. '''
if not kwargs.has_key('start'):
kwargs['start'] = 0
if not kwargs.has_key('itype'):
kwargs['itype'] = 'photo|clipart|lineart'
if not kwargs.has_key('isize'):
kwargs['isize'] = 'small|medium|large|xlarge'
if not kwargs.has_key('filetype'):
kwargs['filetype'] = 'jpg'
info = {'q' : word, 'hl' : language, 'start' : str(kwargs['start']), 'as_filetype' : kwargs['filetype'], 'imgsz' : kwargs['isize'], 'imgtype' : kwargs['itype'], 'rsz' : '8', 'safe' : 'active'}
query = '&'.join([x[0] + '=' + x[1] for x in info.items()])
url = 'https://ajax.googleapis.com/ajax/services/search/images?v=1.0&' + query
debug('Loading ' + unicode(url) + '...')
page = requests.get(url)
data = json.loads(page.text)
images = []
if data and data.has_key('responseData') and data['responseData']:
items = data['responseData']['results']
if items:
images += [item['url'] for item in items]
if len(images) < int(n):
kwargs['start'] += 8
images += google(language, word, n, *args, **kwargs)
return images[:int(n)] | Downloads suitable images for a given word from Google Images. | entailment |
def _normalize(self, string):
''' Returns a sanitized string. '''
string = string.replace(u'\xa0', '')
string = string.strip()
return string | Returns a sanitized string. | entailment |
def _extract(self, identifier):
''' Extracts data from conjugation table. '''
conjugation = []
if self.tree.xpath('//p/b[normalize-space(text()) = "' + identifier.decode('utf-8') + '"]'):
p = self.tree.xpath('//p/b[normalize-space(text()) = "' + identifier.decode('utf-8') + '"]')[0].getparent()
for font in p.iterfind('font'):
text = self._normalize(font.text_content())
next = font.getnext()
text += ' ' + self._normalize(next.text_content())
while True:
next = next.getnext()
if next.tag != 'span':
break
text += '/' + self._normalize(next.text_content())
conjugation.append(text)
return conjugation | Extracts data from conjugation table. | entailment |
def conjugate(self, tense = 'present'):
''' Tries to conjugate a given verb using verbix.com.'''
if self.tenses.has_key(tense):
return self._extract(self.tenses[tense])
elif self.tenses.has_key(tense.title()):
return self._extract(self.tenses[tense.title()])
return [None] | Tries to conjugate a given verb using verbix.com. | entailment |
def load(self, filename, replace = False):
''' Loads a configuration file (JSON). '''
import os, json, re
if os.path.exists(filename):
f = open(filename, 'r')
content = f.read()
content = re.sub('[\t ]*?[#].*?\n', '', content)
try:
settings = json.loads(content)
except ValueError:
# This means that the configuration file is not a valid JSON document
from lltk.exceptions import ConfigurationError
raise ConfigurationError('\'' + filename + '\' is not a valid JSON document.')
f.close()
if replace:
self.settings = settings
else:
self.settings.update(settings)
else:
lltkfilename = self.settings['module-path'] + '/' + self.settings['lltk-config-path'] + filename
if os.path.exists(lltkfilename):
# This means that filename was provided relative to the lltk module path
return self.load(lltkfilename)
from lltk.exceptions import ConfigurationError
raise ConfigurationError('\'' + filename + '\' seems to be non-existent.') | Loads a configuration file (JSON). | entailment |
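The comment-stripping step before `json.loads` can be seen in isolation below; the inline configuration text is invented for the example.

```python
import json
import re

content = '{\n    # comments like this are stripped before parsing\n    "default-caches": ["Redis", "NoCache"]\n}\n'
content = re.sub('[\t ]*?[#].*?\n', '', content)  # same pattern used by load()
print(json.loads(content))
```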
def save(self, filename):
''' Saves the current configuration to file 'filename' (JSON). '''
import json
f = open(filename, 'w')
json.dump(self.settings, f, indent = 4)
f.close() | Saves the current configuration to file 'filename' (JSON). | entailment |
def forvo(language, word, key):
''' Returns a list of suitable audiosamples for a given word from Forvo.com. '''
from requests import get
url = 'http://apifree.forvo.com/action/word-pronunciations/format/json/word/%s/language/%s/key/%s/' % (word, language, key)
urls = []
page = get(url)
if page.status_code == 200:
if 'incorrect' in page.text:
from lltk.exceptions import IncorrectForvoAPIKey
raise IncorrectForvoAPIKey('Your Forvo API key seems to be wrong. Please check on http://api.forvo.com.')
data = page.json()
if data == ['Limit/day reached.']:
from lltk.exceptions import DailyForvoLimitExceeded
raise DailyForvoLimitExceeded('You have exceeded your daily Forvo API limit.')
if data.has_key('items') and len(data['items']):
items = sorted(data['items'], key = lambda x: int(x['num_votes']), reverse = True)
for item in items:
urls.append(item['pathmp3'])
return urls | Returns a list of suitable audiosamples for a given word from Forvo.com. | entailment |
def _normalize(self, string):
''' Returns a sanitized string. '''
string = string.replace(u'\xb7', '')
string = string.replace(u'\xa0', ' ')
string = string.replace('selten: ', '')
string = string.replace('Alte Rechtschreibung', '')
string = string.strip()
return string | Returns a sanitized string. | entailment |
def pos(self):
''' Tries to decide about the part of speech. '''
tags = []
if self.tree.xpath('//div[@id="mw-content-text"]//a[@title="Hilfe:Wortart"]/text()'):
info = self.tree.xpath('//div[@id="mw-content-text"]//a[@title="Hilfe:Wortart"]/text()')[0]
if info == 'Substantiv':
tags.append('NN')
if info == 'Verb':
tags.append('VB')
if info == 'Adjektiv':
tags.append('JJ')
return tags | Tries to decide about the part of speech. | entailment |
def register(cache):
''' Registers a cache. '''
global caches
name = cache().name
if not caches.has_key(name):
caches[name] = cache | Registers a cache. | entailment |
def enable(identifier = None, *args, **kwargs):
''' Enables a specific cache for the current session. Remember that is has to be registered. '''
global cache
if not identifier:
for item in (config['default-caches'] + ['NoCache']):
if caches.has_key(item):
debug('Enabling default cache %s...' % (item,))
cache = caches[item](*args, **kwargs)
if not cache.status():
warning('%s could not be loaded. Is the backend running (%s:%d)?' % (item, cache.server, cache.port))
continue
# This means that the cache backend was set up successfully
break
else:
debug('Cache backend %s is not registered. Are all requirements satisfied?' % (item,))
elif caches.has_key(identifier):
debug('Enabling cache %s...' % (identifier,))
previouscache = cache
cache = caches[identifier](*args, **kwargs)
if not cache.status():
warning('%s could not be loaded. Is the backend running (%s:%d)?' % (identifier, cache.server, cache.port))
cache = previouscache
else:
debug('Cache backend %s is not registered. Are all requirements satisfied?' % (identifier,)) | Enables a specific cache for the current session. Remember that is has to be registered. | entailment |
def cached(key = None, extradata = {}):
''' Decorator used for caching. '''
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
uid = key
if not uid:
from hashlib import md5
arguments = list(args) + [(a, kwargs[a]) for a in sorted(kwargs.keys())]
uid = md5(str(arguments)).hexdigest()
if exists(uid):
debug('Item \'%s\' is cached (%s).' % (uid, cache))
return get(uid)
else:
debug('Item \'%s\' is not cached (%s).' % (uid, cache))
result = f(*args, **kwargs)
debug('Caching result \'%s\' as \'%s\' (%s)...' % (result, uid, cache))
debug('Extra data: ' + (str(extradata) or 'None'))
put(uid, result, extradata)
return result
return wrapper
return decorator | Decorator used for caching. | entailment |
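A stripped-down standalone analogue of the caching decorator: results are stored in a plain dict keyed by an md5 of the call arguments, whereas the real version delegates storage to the active cache backend.

```python
import hashlib
from functools import wraps

_store = {}

def memoized(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        arguments = list(args) + [(k, kwargs[k]) for k in sorted(kwargs)]
        uid = hashlib.md5(str(arguments).encode('utf-8')).hexdigest()
        if uid not in _store:
            _store[uid] = f(*args, **kwargs)
        return _store[uid]
    return wrapper

@memoized
def slow_double(x):
    return x * 2

print(slow_double(21), slow_double(21))  # the second call is served from the cache
```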
def needsconnection(self, f):
''' Decorator used to make sure that the connection has been established. '''
@wraps(f)
def wrapper(self, *args, **kwargs):
if not self.connection:
self.connect()
return f(self, *args, **kwargs)
return wrapper | Decorator used to make sure that the connection has been established. | entailment |
def language(l):
''' Use this as a decorator (implicitly or explicitly). '''
# Usage: @language('en') or function = language('en')(function)
def decorator(f):
''' Decorator used to prepend the language as an argument. '''
@wraps(f)
def wrapper(*args, **kwargs):
return f(l, *args, **kwargs)
return wrapper
return decorator | Use this as a decorator (implicitly or explicitly). | entailment |
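As the comment notes, the decorator bakes the language code in as the first positional argument. A small usage sketch that re-declares the decorator locally so the snippet is self-contained; the function names are invented.

```python
from functools import wraps

def language(l):
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            return f(l, *args, **kwargs)
        return wrapper
    return decorator

def describe(lang, word):
    return '{0}: {1}'.format(lang, word)

describe_en = language('en')(describe)   # explicit form from the usage comment above
print(describe_en('apple'))              # -> en: apple

@language('de')                          # implicit decorator form
def describe_de(lang, word):
    return '{0}: {1}'.format(lang, word)

print(describe_de('Apfel'))              # -> de: Apfel
```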
def _load_language_or_die(f):
''' Decorator used to load a custom method for a given language. '''
# This decorator checks if there's a custom method for a given language.
# If so, prefer the custom method, otherwise raise exception NotImplementedError.
@wraps(f)
def loader(language, word, *args, **kwargs):
method = f.func_name
try:
if isinstance(language, (list, tuple)):
_lltk = __import__('lltk.' + language[0], globals(), locals(), [method], -1)
else:
_lltk = __import__('lltk.' + language, globals(), locals(), [method], -1)
except ImportError:
from lltk.exceptions import LanguageNotSupported
raise LanguageNotSupported('The language ' + language + ' is not supported so far.')
if hasattr(_lltk, method):
function = getattr(_lltk, method)
if callable(function):
return function(word, *args, **kwargs)
# No custom method implemented, yet.
raise NotImplementedError('Method lltk.' + language + '.' + method +'() not implemented, yet.')
return loader | Decorator used to load a custom method for a given language. | entailment |
def tatoeba(language, word, minlength = 10, maxlength = 100):
''' Returns a list of suitable textsamples for a given word using Tatoeba.org. '''
word, sentences = unicode(word), []
page = requests.get('http://tatoeba.org/deu/sentences/search?query=%s&from=%s&to=und' % (word, lltk.locale.iso639_1to3(language)))
tree = html.fromstring(page.text)
for sentence in tree.xpath('//div[contains(concat(" ", normalize-space(@class), " "), " mainSentence ")]/div/a/text()'):
sentence = sentence.strip(u' "ββ').replace(u'β β', u' β ').replace('" "', u' β ')
if word in sentence and len(sentence) < maxlength and len(sentence) > minlength:
sentences.append(sentence)
return sentences | Returns a list of suitable textsamples for a given word using Tatoeba.org. | entailment |
def gender(self):
''' Tries to scrape the correct gender for a given word from wordreference.com '''
elements = self.tree.xpath('//table[@class="WRD"]')
if len(elements):
elements = self.tree.xpath('//table[@class="WRD"]')[0]
if len(elements):
if '/iten/' in self.page.url:
elements = elements.xpath('//td[@class="FrWrd"]/em[@class="POS2"]/text()')
elif '/enit/' in self.page.url:
elements = elements.xpath('//td[@class="ToWrd"]/em[@class="POS2"]/text()')
else:
return [None]
element = [element[1:] for element in elements if element in ['nm', 'nf']]
counter = Counter(element)
if len(counter.most_common(1)):
result = counter.most_common(1)[0][0]
return [result]
return [None] | Tries to scrape the correct gender for a given word from wordreference.com | entailment |
def humanize(iso639):
''' Converts ISO639 language identifier to the corresponding (human readable) language name. '''
for i, element in enumerate(LANGUAGES):
if element[1] == iso639 or element[2] == iso639:
return element[0]
return None | Converts ISO639 language identifier to the corresponding (human readable) language name. | entailment |
def add_host(self, host_id=None, host='localhost', port=6379,
unix_socket_path=None, db=0, password=None,
ssl=False, ssl_options=None):
"""Adds a new host to the cluster. This is only really useful for
unittests as normally hosts are added through the constructor and
changes after the cluster has been used for the first time are
unlikely to make sense.
"""
if host_id is None:
raise RuntimeError('Host ID is required')
elif not isinstance(host_id, (int, long)):
raise ValueError('The host ID has to be an integer')
host_id = int(host_id)
with self._lock:
if host_id in self.hosts:
raise TypeError('Two hosts share the same host id (%r)' %
(host_id,))
self.hosts[host_id] = HostInfo(host_id=host_id, host=host,
port=port, db=db,
unix_socket_path=unix_socket_path,
password=password, ssl=ssl,
ssl_options=ssl_options)
self._hosts_age += 1 | Adds a new host to the cluster. This is only really useful for
unittests as normally hosts are added through the constructor and
changes after the cluster has been used for the first time are
unlikely to make sense. | entailment |
def remove_host(self, host_id):
"""Removes a host from the client. This only really useful for
unittests.
"""
with self._lock:
rv = self.hosts.pop(host_id, None) is not None
pool = self._pools.pop(host_id, None)
if pool is not None:
pool.disconnect()
self._hosts_age += 1
return rv | Removes a host from the client. This only really useful for
unittests. | entailment |
def disconnect_pools(self):
"""Disconnects all connections from the internal pools."""
with self._lock:
for pool in self._pools.itervalues():
pool.disconnect()
self._pools.clear() | Disconnects all connections from the internal pools. | entailment |
def get_router(self):
"""Returns the router for the cluster. If the cluster reconfigures
the router will be recreated. Usually you do not need to interface
with the router yourself as the cluster's routing client does that
automatically.
This returns an instance of :class:`BaseRouter`.
"""
cached_router = self._router
ref_age = self._hosts_age
if cached_router is not None:
router, router_age = cached_router
if router_age == ref_age:
return router
with self._lock:
router = self.router_cls(self, **(self.router_options or {}))
self._router = (router, ref_age)
return router | Returns the router for the cluster. If the cluster reconfigures
the router will be recreated. Usually you do not need to interface
with the router yourself as the cluster's routing client does that
automatically.
This returns an instance of :class:`BaseRouter`. | entailment |
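A hedged sketch of asking the router where a key lives; `get_host_for_key` is the method rb-style routers expose, but treat the name as an assumption here.
router = cluster.get_router()
host_id = router.get_host_for_key('user:42')  # assumed router method
pool = cluster.get_pool_for_host(host_id)     # pool for that host, per the method below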
def get_pool_for_host(self, host_id):
"""Returns the connection pool for the given host.
This connection pool is used by the redis clients to make sure
that it does not have to reconnect constantly. If you want to use
a custom redis client you can pass this in as connection pool
manually.
"""
if isinstance(host_id, HostInfo):
host_info = host_id
host_id = host_info.host_id
else:
host_info = self.hosts.get(host_id)
if host_info is None:
raise LookupError('Host %r does not exist' % (host_id,))
rv = self._pools.get(host_id)
if rv is not None:
return rv
with self._lock:
rv = self._pools.get(host_id)
if rv is None:
opts = dict(self.pool_options or ())
opts['db'] = host_info.db
opts['password'] = host_info.password
if host_info.unix_socket_path is not None:
opts['path'] = host_info.unix_socket_path
opts['connection_class'] = UnixDomainSocketConnection
if host_info.ssl:
raise TypeError('SSL is not supported for unix '
'domain sockets.')
else:
opts['host'] = host_info.host
opts['port'] = host_info.port
if host_info.ssl:
if SSLConnection is None:
raise TypeError('This version of py-redis does '
'not support SSL connections.')
opts['connection_class'] = SSLConnection
opts.update(('ssl_' + k, v) for k, v in
(host_info.ssl_options or {}).iteritems())
rv = self.pool_cls(**opts)
self._pools[host_id] = rv
return rv | Returns the connection pool for the given host.
This connection pool is used by the redis clients to make sure
that it does not have to reconnect constantly. If you want to use
a custom redis client you can pass this in as connection pool
manually. | entailment |
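A hedged sketch of reusing the cluster's pool with a plain redis-py client instead of the routing client; `StrictRedis(connection_pool=...)` is the standard redis-py constructor.
from redis import StrictRedis

pool = cluster.get_pool_for_host(0)
client = StrictRedis(connection_pool=pool)   # talks to host 0 directly
client.set('direct-key', 'value')
print(client.get('direct-key'))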
def map(self, timeout=None, max_concurrency=64, auto_batch=True):
"""Shortcut context manager for getting a routing client, beginning
a map operation and joining over the result. `max_concurrency`
defines how many outstanding parallel queries can exist before an
implicit join takes place.
In the context manager the client available is a
:class:`MappingClient`. Example usage::
results = {}
with cluster.map() as client:
for key in keys_to_fetch:
results[key] = client.get(key)
for key, promise in results.iteritems():
print '%s => %s' % (key, promise.value)
"""
return self.get_routing_client(auto_batch).map(
timeout=timeout, max_concurrency=max_concurrency) | Shortcut context manager for getting a routing client, beginning
a map operation and joining over the result. `max_concurrency`
defines how many outstanding parallel queries can exist before an
implicit join takes place.
In the context manager the client available is a
:class:`MappingClient`. Example usage::
results = {}
with cluster.map() as client:
for key in keys_to_fetch:
results[key] = client.get(key)
for key, promise in results.iteritems():
print '%s => %s' % (key, promise.value) | entailment |
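A slightly fuller, hedged version of the docstring example: the implicit join happens when the `with` block exits, after which each promise's `.value` is populated.
results = {}
with cluster.map(max_concurrency=16) as client:
    for key in ('a', 'b', 'c'):
        results[key] = client.get(key)   # returns a Promise immediately
for key, promise in results.items():
    print('%s => %s' % (key, promise.value))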
def fanout(self, hosts=None, timeout=None, max_concurrency=64,
auto_batch=True):
"""Shortcut context manager for getting a routing client, beginning
a fanout operation and joining over the result.
In the context manager the client available is a
:class:`FanoutClient`. Example usage::
with cluster.fanout(hosts='all') as client:
client.flushdb()
"""
return self.get_routing_client(auto_batch).fanout(
hosts=hosts, timeout=timeout, max_concurrency=max_concurrency) | Shortcut context manager for getting a routing client, beginning
a fanout operation and joining over the result.
In the context manager the client available is a
:class:`FanoutClient`. Example usage::
with cluster.fanout(hosts='all') as client:
client.flushdb() | entailment |
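A hedged sketch targeting a subset of hosts by id; passing a list of host ids and receiving a per-host mapping in the promise value are assumptions in line with the rb-style API.
with cluster.fanout(hosts=[0, 1]) as client:
    infos = client.info()    # one Promise covering both targeted hosts
print(infos.value)           # assumed shape: {host_id: reply, ...}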
def all(self, timeout=None, max_concurrency=64, auto_batch=True):
"""Fanout to all hosts. Works otherwise exactly like :meth:`fanout`.
Example::
with cluster.all() as client:
client.flushdb()
"""
return self.fanout('all', timeout=timeout,
max_concurrency=max_concurrency,
auto_batch=auto_batch) | Fanout to all hosts. Works otherwise exactly like :meth:`fanout`.
Example::
with cluster.all() as client:
client.flushdb() | entailment |
def execute_commands(self, mapping, *args, **kwargs):
"""Concurrently executes a sequence of commands on a Redis cluster that
are associated with a routing key, returning a new mapping where
values are a list of results that correspond to the command in the same
position. For example::
>>> cluster.execute_commands({
... 'foo': [
... ('PING',),
... ('TIME',),
... ],
... 'bar': [
... ('CLIENT', 'GETNAME'),
... ],
... })
{'bar': [<Promise None>],
'foo': [<Promise True>, <Promise (1454446079, 418404)>]}
Commands that are instances of :class:`redis.client.Script` will first
be checked for their existence on the target nodes then loaded on the
targets before executing and can be interleaved with other commands::
>>> from redis.client import Script
>>> TestScript = Script(None, 'return {KEYS, ARGV}')
>>> cluster.execute_commands({
... 'foo': [
... (TestScript, ('key:1', 'key:2'), range(0, 3)),
... ],
... 'bar': [
... (TestScript, ('key:3', 'key:4'), range(3, 6)),
... ],
... })
{'bar': [<Promise [['key:3', 'key:4'], ['3', '4', '5']]>],
'foo': [<Promise [['key:1', 'key:2'], ['0', '1', '2']]>]}
Internally, :class:`FanoutClient` is used for issuing commands.
"""
def is_script_command(command):
return isinstance(command[0], Script)
def check_script_load_result(script, result):
if script.sha != result:
raise AssertionError(
'Hash mismatch loading {!r}: expected {!r}, got {!r}'.format(
script,
script.sha,
result,
)
)
# Run through all the commands and check to see if there are any
# scripts, and whether or not they have been loaded onto the target
# hosts.
exists = {}
with self.fanout(*args, **kwargs) as client:
for key, commands in mapping.items():
targeted = client.target_key(key)
for command in filter(is_script_command, commands):
script = command[0]
# Set the script hash if it hasn't already been set.
if not script.sha:
script.sha = sha1(script.script).hexdigest()
# Check if the script has been loaded on each host that it
# will be executed on.
for host in targeted._target_hosts:
if script not in exists.setdefault(host, {}):
exists[host][script] = targeted.execute_command('SCRIPT EXISTS', script.sha)
# Execute the pending commands, loading scripts onto servers where they
# do not already exist.
results = {}
with self.fanout(*args, **kwargs) as client:
for key, commands in mapping.items():
results[key] = []
targeted = client.target_key(key)
for command in commands:
# If this command is a script, we need to check and see if
# it needs to be loaded before execution.
if is_script_command(command):
script = command[0]
for host in targeted._target_hosts:
if script in exists[host]:
result = exists[host].pop(script)
if not result.value[0]:
targeted.execute_command('SCRIPT LOAD', script.script).done(
on_success=functools.partial(check_script_load_result, script)
)
keys, arguments = command[1:]
parameters = list(keys) + list(arguments)
results[key].append(targeted.execute_command('EVALSHA', script.sha, len(keys), *parameters))
else:
results[key].append(targeted.execute_command(*command))
return results | Concurrently executes a sequence of commands on a Redis cluster that
are associated with a routing key, returning a new mapping where
values are a list of results that correspond to the command in the same
position. For example::
>>> cluster.execute_commands({
... 'foo': [
... ('PING',),
... ('TIME',),
... ],
... 'bar': [
... ('CLIENT', 'GETNAME'),
... ],
... })
{'bar': [<Promise None>],
'foo': [<Promise True>, <Promise (1454446079, 418404)>]}
Commands that are instances of :class:`redis.client.Script` will first
be checked for their existence on the target nodes then loaded on the
targets before executing and can be interleaved with other commands::
>>> from redis.client import Script
>>> TestScript = Script(None, 'return {KEYS, ARGV}')
>>> cluster.execute_commands({
... 'foo': [
... (TestScript, ('key:1', 'key:2'), range(0, 3)),
... ],
... 'bar': [
... (TestScript, ('key:3', 'key:4'), range(3, 6)),
... ],
... })
{'bar': [<Promise [['key:3', 'key:4'], ['3', '4', '5']]>],
'foo': [<Promise [['key:1', 'key:2'], ['0', '1', '2']]>]}
Internally, :class:`FanoutClient` is used for issuing commands. | entailment |
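By the time `execute_commands` returns, both fanout blocks have exited, so the promises are already resolved; a minimal hedged sketch of unwrapping them, mirroring the docstring example.
results = cluster.execute_commands({
    'foo': [('PING',), ('TIME',)],
})
ping_promise, time_promise = results['foo']
print(ping_promise.value)   # True
print(time_promise.value)   # e.g. (1454446079, 418404)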
def auto_batch_commands(commands):
"""Given a pipeline of commands this attempts to merge the commands
into more efficient ones if that is possible.
"""
pending_batch = None
for command_name, args, options, promise in commands:
# This command cannot be batched, return it as such.
if command_name not in AUTO_BATCH_COMMANDS:
if pending_batch:
yield merge_batch(*pending_batch)
pending_batch = None
yield command_name, args, options, promise
continue
assert not options, 'batch commands cannot merge options'
if pending_batch and pending_batch[0] == command_name:
pending_batch[1].append((args, promise))
else:
if pending_batch:
yield merge_batch(*pending_batch)
pending_batch = (command_name, [(args, promise)])
if pending_batch:
yield merge_batch(*pending_batch) | Given a pipeline of commands this attempts to merge the commands
into more efficient ones if that is possible. | entailment |
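A hedged illustration of the effect from the caller's side: with auto-batching enabled, consecutive GETs routed to the same host are assumed to be coalesced into a single MGET per host rather than one round-trip per key (assuming GET appears in AUTO_BATCH_COMMANDS).
results = []
with cluster.map(auto_batch=True) as client:
    for i in range(10):
        results.append(client.get('key:%d' % i))  # merged per host behind the scenes
values = [p.value for p in results]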
def enqueue_command(self, command_name, args, options):
"""Enqueue a new command into this pipeline."""
assert_open(self)
promise = Promise()
self.commands.append((command_name, args, options, promise))
return promise | Enqueue a new command into this pipeline. | entailment |