sentence1 (string, lengths 52-3.87M) | sentence2 (string, lengths 1-47.2k) | label (1 class) |
---|---|---|
def update(self, attributes=None):
"""
Updates the entry with attributes.
"""
if attributes is None:
attributes = {}
attributes['content_type_id'] = self.sys['content_type'].id
return super(Entry, self).update(attributes) | Updates the entry with attributes. | entailment |
def all(self, query=None):
"""
Gets resource collection for _resource_class.
"""
if query is None:
query = {}
return self.client._get(
self._url(),
query
) | Gets resource collection for _resource_class. | entailment |
def find(self, resource_id, query=None, **kwargs):
"""Gets a single resource."""
if query is None:
query = {}
return self.client._get(
self._url(resource_id),
query,
**kwargs
) | Gets a single resource. | entailment |
def create(self, resource_id=None, attributes=None):
"""
Creates a resource with the given ID (optional) and attributes.
"""
if attributes is None:
attributes = {}
result = None
if not resource_id:
result = self.client._post(
self._url(resource_id),
self._resource_class.create_attributes(attributes),
headers=self._resource_class.create_headers(attributes)
)
else:
result = self.client._put(
self._url(resource_id),
self._resource_class.create_attributes(attributes),
headers=self._resource_class.create_headers(attributes)
)
return result | Creates a resource with the given ID (optional) and attributes. | entailment |
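A brief, hypothetical usage sketch of the POST-vs-PUT branch above, assuming a configured contentful_management Client; the token, space, environment, and content type identifiers are placeholders rather than values from this dataset.

# Hypothetical sketch only; identifiers below are placeholders.
import contentful_management

client = contentful_management.Client('YOUR_MANAGEMENT_TOKEN')
attributes = {'content_type_id': 'blog_post', 'fields': {}}
# Without a resource_id the proxy issues a POST and Contentful assigns the ID.
entry_auto_id = client.entries('space_id', 'master').create(attributes=attributes)
# With a resource_id the proxy issues a PUT to that resource URL instead.
entry_fixed_id = client.entries('space_id', 'master').create('my_entry_id', attributes=attributes)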
def delete(self, resource_id, **kwargs):
"""
Deletes a resource by ID.
"""
return self.client._delete(self._url(resource_id), **kwargs) | Deletes a resource by ID. | entailment |
def to_json(self):
"""
Returns the JSON representation of the role.
"""
result = super(Role, self).to_json()
result.update({
'name': self.name,
'description': self.description,
'permissions': self.permissions,
'policies': self.policies
})
return result | Returns the JSON representation of the role. | entailment |
def to_json(self):
"""
Returns the JSON representation of the space membership.
"""
result = super(SpaceMembership, self).to_json()
result.update({
'admin': self.admin,
'roles': self.roles
})
return result | Returns the JSON representation of the space membership. | entailment |
def create(self, file_or_path, **kwargs):
"""
Creates an upload for the given file or path.
"""
opened = False
if isinstance(file_or_path, str_type()):
file_or_path = open(file_or_path, 'rb')
opened = True
elif not getattr(file_or_path, 'read', False):
raise Exception("A file or path to a file is required for this operation.")
try:
return self.client._post(
self._url(),
file_or_path,
headers=self._resource_class.create_headers({}),
file_upload=True
)
finally:
if opened:
file_or_path.close() | Creates an upload for the given file or path. | entailment |
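For context, a hedged sketch of the two argument types this create() accepts (a filesystem path or an already-open binary file object); the client setup, space ID, and file path are assumptions.

# Hypothetical sketch; token, space ID, and path are placeholders.
import contentful_management

client = contentful_management.Client('YOUR_MANAGEMENT_TOKEN')
upload_from_path = client.uploads('space_id').create('/tmp/image.jpg')
with open('/tmp/image.jpg', 'rb') as handle:
    upload_from_file = client.uploads('space_id').create(handle)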
def find(self, upload_id, **kwargs):
"""
Finds an upload by ID.
"""
return super(UploadsProxy, self).find(upload_id, file_upload=True) | Finds an upload by ID. | entailment |
def delete(self, upload_id):
"""
Deletes an upload by ID.
"""
return super(UploadsProxy, self).delete(upload_id, file_upload=True) | Deletes an upload by ID. | entailment |
def to_json(self):
"""
Returns the JSON Representation of the content type field.
"""
result = {
'name': self.name,
'id': self._real_id(),
'type': self.type,
'localized': self.localized,
'omitted': self.omitted,
'required': self.required,
'disabled': self.disabled,
'validations': [v.to_json() for v in self.validations]
}
if self.type == 'Array':
result['items'] = self.items
if self.type == 'Link':
result['linkType'] = self.link_type
return result | Returns the JSON Representation of the content type field. | entailment |
def coerce(self, value):
"""
Coerces value to location hash.
"""
return {
'lat': float(value.get('lat', value.get('latitude'))),
'lon': float(value.get('lon', value.get('longitude')))
} | Coerces value to location hash. | entailment |
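The coercion logic above is plain dictionary handling, so it can be demonstrated standalone; note that it accepts either the short ('lat'/'lon') or long ('latitude'/'longitude') key names and casts the values to float.

def coerce_location(value):
    # Same logic as the coerce() method above, extracted for a standalone demo.
    return {
        'lat': float(value.get('lat', value.get('latitude'))),
        'lon': float(value.get('lon', value.get('longitude')))
    }

print(coerce_location({'latitude': '40.7128', 'longitude': '-74.0060'}))  # {'lat': 40.7128, 'lon': -74.006}
print(coerce_location({'lat': 52.52, 'lon': 13.405}))                     # {'lat': 52.52, 'lon': 13.405}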
def base_url(klass, space_id, parent_resource_id, resource_url='entries', resource_id=None, environment_id=None):
"""
Returns the URI for the snapshot.
"""
return "spaces/{0}{1}/{2}/{3}/snapshots/{4}".format(
space_id,
'/environments/{0}'.format(environment_id) if environment_id is not None else '',
resource_url,
parent_resource_id,
resource_id if resource_id is not None else ''
) | Returns the URI for the snapshot. | entailment |
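To make the produced URIs concrete, here is the same format expression evaluated with placeholder identifiers, once with an environment and snapshot ID and once without.

print("spaces/{0}{1}/{2}/{3}/snapshots/{4}".format(
    'cfexampleapi', '/environments/master', 'entries', 'nyancat', 'snap_1'))
# spaces/cfexampleapi/environments/master/entries/nyancat/snapshots/snap_1
print("spaces/{0}{1}/{2}/{3}/snapshots/{4}".format(
    'cfexampleapi', '', 'content_types', 'cat', ''))
# spaces/cfexampleapi/content_types/cat/snapshots/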
def to_json(self):
"""
Returns the JSON representation of the snapshot.
"""
result = super(Snapshot, self).to_json()
result.update({
'snapshot': self.snapshot.to_json(),
})
return result | Returns the JSON representation of the snapshot. | entailment |
def all(self, *args, **kwargs):
"""
Gets all usage periods.
"""
return self.client._get(
self._url(),
{},
headers={
'x-contentful-enable-alpha-feature': 'usage-insights'
}
) | Gets all usage periods. | entailment |
def create_attributes(klass, attributes, previous_object=None):
"""
Attributes for webhook creation.
"""
result = super(Webhook, klass).create_attributes(attributes, previous_object)
if 'topics' not in result:
raise Exception("Topics ('topics') must be provided for this operation.")
return result | Attributes for webhook creation. | entailment |
def calls(self):
"""
Provides access to call overview for the given webhook.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/webhook-calls
:return: :class:`WebhookWebhooksCallProxy <contentful_management.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy>` object.
:rtype: contentful.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy
Usage:
>>> webhook_webhooks_call_proxy = webhook.calls()
<WebhookWebhooksCallProxy space_id="cfexampleapi" webhook_id="my_webhook">
"""
return WebhookWebhooksCallProxy(self._client, self.sys['space'].id, self.sys['id']) | Provides access to call overview for the given webhook.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/webhook-calls
:return: :class:`WebhookWebhooksCallProxy <contentful_management.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy>` object.
:rtype: contentful.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy
Usage:
>>> webhook_webhooks_call_proxy = webhook.calls()
<WebhookWebhooksCallProxy space_id="cfexampleapi" webhook_id="my_webhook"> | entailment |
def health(self):
"""
Provides access to health overview for the given webhook.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/webhook-calls/webhook-health
:return: :class:`WebhookWebhooksHealthProxy <contentful_management.webhook_webhooks_health_proxy.WebhookWebhooksHealthProxy>` object.
:rtype: contentful.webhook_webhooks_health_proxy.WebhookWebhooksHealthProxy
Usage:
>>> webhook_webhooks_health_proxy = webhook.health()
<WebhookWebhooksHealthProxy space_id="cfexampleapi" webhook_id="my_webhook">
"""
return WebhookWebhooksHealthProxy(self._client, self.sys['space'].id, self.sys['id']) | Provides access to health overview for the given webhook.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/webhook-calls/webhook-health
:return: :class:`WebhookWebhooksHealthProxy <contentful_management.webhook_webhooks_health_proxy.WebhookWebhooksHealthProxy>` object.
:rtype: contentful.webhook_webhooks_health_proxy.WebhookWebhooksHealthProxy
Usage:
>>> webhook_webhooks_health_proxy = webhook.health()
<WebhookWebhooksHealthProxy space_id="cfexampleapi" webhook_id="my_webhook"> | entailment |
def to_json(self):
"""
Returns the JSON representation of the webhook.
"""
result = super(Webhook, self).to_json()
result.update({
'name': self.name,
'url': self.url,
'topics': self.topics,
'httpBasicUsername': self.http_basic_username,
'headers': self.headers
})
if self.filters:
result.update({'filters': self.filters})
if self.transformation:
result.update({'transformation': self.transformation})
return result | Returns the JSON representation of the webhook. | entailment |
def base_url(self, space_id, content_type_id, environment_id=None, **kwargs):
"""
Returns the URI for the editor interface.
"""
return "spaces/{0}{1}/content_types/{2}/editor_interface".format(
space_id,
'/environments/{0}'.format(environment_id) if environment_id is not None else '',
content_type_id
) | Returns the URI for the editor interface. | entailment |
def to_json(self):
"""
Returns the JSON representation of the editor interface.
"""
result = super(EditorInterface, self).to_json()
result.update({'controls': self.controls})
return result | Returns the JSON representation of the editor interface. | entailment |
def to_json(self):
"""
Returns the JSON Representation of the content type field validation.
"""
result = {}
for k, v in self._data.items():
result[camel_case(k)] = v
return result | Returns the JSON Representation of the content type field validation. | entailment |
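The method above relies on a camel_case() helper to turn snake_case validation keys into the camelCase names the Contentful API expects. A minimal sketch of that conversion, assuming the helper behaves as its name suggests:

def camel_case(snake_str):
    # Assumed behaviour of the camel_case() helper used above.
    first, *rest = snake_str.split('_')
    return first + ''.join(word.title() for word in rest)

validation_data = {'link_content_type': ['blogPost'], 'size': {'min': 1, 'max': 5}}
print({camel_case(k): v for k, v in validation_data.items()})
# {'linkContentType': ['blogPost'], 'size': {'min': 1, 'max': 5}}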
def build(self):
"""
Creates the objects from the JSON response.
"""
if self.json['sys']['type'] == 'Array':
return self._build_array()
return self._build_item(self.json) | Creates the objects from the JSON response. | entailment |
def create(self, resource_id=None, attributes=None):
"""
Creates a resource with a given ID (optional) and attributes for the current content type.
"""
return self.proxy.create(resource_id=resource_id, attributes=attributes) | Creates a resource with a given ID (optional) and attributes for the current content type. | entailment |
def find(self, resource_id, query=None):
"""
Finds a single resource by ID related to the current space.
"""
return self.proxy.find(resource_id, query=query) | Finds a single resource by ID related to the current space. | entailment |
def get_ngroups(self, field=None):
'''
Returns the ngroups count if it was specified in the query, otherwise raises ValueError.
If grouping on more than one field, provide the field argument to specify which count you are looking for.
'''
field = field if field else self._determine_group_field(field)
if 'ngroups' in self.data['grouped'][field]:
return self.data['grouped'][field]['ngroups']
raise ValueError("ngroups not found in response. specify group.ngroups in the query.") | Returns the ngroups count if it was specified in the query, otherwise raises ValueError.
If grouping on more than one field, provide the field argument to specify which count you are looking for. | entailment |
def get_groups_count(self, field=None):
'''
Returns 'matches' from group response.
If grouping on more than one field, provide the field argument to specify which count you are looking for.
'''
field = field if field else self._determine_group_field(field)
if 'matches' in self.data['grouped'][field]:
return self.data['grouped'][field]['matches']
raise ValueError("group matches not found in response") | Returns 'matches' from group response.
If grouping on more than one field, provide the field argument to specify which count you are looking for. | entailment |
def get_flat_groups(self, field=None):
'''
Flattens the group response and just returns a list of documents.
'''
field = field if field else self._determine_group_field(field)
temp_groups = self.data['grouped'][field]['groups']
return [y for x in temp_groups for y in x['doclist']['docs']] | Flattens the group response and just returns a list of documents. | entailment |
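The nested comprehension above flattens Solr's grouped response structure; here is the same expression applied to a small mocked response to show what it produces.

grouped_field = {
    'groups': [
        {'groupValue': 'a', 'doclist': {'docs': [{'id': '1'}, {'id': '2'}]}},
        {'groupValue': 'b', 'doclist': {'docs': [{'id': '3'}]}},
    ]
}
flat = [doc for group in grouped_field['groups'] for doc in group['doclist']['docs']]
print(flat)  # [{'id': '1'}, {'id': '2'}, {'id': '3'}]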
def get_facets(self):
'''
Returns a dictionary of facets::
>>> res = solr.query('SolrClient_unittest',{
'q':'product_name:Lorem',
'facet':True,
'facet.field':'facet_test',
})
>>> res.get_results_count()
4
>>> res.get_facets()
{'facet_test': {'ipsum': 0, 'sit': 0, 'dolor': 2, 'amet,': 1, 'Lorem': 1}}
'''
if not hasattr(self,'facets'):
self.facets = {}
data = self.data
if 'facet_counts' in data.keys() and type(data['facet_counts']) == dict:
if 'facet_fields' in data['facet_counts'].keys() and type(data['facet_counts']['facet_fields']) == dict:
for facetfield in data['facet_counts']['facet_fields']:
if type(data['facet_counts']['facet_fields'][facetfield]) == list:
l = data['facet_counts']['facet_fields'][facetfield]
self.facets[facetfield] = OrderedDict(zip(l[::2],l[1::2]))
return self.facets
else:
raise SolrResponseError("No Facet Information in the Response")
else:
return self.facets | Returns a dictionary of facets::
>>> res = solr.query('SolrClient_unittest',{
'q':'product_name:Lorem',
'facet':True,
'facet.field':'facet_test',
})
>>> res.get_results_count()
4
>>> res.get_facets()
{'facet_test': {'ipsum': 0, 'sit': 0, 'dolor': 2, 'amet,': 1, 'Lorem': 1}} | entailment |
def get_facets_ranges(self):
'''
Returns query facet ranges ::
>>> res = solr.query('SolrClient_unittest',{
'q':'*:*',
'facet':True,
'facet.range':'price',
'facet.range.start':0,
'facet.range.end':100,
'facet.range.gap':10
})
>>> res.get_facets_ranges()
{'price': {'80': 9, '10': 5, '50': 3, '20': 7, '90': 3, '70': 4, '60': 7, '0': 3, '40': 5, '30': 4}}
'''
if not hasattr(self,'facet_ranges'):
self.facet_ranges = {}
data = self.data
if 'facet_counts' in data.keys() and type(data['facet_counts']) == dict:
if 'facet_ranges' in data['facet_counts'].keys() and type(data['facet_counts']['facet_ranges']) == dict:
for facetfield in data['facet_counts']['facet_ranges']:
if type(data['facet_counts']['facet_ranges'][facetfield]['counts']) == list:
l = data['facet_counts']['facet_ranges'][facetfield]['counts']
self.facet_ranges[facetfield] = OrderedDict(zip(l[::2],l[1::2]))
return self.facet_ranges
else:
raise SolrResponseError("No Facet Ranges in the Response")
else:
return self.facet_ranges | Returns query facet ranges ::
>>> res = solr.query('SolrClient_unittest',{
'q':'*:*',
'facet':True,
'facet.range':'price',
'facet.range.start':0,
'facet.range.end':100,
'facet.range.gap':10
})
>>> res.get_facets_ranges()
{'price': {'80': 9, '10': 5, '50': 3, '20': 7, '90': 3, '70': 4, '60': 7, '0': 3, '40': 5, '30': 4}} | entailment |
def get_facet_pivot(self):
'''
Parses facet pivot response. Example::
>>> res = solr.query('SolrClient_unittest',{
'q':'*:*',
'fq':'price:[50 TO *]',
'facet':True,
'facet.pivot':'facet_test,price' #Note how there is no space between fields. They are just separated by commas
})
>>> res.get_facet_pivot()
{'facet_test,price': {'Lorem': {89: 1, 75: 1}, 'ipsum': {53: 1, 70: 1, 55: 1, 89: 1, 74: 1, 93: 1, 79: 1}, 'dolor': {61: 1, 94: 1}, 'sit': {99: 1, 50: 1, 67: 1, 52: 1, 54: 1, 71: 1, 72: 1, 84: 1, 62: 1}, 'amet,': {68: 1}}}
This method has built-in recursion and can support an indefinite number of facets. However, note that the output format is significantly massaged since Solr by default outputs a list of fields in each pivot field.
'''
if not hasattr(self,'facet_pivot'):
self.facet_pivot = {}
if 'facet_counts' in self.data.keys():
pivots = self.data['facet_counts']['facet_pivot']
for fieldset in pivots:
self.facet_pivot[fieldset] = {}
for sub_field_set in pivots[fieldset]:
res = self._rec_subfield(sub_field_set)
self.facet_pivot[fieldset].update(res)
return self.facet_pivot
else:
return self.facet_pivot | Parses facet pivot response. Example::
>>> res = solr.query('SolrClient_unittest',{
'q':'*:*',
'fq':'price:[50 TO *]',
'facet':True,
'facet.pivot':'facet_test,price' #Note how there is no space between fields. They are just separated by commas
})
>>> res.get_facet_pivot()
{'facet_test,price': {'Lorem': {89: 1, 75: 1}, 'ipsum': {53: 1, 70: 1, 55: 1, 89: 1, 74: 1, 93: 1, 79: 1}, 'dolor': {61: 1, 94: 1}, 'sit': {99: 1, 50: 1, 67: 1, 52: 1, 54: 1, 71: 1, 72: 1, 84: 1, 62: 1}, 'amet,': {68: 1}}}
This method has built-in recursion and can support an indefinite number of facets. However, note that the output format is significantly massaged since Solr by default outputs a list of fields in each pivot field. | entailment |
def get_field_values_as_list(self,field):
'''
:param str field: The name of the field for which to pull in values.
Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example::
>>> r.get_field_values_as_list('product_name_exact')
['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem']
'''
return [doc[field] for doc in self.docs if field in doc] | :param str field: The name of the field for which to pull in values.
Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example::
>>> r.get_field_values_as_list('product_name_exact')
['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem'] | entailment |
def get_first_field_values_as_list(self, field):
'''
:param str field: The name of the field for lookup.
Goes through all documents returned looking for specified field. At first encounter will return the field's value.
'''
for doc in self.docs:
if field in doc.keys():
return doc[field]
raise SolrResponseError("No field in result set") | :param str field: The name of the field for lookup.
Goes through all documents returned looking for specified field. At first encounter will return the field's value. | entailment |
def get_facet_values_as_list(self, field):
'''
:param str field: Name of facet field to retrieve values from.
Returns facet values as list for a given field. Example::
>>> res = solr.query('SolrClient_unittest',{
'q':'*:*',
'facet':'true',
'facet.field':'facet_test',
})
>>> res.get_facet_values_as_list('facet_test')
[9, 6, 14, 10, 11]
>>> res.get_facets()
{'facet_test': {'Lorem': 9, 'ipsum': 6, 'amet,': 14, 'dolor': 10, 'sit': 11}}
'''
facets = self.get_facets()
out = []
if field in facets.keys():
for facetfield in facets[field]:
out.append(facets[field][facetfield])
return out
else:
raise SolrResponseError("No field in facet output") | :param str field: Name of facet field to retrieve values from.
Returns facet values as list for a given field. Example::
>>> res = solr.query('SolrClient_unittest',{
'q':'*:*',
'facet':'true',
'facet.field':'facet_test',
})
>>> res.get_facet_values_as_list('facet_test')
[9, 6, 14, 10, 11]
>>> res.get_facets()
{'facet_test': {'Lorem': 9, 'ipsum': 6, 'amet,': 14, 'dolor': 10, 'sit': 11}} | entailment |
def get_facet_keys_as_list(self,field):
'''
:param str field: Name of facet field to retrieve keys from.
Similar to get_facet_values_as_list but returns the list of keys as a list instead.
Example::
>>> r.get_facet_keys_as_list('facet_test')
['Lorem', 'ipsum', 'amet,', 'dolor', 'sit']
'''
facets = self.get_facets()
if facets == -1:
return facets
if field in facets.keys():
return [x for x in facets[field]] | :param str field: Name of facet field to retrieve keys from.
Similar to get_facet_values_as_list but returns the list of keys as a list instead.
Example::
>>> r.get_facet_keys_as_list('facet_test')
['Lorem', 'ipsum', 'amet,', 'dolor', 'sit'] | entailment |
def json_facet(self, field=None):
'''
EXPERIMENTAL
Tries to return the json.facet output.
'''
facets = self.data['facets']
if field is None:
temp_fields = [x for x in facets.keys() if x != 'count']
if len(temp_fields) != 1:
raise ValueError("field argument not specified and it looks like there is more than one field in facets. Specify the field to get json.facet from. ")
field = temp_fields[0]
if field not in self.data['facets']:
raise ValueError("Facet Field {} Not found in response, available fields are {}".format(
field, self.data['facets'].keys() ))
return self.data['facets'][field] | EXPERIMENTAL
Tries to return the json.facet output. | entailment |
def get_jsonfacet_counts_as_dict(self, field, data=None):
'''
EXPERIMENTAL
Takes facets and returns them as a dictionary that is easier to work with,
for example, if you are getting something like this::
{'facets': {'count': 50,
'test': {'buckets': [{'count': 10,
'pr': {'buckets': [{'count': 2, 'unique': 1, 'val': 79},
{'count': 1, 'unique': 1, 'val': 9}]},
'pr_sum': 639.0,
'val': 'consectetur'},
{'count': 8,
'pr': {'buckets': [{'count': 1, 'unique': 1, 'val': 9},
{'count': 1, 'unique': 1, 'val': 31},
{'count': 1, 'unique': 1, 'val': 33}]},
'pr_sum': 420.0,
'val': 'auctor'},
{'count': 8,
'pr': {'buckets': [{'count': 2, 'unique': 1, 'val': 94},
{'count': 1, 'unique': 1, 'val': 25}]},
'pr_sum': 501.0,
'val': 'nulla'}]}}}
This should return you something like this::
{'test': {'auctor': {'count': 8,
'pr': {9: {'count': 1, 'unique': 1},
31: {'count': 1, 'unique': 1},
33: {'count': 1, 'unique': 1}},
'pr_sum': 420.0},
'consectetur': {'count': 10,
'pr': {9: {'count': 1, 'unique': 1},
79: {'count': 2, 'unique': 1}},
'pr_sum': 639.0},
'nulla': {'count': 8,
'pr': {25: {'count': 1, 'unique': 1},
94: {'count': 2, 'unique': 1}},
'pr_sum': 501.0}}}
'''
data = data if data else self.data['facets']
if field not in data:
raise ValueError("Field To start Faceting on not specified.")
out = { field: self._json_rec_dict(data[field]['buckets']) }
return out | EXPERIMENTAL
Takes facets and returns them as a dictionary that is easier to work with,
for example, if you are getting something like this::
{'facets': {'count': 50,
'test': {'buckets': [{'count': 10,
'pr': {'buckets': [{'count': 2, 'unique': 1, 'val': 79},
{'count': 1, 'unique': 1, 'val': 9}]},
'pr_sum': 639.0,
'val': 'consectetur'},
{'count': 8,
'pr': {'buckets': [{'count': 1, 'unique': 1, 'val': 9},
{'count': 1, 'unique': 1, 'val': 31},
{'count': 1, 'unique': 1, 'val': 33}]},
'pr_sum': 420.0,
'val': 'auctor'},
{'count': 8,
'pr': {'buckets': [{'count': 2, 'unique': 1, 'val': 94},
{'count': 1, 'unique': 1, 'val': 25}]},
'pr_sum': 501.0,
'val': 'nulla'}]}}}
This should return you something like this::
{'test': {'auctor': {'count': 8,
'pr': {9: {'count': 1, 'unique': 1},
31: {'count': 1, 'unique': 1},
33: {'count': 1, 'unique': 1}},
'pr_sum': 420.0},
'consectetur': {'count': 10,
'pr': {9: {'count': 1, 'unique': 1},
79: {'count': 2, 'unique': 1}},
'pr_sum': 639.0},
'nulla': {'count': 8,
'pr': {25: {'count': 1, 'unique': 1},
94: {'count': 2, 'unique': 1}},
'pr_sum': 501.0}}} | entailment |
def _gen_file_name(self):
'''
Generates a random file name based on self._output_filename_pattern for the output todo file.
'''
date = datetime.datetime.now()
dt = "{}-{}-{}-{}-{}-{}-{}".format(str(date.year),str(date.month),str(date.day),str(date.hour),str(date.minute),str(date.second),str(random.randint(0,10000)))
return self._output_filename_pattern.format(dt) | Generates a random file name based on self._output_filename_pattern for the output todo file. | entailment |
def add(self, item=None, finalize=False, callback=None):
'''
Takes a string, dictionary or list of items for adding to queue. To help troubleshoot it will output the updated buffer size, however when the content gets written it will output the file path of the new file. Generally this can be safely discarded.
:param <dict,list> item: Item to add to the queue. If dict will be converted directly to a list and then to json. List must be a list of dictionaries. If a string is submitted, it will be written out as-is immediately and not buffered.
:param bool finalize: If items are buffered internally, it will flush them to disk and return the file name.
:param callback: A callback function that will be called when the item gets written to disk. It will be passed one positional argument, the file path of the file written. Note that errors from the callback method will not be re-raised here.
'''
if item:
if type(item) is list:
check = list(set([type(d) for d in item]))
if len(check) > 1 or dict not in check:
raise ValueError("More than one data type detected in item (list). Make sure they are all dicts of data going to Solr")
elif type(item) is dict:
item = [item]
elif type(item) is str:
return self._write_file(item)
else:
raise ValueError("Not the right data submitted. Make sure you are sending a dict or list of dicts")
with self._rlock:
res = self._preprocess(item, finalize, callback)
return res | Takes a string, dictionary or list of items for adding to queue. To help troubleshoot it will output the updated buffer size, however when the content gets written it will output the file path of the new file. Generally this can be safely discarded.
:param <dict,list> item: Item to add to the queue. If dict will be converted directly to a list and then to json. List must be a list of dictionaries. If a string is submitted, it will be written out as-is immediately and not buffered.
:param bool finalize: If items are buffered internally, it will flush them to disk and return the file name.
:param callback: A callback function that will be called when the item gets written to disk. It will be passed one positional argument, the file path of the file written. Note that errors from the callback method will not be re-raised here. | entailment |
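A hedged usage sketch of the add() behaviours described above; the IndexQ constructor arguments (queue path and name) are assumptions rather than values taken from this dataset.

# Hypothetical sketch; constructor arguments are placeholders.
from SolrClient import IndexQ

index = IndexQ('/data/indexq', 'SolrClient_unittest')
index.add({'id': 'doc1', 'title': 'first document'})      # single dict, buffered
index.add([{'id': 'doc2'}, {'id': 'doc3'}])               # list of dicts, buffered
todo_file = index.add(finalize=True)                      # flush buffer to disk, returns file path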
def _lock(self):
'''
Locks, or returns False if already locked
'''
if not self._is_locked():
with open(self._lck,'w') as fh:
if self._devel: self.logger.debug("Locking")
fh.write(str(os.getpid()))
return True
else:
return False | Locks, or returns False if already locked | entailment |
def _is_locked(self):
'''
Checks to see if we are already pulling items from the queue
'''
if os.path.isfile(self._lck):
try:
import psutil
except ImportError:
return True #Lock file exists and no psutil
#If psutil is imported
with open(self._lck) as f:
pid = f.read()
return True if psutil.pid_exists(int(pid)) else False
else:
return False | Checks to see if we are already pulling items from the queue | entailment |
def _unlock(self):
'''
Unlocks the index
'''
if self._devel: self.logger.debug("Unlocking Index")
if self._is_locked():
os.remove(self._lck)
return True
else:
return True | Unlocks the index | entailment |
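The _lock / _is_locked / _unlock trio above implements a simple PID lock file. A self-contained sketch of the same pattern follows (it omits the optional psutil liveness check for brevity); the lock path is a placeholder.

import os

LOCK_PATH = '/tmp/indexq_demo.lock'  # placeholder lock-file location

def acquire_lock():
    # Refuse to lock if a lock file already exists, otherwise record our PID.
    if os.path.isfile(LOCK_PATH):
        return False
    with open(LOCK_PATH, 'w') as fh:
        fh.write(str(os.getpid()))
    return True

def release_lock():
    if os.path.isfile(LOCK_PATH):
        os.remove(LOCK_PATH)

if acquire_lock():
    try:
        pass  # process queue items here
    finally:
        release_lock()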
def get_all_as_list(self, dir='_todo_dir'):
'''
Returns a list of the full paths to all items currently in the todo directory. The items will be listed in ascending order based on filesystem time.
This will re-scan the directory on each execution.
Do not use this to process items; this method should only be used for troubleshooting or something ancillary. To process items, use the get_todo_items() iterator.
'''
dir = getattr(self,dir)
list = [x for x in os.listdir(dir) if x.endswith('.json') or x.endswith('.json.gz')]
full = [os.path.join(dir,x) for x in list]
full.sort(key=lambda x: os.path.getmtime(x))
return full | Returns a list of the full paths to all items currently in the todo directory. The items will be listed in ascending order based on filesystem time.
This will re-scan the directory on each execution.
Do not use this to process items; this method should only be used for troubleshooting or something ancillary. To process items, use the get_todo_items() iterator. | entailment |
def get_todo_items(self, **kwargs):
'''
Returns an iterator that will provide each item in the todo queue. Note that to complete each item you have to run the complete() method with the output of this iterator.
That will move the item to the done directory and prevent it from being retrieved in the future.
'''
def inner(self):
for item in self.get_all_as_list():
yield item
self._unlock()
if not self._is_locked():
if self._lock():
return inner(self)
raise RuntimeError("RuntimeError: Index Already Locked") | Returns an iterator that will provide each item in the todo queue. Note that to complete each item you have to run the complete() method with the output of this iterator.
That will move the item to the done directory and prevent it from being retrieved in the future. | entailment |
def complete(self, filepath):
'''
Marks the item as complete by moving it to the done directory and optionally gzipping it.
'''
if not os.path.exists(filepath):
raise FileNotFoundError("Can't Complete {}, it doesn't exist".format(filepath))
if self._devel: self.logger.debug("Completing - {} ".format(filepath))
if self.rotate_complete:
try:
complete_dir = str(self.rotate_complete())
except Exception as e:
self.logger.error("rotate_complete function failed with the following exception.")
self.logger.exception(e)
raise
newdir = os.path.join(self._done_dir, complete_dir)
newpath = os.path.join(newdir, os.path.split(filepath)[-1] )
if not os.path.isdir(newdir):
self.logger.debug("Making new directory: {}".format(newdir))
os.makedirs(newdir)
else:
newpath = os.path.join(self._done_dir, os.path.split(filepath)[-1] )
try:
if self._compress_complete:
if not filepath.endswith('.gz'):
# Compressing complete, but existing file not compressed
# Compress and move it and kick out
newpath += '.gz'
self._compress_and_move(filepath, newpath)
return newpath
# else the file is already compressed and can just be moved
#if not compressing completed file, just move it
shutil.move(filepath, newpath)
self.logger.info(" Completed - {}".format(filepath))
except Exception as e:
self.logger.error("Couldn't Complete {}".format(filepath))
self.logger.exception(e)
raise
return newpath | Marks the item as complete by moving it to the done directory and optionally gzipping it. | entailment |
def index(self, solr, collection, threads=1, send_method='stream_file', **kwargs):
'''
Will index the queue into a specified Solr instance and collection. Specify multiple threads to make this faster; however, keep in mind that if you specify multiple threads the items may not be in order.
Example::
solr = SolrClient('http://localhost:8983/solr/')
for doc in self.docs:
index.add(doc, finalize=True)
index.index(solr,'SolrClient_unittest')
:param object solr: SolrClient object.
:param string collection: The name of the collection to index document into.
:param int threads: Number of simultaneous threads to spin up for indexing.
:param string send_method: SolrClient method to execute for indexing. Default is stream_file
'''
try:
method = getattr(solr, send_method)
except AttributeError:
raise AttributeError("Couldn't find the send_method. Specify either stream_file or local_index")
self.logger.info("Indexing {} into {} using {}".format(self._queue_name,
collection,
send_method))
if threads > 1:
if hasattr(collection, '__call__'):
self.logger.debug("Overwriting send_method to index_json")
method = getattr(solr, 'index_json')
method = partial(self._wrap_dynamic, method, collection)
else:
method = partial(self._wrap, method, collection)
with ThreadPool(threads) as p:
p.map(method, self.get_todo_items())
else:
for todo_file in self.get_todo_items():
try:
result = method(collection, todo_file)
if result:
self.complete(todo_file)
except SolrError:
self.logger.error("Error Indexing Item: {}".format(todo_file))
self._unlock()
raise | Will index the queue into a specified Solr instance and collection. Specify multiple threads to make this faster; however, keep in mind that if you specify multiple threads the items may not be in order.
Example::
solr = SolrClient('http://localhost:8983/solr/')
for doc in self.docs:
index.add(doc, finalize=True)
index.index(solr,'SolrClient_unittest')
:param object solr: SolrClient object.
:param string collection: The name of the collection to index document into.
:param int threads: Number of simultaneous threads to spin up for indexing.
:param string send_method: SolrClient method to execute for indexing. Default is stream_file | entailment |
def get_all_json_from_indexq(self):
'''
Gets all data from the todo files in indexq and returns one huge list of all data.
'''
files = self.get_all_as_list()
out = []
for efile in files:
out.extend(self._open_file(efile))
return out | Gets all data from the todo files in indexq and returns one huge list of all data. | entailment |
def get_multi_q(self, sentinel='STOP'):
'''
This helps IndexQ operate in a multiprocessing environment without each process having to have its own IndexQ. It is also a handy way to deal with thread / process safety.
This method will create and return a JoinableQueue object. Additionally, it will kick off a back end process that will monitor the queue, de-queue items and add them to this indexq.
The returned JoinableQueue object can be safely passed to multiple worker processes to populate it with data.
To indicate that you are done writing the data to the queue, pass in the sentinel value ('STOP' by default).
Make sure you call join_indexer() after you are done to close out the queue and join the worker.
'''
self.in_q = JoinableQueue()
self.indexer_process = Process(target=self._indexer_process, args=(self.in_q, sentinel))
self.indexer_process.daemon = False
self.indexer_process.start()
return self.in_q | This helps IndexQ operate in a multiprocessing environment without each process having to have its own IndexQ. It is also a handy way to deal with thread / process safety.
This method will create and return a JoinableQueue object. Additionally, it will kick off a back end process that will monitor the queue, de-queue items and add them to this indexq.
The returned JoinableQueue object can be safely passed to multiple worker processes to populate it with data.
To indicate that you are done writing the data to the queue, pass in the sentinel value ('STOP' by default).
Make sure you call join_indexer() after you are done to close out the queue and join the worker. | entailment |
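A hedged sketch of the multiprocessing workflow this method enables: several producer processes fill the returned JoinableQueue, the sentinel marks the end of input, and join_indexer() (mentioned above) waits for the background indexer. The IndexQ constructor arguments are assumptions.

# Hypothetical sketch; paths and names are placeholders.
from multiprocessing import Process
from SolrClient import IndexQ

def producer(queue, start):
    for i in range(start, start + 100):
        queue.put({'id': 'doc{}'.format(i)})

if __name__ == '__main__':
    index = IndexQ('/data/indexq', 'SolrClient_unittest')
    in_q = index.get_multi_q()                 # backed by a monitoring process
    workers = [Process(target=producer, args=(in_q, n * 100)) for n in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    in_q.put('STOP')                           # default sentinel: no more data
    index.join_indexer()                       # wait for the back-end indexer to finish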
def _retry(function):
"""
Internal mechanism to try to send data to multiple Solr Hosts if
the query fails on the first one.
"""
def inner(self, **kwargs):
last_exception = None
#for host in self.router.get_hosts(**kwargs):
for host in self.host:
try:
return function(self, host, **kwargs)
except SolrError as e:
self.logger.exception(e)
raise
except ConnectionError as e:
self.logger.exception("Tried connecting to Solr, but couldn't because of the following exception.")
if '401' in e.__str__():
raise
last_exception = e
# raise the last exception after contacting all hosts instead of returning None
if last_exception is not None:
raise last_exception
return inner | Internal mechanism to try to send data to multiple Solr Hosts if
the query fails on the first one. | entailment |
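Detached from the SolrClient internals, the decorator above follows a common retry-over-hosts pattern; a minimal standalone sketch, where the host list attribute and the exception caught are placeholders:

import functools

def retry_over_hosts(func):
    """Try the wrapped call against each configured host; re-raise the last error if all fail."""
    @functools.wraps(func)
    def inner(self, **kwargs):
        last_exception = None
        for host in self.hosts:
            try:
                return func(self, host, **kwargs)
            except ConnectionError as exc:   # placeholder for transport-level failures
                last_exception = exc
        if last_exception is not None:
            raise last_exception
    return inner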
def commit(self, collection, openSearcher=False, softCommit=False,
waitSearcher=True, commit=True, **kwargs):
"""
:param str collection: The name of the collection for the request
:param bool openSearcher: If new searcher is to be opened
:param bool softCommit: SoftCommit
:param bool waitSearcher: Blocks until the new searcher is opened
:param bool commit: Commit
Sends a commit to a Solr collection.
"""
comm = {
'openSearcher': str(openSearcher).lower(),
'softCommit': str(softCommit).lower(),
'waitSearcher': str(waitSearcher).lower(),
'commit': str(commit).lower()
}
self.logger.debug("Sending Commit to Collection {}".format(collection))
try:
resp, con_inf = self.transport.send_request(method='GET', endpoint='update', collection=collection,
params=comm, **kwargs)
except Exception as e:
raise
self.logger.debug("Commit Successful, QTime is {}".format(resp['responseHeader']['QTime'])) | :param str collection: The name of the collection for the request
:param bool openSearcher: If new searcher is to be opened
:param bool softCommit: SoftCommit
:param bool waitSearcher: Blocks until the new searcher is opened
:param bool commit: Commit
Sends a commit to a Solr collection. | entailment |
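A hedged usage sketch, reusing the client construction shown in later docstrings in this section; the collection name is a placeholder.

from SolrClient import SolrClient

solr = SolrClient('http://localhost:8983/solr/')
solr.commit('SolrClient_unittest')                     # hard commit
solr.commit('SolrClient_unittest', softCommit=True)    # soft commit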
def query_raw(self, collection, query, request_handler='select', **kwargs):
"""
:param str collection: The name of the collection for the request
:param str request_handler: Request handler, default is 'select'
:param dict query: Python dictionary of Solr query parameters.
Sends a query to Solr, returns a dict. `query` should be a dictionary of solr request handler arguments.
Example::
res = solr.query_raw('SolrClient_unittest',{
'q':'*:*',
'facet':True,
'facet.field':'facet_test',
})
"""
headers = {'content-type': 'application/x-www-form-urlencoded'}
data = query
resp, con_inf = self.transport.send_request(method='POST',
endpoint=request_handler,
collection=collection,
data=data,
headers=headers,
**kwargs)
return resp | :param str collection: The name of the collection for the request
:param str request_handler: Request handler, default is 'select'
:param dict query: Python dictionary of Solr query parameters.
Sends a query to Solr, returns a dict. `query` should be a dictionary of solr request handler arguments.
Example::
res = solr.query_raw('SolrClient_unittest',{
'q':'*:*',
'facet':True,
'facet.field':'facet_test',
}) | entailment |
def query(self, collection, query, request_handler='select', **kwargs):
"""
:param str collection: The name of the collection for the request
:param str request_handler: Request handler, default is 'select'
:param dict query: Python dictionary of Solr query parameters.
Sends a query to Solr, returns a SolrResults Object. `query` should be a dictionary of solr request handler arguments.
Example::
res = solr.query('SolrClient_unittest',{
'q':'*:*',
'facet':True,
'facet.field':'facet_test',
})
"""
for field in ['facet.pivot']:
if field in query.keys():
if type(query[field]) is str:
query[field] = query[field].replace(' ', '')
elif type(query[field]) is list:
query[field] = [s.replace(' ', '') for s in query[field]]
method = 'POST'
headers = {'content-type': 'application/x-www-form-urlencoded'}
params = query
data = {}
resp, con_inf = self.transport.send_request(method=method,
endpoint=request_handler,
collection=collection,
params=params,
data=data,
headers=headers,
**kwargs)
if resp:
resp = SolrResponse(resp)
resp.url = con_inf['url']
return resp | :param str collection: The name of the collection for the request
:param str request_handler: Request handler, default is 'select'
:param dict query: Python dictionary of Solr query parameters.
Sends a query to Solr, returns a SolrResults Object. `query` should be a dictionary of solr request handler arguments.
Example::
res = solr.query('SolrClient_unittest',{
'q':'*:*',
'facet':True,
'facet.field':'facet_test',
}) | entailment |
def index(self, collection, docs, params=None, min_rf=None, **kwargs):
"""
:param str collection: The name of the collection for the request.
:param docs list docs: List of dicts. ex: [{"title": "testing solr indexing", "id": "test1"}]
:param min_rf int min_rf: Required number of replicas to write to'
Sends supplied list of dicts to solr for indexing. ::
>>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}]
>>> solr.index('SolrClient_unittest', docs)
"""
data = json.dumps(docs)
return self.index_json(collection, data, params, min_rf=min_rf, **kwargs) | :param str collection: The name of the collection for the request.
:param docs list docs: List of dicts. ex: [{"title": "testing solr indexing", "id": "test1"}]
:param min_rf int min_rf: Required number of replicas to write to'
Sends supplied list of dicts to solr for indexing. ::
>>> docs = [{'id':'changeme','field1':'value1'}, {'id':'changeme1','field2':'value2'}]
>>> solr.index('SolrClient_unittest', docs) | entailment |
def index_json(self, collection, data, params=None, min_rf=None, **kwargs):
"""
:param str collection: The name of the collection for the request.
:param data str data: Valid Solr JSON as a string. ex: '[{"title": "testing solr indexing", "id": "test1"}]'
:param min_rf int min_rf: Required number of replicas to write to'
Sends supplied json to solr for indexing, supplied JSON must be a list of dictionaries. ::
>>> docs = [{'id':'changeme','field1':'value1'},
{'id':'changeme1','field2':'value2'}]
>>> solr.index_json('SolrClient_unittest',json.dumps(docs))
"""
if params is None:
params = {}
resp, con_inf = self.transport.send_request(method='POST',
endpoint='update',
collection=collection,
data=data,
params=params,
min_rf=min_rf,
**kwargs)
if min_rf is not None:
rf = resp['responseHeader']['rf']
if rf < min_rf:
raise MinRfError("couldn't satisfy rf:%s min_rf:%s" % (rf, min_rf), rf=rf, min_rf=min_rf)
if resp['responseHeader']['status'] == 0:
return True
return False | :param str collection: The name of the collection for the request.
:param data str data: Valid Solr JSON as a string. ex: '[{"title": "testing solr indexing", "id": "test1"}]'
:param min_rf int min_rf: Required number of replicas to write to'
Sends supplied json to solr for indexing, supplied JSON must be a list of dictionaries. ::
>>> docs = [{'id':'changeme','field1':'value1'},
{'id':'changeme1','field2':'value2'}]
>>> solr.index_json('SolrClient_unittest',json.dumps(docs)) | entailment |
def get(self, collection, doc_id, **kwargs):
"""
:param str collection: The name of the collection for the request
:param str doc_id: ID of the document to be retrieved.
Retrieve document from Solr based on the ID. ::
>>> solr.get('SolrClient_unittest','changeme')
"""
resp, con_inf = self.transport.send_request(method='GET',
endpoint='get',
collection=collection,
params={'id': doc_id},
**kwargs)
if 'doc' in resp and resp['doc']:
return resp['doc']
raise NotFoundError | :param str collection: The name of the collection for the request
:param str doc_id: ID of the document to be retrieved.
Retrieve document from Solr based on the ID. ::
>>> solr.get('SolrClient_unittest','changeme') | entailment |
def mget(self, collection, doc_ids, **kwargs):
"""
:param str collection: The name of the collection for the request
:param tuple doc_ids: ID of the document to be retrieved.
Retrieve documents from Solr based on the ID. ::
>>> solr.get('SolrClient_unittest','changeme')
"""
resp, con_inf = self.transport.send_request(method='GET',
endpoint='get',
collection=collection,
params={'ids': doc_ids},
**kwargs)
if 'docs' in resp['response']:
return resp['response']['docs']
raise NotFoundError | :param str collection: The name of the collection for the request
:param tuple doc_ids: ID of the document to be retrieved.
Retrieve documents from Solr based on the ID. ::
>>> solr.get('SolrClient_unittest','changeme') | entailment |
def delete_doc_by_id(self, collection, doc_id, **kwargs):
"""
:param str collection: The name of the collection for the request
:param str doc_id: ID of the document to be deleted. Can specify '*' to delete everything.
Deletes items from Solr based on the ID. ::
>>> solr.delete_doc_by_id('SolrClient_unittest','changeme')
"""
if ' ' in doc_id:
doc_id = '"{}"'.format(doc_id)
temp = {"delete": {"query": 'id:{}'.format(doc_id)}}
resp, con_inf = self.transport.send_request(method='POST',
endpoint='update',
collection=collection,
data=json.dumps(temp),
**kwargs)
return resp | :param str collection: The name of the collection for the request
:param str doc_id: ID of the document to be deleted. Can specify '*' to delete everything.
Deletes items from Solr based on the ID. ::
>>> solr.delete_doc_by_id('SolrClient_unittest','changeme') | entailment |
def delete_doc_by_query(self, collection, query, **kwargs):
"""
:param str collection: The name of the collection for the request
:param str query: Query selecting documents to be deleted.
Deletes items from Solr based on a given query. ::
>>> solr.delete_doc_by_query('SolrClient_unittest','*:*')
"""
temp = {"delete": {"query": query}}
resp, con_inf = self.transport.send_request(method='POST',
endpoint='update',
collection=collection,
data=json.dumps(temp),
**kwargs)
return resp | :param str collection: The name of the collection for the request
:param str query: Query selecting documents to be deleted.
Deletes items from Solr based on a given query. ::
>>> solr.delete_doc_by_query('SolrClient_unittest','*:*') | entailment |
def local_index(self, collection, filename, **kwargs):
"""
:param str collection: The name of the collection for the request
:param str filename: String file path of the file to index.
Will index the specified file into Solr. The `file` must be local to the server; this is faster than other indexing options.
If the files are already on the servers, I suggest you use this.
For example::
>>> solr.local_index('SolrClient_unittest',
'/local/to/server/temp_file.json')
"""
filename = os.path.abspath(filename)
self.logger.info("Indexing {} into Solr Collection {}".format(filename, collection))
data = {'stream.file': filename,
'stream.contentType': 'text/json'}
resp, con_inf = self.transport.send_request(method='GET', endpoint='update/json', collection=collection,
params=data, **kwargs)
if resp['responseHeader']['status'] == 0:
return True
else:
return False | :param str collection: The name of the collection for the request
:param str filename: String file path of the file to index.
Will index the specified file into Solr. The `file` must be local to the server; this is faster than other indexing options.
If the files are already on the servers, I suggest you use this.
For example::
>>> solr.local_index('SolrClient_unittest',
'/local/to/server/temp_file.json') | entailment |
def paging_query(self, collection, query, rows=1000, start=0, max_start=200000):
"""
:param str collection: The name of the collection for the request.
:param dict query: Dictionary of solr args.
:param int rows: Number of rows to return in each batch. Default is 1000.
:param int start: What position to start with. Default is 0.
:param int max_start: Once the start will reach this number, the function will stop. Default is 200000.
Will page through the result set in increments of `row` WITHOUT using cursorMark until it has all items \
or until `max_start` is reached. Use max_start to protect your Solr instance if you are not sure how many items you \
will be getting. The default is 200,000, which is still a bit high.
Returns an iterator of SolrResponse objects. For Example::
>>> for res in solr.paging_query('SolrClient_unittest',{'q':'*:*'}):
print(res)
"""
query = dict(query)
while True:
query['start'] = start
query['rows'] = rows
res = self.query(collection, query)
if res.get_results_count():
yield res
start += rows
if res.get_results_count() < rows or start > max_start:
break | :param str collection: The name of the collection for the request.
:param dict query: Dictionary of solr args.
:param int rows: Number of rows to return in each batch. Default is 1000.
:param int start: What position to start with. Default is 0.
:param int max_start: Once the start will reach this number, the function will stop. Default is 200000.
Will page through the result set in increments of `row` WITHOUT using cursorMark until it has all items \
or until `max_start` is reached. Use max_start to protect your Solr instance if you are not sure how many items you \
will be getting. The default is 200,000, which is still a bit high.
Returns an iterator of SolrResponse objects. For Example::
>>> for res in solr.paging_query('SolrClient_unittest',{'q':'*:*'}):
print(res) | entailment |
def cursor_query(self, collection, query):
"""
:param str collection: The name of the collection for the request.
:param dict query: Dictionary of solr args.
Will page through the result set in increments using cursorMark until it has all items. Sort is required for cursorMark \
queries, if you don't specify it, the default is 'id desc'.
Returns an iterator of SolrResponse objects. For Example::
>>> for res in solr.cursor_query('SolrClient_unittest',{'q':'*:*'}):
print(res)
"""
cursor = '*'
if 'sort' not in query:
query['sort'] = 'id desc'
while True:
query['cursorMark'] = cursor
# Get data with starting cursorMark
results = self.query(collection, query)
if results.get_results_count():
cursor = results.get_cursor()
yield results
else:
self.logger.debug("Got zero Results with cursor: {}".format(cursor))
break | :param str collection: The name of the collection for the request.
:param dict query: Dictionary of solr args.
Will page through the result set in increments using cursorMark until it has all items. Sort is required for cursorMark \
queries, if you don't specify it, the default is 'id desc'.
Returns an iterator of SolrResponse objects. For Example::
>>> for res in solr.cursor_query('SolrClient_unittest',{'q':'*:*'}):
print(res) | entailment |
def get_shard_map(self, force_refresh=False):
"""
You can change this function to get the shard map from somewhere else, or in some other way, in conjunction with
save_shard_map().
"""
now = datetime.utcnow()
if force_refresh is True or \
self.shard_map is None or \
(now - self.last_refresh).total_seconds() > self.refresh_ttl:
self.last_refresh = now
self.refresh_shard_map()
return self.shard_map | You can change this function to get the shard map from somewhere else, or in some other way, in conjunction with
save_shard_map(). | entailment |
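get_shard_map() is essentially a time-to-live cache around refresh_shard_map(); the same pattern as a standalone sketch, with a placeholder loader function:

from datetime import datetime

class TTLCache:
    """Minimal sketch of the refresh-on-expiry pattern used by get_shard_map()."""
    def __init__(self, loader, ttl_seconds=60):
        self.loader = loader
        self.ttl = ttl_seconds
        self.value = None
        self.last_refresh = None

    def get(self, force_refresh=False):
        now = datetime.utcnow()
        expired = (self.last_refresh is None or
                   (now - self.last_refresh).total_seconds() > self.ttl)
        if force_refresh or self.value is None or expired:
            self.last_refresh = now
            self.value = self.loader()
        return self.value

cache = TTLCache(lambda: {'shard1': 'host1:8983'}, ttl_seconds=300)
print(cache.get())  # {'shard1': 'host1:8983'}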
def check_zk(self):
'''
Will attempt to telnet to each zookeeper that is used by SolrClient and issue 'mntr' command. Response is parsed to check to see if the
zookeeper node is a leader or a follower and returned as a dict.
If the telnet connection fails or the proper response is not parsed, the zk node will be listed as 'down' in the dict. Desired values are
either follower or leader.
'''
import telnetlib
temp = self.zk_hosts.split('/')
zks = temp[0].split(',')
status = {}
for zk in zks:
self.logger.debug("Checking {}".format(zk))
host, port = zk.split(':')
try:
t = telnetlib.Telnet(host, port=int(port))
t.write('mntr'.encode('ascii'))
r = t.read_all()
for out in r.decode('utf-8').split('\n'):
if out:
param, val = out.split('\t')
if param == 'zk_server_state':
status[zk] = val
except Exception as e:
self.logger.error("Unable to reach ZK: {}".format(zk))
self.logger.exception(e)
status[zk] = 'down'
#assert len(zks) == len(status)
return status | Will attempt to telnet to each zookeeper that is used by SolrClient and issue 'mntr' command. Response is parsed to check to see if the
zookeeper node is a leader or a follower and returned as a dict.
If the telnet connection fails or the proper response is not parsed, the zk node will be listed as 'down' in the dict. Desired values are
either follower or leader. | entailment |
def copy_config(self, original, new):
'''
Copies collection configs into a new folder. Can be used to create new collections based on existing configs.
Basically, copies all nodes under /configs/original to /configs/new.
:param original str: ZK name of original config
:param new str: New name of the ZK config.
'''
if not self.kz.exists('/configs/{}'.format(original)):
raise ZookeeperError("Collection doesn't exist in Zookeeper. Current Collections are: {}".format(self.kz.get_children('/configs')))
base = '/configs/{}'.format(original)
nbase = '/configs/{}'.format(new)
self._copy_dir(base, nbase) | Copies collection configs into a new folder. Can be used to create new collections based on existing configs.
Basically, copies all nodes under /configs/original to /configs/new.
:param original str: ZK name of original config
:param new str: New name of the ZK config. | entailment |
def download_collection_configs(self, collection, fs_path):
'''
Downloads ZK Directory to the FileSystem.
:param collection str: Name of the collection (zk config name)
:param fs_path str: Destination filesystem path.
'''
if not self.kz.exists('/configs/{}'.format(collection)):
raise ZookeeperError("Collection doesn't exist in Zookeeper. Current Collections are: {} ".format(self.kz.get_children('/configs')))
self._download_dir('/configs/{}'.format(collection), fs_path + os.sep + collection) | Downloads ZK Directory to the FileSystem.
:param collection str: Name of the collection (zk config name)
:param fs_path str: Destination filesystem path. | entailment |
def upload_collection_configs(self, collection, fs_path):
'''
Uploads collection configurations from a specified directory to zookeeper.
'''
coll_path = fs_path
if not os.path.isdir(coll_path):
raise ValueError("{} Doesn't Exist".format(coll_path))
self._upload_dir(coll_path, '/configs/{}'.format(collection)) | Uploads collection configurations from a specified directory to zookeeper. | entailment |
def create_field(self, collection, field_dict):
'''
Creates a new field in managed schema, will raise ValueError if the field already exists. field_dict should look like this::
{
"name":"sell-by",
"type":"tdate",
"stored":True
}
Reference: https://cwiki.apache.org/confluence/display/solr/Defining+Fields
'''
if self.does_field_exist(collection,field_dict['name']):
raise ValueError("Field {} Already Exists in Solr Collection {}".format(field_dict['name'],collection))
temp = {"add-field":dict(field_dict)}
res, con_info =self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp))
return res | Creates a new field in managed schema, will raise ValueError if the field already exists. field_dict should look like this::
{
"name":"sell-by",
"type":"tdate",
"stored":True
}
Reference: https://cwiki.apache.org/confluence/display/solr/Defining+Fields | entailment |
def delete_field(self,collection,field_name):
'''
Deletes a field from the Solr Collection. Will raise ValueError if the field doesn't exist.
:param string collection: Name of the collection for the action
:param string field_name: String name of the field.
'''
if not self.does_field_exist(collection,field_name):
raise ValueError("Field {} Doesn't Exists in Solr Collection {}".format(field_name,collection))
else:
temp = {"delete-field" : { "name":field_name }}
res, con_info = self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp))
return res | Deletes a field from the Solr Collection. Will raise ValueError if the field doesn't exist.
:param string collection: Name of the collection for the action
:param string field_name: String name of the field. | entailment |
def does_field_exist(self,collection,field_name):
'''
Checks if the field exists; will return a boolean True (exists) or False (doesn't exist).
:param string collection: Name of the collection for the action
:param string field_name: String name of the field.
'''
schema = self.get_schema_fields(collection)
logging.info(schema)
return True if field_name in [field['name'] for field in schema['fields']] else False | Checks if the field exists; will return a boolean True (exists) or False (doesn't exist).
:param string collection: Name of the collection for the action
:param string field_name: String name of the field. | entailment |
def create_copy_field(self,collection,copy_dict):
'''
Creates a copy field.
copy_dict should look like ::
{'source':'source_field_name','dest':'destination_field_name'}
:param string collection: Name of the collection for the action
:param dict copy_field: Dictionary of field info
Reference: https://cwiki.apache.org/confluence/display/solr/Schema+API#SchemaAPI-AddaNewCopyFieldRule
'''
temp = {"add-copy-field":dict(copy_dict)}
res, con_info = self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp))
return res | Creates a copy field.
copy_dict should look like ::
{'source':'source_field_name','dest':'destination_field_name'}
:param string collection: Name of the collection for the action
:param dict copy_field: Dictionary of field info
Reference: https://cwiki.apache.org/confluence/display/solr/Schema+API#SchemaAPI-AddaNewCopyFieldRule | entailment |
def delete_copy_field(self, collection, copy_dict):
'''
Deletes a copy field.
copy_dict should look like ::
{'source':'source_field_name','dest':'destination_field_name'}
:param string collection: Name of the collection for the action
:param dict copy_field: Dictionary of field info
'''
#Fix this later to check for field before sending a delete
if self.devel:
self.logger.debug("Deleting {}".format(str(copy_dict)))
copyfields = self.get_schema_copyfields(collection)
if copy_dict not in copyfields:
self.logger.info("Fieldset not in Solr Copy Fields: {}".format(str(copy_dict)))
temp = {"delete-copy-field": dict(copy_dict)}
res, con_info = self.solr.transport.send_request(method='POST',endpoint=self.schema_endpoint,collection=collection, data=json.dumps(temp))
return res | Deletes a copy field.
copy_dict should look like ::
{'source':'source_field_name','dest':'destination_field_name'}
:param string collection: Name of the collection for the action
:param dict copy_field: Dictionary of field info | entailment |
def shuffle_hosts(self):
"""
Shuffle hosts so we don't always query the first one.
Example: using in a webapp with X processes in Y servers, the hosts contacted will be more random.
The user can also call this function to reshuffle every 'x' seconds or before every request.
:return:
"""
if len(self.hosts) > 1:
random.shuffle(self.hosts)
return self.hosts | Shuffle hosts so we don't always query the first one.
Example: using in a webapp with X processes in Y servers, the hosts contacted will be more random.
The user can also call this function to reshuffle every 'x' seconds or before every request.
:return: | entailment |
def start_virtual_display(self, width=1440, height=900,
colordepth=24, **kwargs):
"""Starts virtual display which will be
destroyed after test execution will be end
*Arguments:*
- width: a width to be set in pixels
- height: a height to be set in pixels
        - colordepth: a color depth to be used
- kwargs: extra parameters
*Example:*
| Start Virtual Display |
| Start Virtual Display | 1920 | 1080 |
| Start Virtual Display | ${1920} | ${1080} | ${16} |
"""
if self._display is None:
logger.info("Using virtual display: '{0}x{1}x{2}'".format(
width, height, colordepth))
self._display = Xvfb(int(width), int(height),
int(colordepth), **kwargs)
self._display.start()
            atexit.register(self._display.stop) | Starts a virtual display which will be
destroyed after test execution ends
*Arguments:*
- width: a width to be set in pixels
- height: a height to be set in pixels
- colordepth: a color depth to be used
- kwargs: extra parameters
*Example:*
| Start Virtual Display |
| Start Virtual Display | 1920 | 1080 |
| Start Virtual Display | ${1920} | ${1080} | ${16} | | entailment |
def api(self, action, args=None):
"""
Sends a request to Solr Collections API.
Documentation is here: https://cwiki.apache.org/confluence/display/solr/Collections+API
        :param string action: Name of the Collections API action to perform
:param dict args: Dictionary of specific parameters for action
"""
if args is None:
args = {}
args['action'] = action.upper()
try:
res, con_info = self.solr.transport.send_request(endpoint='admin/collections', params=args)
except Exception as e:
self.logger.error("Error querying SolrCloud Collections API. ")
self.logger.exception(e)
raise e
if 'responseHeader' in res and res['responseHeader']['status'] == 0:
return res, con_info
else:
            raise SolrError("Error Issuing Collections API Call for: {} {}".format(con_info, res)) | Sends a request to Solr Collections API.
Documentation is here: https://cwiki.apache.org/confluence/display/solr/Collections+API
:param string action: Name of the Collections API action to perform
:param dict args: Dictionary of specific parameters for action | entailment |
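A hedged example of calling this wrapper; LIST is a standard Collections API action, and collections is assumed to be an instance of the class above.

# Hypothetical sketch: list the collections known to the cluster.
res, con_info = collections.api('LIST')
print(res.get('collections', []))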
def clusterstatus(self):
"""
Returns a slightly slimmed down version of the clusterstatus api command. It also gets count of documents in each shard on each replica and returns
it as doc_count key for each replica.
"""
res = self.cluster_status_raw()
cluster = res['cluster']['collections']
out = {}
try:
for collection in cluster:
out[collection] = {}
for shard in cluster[collection]['shards']:
out[collection][shard] = {}
for replica in cluster[collection]['shards'][shard]['replicas']:
out[collection][shard][replica] = cluster[collection]['shards'][shard]['replicas'][replica]
if out[collection][shard][replica]['state'] != 'active':
out[collection][shard][replica]['doc_count'] = False
else:
out[collection][shard][replica]['doc_count'] = self._get_collection_counts(
out[collection][shard][replica])
except Exception as e:
self.logger.error("Couldn't parse response from clusterstatus API call")
self.logger.exception(e)
return out | Returns a slightly slimmed down version of the clusterstatus api command. It also gets count of documents in each shard on each replica and returns
it as doc_count key for each replica. | entailment |
def create(self, name, numShards, params=None):
"""
Create a new collection.
"""
if params is None:
params = {}
params.update(
name=name,
numShards=numShards
)
return self.api('CREATE', params) | Create a new collection. | entailment |
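A hedged example of creating a collection through this helper; the extra keys are standard Collections API parameters, and collections is assumed to be an instance of the class above.

# Hypothetical sketch: two shards, two replicas, using an existing configset.
res, con_info = collections.create('products', numShards=2,
                                   params={'replicationFactor': 2,
                                           'collection.configName': 'products_conf'})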
def _get_collection_counts(self, core_data):
"""
Queries each core to get individual counts for each core for each shard.
"""
if core_data['base_url'] not in self.solr_clients:
from SolrClient import SolrClient
            self.solr_clients[core_data['base_url']] = SolrClient(core_data['base_url'], log=self.logger)
        try:
            return self.solr_clients[core_data['base_url']].query(core_data['core'],
{'q': '*:*',
'rows': 0,
'distrib': 'false',
}).get_num_found()
except Exception as e:
self.logger.error("Couldn't get Counts for {}/{}".format(core_data['base_url'], core_data['core']))
self.logger.exception(e)
return False | Queries each core to get individual counts for each core for each shard. | entailment |
def check_status(self, ignore=(), status=None):
"""
Checks status of each collection and shard to make sure that:
a) Cluster state is active
b) Number of docs matches across replicas for a given shard.
Returns a dict of results for custom alerting.
"""
self.SHARD_CHECKS = [
{'check_msg': 'Bad Core Count Check', 'f': self._check_shard_count},
{'check_msg': 'Bad Shard Cluster Status', 'f': self._check_shard_status}
]
if status is None:
status = self.clusterstatus()
out = {}
for collection in status:
out[collection] = {}
out[collection]['coll_status'] = True # Means it's fine
out[collection]['coll_messages'] = []
for shard in status[collection]:
self.logger.debug("Checking {}/{}".format(collection, shard))
s_dict = status[collection][shard]
for check in self.SHARD_CHECKS:
if check['check_msg'] in ignore:
continue
res = check['f'](s_dict)
if not res:
out[collection]['coll_status'] = False
if check['check_msg'] not in out[collection]['coll_messages']:
out[collection]['coll_messages'].append(check['check_msg'])
self.logger.debug(s_dict)
return out | Checks status of each collection and shard to make sure that:
a) Cluster state is active
b) Number of docs matches across replicas for a given shard.
Returns a dict of results for custom alerting. | entailment |
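A small alerting sketch over the structure check_status returns; collections is assumed to be an instance of the class above.

# Hypothetical sketch: print a line for every unhealthy collection.
status = collections.check_status()
for coll, result in status.items():
    if not result['coll_status']:
        print("{} unhealthy: {}".format(coll, ", ".join(result['coll_messages'])))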
def reindex(self, fq= [], **kwargs):
'''
Starts Reindexing Process. All parameter arguments will be passed down to the getter function.
:param string fq: FilterQuery to pass to source Solr to retrieve items. This can be used to limit the results.
'''
for items in self._getter(fq=fq, **kwargs):
self._putter(items)
if type(self._dest) is SolrClient and self._dest_coll:
self.log.info("Finished Indexing, sending a commit")
self._dest.commit(self._dest_coll, openSearcher=True) | Starts Reindexing Process. All parameter arguments will be passed down to the getter function.
:param string fq: FilterQuery to pass to source Solr to retrieve items. This can be used to limit the results. | entailment |
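A hedged end-to-end sketch. Only reindex() is shown above, so the import path and constructor arguments of the reindexer are assumptions.

# Hypothetical sketch: copy the last week of documents from one cluster to another.
from SolrClient import SolrClient
from SolrClient.helpers import Reindexer           # import path assumed
source = SolrClient('http://solr-old:8983/solr')
dest = SolrClient('http://solr-new:8983/solr')
r = Reindexer(source, dest, source_coll='products', dest_coll='products',
              date_field='index_date')             # constructor signature assumed
r.reindex(fq=['index_date:[NOW-7DAY TO *]'])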
def _from_solr(self, fq=[], report_frequency = 25):
'''
Method for retrieving batch data from Solr.
'''
cursor = '*'
stime = datetime.now()
query_count = 0
while True:
#Get data with starting cursorMark
query = self._get_query(cursor)
#Add FQ to the query. This is used by resume to filter on date fields and when specifying document subset.
            #Not included in _get_query for more flexibility.
if fq:
if 'fq' in query:
                    query['fq'].extend(fq)
else:
query['fq'] = fq
results = self._source.query(self._source_coll, query)
query_count += 1
if query_count % report_frequency == 0:
self.log.info("Processed {} Items in {} Seconds. Apprximately {} items/minute".format(
self._items_processed, int((datetime.now()-stime).seconds),
str(int(self._items_processed / ((datetime.now()-stime).seconds/60)))
))
if results.get_results_count():
#If we got items back, get the new cursor and yield the docs
self._items_processed += results.get_results_count()
cursor = results.get_cursor()
#Remove ignore fields
docs = self._trim_fields(results.docs)
yield docs
if results.get_results_count() < self._rows:
#Less results than asked, probably done
break
else:
#No Results, probably done :)
self.log.debug("Got zero Results with cursor: {}".format(cursor))
break | Method for retrieving batch data from Solr. | entailment |
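The method relies on Solr's cursorMark deep paging; a standalone sketch of that pattern follows, assuming client is a SolrClient and the sort includes the unique key (process is a placeholder for your own handler).

# Minimal cursorMark paging sketch.
cursor = '*'
while True:
    res = client.query('products', {'q': '*:*', 'rows': 500,
                                    'sort': 'id desc', 'cursorMark': cursor})
    if not res.get_results_count():
        break
    process(res.docs)
    next_cursor = res.get_cursor()
    if next_cursor == cursor:   # Solr repeats the cursor once the result set is exhausted
        break
    cursor = next_cursor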
def _trim_fields(self, docs):
'''
Removes ignore fields from the data that we got from Solr.
'''
for doc in docs:
for field in self._ignore_fields:
if field in doc:
del(doc[field])
return docs | Removes ignore fields from the data that we got from Solr. | entailment |
def _get_query(self, cursor):
'''
        Query template for source Solr; sorts by id by default.
'''
query = {'q':'*:*',
'sort':'id desc',
'rows':self._rows,
'cursorMark':cursor}
if self._date_field:
query['sort'] = "{} asc, id desc".format(self._date_field)
if self._per_shard:
query['distrib'] = 'false'
        return query | Query template for source Solr; sorts by id by default. | entailment
def _to_solr(self, data):
'''
Sends data to a Solr instance.
'''
return self._dest.index_json(self._dest_coll, json.dumps(data,sort_keys=True)) | Sends data to a Solr instance. | entailment |
def _get_date_range_query(self, start_date, end_date, timespan= 'DAY', date_field= None):
'''
Gets counts of items per specified date range.
        :param string date_field: Date field to facet on; defaults to the reindexer's date field.
        :param timespan: Solr Date Math compliant value for faceting, e.g. HOUR, MONTH, DAY
'''
if date_field is None:
date_field = self._date_field
query ={'q':'*:*',
'rows':0,
'facet':'true',
'facet.range': date_field,
'facet.range.gap': '+1{}'.format(timespan),
'facet.range.end': '{}'.format(end_date),
'facet.range.start': '{}'.format(start_date),
'facet.range.include': 'all'
}
if self._per_shard:
query['distrib'] = 'false'
return query | Gets counts of items per specified date range.
:param string date_field: Date field to facet on; defaults to the reindexer's date field.
:param timespan: Solr Date Math compliant value for faceting, e.g. HOUR, MONTH, DAY | entailment
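For illustration, the query dict this helper would produce for a one-week window on a hypothetical index_date field looks roughly like this:

# Approximate result of _get_date_range_query('2021-03-01T00:00:00.000Z', '2021-03-08T00:00:00.000Z', 'DAY', 'index_date')
{'q': '*:*', 'rows': 0, 'facet': 'true',
 'facet.range': 'index_date', 'facet.range.gap': '+1DAY',
 'facet.range.start': '2021-03-01T00:00:00.000Z',
 'facet.range.end': '2021-03-08T00:00:00.000Z',
 'facet.range.include': 'all'}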
def _get_edge_date(self, date_field, sort):
'''
This method is used to get start and end dates for the collection.
'''
return self._source.query(self._source_coll, {
'q':'*:*',
'rows':1,
'fq':'+{}:*'.format(date_field),
'sort':'{} {}'.format(date_field, sort)}).docs[0][date_field] | This method is used to get start and end dates for the collection. | entailment |
def _get_date_facet_counts(self, timespan, date_field, start_date=None, end_date=None):
'''
        Returns Range Facet counts for the date field from both the source and destination collections.
'''
if 'DAY' not in timespan:
raise ValueError("At this time, only DAY date range increment is supported. Aborting..... ")
#Need to do this a bit better later. Don't like the string and date concatenations.
if not start_date:
start_date = self._get_edge_date(date_field, 'asc')
start_date = datetime.strptime(start_date,'%Y-%m-%dT%H:%M:%S.%fZ').date().isoformat()+'T00:00:00.000Z'
else:
start_date = start_date+'T00:00:00.000Z'
if not end_date:
end_date = self._get_edge_date(date_field, 'desc')
end_date = datetime.strptime(end_date,'%Y-%m-%dT%H:%M:%S.%fZ').date()
end_date += timedelta(days=1)
end_date = end_date.isoformat()+'T00:00:00.000Z'
else:
end_date = end_date+'T00:00:00.000Z'
self.log.info("Processing Items from {} to {}".format(start_date, end_date))
#Get facet counts for source and destination collections
source_facet = self._source.query(self._source_coll,
self._get_date_range_query(timespan=timespan, start_date=start_date, end_date=end_date)
).get_facets_ranges()[date_field]
dest_facet = self._dest.query(
self._dest_coll, self._get_date_range_query(
timespan=timespan, start_date=start_date, end_date=end_date
)).get_facets_ranges()[date_field]
        return source_facet, dest_facet | Returns Range Facet counts for the date field from both the source and destination collections. | entailment
def resume(self, start_date=None, end_date=None, timespan='DAY', check= False):
'''
This method may help if the original run was interrupted for some reason. It will only work under the following conditions
* You have a date field that you can facet on
* Indexing was stopped for the duration of the copy
The way this tries to resume re-indexing is by running a date range facet on the source and destination collections. It then compares
the counts in both collections for each timespan specified. If the counts are different, it will re-index items for each range where
the counts are off. You can also pass in a start_date to only get items after a certain time period. Note that each date range will be indexed in
        its entirety, even if there is only one item missing.
        Keep in mind this only checks the counts and not the actual data. So make sure the indexes weren't modified between the reindexing execution and
running the resume operation.
:param start_date: Date to start indexing from. If not specified there will be no restrictions and all data will be processed. Note that
this value will be passed to Solr directly and not modified.
        :param end_date: The date to index items up to. If not specified, the date of the newest document in the source collection is used.
        :param timespan: Solr Date Math compliant value for faceting; currently only DAY is supported.
:param check: If set to True it will only log differences between the two collections without actually modifying the destination.
'''
if type(self._source) is not SolrClient or type(self._dest) is not SolrClient:
raise ValueError("To resume, both source and destination need to be Solr.")
source_facet, dest_facet = self._get_date_facet_counts(timespan, self._date_field, start_date=start_date, end_date=end_date)
for dt_range in sorted(source_facet):
if dt_range in dest_facet:
self.log.info("Date Range: {} Source: {} Destination:{} Difference:{}".format(
dt_range, source_facet[dt_range], dest_facet[dt_range], (source_facet[dt_range]-dest_facet[dt_range])))
if check:
continue
if source_facet[dt_range] > dest_facet[dt_range]:
#Kicks off reindexing with an additional FQ
self.reindex(fq=['{}:[{} TO {}]'.format(self._date_field, dt_range, dt_range+'+1{}'.format(timespan))])
self.log.info("Complete Date Range {}".format(dt_range))
else:
self.log.error("Something went wrong; destinationSource: {}".format(source_facet))
self.log.error("Destination: {}".format(dest_facet))
raise ValueError("Date Ranges don't match up")
self._dest.commit(self._dest_coll, openSearcher=True) | This method may help if the original run was interrupted for some reason. It will only work under the following conditions
* You have a date field that you can facet on
* Indexing was stopped for the duration of the copy
The way this tries to resume re-indexing is by running a date range facet on the source and destination collections. It then compares
the counts in both collections for each timespan specified. If the counts are different, it will re-index items for each range where
the counts are off. You can also pass in a start_date to only get items after a certain time period. Note that each date range will be indexed in
its entirety, even if there is only one item missing.
Keep in mind this only checks the counts and not the actual data. So make sure the indexes weren't modified between the reindexing execution and
running the resume operation.
:param start_date: Date to start indexing from. If not specified there will be no restrictions and all data will be processed. Note that
this value will be passed to Solr directly and not modified.
:param end_date: The date to index items up to. If not specified, the date of the newest document in the source collection is used.
:param timespan: Solr Date Math compliant value for faceting; currently only DAY is supported.
:param check: If set to True it will only log differences between the two collections without actually modifying the destination. | entailment |
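A hedged usage sketch, reusing the hypothetical reindexer r from the earlier example: run the comparison as a dry run first, then let resume() backfill the ranges whose counts differ.

# Hypothetical sketch: dry run, then repair the mismatched date ranges.
r.resume(start_date='2021-03-01', check=True)   # only logs the per-day differences
r.resume(start_date='2021-03-01')               # re-indexes the ranges where counts are off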
def start_client(self, event=None):
"""
Negotiate a new SSH2 session as a client. This is the first step after
creating a new L{Transport}. A separate thread is created for protocol
negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given C{Event} will
be triggered. On failure, L{is_active} will return C{False}.
(Since 1.4) If C{event} is C{None}, this method will not return until
        negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, you will usually want to authenticate,
calling L{auth_password <Transport.auth_password>} or
L{auth_publickey <Transport.auth_publickey>}.
@note: L{connect} is a simpler method for connecting as a client.
@note: After calling this method (or L{start_server} or L{connect}),
you should no longer directly read from or write to the original
socket object.
@param event: an event to trigger when negotiation is complete
(optional)
@type event: threading.Event
@raise SSHException: if negotiation fails (and no C{event} was passed
in)
"""
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
Random.atfork()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break | Negotiate a new SSH2 session as a client. This is the first step after
creating a new L{Transport}. A separate thread is created for protocol
negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given C{Event} will
be triggered. On failure, L{is_active} will return C{False}.
(Since 1.4) If C{event} is C{None}, this method will not return until
negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, you will usually want to authenticate,
calling L{auth_password <Transport.auth_password>} or
L{auth_publickey <Transport.auth_publickey>}.
@note: L{connect} is a simpler method for connecting as a client.
@note: After calling this method (or L{start_server} or L{connect}),
you should no longer directly read from or write to the original
socket object.
@param event: an event to trigger when negotiation is complete
(optional)
@type event: threading.Event
@raise SSHException: if negotiation fails (and no C{event} was passed
in) | entailment |
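A minimal client-side sketch for the legacy Transport API documented above; host, user, and password are placeholders, and real code should verify the returned host key.

# Hypothetical sketch: open a socket, negotiate, authenticate, then open a session channel.
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('ssh.example.com', 22))
t = Transport(sock)
t.start_client()                        # synchronous: returns after negotiation or raises SSHException
server_key = t.get_remote_server_key()  # compare against known_hosts in real code
t.auth_password('alice', 'secret')
chan = t.open_session()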
def start_server(self, event=None, server=None):
"""
Negotiate a new SSH2 session as a server. This is the first step after
creating a new L{Transport} and setting up your server host key(s). A
separate thread is created for protocol negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given C{Event} will
be triggered. On failure, L{is_active} will return C{False}.
(Since 1.4) If C{event} is C{None}, this method will not return until
        negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, the client will need to authenticate.
Override the methods
L{get_allowed_auths <ServerInterface.get_allowed_auths>},
L{check_auth_none <ServerInterface.check_auth_none>},
L{check_auth_password <ServerInterface.check_auth_password>}, and
L{check_auth_publickey <ServerInterface.check_auth_publickey>} in the
given C{server} object to control the authentication process.
After a successful authentication, the client should request to open
a channel. Override
L{check_channel_request <ServerInterface.check_channel_request>} in the
given C{server} object to allow channels to be opened.
@note: After calling this method (or L{start_client} or L{connect}),
you should no longer directly read from or write to the original
socket object.
@param event: an event to trigger when negotiation is complete.
@type event: threading.Event
@param server: an object used to perform authentication and create
L{Channel}s.
@type server: L{server.ServerInterface}
@raise SSHException: if negotiation fails (and no C{event} was passed
in)
"""
if server is None:
server = ServerInterface()
self.server_mode = True
self.server_object = server
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break | Negotiate a new SSH2 session as a server. This is the first step after
creating a new L{Transport} and setting up your server host key(s). A
separate thread is created for protocol negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given C{Event} will
be triggered. On failure, L{is_active} will return C{False}.
(Since 1.4) If C{event} is C{None}, this method will not return until
negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, the client will need to authenticate.
Override the methods
L{get_allowed_auths <ServerInterface.get_allowed_auths>},
L{check_auth_none <ServerInterface.check_auth_none>},
L{check_auth_password <ServerInterface.check_auth_password>}, and
L{check_auth_publickey <ServerInterface.check_auth_publickey>} in the
given C{server} object to control the authentication process.
After a successful authentication, the client should request to open
a channel. Override
L{check_channel_request <ServerInterface.check_channel_request>} in the
given C{server} object to allow channels to be opened.
@note: After calling this method (or L{start_client} or L{connect}),
you should no longer directly read from or write to the original
socket object.
@param event: an event to trigger when negotiation is complete.
@type event: threading.Event
@param server: an object used to perform authentication and create
L{Channel}s.
@type server: L{server.ServerInterface}
@raise SSHException: if negotiation fails (and no C{event} was passed
in) | entailment |
def close(self):
"""
Close this session, and any open channels that are tied to it.
"""
if not self.active:
return
self.active = False
self.packetizer.close()
self.join()
for chan in self._channels.values():
chan._unlink() | Close this session, and any open channels that are tied to it. | entailment |
def open_forwarded_tcpip_channel(self, (src_addr, src_port), (dest_addr, dest_port)):
"""
Request a new channel back to the client, of type C{"forwarded-tcpip"}.
This is used after a client has requested port forwarding, for sending
incoming connections back to the client.
@param src_addr: originator's address
@param src_port: originator's port
@param dest_addr: local (server) connected address
@param dest_port: local (server) connected port
"""
return self.open_channel('forwarded-tcpip', (dest_addr, dest_port), (src_addr, src_port)) | Request a new channel back to the client, of type C{"forwarded-tcpip"}.
This is used after a client has requested port forwarding, for sending
incoming connections back to the client.
@param src_addr: originator's address
@param src_port: originator's port
@param dest_addr: local (server) connected address
@param dest_port: local (server) connected port | entailment |
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where C{server_addr} and C{server_port} are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
L{accept}.
@param address: the address to bind when forwarding
@type address: str
@param port: the port to forward, or 0 to ask the server to allocate
any port
@type port: int
@param handler: optional handler for incoming forwarded connections
@type handler: function(Channel, (str, int), (str, int))
@return: the port # allocated by the server
@rtype: int
@raise SSHException: if the server refused the TCP forward request
"""
if not self.active:
raise SSHException('SSH session not active')
address = str(address)
port = int(port)
response = self.global_request('tcpip-forward', (address, port), wait=True)
if response is None:
raise SSHException('TCP forwarding request denied')
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, (src_addr, src_port), (dest_addr, dest_port)):
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port | Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where C{server_addr} and C{server_port} are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
L{accept}.
@param address: the address to bind when forwarding
@type address: str
@param port: the port to forward, or 0 to ask the server to allocate
any port
@type port: int
@param handler: optional handler for incoming forwarded connections
@type handler: function(Channel, (str, int), (str, int))
@return: the port # allocated by the server
@rtype: int
@raise SSHException: if the server refused the TCP forward request | entailment |
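A hedged sketch of remote port forwarding with a custom handler; t is assumed to be an authenticated Transport such as the one in the earlier client sketch.

# Hypothetical sketch: ask the server to listen on port 8080 and hand each connection to a callback.
def on_forwarded(channel, origin, server):
    # origin and server are (address, port) tuples; hand the channel to a worker thread in real code
    channel.close()
port = t.request_port_forward('', 8080, on_forwarded)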
def send_ignore(self, bytes=None):
"""
Send a junk packet across the encrypted link. This is sometimes used
to add "noise" to a connection to confuse would-be attackers. It can
also be used as a keep-alive for long lived connections traversing
firewalls.
@param bytes: the number of random bytes to send in the payload of the
ignored packet -- defaults to a random number from 10 to 41.
@type bytes: int
"""
m = Message()
m.add_byte(chr(MSG_IGNORE))
if bytes is None:
bytes = (ord(rng.read(1)) % 32) + 10
m.add_bytes(rng.read(bytes))
self._send_user_message(m) | Send a junk packet across the encrypted link. This is sometimes used
to add "noise" to a connection to confuse would-be attackers. It can
also be used as a keep-alive for long lived connections traversing
firewalls.
@param bytes: the number of random bytes to send in the payload of the
ignored packet -- defaults to a random number from 10 to 41.
@type bytes: int | entailment |
def renegotiate_keys(self):
"""
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
@raise SSHException: if the key renegotiation failed (which causes the
session to end)
"""
self.completion_event = threading.Event()
self._send_kex_init()
while True:
self.completion_event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if self.completion_event.isSet():
break
return | Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
@raise SSHException: if the key renegotiation failed (which causes the
session to end) | entailment |
def set_keepalive(self, interval):
"""
Turn on/off keepalive packets (default is off). If this is set, after
C{interval} seconds without sending any data over the connection, a
"keepalive" packet will be sent (and ignored by the remote host). This
can be useful to keep connections alive over a NAT, for example.
@param interval: seconds to wait before sending a keepalive packet (or
0 to disable keepalives).
@type interval: int
"""
self.packetizer.set_keepalive(interval,
lambda x=weakref.proxy(self): x.global_request('[email protected]', wait=False)) | Turn on/off keepalive packets (default is off). If this is set, after
C{interval} seconds without sending any data over the connection, a
"keepalive" packet will be sent (and ignored by the remote host). This
can be useful to keep connections alive over a NAT, for example.
@param interval: seconds to wait before sending a keepalive packet (or
0 to disable keepalives).
@type interval: int | entailment |
def connect(self, hostkey=None, username='', password=None, pkey=None):
"""
Negotiate an SSH2 session, and optionally verify the server's host key
and authenticate using a password or private key. This is a shortcut
for L{start_client}, L{get_remote_server_key}, and
L{Transport.auth_password} or L{Transport.auth_publickey}. Use those
methods if you want more control.
You can use this method immediately after creating a Transport to
negotiate encryption with a server. If it fails, an exception will be
thrown. On success, the method will return cleanly, and an encrypted
session exists. You may immediately call L{open_channel} or
L{open_session} to get a L{Channel} object, which is used for data
transfer.
@note: If you fail to supply a password or private key, this method may
succeed, but a subsequent L{open_channel} or L{open_session} call may
fail because you haven't authenticated yet.
@param hostkey: the host key expected from the server, or C{None} if
you don't want to do host key verification.
@type hostkey: L{PKey<pkey.PKey>}
@param username: the username to authenticate as.
@type username: str
@param password: a password to use for authentication, if you want to
use password authentication; otherwise C{None}.
@type password: str
@param pkey: a private key to use for authentication, if you want to
use private key authentication; otherwise C{None}.
@type pkey: L{PKey<pkey.PKey>}
@raise SSHException: if the SSH2 negotiation fails, the host key
supplied by the server is incorrect, or authentication fails.
"""
if hostkey is not None:
self._preferred_keys = [ hostkey.get_name() ]
self.start_client()
# check host key if we were given one
if (hostkey is not None):
key = self.get_remote_server_key()
if (key.get_name() != hostkey.get_name()) or (str(key) != str(hostkey)):
self._log(DEBUG, 'Bad host key from server')
self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(str(hostkey))))
self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(str(key))))
raise SSHException('Bad host key from server')
self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name())
if (pkey is not None) or (password is not None):
if password is not None:
self._log(DEBUG, 'Attempting password auth...')
self.auth_password(username, password)
else:
self._log(DEBUG, 'Attempting public-key auth...')
self.auth_publickey(username, pkey)
return | Negotiate an SSH2 session, and optionally verify the server's host key
and authenticate using a password or private key. This is a shortcut
for L{start_client}, L{get_remote_server_key}, and
L{Transport.auth_password} or L{Transport.auth_publickey}. Use those
methods if you want more control.
You can use this method immediately after creating a Transport to
negotiate encryption with a server. If it fails, an exception will be
thrown. On success, the method will return cleanly, and an encrypted
session exists. You may immediately call L{open_channel} or
L{open_session} to get a L{Channel} object, which is used for data
transfer.
@note: If you fail to supply a password or private key, this method may
succeed, but a subsequent L{open_channel} or L{open_session} call may
fail because you haven't authenticated yet.
@param hostkey: the host key expected from the server, or C{None} if
you don't want to do host key verification.
@type hostkey: L{PKey<pkey.PKey>}
@param username: the username to authenticate as.
@type username: str
@param password: a password to use for authentication, if you want to
use password authentication; otherwise C{None}.
@type password: str
@param pkey: a private key to use for authentication, if you want to
use private key authentication; otherwise C{None}.
@type pkey: L{PKey<pkey.PKey>}
@raise SSHException: if the SSH2 negotiation fails, the host key
supplied by the server is incorrect, or authentication fails. | entailment |
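A minimal sketch of the connect() shortcut using private-key authentication; the key path and host are placeholders, host key checking is skipped for brevity, and the RSAKey import should be adjusted to this library's package name.

# Hypothetical sketch: one-call negotiate-and-authenticate with a private key.
import socket
from paramiko import RSAKey                # adjust to this library's package if different
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('ssh.example.com', 22))
key = RSAKey.from_private_key_file('/home/alice/.ssh/id_rsa')   # path is a placeholder
t = Transport(sock)
t.connect(username='alice', pkey=key)      # hostkey=None, so no host key verification here
chan = t.open_session()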
def auth_none(self, username):
"""
Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
L{BadAuthenticationType} exception raised.
@param username: the username to authenticate as
@type username: string
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if "none" authentication isn't allowed
by the server for this user
@raise SSHException: if the authentication failed due to a network
error
@since: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_none(username, my_event)
return self.auth_handler.wait_for_response(my_event) | Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
L{BadAuthenticationType} exception raised.
@param username: the username to authenticate as
@type username: string
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if "none" authentication isn't allowed
by the server for this user
@raise SSHException: if the authentication failed due to a network
error
@since: 1.5 | entailment |
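A short sketch of the probing pattern the docstring describes: catch BadAuthenticationType to read the auth types the server will accept (the allowed_types attribute is assumed, as in paramiko-style libraries).

# Hypothetical sketch: discover which auth methods the server allows for a user.
try:
    allowed = t.auth_none('alice')
except BadAuthenticationType as e:
    allowed = e.allowed_types
print(allowed)    # e.g. ['publickey', 'password']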
def _send_user_message(self, data):
"""
send a message, but block if we're in key negotiation. this is used
for user-initiated requests.
"""
start = time.time()
while True:
self.clear_to_send.wait(0.1)
if not self.active:
self._log(DEBUG, 'Dropping user packet because connection is dead.')
return
self.clear_to_send_lock.acquire()
if self.clear_to_send.isSet():
break
self.clear_to_send_lock.release()
if time.time() > start + self.clear_to_send_timeout:
raise SSHException('Key-exchange timed out waiting for key negotiation')
try:
self._send_message(data)
finally:
self.clear_to_send_lock.release() | send a message, but block if we're in key negotiation. this is used
for user-initiated requests. | entailment |
def _set_K_H(self, k, h):
"used by a kex object to set the K (root key) and H (exchange hash)"
self.K = k
self.H = h
        if self.session_id is None:
self.session_id = h | used by a kex object to set the K (root key) and H (exchange hash) | entailment |
def _compute_key(self, id, nbytes):
"id is 'A' - 'F' for the various keys used by ssh"
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_byte(id)
m.add_bytes(self.session_id)
out = sofar = SHA.new(str(m)).digest()
while len(out) < nbytes:
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_bytes(sofar)
digest = SHA.new(str(m)).digest()
out += digest
sofar += digest
return out[:nbytes] | id is 'A' - 'F' for the various keys used by ssh | entailment |