def rename(self, id, name):
    """ Change the name of this domain record

    Parameters
    ----------
    id: int
        domain record id
    name: str
        new name of record
    """
    return super(DomainRecords, self).update(id, name=name)[self.singular]
def create(self, type, name=None, data=None, priority=None, port=None,
           weight=None):
    """
    Parameters
    ----------
    type: str
        {A, AAAA, CNAME, MX, TXT, SRV, NS}
    name: str
        Name of the record
    data: object, type-dependent
        type == 'A'     : IPv4 address
        type == 'AAAA'  : IPv6 address
        type == 'CNAME' : destination host name
        type == 'MX'    : mail host name
        type == 'TXT'   : txt contents
        type == 'SRV'   : target host name to direct requests for the service
        type == 'NS'    : name server that is authoritative for the domain
    priority:
    port:
    weight:
    """
    if type == 'A' and name is None:
        name = self.domain
    return self.post(type=type, name=name, data=data, priority=priority,
                     port=port, weight=weight)[self.singular]
def get(self, id, **kwargs):
    """ Retrieve a single domain record given the id """
    return super(DomainRecords, self).get(id, **kwargs)
def logon(self, username, password):
    """
    Logs the user on to FogBugz.

    Returns None for a successful login.
    """
    if self._token:
        self.logoff()
    try:
        response = self.__makerequest(
            'logon', email=username, password=password)
    except FogBugzAPIError:
        e = sys.exc_info()[1]
        raise FogBugzLogonError(e)

    self._token = response.token.string
    if type(self._token) == CData:
        self._token = self._token.encode('utf-8')
def __encode_multipart_formdata(self, fields, files):
    """
    fields is a sequence of (key, value) elements for regular form fields.
    files is a sequence of (filename, filehandle) files to be uploaded
    returns (content_type, body)
    """
    BOUNDARY = _make_boundary()

    if len(files) > 0:
        fields['nFileCount'] = str(len(files))

    crlf = '\r\n'
    buf = BytesIO()

    for k, v in fields.items():
        if DEBUG:
            print("field: %s: %s" % (repr(k), repr(v)))
        lines = [
            '--' + BOUNDARY,
            'Content-disposition: form-data; name="%s"' % k,
            '',
            str(v),
            '',
        ]
        buf.write(crlf.join(lines).encode('utf-8'))

    n = 0
    for f, h in files.items():
        n += 1
        lines = [
            '--' + BOUNDARY,
            'Content-disposition: form-data; name="File%d"; '
            'filename="%s"' % (n, f),
            '',
        ]
        buf.write(crlf.join(lines).encode('utf-8'))
        lines = [
            'Content-type: application/octet-stream',
            '',
            '',
        ]
        buf.write(crlf.join(lines).encode('utf-8'))
        buf.write(h.read())
        buf.write(crlf.encode('utf-8'))

    buf.write(('--' + BOUNDARY + '--' + crlf).encode('utf-8'))
    content_type = "multipart/form-data; boundary=%s" % BOUNDARY
    return content_type, buf.getvalue()
def chop(list_, n):
    "Chop list_ into n chunks. Returns a list."
    # could look into itertools also, might be implemented there
    size = len(list_)
    each = size // n
    if each == 0:
        return [list_]
    chopped = []
    for i in range(n):
        start = i * each
        end = (i + 1) * each
        if i == (n - 1):
            # make sure we get all items, let the last worker do a little more
            end = size
        chopped.append(list_[start:end])
    return chopped
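A minimal usage sketch (an illustration, not part of the original module; it assumes `chop` is in scope): the last chunk absorbs any remainder, and a list shorter than `n` comes back as a single chunk.

# Hypothetical demo of chop's edge behavior.
data = list(range(10))
print(chop(data, 3))    # [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
print(chop([1, 2], 5))  # [[1, 2]] -- fewer items than chunks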
def get_first():
    """ return first droplet """
    client = po.connect()  # this depends on the DIGITALOCEAN_API_KEY envvar
    all_droplets = client.droplets.list()
    id = all_droplets[0]['id']  # I'm cheating because I only have one droplet
    return client.droplets.get(id)
def take_snapshot(droplet, name):
    """ Take a snapshot of a droplet

    Parameters
    ----------
    name: str
        name for snapshot
    """
    print("powering off")
    droplet.power_off()
    droplet.wait()  # wait for pending actions to complete
    print("taking snapshot")
    droplet.take_snapshot(name)
    droplet.wait()
    snapshots = droplet.snapshots()
    print("Current snapshots")
    print(snapshots)
def allowed_operations(self):
    """Retrieves the allowed operations for this request."""
    if self.slug is not None:
        return self.meta.detail_allowed_operations
    return self.meta.list_allowed_operations
def assert_operations(self, *args):
    """Asserts that the requested operations are allowed in this context."""
    if not set(args).issubset(self.allowed_operations):
        raise http.exceptions.Forbidden()
def make_response(self, data=None):
    """Fills the response object from the passed data."""
    if data is not None:
        # Prepare the data for transmission.
        data = self.prepare(data)

        # Encode the data using a desired encoder.
        self.response.write(data, serialize=True)
def get(self, request, response):
    """Processes a `GET` request."""
    # Ensure we're allowed to read the resource.
    self.assert_operations('read')

    # Delegate to `read` to retrieve the items.
    items = self.read()

    # if self.slug is not None and not items:
    #     # Requested a specific resource but nothing is returned.
    #     # Attempt to resolve by changing what we understand as
    #     # a slug to a path.
    #     self.path = self.path + self.slug if self.path else self.slug
    #     self.slug = None
    #
    #     # Attempt to retrieve the resource again.
    #     items = self.read()

    # Ensure that if we have a slug and still no items, a 404
    # is raised appropriately.
    if not items:
        raise http.exceptions.NotFound()

    if (isinstance(items, Iterable)
            and not isinstance(items, six.string_types)) and items:
        # Paginate over the collection.
        items = pagination.paginate(self.request, self.response, items)

    # Build the response object.
    self.make_response(items)
def post(self, request, response):
    """Processes a `POST` request."""
    if self.slug is not None:
        # Don't know what to do with an item access.
        raise http.exceptions.NotImplemented()

    # Ensure we're allowed to create a resource.
    self.assert_operations('create')

    # Deserialize and clean the incoming object.
    data = self._clean(None, self.request.read(deserialize=True))

    # Delegate to `create` to create the item.
    item = self.create(data)

    # Build the response object.
    self.response.status = http.client.CREATED
    self.make_response(item)
def put(self, request, response):
    """Processes a `PUT` request."""
    if self.slug is None:
        # Mass-PUT is not implemented.
        raise http.exceptions.NotImplemented()

    # Check if the resource exists.
    target = self.read()

    # Deserialize and clean the incoming object.
    data = self._clean(target, self.request.read(deserialize=True))

    if target is not None:
        # Ensure we're allowed to update the resource.
        self.assert_operations('update')

        try:
            # Delegate to `update` to update the item.
            self.update(target, data)
        except AttributeError:
            # No update method defined.
            raise http.exceptions.NotImplemented()

        # Build the response object.
        self.make_response(target)
    else:
        # Ensure we're allowed to create the resource.
        self.assert_operations('create')

        # Delegate to `create` to create the item.
        target = self.create(data)

        # Build the response object.
        self.response.status = http.client.CREATED
        self.make_response(target)
def delete(self, request, response):
    """Processes a `DELETE` request."""
    if self.slug is None:
        # Mass-DELETE is not implemented.
        raise http.exceptions.NotImplemented()

    # Ensure we're allowed to destroy a resource.
    self.assert_operations('destroy')

    # Delegate to `destroy` to destroy the item.
    self.destroy()

    # Build the response object.
    self.response.status = http.client.NO_CONTENT
    self.make_response()
def link(self, request, response):
    """Processes a `LINK` request.

    A `LINK` request is asking to create a relation from the currently
    represented URI to all of the `Link` request headers.
    """
    from armet.resources.managed.request import read

    if self.slug is None:
        # Mass-LINK is not implemented.
        raise http.exceptions.NotImplemented()

    # Get the current target.
    target = self.read()

    # Collect all the passed link headers.
    links = self._parse_link_headers(request['Link'])

    # Pull targets for each represented link.
    for link in links:
        # Delegate to a connector.
        self.relate(target, read(self, link['uri']))

    # Build the response object.
    self.response.status = http.client.NO_CONTENT
    self.make_response()
def create_project(self):
    ''' Creates a base Django project '''
    if os.path.exists(self._py):
        prj_dir = os.path.join(self._app_dir, self._project_name)
        if os.path.exists(prj_dir):
            if self._force:
                logging.warn('Removing existing project')
                shutil.rmtree(prj_dir)
            else:
                logging.warn('Found existing project; not creating '
                             '(use --force to overwrite)')
                return
        logging.info('Creating project')
        django_admin = os.path.join(
            self._ve_dir, self._project_name, 'bin', 'django-admin.py')
        p = subprocess.Popen(
            'cd {0} ; {1} startproject {2} > /dev/null'.format(
                self._app_dir, django_admin, self._project_name),
            shell=True)
        os.waitpid(p.pid, 0)
    else:
        logging.error('Unable to find Python interpreter in virtualenv')
        return
def ilike_helper(default):
    """Helper function that performs an `ilike` query if a string value
    is passed, otherwise the normal default operation."""
    @functools.wraps(default)
    def wrapped(x, y):
        # String values should use ILIKE queries.
        if isinstance(y, six.string_types) and not isinstance(x.type, sa.Enum):
            return x.ilike("%" + y + "%")
        else:
            return default(x, y)

    return wrapped
def parse(text, encoding='utf8'):
    """Parse the querystring into a normalized form."""
    # Decode the text if we got bytes.
    if isinstance(text, six.binary_type):
        text = text.decode(encoding)

    return Query(text, split_segments(text))
def split_segments(text, closing_paren=False):
    """Return objects representing segments."""
    buf = StringIO()

    # The segments we're building, and the combinators used to combine them.
    # Note that after this is complete, this should be true:
    #   len(segments) == len(combinators) + 1
    # Thus we can understand the relationship between segments and
    # combinators like so:
    #   s1 (c1) s2 (c2) s3 (c3)
    # where sN are segments and cN are combination functions.

    # TODO: Figure out exactly where the querystring died and post
    # cool error messages about it.
    segments = []
    combinators = []

    # A flag dictating if the last character we processed was a group.
    # This is used to determine if the next character (being a combinator)
    # is allowed to follow it.
    last_group = False

    # The recursive nature of this function relies on keeping track of the
    # state of iteration. This iterator will be passed down to recursed
    # calls.
    iterator = iter(text)

    # Detection for exclamation points. Only matters for this situation:
    #   foo=bar&!(bar=baz)
    last_negation = False

    for character in iterator:
        if character in COMBINATORS:
            if last_negation:
                buf.write(constants.OPERATOR_NEGATION)

            # The string representation of our segment.
            val = buf.getvalue()
            reset_stringio(buf)

            if not last_group and not len(val):
                raise ValueError('Unexpected %s.' % character)

            # When a group happens, the previous value is empty.
            if len(val):
                segments.append(parse_segment(val))

            combinators.append(COMBINATORS[character])

        elif character == constants.GROUP_BEGIN:
            # Recursively go into the next group.

            if buf.tell():
                raise ValueError('Unexpected %s' % character)

            seg = split_segments(iterator, True)

            if last_negation:
                seg = UnarySegmentCombinator(seg)

            segments.append(seg)

            # Flag that the last entry was a grouping, so that we don't
            # panic when the next character is a logical combinator.
            last_group = True
            continue

        elif character == constants.GROUP_END:
            # Build the segment for anything remaining, and then combine
            # all the segments.
            val = buf.getvalue()

            # Check for unbalanced parens or an empty thing:
            #   foo=bar&();bar=baz
            if not buf.tell() or not closing_paren:
                raise ValueError('Unexpected %s' % character)

            segments.append(parse_segment(val))
            return combine(segments, combinators)

        elif character == constants.OPERATOR_NEGATION and not buf.tell():
            last_negation = True
            continue

        else:
            if last_negation:
                buf.write(constants.OPERATOR_NEGATION)

            if last_group:
                raise ValueError('Unexpected %s' % character)

            buf.write(character)

        last_negation = False
        last_group = False
    else:
        # Check and see if the iterator exited early (unbalanced parens).
        if closing_paren:
            raise ValueError('Expected %s.' % constants.GROUP_END)

    if not last_group:
        # Add the final segment.
        segments.append(parse_segment(buf.getvalue()))

    # Everything completed normally; combine all the segments into one
    # and return them.
    return combine(segments, combinators)
def parse_directive(key):
    """ Takes a key of type (foo:bar) and returns either the key and the
    directive, or the key and None (for no directive.)
    """
    if constants.DIRECTIVE in key:
        return key.split(constants.DIRECTIVE, 1)
    else:
        return key, None
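A hedged sketch of the behavior, assuming `constants.DIRECTIVE` is ':' (the real value lives in the module's `constants`; the stand-in class below is purely illustrative). Note the split branch returns a list while the fallback returns a tuple; callers unpack two values either way.

class constants:  # stand-in for the module's constants (assumption)
    DIRECTIVE = ':'

print(parse_directive('foo:bar'))  # ['foo', 'bar']
print(parse_directive('foo'))      # ('foo', None)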
def parse_segment(text):
    "we expect foo=bar"
    if not len(text):
        return NoopQuerySegment()

    q = QuerySegment()

    # First we need to split the segment into key/value pairs. This is done
    # by attempting to split the sequence for each equality comparison.
    # Then discard any that did not split properly. Then choose the
    # smallest key (greedily choosing the first comparator we encounter in
    # the string) followed by the smallest value (greedily choosing the
    # largest comparator possible.)

    # translate into [('=', 'foo=bar')]
    equalities = zip(constants.OPERATOR_EQUALITIES, itertools.repeat(text))

    # Translate into [('=', ['foo', 'bar'])]
    equalities = map(lambda x: (x[0], x[1].split(x[0], 1)), equalities)

    # Remove unsplit entries and translate into [('=': ['foo', 'bar'])]
    # Note that the result from this stage is iterated over twice.
    equalities = list(filter(lambda x: len(x[1]) > 1, equalities))

    # Get the smallest key and use the length of that to remove other items.
    key_len = len(min((x[1][0] for x in equalities), key=len))
    equalities = filter(lambda x: len(x[1][0]) == key_len, equalities)

    # Get the smallest value length. Thus we have the earliest key and the
    # smallest value.
    op, (key, value) = min(equalities, key=lambda x: len(x[1][1]))

    key, directive = parse_directive(key)
    if directive:
        op = constants.OPERATOR_EQUALITY_FALLBACK
        q.directive = directive

    # Process negation. This comes in both foo.not= and foo!= forms.
    path = key.split(constants.SEP_PATH)
    last = path[-1]

    # Check for !=
    if last.endswith(constants.OPERATOR_NEGATION):
        last = last[:-1]
        q.negated = not q.negated

    # Check for foo.not=
    if last == constants.PATH_NEGATION:
        path.pop(-1)
        q.negated = not q.negated

    q.values = value.split(constants.SEP_VALUE)

    # Check for suffixed operators (foo.gte=bar). Prioritize suffixed
    # entries over actual equality checks.
    if path[-1] in constants.OPERATOR_SUFFIXES:
        # The case where foo.gte<=bar, which obviously makes no sense.
        if op not in constants.OPERATOR_FALLBACK:
            raise ValueError(
                'Both path-style operator and equality style operator '
                'provided. Please provide only a single style operator.')

        q.operator = constants.OPERATOR_SUFFIX_MAP[path[-1]]
        path.pop(-1)
    else:
        q.operator = constants.OPERATOR_EQUALITY_MAP[op]

    if not len(path):
        raise ValueError('No attribute navigation path provided.')

    q.path = path

    return q
def set(self, target, value):
    """Set the value of this attribute for the passed object."""
    if not self._set:
        return

    if self.path is None:
        # There is no path defined on this resource.
        # We can do no magic to set the value.
        self.set = lambda *a: None
        return None

    if self._segments[target.__class__]:
        # Attempt to resolve access to this attribute.
        self.get(target)

    if self._segments[target.__class__]:
        # Attribute is not fully resolved; an interim segment is null.
        return

    # Resolve access to the parent object.
    # For a single-segment path this will effectively be a no-op.
    parent_getter = compose(*self._getters[target.__class__][:-1])
    target = parent_getter(target)

    # Make the setter.
    func = self._make_setter(self.path.split('.')[-1], target.__class__)

    # Apply the setter now.
    func(target, value)

    # Replace this function with the constructed setter.
    def setter(target, value):
        func(parent_getter(target), value)

    self.set = setter
def parse(specifiers):
    """
    Consumes set specifiers as text and forms a generator to retrieve
    the requested ranges.

    @param[in] specifiers
        Expected syntax is from the byte-range-specifier ABNF found in the
        [RFC 2616]; eg. 15-17,151,-16,26-278,15

    @returns
        Consecutive tuples that describe the requested range;
        eg. (1, 72) or (1, 1) [read as 1 to 72 or 1 to 1].
    """
    specifiers = "".join(specifiers.split())

    for specifier in specifiers.split(','):
        if len(specifier) == 0:
            raise ValueError("Range: Invalid syntax; missing specifier.")

        count = specifier.count('-')
        if (count and specifier[0] == '-') or not count:
            # Single specifier; return as a tuple to itself.
            yield int(specifier), int(specifier)
            continue

        specifier = list(map(int, specifier.split('-')))
        if len(specifier) == 2:
            # Range specifier; return as a tuple.
            if specifier[0] < 0 or specifier[1] < 0:
                # Negative indexing is not supported in range specifiers
                # as stated in the HTTP/1.1 Range header specification.
                raise ValueError(
                    "Range: Invalid syntax; negative indexing "
                    "not supported in a range specifier.")

            if specifier[1] < specifier[0]:
                # Range must be for at least one item.
                raise ValueError(
                    "Range: Invalid syntax; stop is less than start.")

            # Return them as an immutable tuple.
            yield tuple(specifier)
            continue

        # Something weird happened.
        raise ValueError("Range: Invalid syntax.")
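A quick sketch of the grammar this generator accepts (illustrative only; assumes `parse` is in scope):

# Hypothetical demo of the byte-range-specifier syntax.
print(list(parse('15-17,151')))  # [(15, 17), (151, 151)]
print(list(parse('-16')))        # [(-16, -16)] -- a single '-N' specifier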
def paginate(request, response, items):
    """Paginate an iterable during a request.

    Magically splicing an iterable in our supported ORMs allows LIMIT and
    OFFSET queries. We should probably delegate this to the ORM or
    something in the future.
    """
    # TODO: support dynamic rangewords and page lengths
    # TODO: support multi-part range requests

    # Get the header.
    header = request.headers.get('Range')

    if not header:
        # No range header; move along.
        return items

    # Do some validation.
    prefix = RANGE_SPECIFIER + '='

    if not header.find(prefix) == 0:
        # This is not using a range specifier that we understand.
        raise exceptions.RequestedRangeNotSatisfiable()
    else:
        # Chop the prefix off the header and parse it.
        ranges = parse(header[len(prefix):])

    ranges = list(ranges)

    if len(ranges) > 1:
        raise exceptions.RequestedRangeNotSatisfiable(
            'Multiple ranges in a single request is not yet supported.')

    start, end = ranges[0]

    # Make sure the length is not higher than the total number allowed.
    max_length = request.resource.count(items)
    end = min(end, max_length)

    response.status = client.PARTIAL_CONTENT
    response.headers['Content-Range'] = '%d-%d/%d' % (start, end, max_length)
    response.headers['Accept-Ranges'] = RANGE_SPECIFIER

    # Splice and return the items.
    items = items[start:end + 1]
    return items
def indexesOptional(f):
    """Decorate test methods with this if you don't require strict index
    checking"""
    stack = inspect.stack()
    _NO_INDEX_CHECK_NEEDED.add(
        '%s.%s.%s' % (f.__module__, stack[1][3], f.__name__))
    del stack
    return f
def read(self, deserialize=False, format=None):
    """Read and return the request data.

    @param[in] deserialize
        True to deserialize the resultant text using a determined format
        or the passed format.

    @param[in] format
        A specific format to deserialize in; if provided, no detection is
        done. If not provided, the content-type header is looked at to
        determine an appropriate deserializer.
    """
    if deserialize:
        data, _ = self.deserialize(format=format)
        return data

    content = self._read()
    if not content:
        return ''

    if type(content) is six.binary_type:
        content = content.decode(self.encoding)

    return content
def use(**kwargs):
    """
    Updates the active resource configuration to the passed
    keyword arguments.

    Invoking this method without passing arguments will just return the
    active resource configuration.

    @returns
        The previous configuration.
    """
    config = dict(use.config)
    use.config.update(kwargs)
    return config
def try_delegation(method):
    '''This decorator wraps descriptor methods with a new method that tries
    to delegate to a function of the same name defined on the owner instance
    for convenience for dispatcher clients.
    '''
    @functools.wraps(method)
    def delegator(self, *args, **kwargs):
        if self.try_delegation:
            # Try to dispatch to the instance's implementation.
            inst = getattr(self, 'inst', None)
            if inst is not None:
                method_name = (self.delegator_prefix or '') + method.__name__
                func = getattr(inst, method_name, None)
                if func is not None:
                    return func(*args, **kwargs)

        # Otherwise run the decorated func.
        return method(self, *args, **kwargs)

    return delegator
def register(self, method, args, kwargs):
    '''Given a single decorated handler function, prepare and append the
    desired data to self.registry.
    '''
    invoc = self.dump_invoc(*args, **kwargs)
    self.registry.append((invoc, method.__name__))
def gen_methods(self, *args, **kwargs):
    '''Find all method names this input dispatches to. This method can
    accept *args, **kwargs, but it's the gen_dispatch method's job to pass
    specific args to handler methods.
    '''
    dispatched = False
    for invoc, methodname in self.registry:
        args, kwargs = self.loads(invoc)
        yield getattr(self.inst, methodname), args, kwargs
        dispatched = True

    if dispatched:
        return

    # Try the generic handler.
    generic_handler = getattr(self.inst, 'generic_handler', None)
    if generic_handler is not None:
        yield generic_handler, args, kwargs

    # Give up.
    msg = 'No method was found for %r on %r.'
    raise self.DispatchError(msg % ((args, kwargs), self.inst))
def get_method(self, *args, **kwargs):
    '''Find the first method this input dispatches to.
    '''
    for method in self.gen_methods(*args, **kwargs):
        return method

    msg = 'No method was found for %r on %r.'
    raise self.DispatchError(msg % ((args, kwargs), self.inst))
def dispatch(self, *args, **kwargs):
    '''Find and evaluate/return the first method this input dispatches to.
    '''
    for result in self.gen_dispatch(*args, **kwargs):
        return result
def gen_dispatch(self, *args, **kwargs):
    '''Find and evaluate/yield every method this input dispatches to.
    '''
    dispatched = False
    for method_data in self.gen_methods(*args, **kwargs):
        dispatched = True
        result = self.apply_handler(method_data, *args, **kwargs)
        yield result
        # return self.yield_from_handler(result)

    if dispatched:
        return

    msg = 'No method was found for %r on %r.'
    raise self.DispatchError(msg % ((args, kwargs), self.inst))
def gen_method_keys(self, *args, **kwargs):
    '''Given a node, return the string to use in computing the matching
    visitor methodname. Can also be a generator of strings.
    '''
    token = args[0]
    for mro_type in type(token).__mro__[:-1]:
        name = mro_type.__name__
        yield name
def gen_methods(self, *args, **kwargs):
    '''Find all method names this input dispatches to.
    '''
    token = args[0]
    inst = self.inst
    prefix = self._method_prefix

    for method_key in self.gen_method_keys(*args, **kwargs):
        method = getattr(inst, prefix + method_key, None)
        if method is not None:
            yield method

    # Fall back to built-in types, then types, then collections.
    typename = type(token).__name__
    yield from self.check_basetype(
        token, typename, self.builtins.get(typename))

    for basetype_name in self.interp_types:
        yield from self.check_basetype(
            token, basetype_name, getattr(self.types, basetype_name, None))

    for basetype_name in self.abc_types:
        yield from self.check_basetype(
            token, basetype_name,
            getattr(self.collections, basetype_name, None))

    # Try the generic handler.
    yield from self.gen_generic()
def apply_handler(self, method_data, *args, **kwargs):
    '''Call the dispatched function, optionally with other data
    stored/created during .register and .prepare. Assume the arguments
    passed in by the dispatcher are the only ones required.
    '''
    if isinstance(method_data, tuple):
        len_method = len(method_data)
        method = method_data[0]
        if 1 < len_method:
            args = method_data[1]
        if 2 < len_method:
            kwargs = method_data[2]
    else:
        method = method_data

    return method(*args, **kwargs)
def parse(cls, s, required=False):
    """ Parse string to create an instance

    :param str s: String with requirement to parse
    :param bool required: Is this requirement required to be fulfilled?
                          If not, then it is a filter.
    """
    req = pkg_resources.Requirement.parse(s)
    return cls(req, required=required)
def add(self, requirements, required=None):
    """ Add requirements to be managed

    :param list/Requirement requirements: List of :class:`BumpRequirement`
        or :class:`pkg_resources.Requirement`
    :param bool required: Set required flag for each requirement
        if provided.
    """
    if isinstance(requirements, RequirementsManager):
        requirements = list(requirements)
    elif not isinstance(requirements, list):
        requirements = [requirements]

    for req in requirements:
        name = req.project_name

        if not isinstance(req, BumpRequirement):
            req = BumpRequirement(req, required=required)
        elif required is not None:
            req.required = required

        add = True

        if name in self.requirements:
            for existing_req in self.requirements[name]:
                if req == existing_req:
                    add = False
                    break

                # Need to replace existing as the new req will be used to
                # bump next, and req.required could be updated.
                replace = False

                # Two pins: Use highest pinned version
                if (req.specs and req.specs[0][0] == '==' and
                        existing_req.specs and
                        existing_req.specs[0][0] == '=='):
                    if (pkg_resources.parse_version(req.specs[0][1]) <
                            pkg_resources.parse_version(
                                existing_req.specs[0][1])):
                        req.requirement = existing_req.requirement
                    replace = True

                # Replace Any
                if not (req.specs and existing_req.specs):
                    if existing_req.specs:
                        req.requirement = existing_req.requirement
                    replace = True

                if replace:
                    req.required |= existing_req.required
                    if existing_req.required_by and not req.required_by:
                        req.required_by = existing_req.required_by
                    self.requirements[name].remove(existing_req)
                    break

        if add:
            self.requirements[name].append(req)
def check(self, context, version=None):
    """ Check off requirements that are met by name/version.

    :param str|Bump|Requirement context: Either a package name, a
        requirement string, or a :class:`Bump`, :class:`BumpRequirement`,
        or :class:`pkg_resources.Requirement` instance
    :return: True if any requirement was satisfied by context
    """
    req_str = None

    self.checked.append((context, version))

    if isinstance(context, str) and not version:
        context = BumpRequirement.parse(context)

    if isinstance(context, Bump):
        name = context.name
        if context.new_version and context.new_version[0] == '==':
            version = context.new_version[1]
        else:
            req_str = str(context)

    elif isinstance(context, (pkg_resources.Requirement, BumpRequirement)):
        name = context.project_name
        if context.specs and context.specs[0][0] == '==':
            version = context.specs[0][1]
        else:
            req_str = str(context)

    else:
        name = context

    if name in self:
        self.matched_name = True

        for req in self[name]:
            if req.required and (version and version in req or
                                 req_str == str(req)):
                req.required = False
                return True

    return False
def satisfied_by_checked(self, req):
    """ Check if requirement is already satisfied by what was previously
    checked

    :param Requirement req: Requirement to check
    """
    req_man = RequirementsManager([req])
    return any(req_man.check(*checked) for checked in self.checked)
def from_requirement(cls, req, changes=None):
    """ Create an instance from a :class:`pkg_resources.Requirement`
    instance """
    return cls(req.project_name,
               req.specs and ''.join(req.specs[0]) or '',
               changes=changes)
def as_requirement(self):
    """ Convert back to a :class:`pkg_resources.Requirement` instance """
    if self.new_version:
        return pkg_resources.Requirement.parse(
            self.name + ''.join(self.new_version))
    else:
        return pkg_resources.Requirement.parse(self.name)
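For orientation, a sketch of the (op, version) spec tuples these two helpers join and re-parse, using pkg_resources directly (the surrounding Bump class is defined elsewhere in this module):

import pkg_resources

req = pkg_resources.Requirement.parse('requests==1.2.3')
print(req.project_name)       # 'requests'
print(req.specs)              # [('==', '1.2.3')]
print(''.join(req.specs[0]))  # '==1.2.3' -- the string from_requirement stores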
def require(self, req):
    """ Add new requirements that must be fulfilled for this bump to occur
    """
    reqs = req if isinstance(req, list) else [req]

    for req in reqs:
        if not isinstance(req, BumpRequirement):
            req = BumpRequirement(req)
        req.required = True
        req.required_by = self
        self.requirements.append(req)
def requirements_for_changes(self, changes):
    """ Parse changes for requirements

    :param list changes:
    """
    requirements = []
    reqs_set = set()

    if isinstance(changes, str):
        changes = changes.split('\n')

    if not changes or changes[0].startswith('-'):
        return requirements

    for line in changes:
        line = line.strip(' -+*')

        if not line:
            continue

        match = IS_REQUIREMENTS_RE2.search(line)  # or IS_REQUIREMENTS_RE.match(line)

        if match:
            for match in REQUIREMENTS_RE.findall(match.group(1)):
                if match[1]:
                    version = ('==' + match[2]
                               if match[1].startswith(' to ') else match[1])
                    req_str = match[0] + version
                else:
                    req_str = match[0]

                if req_str not in reqs_set:
                    reqs_set.add(req_str)
                    try:
                        requirements.append(
                            pkg_resources.Requirement.parse(req_str))
                    except Exception as e:
                        log.warn('Could not parse requirement "%s" from '
                                 'changes: %s', req_str, e)

    return requirements
def package_changes(self, name, current_version, new_version):
    """ List of changes for package name from current_version to
    new_version, in descending order.

    If the current version is higher than the new version (a downgrade),
    then a minus sign will be prefixed to each change.
    """
    if (pkg_resources.parse_version(current_version) >
            pkg_resources.parse_version(new_version)):
        downgrade_sign = '- '
        (current_version, new_version) = (new_version, current_version)
    else:
        downgrade_sign = None

    changes = self._package_changes(name, current_version, new_version)

    if changes and downgrade_sign:
        changes = [downgrade_sign + c for c in changes]

    return changes
def _bump(self, existing_req=None, bump_reqs=None):
    """ Bump an existing requirement to the desired requirement if any.

    Subclass can override this `_bump` method to change how each
    requirement is bumped.

    BR   = Bump to Requested Version
    BL   = Bump to Latest Version
    BLR  = Bump to Latest Version per Requested Requirement
    BROL = Bump to Requested Version or Latest (if Pin)
    N    = No Bump
    ERR  = Error
    C    = Version Conflict

    Pin case "requires=" will be required.
    Filter case "requires=" will be:
      1) From user = Required
      2) From bump = bump/require if existing = One, otherwise print
         warning.

    Filter Case::

                  Bump: None  Any  One  Many
        Existing: None  N     N    N    N
                  Any   N     N    BR   BR
                  One   BL    BL   BR   BR
                  Many  N     N    BR   BR

    Pin Case::

                  Bump: None  Any  One  Many
        Existing: None  N     N    N    N
                  Any   N     N    BR   BLR*
                  One   BL    BL   BR   BLR*
                  Many  N     N    BR   BLR*

    Add/Require Case::

                  Bump: None  Any   One   Many
        Existing: None  N     BROL  BROL  BROL

    :param pkg_resources.Requirement existing_req: Existing requirement
        if any
    :param list bump_reqs: List of `BumpRequirement`
    :return Bump: Either a :class:`Bump` instance or None
    :raise BumpAccident:
    """
    if existing_req or bump_reqs and any(r.required for r in bump_reqs):
        name = (existing_req and existing_req.project_name or
                bump_reqs[0].project_name)

        log.info('Checking %s', name)

        bump = current_version = new_version = None

        if bump_reqs:
            # BLR: Pin with Many bump requirements
            if self.should_pin() and (
                    len(bump_reqs) > 1 or
                    bump_reqs[0] and bump_reqs[0].specs and
                    bump_reqs[0].specs[0][0] != '=='):
                log.debug('Bump to latest within requirements: %s',
                          bump_reqs)

                new_version = self.latest_version_for_requirements(bump_reqs)
                current_version = (
                    existing_req and existing_req.specs and
                    existing_req.specs[0][0] == '==' and
                    existing_req.specs[0][1])

                if current_version == new_version:
                    return None

                bump = Bump(name, ('==', new_version))

            elif len(bump_reqs) > 1:
                raise BumpAccident(
                    'Not sure which requirement to use for %s: %s' % (
                        name, ', '.join(str(r) for r in bump_reqs)))

            # BR: Pin with One bump requirement or Filter with One or Many
            # bump requirements or Bump to Any required.
            elif bump_reqs[0].specs or not (
                    existing_req or self.should_pin() or
                    bump_reqs[0].specs):
                log.debug('Bump to requirement: %s', bump_reqs)

                latest_version = self.latest_version_for_requirements(
                    bump_reqs)

                new_version = (bump_reqs[0].specs and
                               bump_reqs[0].specs[0][0] == '==' and
                               bump_reqs[0].specs[0][1] or
                               latest_version)
                current_version = (existing_req and existing_req.specs and
                                   existing_req.specs[0][0] == '==' and
                                   existing_req.specs[0][1])

                if current_version == new_version:
                    return None

                if len(bump_reqs[0].specs) > 1:
                    version = (','.join(
                        s[0] + s[1] for s in bump_reqs[0].specs),)
                elif bump_reqs[0].specs:
                    version = bump_reqs[0].specs[0]
                else:
                    version = None

                bump = Bump(name, version)

        # BL: Pin to Latest
        if not bump and (existing_req and existing_req.specs and
                         existing_req.specs[0][0] == '==' or
                         self.should_pin() and not existing_req):
            log.debug('Bump to latest: %s', bump_reqs or name)

            current_version = existing_req and existing_req.specs[0][1]
            new_version = self.latest_package_version(name)

            if current_version == new_version:
                return None

            if not new_version:
                raise BumpAccident('No published version found for %s' % name)

            bump = Bump(name, ('==', new_version))

        if bump and current_version and new_version and self.detail:
            changes = self.package_changes(
                bump.name, current_version, new_version)
            bump.changes.extend(changes)

            if self.should_pin():
                bump.require(self.requirements_for_changes(changes))

        if bump:
            log.debug('Bumped %s', bump)

            if bump.requirements:
                log.info('Changes in %s require: %s',
                         bump.name,
                         ', '.join(sorted(str(r) for r in bump.requirements)))

        return bump if str(bump) != str(existing_req) else None
def bump(self, bump_reqs=None, **kwargs):
    """ Bump dependencies using given requirements.

    :param RequirementsManager bump_reqs: Bump requirements manager
    :param dict kwargs: Additional args from argparse. Some bumpers accept
        user options, and some do not.
    :return: List of :class:`Bump` changes made.
    """
    bumps = {}

    for existing_req in sorted(self.requirements(),
                               key=lambda r: r.project_name):
        if bump_reqs and existing_req.project_name not in bump_reqs:
            continue

        bump_reqs.check(existing_req)

        try:
            bump = self._bump(existing_req,
                              bump_reqs.get(existing_req.project_name))

            if bump:
                bumps[bump.name] = bump
                bump_reqs.check(bump)

        except Exception as e:
            if bump_reqs and bump_reqs.get(existing_req.project_name) and \
                    all(r.required_by is None
                        for r in bump_reqs.get(existing_req.project_name)):
                raise
            else:
                log.warn(e)

    for reqs in bump_reqs.required_requirements().values():
        name = reqs[0].project_name
        if name not in bumps and self.should_add(name):
            try:
                bump = self._bump(None, reqs)

                if bump:
                    bumps[bump.name] = bump
                    bump_reqs.check(bump)

            except Exception as e:
                if all(r.required_by is None for r in reqs):
                    raise
                else:
                    log.warn(e)

    self.bumps.update(bumps.values())

    return bumps.values()
def reverse(self):
    """ Restore the target file's content to what it was before any
    changes """
    if self._original_target_content:
        with open(self.target, 'w') as fp:
            fp.write(self._original_target_content)
def serialize(self, data=None):
    """
    Transforms the object into an acceptable format for transmission.

    @throws ValueError
        To indicate this serializer does not support the encoding of the
        specified object.
    """
    if data is not None and self.response is not None:
        # Set the content type.
        self.response['Content-Type'] = self.media_types[0]

        # Write the encoded and prepared data to the response.
        self.response.write(data)

    # Return the serialized data.
    # This has normally been transformed by a base class.
    return data
def cons(collection, value):
    """Extends a collection with a value."""
    if isinstance(value, collections.Mapping):
        if collection is None:
            collection = {}
        collection.update(**value)

    elif isinstance(value, six.string_types):
        if collection is None:
            collection = []
        collection.append(value)

    elif isinstance(value, collections.Iterable):
        if collection is None:
            collection = []
        collection.extend(value)

    else:
        if collection is None:
            collection = []
        collection.append(value)

    return collection
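A small usage sketch (illustrative; assumes `cons` is in scope and an interpreter old enough that `collections.Mapping`/`collections.Iterable` still exist, since Python 3.10 moved those ABCs to `collections.abc`):

print(cons(None, 'a'))           # ['a'] -- strings append rather than extend
print(cons(['a'], ['b', 'c']))   # ['a', 'b', 'c'] -- other iterables extend
print(cons({'x': 1}, {'y': 2}))  # {'x': 1, 'y': 2} -- mappings merge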
def _merge(options, name, bases, default=None):
    """Merges a named option collection."""
    result = None

    for base in bases:
        if base is None:
            continue

        value = getattr(base, name, None)
        if value is None:
            continue

        result = utils.cons(result, value)

    value = options.get(name)
    if value is not None:
        result = utils.cons(result, value)

    return result or default
def parse_requirements(requirements, in_file=None):
    """ Parse string requirements into list of
    :class:`pkg_resources.Requirement` instances

    :param str requirements: Requirements text to parse
    :param str in_file: File the requirements came from
    :return: List of requirements
    :raises ValueError: if failed to parse
    """
    try:
        return list(pkg_resources.parse_requirements(requirements))
    except Exception as e:
        # in_file already carries its leading space when set, so join the
        # two parts directly to avoid a doubled/trailing space.
        in_file = ' in %s' % in_file if in_file else ''
        raise ValueError('{}{}'.format(e, in_file))
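A quick sketch of the output (pkg_resources ships with setuptools; the input string is hypothetical):

reqs = parse_requirements('requests>=1.2\nsix')
print([str(r) for r in reqs])  # ['requests>=1.2', 'six']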
def package_info(cls, package):
    """ All package info for given package """
    if package not in cls.package_info_cache:
        package_json_url = 'https://pypi.python.org/pypi/%s/json' % package

        try:
            logging.getLogger('requests').setLevel(logging.WARN)
            response = requests.get(package_json_url)
            response.raise_for_status()

            cls.package_info_cache[package] = simplejson.loads(response.text)

        except Exception as e:
            log.debug('Could not get package info from %s: %s',
                      package_json_url, e)
            cls.package_info_cache[package] = None

    return cls.package_info_cache[package]
def all_package_versions(package):
    """ All versions for package """
    info = PyPI.package_info(package)
    return info and sorted(info['releases'].keys(),
                           key=lambda x: x.split(),
                           reverse=True) or []
def insert(self, name, index, value):
    """Insert a value at the passed index in the named header."""
    return self._sequence[name].insert(index, value)
def close(self):
    """Flush and close the stream.

    This is called automatically by the base resource on resources unless
    the resource is operating asynchronously; in that case, this method
    MUST be called in order to signal the end of the request. If not, the
    request will simply hang as it is waiting for some thread to tell it
    to return to the client.
    """
    # Ensure we're not closed.
    self.require_not_closed()

    if not self.streaming or self.asynchronous:
        # We're not streaming; auto-write content-length if not
        # already set.
        if 'Content-Length' not in self.headers:
            self.headers['Content-Length'] = self.tell()

    # Flush out the current buffer.
    self.flush()

    # We're done with the response; inform the HTTP connector
    # to close the response stream.
    self._closed = True
def write(self, chunk, serialize=False, format=None):
    """Writes the given chunk to the output buffer.

    @param[in] chunk
        Either a byte array, a unicode string, or a generator. If `chunk`
        is a generator then calling `self.write(<generator>)` is
        equivalent to:

        @code
        for x in <generator>:
            self.write(x)
            self.flush()
        @endcode

    @param[in] serialize
        True to serialize the lines in a determined serializer.

    @param[in] format
        A specific format to serialize in; if provided, no detection is
        done. If not provided, the accept header (as well as the URL
        extension) is looked at to determine an appropriate serializer.
    """
    # Ensure we're not closed.
    self.require_not_closed()

    if chunk is None:
        # There is nothing here.
        return

    if serialize or format is not None:
        # Forward to the serializer to serialize the chunk
        # before it gets written to the response.
        self.serialize(chunk, format=format)
        return  # `serialize` invokes write(...)

    if type(chunk) is six.binary_type:
        # Update the stream length.
        self._length += len(chunk)

        # If passed a byte string, we hope the user encoded it properly.
        self._stream.write(chunk)

    elif isinstance(chunk, six.string_types):
        encoding = self.encoding
        if encoding is not None:
            # If passed a string, we can encode it for the user.
            chunk = chunk.encode(encoding)
        else:
            # Bail; we don't have an encoding.
            raise exceptions.InvalidOperation(
                'Attempting to write textual data without an encoding.')

        # Update the stream length.
        self._length += len(chunk)

        # Write the encoded data into the byte stream.
        self._stream.write(chunk)

    elif isinstance(chunk, collections.Iterable):
        # If passed some kind of iterator, attempt to recurse into
        # oblivion.
        for section in chunk:
            self.write(section)

    else:
        # Bail; we have no idea what to do with this.
        raise exceptions.InvalidOperation(
            'Attempting to write something not recognized.')
def serialize(self, data, format=None):
    """Serializes the data into this response using a serializer.

    @param[in] data
        The data to be serialized.

    @param[in] format
        A specific format to serialize in; if provided, no detection is
        done. If not provided, the accept header (as well as the URL
        extension) is looked at to determine an appropriate serializer.

    @returns
        A tuple of the serialized text and an instance of the
        serializer used.
    """
    return self._resource.serialize(data, response=self, format=format)
def flush(self):
    """Flush the write buffers of the stream.

    This results in writing the current contents of the write buffer to
    the transport layer, initiating the HTTP/1.1 response. This initiates
    a streaming response. If the `Content-Length` header is not given
    then the chunked `Transfer-Encoding` is applied.
    """
    # Ensure we're not closed.
    self.require_not_closed()

    # Pull out the accumulated chunk.
    chunk = self._stream.getvalue()
    self._stream.truncate(0)
    self._stream.seek(0)

    # Append the chunk to the body.
    self.body = chunk if (self._body is None) else (self._body + chunk)

    if self.asynchronous:
        # We are now streaming because we're asynchronous.
        self.streaming = True
def send(self, *args, **kwargs):
    """Writes the passed chunk and flushes it to the client."""
    self.write(*args, **kwargs)
    self.flush()
def end(self, *args, **kwargs):
    """
    Writes the passed chunk, flushes it to the client, and terminates
    the connection.
    """
    self.send(*args, **kwargs)
    self.close()
def insert(self, name, index, value):
    """Insert a value at the passed index in the named header."""
    return self.headers.insert(index, value)
def create_project(self):
    """ Creates a base Flask project """
    if os.path.exists(self._py):
        prj_dir = os.path.join(self._app_dir, self._project_name)
        if os.path.exists(prj_dir):
            if self._force:
                logging.warn('Removing existing project')
                shutil.rmtree(prj_dir)
            else:
                logging.warn('Found existing project; not creating '
                             '(use --force to overwrite)')
                return
        logging.info('Creating project')
        os.makedirs(prj_dir)
        # create the flask project stub
        app = (
            '#!/usr/bin/env python\n'
            'from flask import Flask\n'
            'app = Flask(__name__)\n\n'
            '@app.route("/")\n'
            'def hello():\n'
            '    return "Hello from Flask..."\n\n'
            'if __name__ == "__main__":\n'
            '    app.run()\n\n'
        )
        with open(os.path.join(prj_dir, 'app.py'), 'w') as f:
            f.write(app)
    else:
        logging.error('Unable to find Python interpreter in virtualenv')
        return
def replaced_directory(dirname):
    """This ``Context Manager`` is used to move the contents of a directory
    elsewhere temporarily and put them back upon exit. This allows testing
    code to use the same file directories as normal code without fear of
    damage.

    The name of the temporary directory which contains your files is
    yielded.

    :param dirname:
        Path name of the directory to be replaced.

    Example:

    .. code-block:: python

        with replaced_directory('/foo/bar/') as rd:
            # "/foo/bar/" has been moved & renamed
            with open('/foo/bar/thing.txt', 'w') as f:
                f.write('stuff')
                f.close()

        # got here? => "/foo/bar/" is now restored and temp has been wiped,
        # "thing.txt" is gone
    """
    if dirname[-1] == '/':
        dirname = dirname[:-1]

    full_path = os.path.abspath(dirname)
    if not os.path.isdir(full_path):
        raise AttributeError('dir_name must be a directory')

    base, name = os.path.split(full_path)

    # create a temporary directory, move the provided dir into it and
    # recreate the directory for the user
    tempdir = tempfile.mkdtemp()
    shutil.move(full_path, tempdir)
    os.mkdir(full_path)
    try:
        yield tempdir
    finally:
        # done context, undo everything
        shutil.rmtree(full_path)
        moved = os.path.join(tempdir, name)
        shutil.move(moved, base)
        shutil.rmtree(tempdir)
def capture_stdout():
    """This ``Context Manager`` redirects STDOUT to a ``StringIO`` object
    which is returned from the ``Context``. On exit STDOUT is restored.

    Example:

    .. code-block:: python

        with capture_stdout() as capture:
            print('foo')

        # got here? => capture.getvalue() will now have "foo\\n"
    """
    stdout = sys.stdout
    try:
        capture_out = StringIO()
        sys.stdout = capture_out
        yield capture_out
    finally:
        sys.stdout = stdout
def capture_stderr():
    """This ``Context Manager`` redirects STDERR to a ``StringIO`` object
    which is returned from the ``Context``. On exit STDERR is restored.

    Example:

    .. code-block:: python

        with capture_stderr() as capture:
            print('foo', file=sys.stderr)

        # got here? => capture.getvalue() will now have "foo\\n"
    """
    stderr = sys.stderr
    try:
        capture_out = StringIO()
        sys.stderr = capture_out
        yield capture_out
    finally:
        sys.stderr = stderr
def create(self, a, b, c):
    """
    .. _createoptions:

    Create an option object used to start the manager.

    :param a: The path of the config directory
    :type a: str
    :param b: The path of the user directory
    :type b: str
    :param c: The "command line" options of the openzwave library
    :type c: str

    :see: destroyoptions_
    """
    self.options = CreateOptions(
        str_to_cppstr(a), str_to_cppstr(b), str_to_cppstr(c))
    return True
def addOptionBool(self, name, value):
    """
    .. _addOptionBool:

    Add a boolean option.

    :param name: The name of the option.
    :type name: str
    :param value: The value of the option.
    :type value: boolean
    :return: The result of the operation.
    :rtype: bool

    :see: addOption_, addOptionInt_, addOptionString_
    """
    return self.options.AddOptionBool(str_to_cppstr(name), value)
def addOptionInt(self, name, value):
    """
    .. _addOptionInt:

    Add an integer option.

    :param name: The name of the option.
    :type name: str
    :param value: The value of the option.
    :type value: int
    :return: The result of the operation.
    :rtype: bool

    :see: addOption_, addOptionBool_, addOptionString_
    """
    return self.options.AddOptionInt(str_to_cppstr(name), value)
def addOptionString(self, name, value, append=False):
    """
    .. _addOptionString:

    Add a string option.

    :param name: The name of the option. Option names are case
        insensitive and must be unique.
    :type name: str
    :param value: The value of the option.
    :type value: str
    :param append: Setting append to true will cause values read from the
        command line or XML file to be concatenated into a comma delimited
        set. If _append is false, newer values will overwrite older ones.
    :type append: boolean
    :return: The result of the operation.
    :rtype: bool

    :see: addOption_, addOptionBool_, addOptionInt_
    """
    return self.options.AddOptionString(
        str_to_cppstr(name), str_to_cppstr(value), append)
def addOption(self, name, value):
    """
    .. _addOption:

    Add an option.

    :param name: The name of the option.
    :type name: string
    :param value: The value of the option.
    :type value: boolean, integer, string
    :return: The result of the operation.
    :rtype: bool

    :see: addOptionBool_, addOptionInt_, addOptionString_
    """
    if name not in PyOptionList:
        return False
    if PyOptionList[name]['type'] == "String":
        return self.addOptionString(name, value)
    elif PyOptionList[name]['type'] == "Bool":
        return self.addOptionBool(name, value)
    elif PyOptionList[name]['type'] == "Int":
        return self.addOptionInt(name, value)
    return False
def getOption(self, name):
    """
    .. _getOption:

    Retrieve the value of an option.

    :param name: The name of the option.
    :type name: string
    :return: The value, or None if the option is unknown
    :rtype: boolean, integer, string or None

    :see: getOptionAsBool_, getOptionAsInt_, getOptionAsString_
    """
    if name not in PyOptionList:
        return None
    if PyOptionList[name]['type'] == "String":
        return self.getOptionAsString(name)
    elif PyOptionList[name]['type'] == "Bool":
        return self.getOptionAsBool(name)
    elif PyOptionList[name]['type'] == "Int":
        return self.getOptionAsInt(name)
    return False
def urls(cls):
    """Builds the URL configuration for this resource."""
    return urls.patterns('', urls.url(
        r'^{}(?:$|(?P<path>[/:(.].*))'.format(cls.meta.name),
        cls.view,
        name='armet-api-{}'.format(cls.meta.name),
        kwargs={'resource': cls.meta.name}))
def dump(obj, fp, startindex=1, separator=DEFAULT, index_separator=DEFAULT):
    '''Dump an object in req format to the fp given.

    :param Mapping obj: The object to serialize. Must have a keys method.
    :param fp: A writable that can accept all the types given.
    :param separator: The separator between key and value. Defaults to
        u'|' or b'|', depending on the types.
    :param index_separator: The separator between key and index. Defaults
        to u'_' or b'_', depending on the types.
    '''
    if startindex < 0:
        raise ValueError(
            'startindex must be non-negative, but was {}'.format(startindex))

    try:
        firstkey = next(iter(obj.keys()))
    except StopIteration:
        return

    if isinstance(firstkey, six.text_type):
        converter = six.u
    else:
        converter = six.b

    default_separator = converter('|')
    default_index_separator = converter('_')
    newline = converter('\n')

    if separator is DEFAULT:
        separator = default_separator

    if index_separator is DEFAULT:
        index_separator = default_index_separator

    for key, value in six.iteritems(obj):
        if isinstance(value, (list, tuple, set)):
            for index, item in enumerate(value, start=startindex):
                fp.write(key)
                fp.write(index_separator)
                fp.write(converter(str(index)))
                fp.write(separator)
                fp.write(item)
                fp.write(newline)
        else:
            fp.write(key)
            fp.write(separator)
            fp.write(value)
            fp.write(newline)
def dumps(obj, startindex=1, separator=DEFAULT, index_separator=DEFAULT):
    '''Dump an object in req format to a string.

    :param Mapping obj: The object to serialize. Must have a keys method.
    :param separator: The separator between key and value. Defaults to
        u'|' or b'|', depending on the types.
    :param index_separator: The separator between key and index. Defaults
        to u'_' or b'_', depending on the types.
    '''
    try:
        firstkey = next(iter(obj.keys()))
    except StopIteration:
        return str()

    if isinstance(firstkey, six.text_type):
        io = StringIO()
    else:
        io = BytesIO()

    dump(
        obj=obj,
        fp=io,
        startindex=startindex,
        separator=separator,
        index_separator=index_separator,
    )
    return io.getvalue()
def load(fp, separator=DEFAULT, index_separator=DEFAULT, cls=dict,
         list_cls=list):
    '''Load an object from the file pointer.

    :param fp: A readable filehandle.
    :param separator: The separator between key and value. Defaults to
        u'|' or b'|', depending on the types.
    :param index_separator: The separator between key and index. Defaults
        to u'_' or b'_', depending on the types.
    :param cls: A callable that returns a Mapping that is filled with
        pairs. The most common alternate option would be OrderedDict.
    :param list_cls: A callable that takes an iterable and returns a
        sequence.
    '''
    converter = None
    output = cls()
    arraykeys = set()

    for line in fp:
        if converter is None:
            if isinstance(line, six.text_type):
                converter = six.u
            else:
                converter = six.b

            default_separator = converter('|')
            default_index_separator = converter('_')
            newline = converter('\n')

            if separator is DEFAULT:
                separator = default_separator

            if index_separator is DEFAULT:
                index_separator = default_index_separator

        key, value = line.strip().split(separator, 1)
        keyparts = key.split(index_separator)

        try:
            index = int(keyparts[-1])
            endwithint = True
        except ValueError:
            endwithint = False

        # We do everything in-place to ensure that we maintain order when
        # using an OrderedDict.
        if len(keyparts) > 1 and endwithint:
            # If this is an array key
            basekey = key.rsplit(index_separator, 1)[0]
            if basekey not in arraykeys:
                arraykeys.add(basekey)

            if basekey in output:
                # If key already exists as non-array, fix it
                if not isinstance(output[basekey], dict):
                    output[basekey] = {-1: output[basekey]}
            else:
                output[basekey] = {}

            output[basekey][index] = value
        else:
            if key in output and isinstance(output[key], dict):
                output[key][-1] = value
            else:
                output[key] = value

    # Convert array keys
    for key in arraykeys:
        output[key] = list_cls(
            pair[1] for pair in sorted(six.iteritems(output[key])))

    return output
def loads(s, separator=DEFAULT, index_separator=DEFAULT, cls=dict,
          list_cls=list):
    '''Loads an object from a string.

    :param s: An object to parse
    :type s: bytes or str
    :param separator: The separator between key and value. Defaults to
        u'|' or b'|', depending on the types.
    :param index_separator: The separator between key and index. Defaults
        to u'_' or b'_', depending on the types.
    :param cls: A callable that returns a Mapping that is filled with
        pairs. The most common alternate option would be OrderedDict.
    :param list_cls: A callable that takes an iterable and returns a
        sequence.
    '''
    if isinstance(s, six.text_type):
        io = StringIO(s)
    else:
        io = BytesIO(s)

    return load(
        fp=io,
        separator=separator,
        index_separator=index_separator,
        cls=cls,
        list_cls=list_cls,
    )
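A round-trip sketch of the key|value wire format that dump/dumps/load/loads implement (illustrative; list values are written with indexed keys such as tag_1, tag_2):

data = {'name': 'demo', 'tag': ['a', 'b']}
text = dumps(data)  # e.g. 'name|demo\ntag_1|a\ntag_2|b\n'
print(loads(text))  # {'name': 'demo', 'tag': ['a', 'b']}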
def bump():
    """ CLI entry point to bump requirements in requirements.txt or
    pinned.txt """
    parser = argparse.ArgumentParser(description=bump.__doc__)

    parser.add_argument('names', nargs='*', help="""
        Only bump dependencies that match the name.
        Name can be a product group name defined in workspace.cfg.
        To bump to a specific version instead of latest, append the version
        to the name (i.e. requests==1.2.3 or 'requests>=1.2.3'). When > or <
        is used, be sure to quote.""")
    parser.add_argument(
        '--add', '--require', action='store_true',
        help='Add the `names` to the requirements file if they don\'t exist.')
    parser.add_argument(
        '--file',
        help='Requirement file to bump. Defaults to requirements.txt and '
             'pinned.txt')
    parser.add_argument(
        '--force', action='store_true',
        help='Force a bump even when certain bump requirements are not met.')
    parser.add_argument(
        '-d', '--detail', '--dependencies', action='store_true',
        help='If available, show detailed changes. For pinned.txt, pin '
             'parsed dependency requirements from changes')
    parser.add_argument(
        '-n', '--dry-run', action='store_true',
        help='Perform a dry run without making changes')
    parser.add_argument('--debug', action='store_true',
                        help='Turn on debug mode')

    args = parser.parse_args()
    targets = [args.file] if args.file else ['requirements.txt', 'pinned.txt']

    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=level, format='[%(levelname)s] %(message)s')

    try:
        bumper = BumperDriver(targets, full_throttle=args.force,
                              detail=args.detail, test_drive=args.dry_run)
        bumper.bump(args.names, required=args.add, show_detail=args.detail)
    except Exception as e:
        if args.debug:
            raise
        else:
            log.error(e)
            sys.exit(1)
def bump(self, filter_requirements, required=False, show_summary=True,
         show_detail=False, **kwargs):
    """ Bump dependency requirements using filter.

    :param list filter_requirements: List of dependency filter
        requirements.
    :param bool required: Require the filter_requirements to be met
        (by adding if possible).
    :param bool show_summary: Show summary for each bump made.
    :param bool show_detail: Show detail for each bump made if available.
    :return: Tuple with two elements: Dict of target file to bump message,
        List of :class:`Bump`
    :raise BumpAccident: for any bump errors
    """
    found_targets = [target for target in self.targets
                     if os.path.exists(target)]

    if not found_targets:
        raise BumpAccident('None of the requirement file(s) were found: %s'
                           % ', '.join(self.targets))

    bump_reqs = RequirementsManager()

    if filter_requirements:
        requirements = parse_requirements(filter_requirements)
        bump_reqs.add(requirements, required=required)

    try:
        for target in found_targets:
            log.debug('Target: %s', target)

            target_bumpers = []
            target_bump_reqs = RequirementsManager(bump_reqs)
            loops = 0

            while True:
                # Insurance to ensure that we don't get stuck forever.
                loops += 1
                if loops > 5:
                    log.debug('Too many transitive bump loops. Bailing out.')
                    break

                if not target_bumpers:
                    target_bumpers = [
                        model(target, detail=self.detail,
                              test_drive=self.test_drive)
                        for model in self.bumper_models
                        if model.likes(target)]

                    if not target_bumpers:
                        log.debug('No bumpers found that can bump %s. '
                                  'Defaulting to %s',
                                  target, self.default_model)
                        target_bumpers = [
                            self.default_model(target, detail=self.detail,
                                               test_drive=self.test_drive)]

                    self.bumpers.extend(target_bumpers)

                new_target_bump_reqs = RequirementsManager()

                for bumper in target_bumpers:
                    target_bumps = bumper.bump(target_bump_reqs)
                    self.bumps.update(dict((b.name, b)
                                           for b in target_bumps))

                    for bump in target_bumps:
                        for new_req in bump.requirements:
                            if not (bump_reqs.satisfied_by_checked(new_req) or
                                    target_bump_reqs.satisfied_by_checked(
                                        new_req)):
                                new_target_bump_reqs.add(new_req)

                bump_reqs.matched_name |= target_bump_reqs.matched_name
                bump_reqs.checked.extend(target_bump_reqs.checked)

                if new_target_bump_reqs:
                    bump_reqs.add(new_target_bump_reqs)

                target_bump_reqs = RequirementsManager(list(
                    r for r in new_target_bump_reqs
                    if r.project_name not in self.bumps))

                if not target_bump_reqs:
                    break

        if not self.bumpers:
            raise BumpAccident('No bumpers found for %s'
                               % ', '.join(found_targets))

        if bump_reqs and not bump_reqs.matched_name:
            raise BumpAccident(
                'None of the provided filter names were found in %s'
                % ', '.join(found_targets))

        if self.bumps:
            for bump in self.bumps.values():
                bump_reqs.check(bump)

            for reqs in bump_reqs.required_requirements().values():
                for req in reqs:
                    if not self.full_throttle:
                        use_force = ('Use --force to ignore / force the bump'
                                     if req.required_by else '')
                        raise BumpAccident(
                            'Requirement "%s" could not be met so bump '
                            'can not proceed. %s' % (req, use_force))

            if self.test_drive:
                log.info("Changes that would be made:\n")

            messages = {}

            for bumper in self.bumpers:
                if bumper.bumps:
                    if not self.test_drive:
                        bumper.update_requirements()

                    if self.test_drive or show_summary:
                        msg = bumper.bump_message(self.test_drive or
                                                  show_detail)

                        if self.test_drive:
                            print(msg)
                        else:
                            rewords = [
                                ('Bump ', 'Bumped '),
                                ('Pin ', 'Pinned '),
                                ('Require ', 'Updated requirements: ')]
                            for word, new_word in rewords:
                                if msg.startswith(word):
                                    msg = msg.replace(word, new_word, 1)
                                    break
                            log.info(msg)

                    messages[bumper.target] = bumper.bump_message(True)

            return messages, self.bumps

        else:
            log.info('No need to bump. Everything is up to date!')
            return {}, []

    except Exception:
        if not self.test_drive and self.bumps:
            # Explicit loop instead of map(), which is lazy on Python 3 and
            # would never actually run the reversals.
            for bumper in self.bumpers:
                bumper.reverse()
        raise
def reverse(self):
    """ Reverse all bumpers """
    if not self.test_drive and self.bumps:
        # Explicit loop instead of map(), which would be lazy on Python 3.
        for bumper in self.bumpers:
            bumper.reverse()
def _expand_targets(self, targets, base_dir=None):
    """ Expand targets by looking for '-r' in targets. """
    all_targets = []

    for target in targets:
        target_dirs = [p for p in [base_dir, os.path.dirname(target)] if p]
        target_dir = target_dirs and os.path.join(*target_dirs) or ''
        target = os.path.basename(target)
        target_path = os.path.join(target_dir, target)

        if os.path.exists(target_path):
            all_targets.append(target_path)

            with open(target_path) as fp:
                for line in fp:
                    if line.startswith('-r '):
                        _, new_target = line.split(' ', 1)
                        all_targets.extend(self._expand_targets(
                            [new_target.strip()], base_dir=target_dir))

    return all_targets
def get_nginx_config(self):
    """ Gets the Nginx config for the project """
    if os.path.exists(self._nginx_config):
        with open(self._nginx_config, 'r') as f:
            return f.read()
    else:
        return None
def check_directories(self):
    """ Creates base directories for app, virtualenv, and nginx """
    self.log.debug('Checking directories')

    for d in (self._ve_dir, self._app_dir, self._conf_dir, self._var_dir,
              self._log_dir, self._script_dir):
        if not os.path.exists(d):
            os.makedirs(d)

    # copy uwsgi_params for nginx
    uwsgi_params = '/etc/nginx/uwsgi_params'
    if os.path.exists(uwsgi_params):
        shutil.copy(uwsgi_params, self._conf_dir)
    else:
        logging.warning('Unable to find Nginx uwsgi_params. '
                        'You must manually copy this to {0}.'.format(self._conf_dir))

    # copy mime.types for nginx
    mime_types = '/etc/nginx/mime.types'
    if os.path.exists(mime_types):
        shutil.copy(mime_types, self._conf_dir)
        self._include_mimetypes = True
    else:
        logging.warning('Unable to find mime.types for Nginx. '
                        'You must manually copy this to {0}.'.format(self._conf_dir))
def create_virtualenv(self):
    """ Creates the virtualenv for the project """
    if check_command('virtualenv'):
        ve_dir = os.path.join(self._ve_dir, self._project_name)

        if os.path.exists(ve_dir):
            if self._force:
                logging.warning('Removing existing virtualenv')
                shutil.rmtree(ve_dir)
            else:
                logging.warning('Found existing virtualenv; not creating (use --force to overwrite)')
                return

        logging.info('Creating virtualenv')
        p = subprocess.Popen('virtualenv --no-site-packages {0} > /dev/null'.format(ve_dir),
                             shell=True)
        os.waitpid(p.pid, 0)

        # install modules
        pip = os.path.join(ve_dir, 'bin', 'pip')
        for m in self._modules:
            self.log.info('Installing module {0}'.format(m))
            p = subprocess.Popen('{0} install {1} > /dev/null'.format(pip, m), shell=True)
            os.waitpid(p.pid, 0)
def create_nginx_config(self):
    """ Creates the Nginx configuration for the project """
    cfg = '# nginx config for {0}\n'.format(self._project_name)

    if not self._shared_hosting:
        # user
        if self._user:
            cfg += 'user {0};\n'.format(self._user)

        # misc nginx config
        cfg += 'worker_processes 1;\nerror_log {0}-errors.log;\n' \
               'pid {1}_nginx.pid;\n\n'.format(
                   os.path.join(self._log_dir, self._project_name),
                   os.path.join(self._var_dir, self._project_name))
        cfg += 'events {\n\tworker_connections 32;\n}\n\n'

        # http section
        cfg += 'http {\n'

        if self._include_mimetypes:
            cfg += '\tinclude mime.types;\n'

        cfg += '\tdefault_type application/octet-stream;\n'
        cfg += '\tclient_max_body_size 1G;\n'
        cfg += '\tproxy_max_temp_file_size 0;\n'
        cfg += '\tproxy_buffering off;\n'
        cfg += '\taccess_log {0}-access.log;\n'.format(
            os.path.join(self._log_dir, self._project_name))
        cfg += '\tsendfile on;\n'
        cfg += '\tkeepalive_timeout 65;\n'

    # server section
    cfg += '\tserver {\n'
    cfg += '\t\tlisten 0.0.0.0:{0};\n'.format(self._port)

    if self._server_name:
        cfg += '\t\tserver_name {0};\n'.format(self._server_name)

    # location section
    cfg += '\t\tlocation / {\n'
    cfg += '\t\t\tuwsgi_pass unix:///{0}.sock;\n'.format(
        os.path.join(self._var_dir, self._project_name))
    cfg += '\t\t\tinclude uwsgi_params;\n'
    # end location
    cfg += '\t\t}\n\n'

    # error page templates
    cfg += '\t\terror_page 500 502 503 504 /50x.html;\n'
    cfg += '\t\tlocation = /50x.html {\n'
    cfg += '\t\t\troot html;\n'
    # end error page section
    cfg += '\t\t}\n'
    # end server section
    cfg += '\t}\n'

    if not self._shared_hosting:
        # end http section
        cfg += '}\n'

    # create conf
    with open(self._nginx_config, 'w') as f:
        f.write(cfg)
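# For a hypothetical standalone (non-shared) project named 'myapp' on port
# 8000, with log/var dirs under /srv/myapp, the generated file would look
# roughly like this (paths illustrative, some directives elided):
#
#   # nginx config for myapp
#   worker_processes 1;
#   error_log /srv/myapp/log/myapp-errors.log;
#   pid /srv/myapp/var/myapp_nginx.pid;
#
#   events {
#       worker_connections 32;
#   }
#
#   http {
#       include mime.types;
#       ...
#       server {
#           listen 0.0.0.0:8000;
#           location / {
#               uwsgi_pass unix:///srv/myapp/var/myapp.sock;
#               include uwsgi_params;
#           }
#       }
#   }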
def create_manage_scripts(self):
    """ Creates scripts to start and stop the application """
    # create start script
    start = '# start script for {0}\n\n'.format(self._project_name)
    # start uwsgi
    start += 'echo \'Starting uWSGI...\'\n'
    start += 'sh {0}.uwsgi\n'.format(os.path.join(self._conf_dir, self._project_name))
    start += 'sleep 1\n'
    # start nginx
    start += 'echo \'Starting Nginx...\'\n'
    start += 'nginx -c {0}_nginx.conf\n'.format(os.path.join(self._conf_dir, self._project_name))
    start += 'sleep 1\n'
    start += 'echo \'{0} started\'\n\n'.format(self._project_name)

    # stop script
    stop = '# stop script for {0}\n\n'.format(self._project_name)
    # stop nginx
    stop += 'if [ -e {0}_nginx.pid ]; then nginx -c {1}_nginx.conf -s stop ; fi\n'.format(
        os.path.join(self._var_dir, self._project_name),
        os.path.join(self._conf_dir, self._project_name))
    # stop uwsgi
    stop += 'if [ -e {0}_uwsgi.pid ]; then kill -9 `cat {0}_uwsgi.pid` ; ' \
            'rm {0}_uwsgi.pid > /dev/null 2>&1 ; fi\n'.format(
                os.path.join(self._var_dir, self._project_name))
    stop += 'echo \'{0} stopped\'\n'.format(self._project_name)

    # write scripts
    start_file = '{0}_start.sh'.format(os.path.join(self._script_dir, self._project_name))
    stop_file = '{0}_stop.sh'.format(os.path.join(self._script_dir, self._project_name))

    with open(start_file, 'w') as f:
        f.write(start)
    with open(stop_file, 'w') as f:
        f.write(stop)

    # make executable
    os.chmod(start_file, 0o754)
    os.chmod(stop_file, 0o754)
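# For the same hypothetical 'myapp' project, the generated stop script would
# contain guarded shutdown lines along these lines (paths illustrative):
#
#   if [ -e /srv/myapp/var/myapp_nginx.pid ]; then nginx -c /srv/myapp/conf/myapp_nginx.conf -s stop ; fi
#   if [ -e /srv/myapp/var/myapp_uwsgi.pid ]; then kill -9 `cat /srv/myapp/var/myapp_uwsgi.pid` ; rm /srv/myapp/var/myapp_uwsgi.pid > /dev/null 2>&1 ; fi
#
# Guarding on the pid files keeps the script idempotent: running stop twice
# in a row is harmless.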
def create(self):
    """ Creates the full project """
    # create virtualenv
    self.create_virtualenv()
    # create project
    self.create_project()
    # generate uwsgi script
    self.create_uwsgi_script()
    # generate nginx config
    self.create_nginx_config()
    # generate management scripts
    self.create_manage_scripts()
    logging.info('** Make sure to set proper permissions for the webserver '
                 'user account on the var and log directories in the project root')
def dasherize(value):
    """Dasherizes the passed value."""
    value = value.strip()
    value = re.sub(r'([A-Z])', r'-\1', value)
    value = re.sub(r'[-_\s]+', r'-', value)
    value = re.sub(r'^-', r'', value)
    value = value.lower()
    return value
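# A few worked examples of the transformation above (strip, dash before
# capitals, collapse separators, drop leading dash, lowercase); these rely
# only on the function just defined:
assert dasherize('  CamelCaseValue ') == 'camel-case-value'
assert dasherize('snake_case value') == 'snake-case-value'
assert dasherize('already-dashed') == 'already-dashed'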
def redirect(cls, request, response):
    """Redirect to the canonical URI for this resource."""
    if cls.meta.legacy_redirect:
        if request.method in ('GET', 'HEAD',):
            # A SAFE request is allowed to redirect using a 301
            response.status = http.client.MOVED_PERMANENTLY
        else:
            # All other requests must use a 307
            response.status = http.client.TEMPORARY_REDIRECT
    else:
        # Modern redirects are allowed. Let's have some fun.
        # Hopefully your client supports this.
        # The RFC explicitly discourages User-Agent sniffing.
        response.status = http.client.PERMANENT_REDIRECT

    # Terminate the connection.
    response.close()
def view(cls, request, response):
    """
    Entry-point of the request / response cycle; handles resource
    creation and delegation.

    @param[in] request
        The HTTP request object; contains accessors for information
        about the request.

    @param[in] response
        The HTTP response object; contains accessors for modifying
        the information that will be sent to the client.
    """
    # Determine if we need to redirect.
    test = cls.meta.trailing_slash
    if test ^ request.path.endswith('/'):
        # Construct a new URL by removing or adding the trailing slash.
        path = request.path + '/' if test else request.path[:-1]
        response['Location'] = '{}://{}{}{}{}'.format(
            request.protocol.lower(),
            request.host,
            request.mount_point,
            path,
            '?' + request.query if request.query else '')

        # Redirect to the version with the correct trailing slash.
        return cls.redirect(request, response)

    try:
        # Instantiate the resource.
        obj = cls(request, response)

        # Bind the request and response objects to the constructed
        # resource.
        request.bind(obj)
        response.bind(obj)

        # Bind the request object to the resource.
        # This is used to facilitate the serializer and deserializer.
        obj._request = request

        # Initiate the dispatch cycle and handle its result on
        # synchronous requests.
        result = obj.dispatch(request, response)

        if not response.asynchronous:
            # There are several things that dispatch is allowed to return.
            if (isinstance(result, collections.Iterable) and
                    not isinstance(result, six.string_types)):
                # Return the stream generator.
                return cls.stream(response, result)
            else:
                # Leave it up to the response to throw or write whatever
                # we got back.
                response.end(result)
                if response.body:
                    # Return the body if there was any set.
                    return response.body

    except http.exceptions.BaseHTTPException as e:
        # Something that we can handle and return properly happened.
        # Set response properties from the exception.
        response.status = e.status
        response.headers.update(e.headers)

        if e.content:
            # Write the exception body if present and close
            # the response.
            # TODO: Use the plain-text encoder.
            response.send(e.content, serialize=True, format='json')

        # Terminate the connection and return the body.
        response.close()
        if response.body:
            return response.body

    except Exception:
        # Something unexpected happened.
        # Log error message to the logger.
        logger.exception('Internal server error')

        # Write a debug message for the client.
        if not response.streaming and not response.closed:
            response.status = http.client.INTERNAL_SERVER_ERROR
            response.headers.clear()
            response.close()
def parse(cls, path):
    """Parses out parameters and separates them out of the path.

    This uses one of the many defined patterns on the options class.
    But, it defaults to a no-op if there are no defined patterns.
    """
    # Iterate through the available patterns.
    for resource, pattern in cls.meta.patterns:
        # Attempt to match the path.
        match = re.match(pattern, path)
        if match is not None:
            # Found something.
            return resource, match.groupdict(), match.string[match.end():]

    # No patterns at all; return unsuccessful.
    return None if not cls.meta.patterns else False
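# Sketch of the per-pattern mechanics with plain `re`, mirroring what
# parse() does for each entry in cls.meta.patterns (the pattern here is
# hypothetical):
import re

pattern = r'^/(?P<id>\d+)'
match = re.match(pattern, '/42/comments')
if match is not None:
    params = match.groupdict()           # {'id': '42'}
    rest = match.string[match.end():]    # '/comments'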
def traverse(cls, request, params=None):
    """Traverses down the path and determines the accessed resource.

    This makes use of the patterns array to implement simple traversal.
    This defaults to a no-op if there are no defined patterns.
    """
    # Attempt to parse the path using a pattern.
    result = cls.parse(request.path)
    if result is None:
        # No parsing was requested; no-op.
        return cls, {}

    elif not result:
        # Parsing failed; raise 404.
        raise http.exceptions.NotFound()

    # Partition out the result.
    resource, data, rest = result

    if params:
        # Append params to data.
        data.update(params)

    if resource is None:
        # No traversal; return parameters.
        return cls, data

    # Modify the path appropriately.
    if data.get('path') is not None:
        request.path = data.pop('path')
    elif rest is not None:
        request.path = rest

    # Send us through traversal again.
    result = resource.traverse(request, params=data)
    return result
def stream(cls, response, sequence):
    """
    Helper method used in conjunction with the view handler to
    stream responses to the client.
    """
    # Construct the iterator and run the sequence once in order
    # to capture any headers and status codes set.
    iterator = iter(sequence)
    data = {'chunk': next(iterator)}
    response.streaming = True

    def streamer():
        # Iterate through the iterator and yield its content.
        while True:
            if response.asynchronous:
                # Yield our current chunk.
                yield data['chunk']
            else:
                # Write the chunk to the response.
                response.send(data['chunk'])

                # Yield its body.
                yield response.body

                # Unset the body.
                response.body = None

            try:
                # Get the next chunk.
                data['chunk'] = next(iterator)
            except StopIteration:
                # Get out of the loop.
                break

        if not response.asynchronous:
            # Close the response.
            response.close()

    # Return the streaming function.
    return streamer()
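# Sketch of what triggers streaming: per view() above, a handler that
# returns a non-string iterable is wrapped by stream(). A resource method
# (named after the HTTP verb, per the dispatch docstring below) could be:
#
#   def get(self):
#       def chunks():
#           yield b'first chunk'
#           yield b'second chunk'
#       return chunks()
#
# The first chunk is pulled eagerly so that status and headers set by the
# handler are captured before streaming begins.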
def deserialize(self, request=None, text=None, format=None):
    """Deserializes the text using a determined deserializer.

    @param[in] request
        The request object to pull information from; normally used to
        determine the deserialization format (when `format` is not
        provided).

    @param[in] text
        The text to be deserialized. Can be left blank and the
        request will be read.

    @param[in] format
        A specific format to deserialize in; if provided, no detection
        is done. If not provided, the content-type header is looked at
        to determine an appropriate deserializer.

    @returns
        A tuple of the deserialized data and an instance of the
        deserializer used.
    """
    if isinstance(self, Resource):
        if not request:
            # Ensure we have a request object.
            request = self._request

    Deserializer = None
    if format:
        # An explicit format was given; do not attempt to auto-detect
        # a deserializer.
        Deserializer = self.meta.deserializers[format]

    if not Deserializer:
        # Determine an appropriate deserializer to use by
        # introspecting the request object and looking at
        # the `Content-Type` header.
        media_ranges = request.get('Content-Type')
        if media_ranges:
            # Parse the media ranges and determine the deserializer
            # that is the closest match.
            media_types = six.iterkeys(self._deserializer_map)
            media_type = mimeparse.best_match(media_types, media_ranges)
            if media_type:
                format = self._deserializer_map[media_type]
                Deserializer = self.meta.deserializers[format]

        else:
            # Client didn't provide a content-type; we're supposed
            # to auto-detect.
            # TODO: Implement this.
            pass

    if Deserializer:
        try:
            # Attempt to deserialize the data using the determined
            # deserializer.
            deserializer = Deserializer()
            data = deserializer.deserialize(request=request, text=text)
            return data, deserializer

        except ValueError:
            # Failed to deserialize the data.
            pass

    # Failed to determine a deserializer; or failed to deserialize.
    raise http.exceptions.UnsupportedMediaType()
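# The content negotiation above leans on mimeparse.best_match, which picks
# the closest supported media type for a media-range header. A small
# self-contained example (the supported types are illustrative):
import mimeparse

supported = ['application/json', 'application/x-yaml']
best = mimeparse.best_match(supported, 'application/json; charset=utf-8')
# best == 'application/json'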
def serialize(self, data, response=None, request=None, format=None):
    """Serializes the data using a determined serializer.

    @param[in] data
        The data to be serialized.

    @param[in] response
        The response object to serialize the data to.
        If this method is invoked as an instance method, the response
        object can be omitted and it will be taken from the instance.

    @param[in] request
        The request object to pull information from; normally used to
        determine the serialization format (when `format` is not
        provided). May be used by some serializers as well to pull
        additional headers. If this method is invoked as an instance
        method, the request object can be omitted and it will be taken
        from the instance.

    @param[in] format
        A specific format to serialize in; if provided, no detection is
        done. If not provided, the accept header (as well as the URL
        extension) is looked at to determine an appropriate serializer.

    @returns
        A tuple of the serialized text and an instance of the
        serializer used.
    """
    if isinstance(self, Resource):
        if not request:
            # Ensure we have a request object.
            request = self._request

    Serializer = None
    if format:
        # An explicit format was given; do not attempt to auto-detect
        # a serializer.
        Serializer = self.meta.serializers[format]

    if not Serializer:
        # Determine an appropriate serializer to use by
        # introspecting the request object and looking at the `Accept`
        # header.
        media_ranges = (request.get('Accept') or '*/*').strip()

        if not media_ranges:
            # Default the media ranges to */*.
            media_ranges = '*/*'

        if media_ranges != '*/*':
            # Parse the media ranges and determine the serializer
            # that is the closest match.
            media_types = six.iterkeys(self._serializer_map)
            media_type = mimeparse.best_match(media_types, media_ranges)
            if media_type:
                format = self._serializer_map[media_type]
                Serializer = self.meta.serializers[format]

        else:
            # Client indicated no preference; use the default.
            default = self.meta.default_serializer
            Serializer = self.meta.serializers[default]

    if Serializer:
        try:
            # Attempt to serialize the data using the determined
            # serializer.
            serializer = Serializer(request, response)
            return serializer.serialize(data), serializer

        except ValueError:
            # Failed to serialize the data.
            pass

    # Either failed to determine a serializer or failed to serialize
    # the data; construct a list of available and valid encoders.
    available = {}
    for name in self.meta.allowed_serializers:
        Serializer = self.meta.serializers[name]
        instance = Serializer(request, None)
        if instance.can_serialize(data):
            available[name] = Serializer.media_types[0]

    # Raise a Not Acceptable exception.
    raise http.exceptions.NotAcceptable(available)
def _process_cross_domain_request(cls, request, response):
    """Facilitate Cross-Origin Resource Sharing (CORS) requests."""
    # Step 1
    # Check for an Origin header.
    origin = request.get('Origin')
    if not origin:
        return

    # Step 2
    # Check if the origin is in the list of allowed origins.
    if not (origin in cls.meta.http_allowed_origins or
            '*' == cls.meta.http_allowed_origins):
        return

    # Step 3
    # Try to parse the Request-Method header if it exists.
    method = request.get('Access-Control-Request-Method')
    if method and method not in cls.meta.http_allowed_methods:
        return

    # Step 4
    # Try to parse the Request-Headers header if it exists.
    headers = request.get('Access-Control-Request-Headers', ())
    if headers:
        headers = [h.strip() for h in headers.split(',')]

    # Step 5
    # Check if the headers are allowed on this resource.
    allowed_headers = [h.lower() for h in cls.meta.http_allowed_headers]
    if any(h.lower() not in allowed_headers for h in headers):
        return

    # Step 6
    # Always add the origin.
    response['Access-Control-Allow-Origin'] = origin

    # TODO: Check if we can provide credentials.
    response['Access-Control-Allow-Credentials'] = 'true'

    # Step 7
    # TODO: Optionally add a Max-Age header.

    # Step 8
    # Add the allowed methods.
    allowed_methods = ', '.join(cls.meta.http_allowed_methods)
    response['Access-Control-Allow-Methods'] = allowed_methods

    # Step 9
    # Add any allowed headers.
    allowed_headers = ', '.join(cls.meta.http_allowed_headers)
    if allowed_headers:
        response['Access-Control-Allow-Headers'] = allowed_headers

    # Step 10
    # Add any exposed headers.
    exposed_headers = ', '.join(cls.meta.http_exposed_headers)
    if exposed_headers:
        response['Access-Control-Expose-Headers'] = exposed_headers
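# Walking a hypothetical preflight through the steps above: a browser sends
#
#   OPTIONS /resource
#   Origin: https://app.example.com
#   Access-Control-Request-Method: POST
#   Access-Control-Request-Headers: Content-Type
#
# and, provided the origin, method, and headers pass steps 2-5, the
# response gains roughly:
#
#   Access-Control-Allow-Origin: https://app.example.com
#   Access-Control-Allow-Credentials: true
#   Access-Control-Allow-Methods: GET, POST, ...   (from http_allowed_methods)
#   Access-Control-Allow-Headers: Content-Type, ...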
def dispatch(self, request, response):
    """Entry-point of the dispatch cycle for this resource.

    Performs common work such as authentication, decoding, etc. before
    handing complete control of the result to a function with the
    same name as the request method.
    """
    # Assert authentication and attempt to get a valid user object.
    self.require_authentication(request)

    # Assert accessibility of the resource in question.
    self.require_accessibility(request.user, request.method)

    # Facilitate CORS by applying various headers.
    # This must be done on every request.
    # TODO: Provide cross_domain configuration that turns this off.
    self._process_cross_domain_request(request, response)

    # Route the HTTP/1.1 request to an appropriate method.
    return self.route(request, response)
def require_authentication(self, request):
    """Ensure we are authenticated."""
    request.user = user = None

    if request.method == 'OPTIONS':
        # Authentication should not be checked on an OPTIONS request.
        return

    for auth in self.meta.authentication:
        user = auth.authenticate(request)
        if user is False:
            # Authentication protocol failed to authenticate;
            # pass the baton.
            continue

        if user is None and not auth.allow_anonymous:
            # Authentication protocol determined the user is
            # unauthenticated.
            auth.unauthenticated()

        # Authentication protocol determined the user is indeed
        # authenticated (or anonymous access is permitted); store the
        # user for later reference.
        request.user = user
        return

    if not user and not auth.allow_anonymous:
        # No authenticated user found and the last protocol doesn't
        # allow anonymous users.
        auth.unauthenticated()
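# A minimal sketch of the authentication-protocol interface this loop
# assumes, inferred from the calls above (the real base class is not shown
# in this excerpt; every name below is illustrative):
class TokenAuthentication(object):
    allow_anonymous = False

    def authenticate(self, request):
        # Return a user object on success, None when no credentials were
        # presented, or False to pass the baton to the next protocol.
        token = request.get('Authorization')
        return find_user_by_token(token) if token else None  # hypothetical lookup

    def unauthenticated(self):
        # Expected to raise a 401-style exception understood by view().
        raise http.exceptions.Unauthorized()  # assumed exception name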
def require_accessibility(self, user, method):
    """Ensure we are allowed to access this resource."""
    if method == 'OPTIONS':
        # Authorization should not be checked on an OPTIONS request.
        return

    authz = self.meta.authorization
    if not authz.is_accessible(user, method, self):
        # User is not authorized; raise an appropriate message.
        authz.unaccessible()