def count_cycles(series, ndigits=None, left=False, right=False):
"""Count cycles in the series.
Parameters
----------
series : iterable sequence of numbers
ndigits : int, optional
Round cycle magnitudes to the given number of digits before counting.
    left : bool, optional
If True, treat the first point in the series as a reversal.
    right : bool, optional
If True, treat the last point in the series as a reversal.
Returns
-------
A sorted list containing pairs of cycle magnitude and count.
One-half cycles are counted as 0.5, so the returned counts may not be
whole numbers.
"""
counts = defaultdict(float)
round_ = _get_round_function(ndigits)
for low, high, mult in extract_cycles(series, left=left, right=right):
delta = round_(abs(high - low))
counts[delta] += mult
return sorted(counts.items()) |
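# Hedged usage sketch, not part of the source above: it assumes count_cycles is
# importable from a rainflow-style module (the module name "rainflow" is an
# assumption). Half cycles contribute 0.5, so the totals may be fractional.
from rainflow import count_cycles

series = [-2, 1, -3, 5, -1, 3, -4, 4, -2]
for magnitude, count in count_cycles(series):
    print(magnitude, count) |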
def render(node, strict=False):
"""Recipe to render a given FST node.
    The FST is composed of branch nodes, which are either lists or dicts,
    and of leaf nodes, which are strings. Branch nodes can have other
    list, dict or leaf nodes as children.
    To render a string, simply output it. To render a list, render each
    of its elements in order. To render a dict, you must follow the
    node's entry in the nodes_rendering_order dictionary and its
    dependent constraints.
    This function hides all this algorithmic complexity by returning
    a structured rendering recipe, whatever the type of node. Better
    yet, you can subclass RenderWalker, which drastically simplifies
    working with the rendered FST.
    The recipe is a list of steps; each step corresponds to a child and
    is a 3-tuple composed of the following fields:
    - `key_type` is a string giving the type of the child held in the
      second field (`item`) of the tuple. It can be one of:
      - 'constant': the child is a string
      - 'node': the child is a dict
      - 'key': the child is an element of a dict
      - 'list': the child is a list
      - 'formatting': the child is a list specialized in formatting
    - `item` is the child itself: either a string, a dict or a list.
    - `render_key` gives the key used to access this child from the parent
      node. It's a string if the parent node is a dict or a number if it's
      a list.
    Please note that "bool" `key_types` are never rendered, which is why
    they are not shown here.
"""
if isinstance(node, list):
return render_list(node)
elif isinstance(node, dict):
return render_node(node, strict=strict)
else:
        raise NotImplementedError("You tried to render a %s. Only lists and dicts can be rendered." % node.__class__.__name__) |
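# Hedged usage sketch: assuming this render() ships with the baron package
# (baron.render.render) and that baron.parse() returns an FST it can walk,
# a recursive walker that prints every leaf string could look like this.
import baron
from baron.render import render

def dump_leaves(node):
    # Each recipe step is a (key_type, item, render_key) 3-tuple.
    for key_type, item, render_key in render(node):
        if isinstance(item, str):
            print(repr(item))
        elif item:
            dump_leaves(item)

dump_leaves(baron.parse("a = 1\n")) |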
def path_to_node(tree, path):
"""FST node located at the given path"""
if path is None:
return None
node = tree
for key in path:
node = child_by_key(node, key)
return node |
def before_constant(self, constant, key):
"""Determine if we're on the targetted node.
If the targetted column is reached, `stop` and `path_found` are
set. If the targetted line is passed, only `stop` is set. This
prevents unnecessary tree travelling when the targetted column
is out of bounds.
"""
newlines_split = split_on_newlines(constant)
for c in newlines_split:
if is_newline(c):
self.current.advance_line()
# if target line is passed
if self.current.line > self.target.line:
return self.STOP
else:
advance_by = len(c)
if self.is_on_targetted_node(advance_by):
self.found_path = deepcopy(self.current_path)
return self.STOP
self.current.advance_columns(advance_by) |
def get_prefix(multicodec):
"""
Returns prefix for a given multicodec
:param str multicodec: multicodec codec name
:return: the prefix for the given multicodec
    :rtype: bytes
:raises ValueError: if an invalid multicodec name is provided
"""
try:
prefix = varint.encode(NAME_TABLE[multicodec])
except KeyError:
raise ValueError('{} multicodec is not supported.'.format(multicodec))
return prefix |
def add_prefix(multicodec, bytes_):
"""
Adds multicodec prefix to the given bytes input
:param str multicodec: multicodec to use for prefixing
:param bytes bytes_: data to prefix
:return: prefixed byte data
:rtype: bytes
"""
prefix = get_prefix(multicodec)
return b''.join([prefix, bytes_]) |
def remove_prefix(bytes_):
"""
Removes prefix from a prefixed data
:param bytes bytes_: multicodec prefixed data bytes
:return: prefix removed data bytes
:rtype: bytes
"""
prefix_int = extract_prefix(bytes_)
prefix = varint.encode(prefix_int)
return bytes_[len(prefix):] |
def get_codec(bytes_):
"""
Gets the codec used for prefix the multicodec prefixed data
:param bytes bytes_: multicodec prefixed data bytes
:return: name of the multicodec used to prefix
:rtype: str
"""
prefix = extract_prefix(bytes_)
try:
return CODE_TABLE[prefix]
except KeyError:
raise ValueError('Prefix {} not present in the lookup table'.format(prefix)) |
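# Hedged usage sketch: the helpers above look like the py-multicodec API, so
# this assumes they are importable from a "multicodec" module and that the
# 'raw' codec is present in its NAME_TABLE; both are assumptions, not facts
# from the snippets themselves.
from multicodec import add_prefix, get_codec, remove_prefix

prefixed = add_prefix('raw', b'some data')
print(get_codec(prefixed))      # codec name recovered from the varint prefix
print(remove_prefix(prefixed))  # original payload with the prefix stripped |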
def capture(
target_url,
user_agent="archiveis (https://github.com/pastpages/archiveis)",
proxies={}
):
"""
Archives the provided URL using archive.is
Returns the URL where the capture is stored.
"""
# Put together the URL that will save our request
domain = "http://archive.vn"
save_url = urljoin(domain, "/submit/")
# Configure the request headers
headers = {
'User-Agent': user_agent,
"host": "archive.vn",
}
# Request a unique identifier for our activity
logger.debug("Requesting {}".format(domain + "/"))
get_kwargs = dict(
timeout=120,
allow_redirects=True,
headers=headers,
)
if proxies:
get_kwargs['proxies'] = proxies
response = requests.get(domain + "/", **get_kwargs)
response.raise_for_status()
    # The unique id needs to be parsed out of the homepage HTML
    html = response.text
try:
unique_id = html.split('name="submitid', 1)[1].split('value="', 1)[1].split('"', 1)[0]
logger.debug("Unique identifier: {}".format(unique_id))
except IndexError:
        logger.warning("Unable to extract unique identifier from archive.is. Submitting without it.")
unique_id = None
# Send the capture request to archive.is with the unique id included
data = {
"url": target_url,
"anyway": 1,
}
if unique_id:
data.update({"submitid": unique_id})
post_kwargs = dict(
timeout=120,
allow_redirects=True,
headers=headers,
data=data
)
if proxies:
post_kwargs['proxies'] = proxies
logger.debug("Requesting {}".format(save_url))
response = requests.post(save_url, **post_kwargs)
response.raise_for_status()
# There are a couple ways the header can come back
if 'Refresh' in response.headers:
memento = str(response.headers['Refresh']).split(';url=')[1]
logger.debug("Memento from Refresh header: {}".format(memento))
return memento
if 'Location' in response.headers:
memento = response.headers['Location']
logger.debug("Memento from Location header: {}".format(memento))
return memento
logger.debug("Memento not found in response headers. Inspecting history.")
for i, r in enumerate(response.history):
logger.debug("Inspecting history request #{}".format(i))
logger.debug(r.headers)
if 'Location' in r.headers:
memento = r.headers['Location']
logger.debug("Memento from the Location header of {} history response: {}".format(i+1, memento))
return memento
# If there's nothing at this point, throw an error
logger.error("No memento returned by archive.is")
logger.error("Status code: {}".format(response.status_code))
logger.error(response.headers)
logger.error(response.text)
raise Exception("No memento returned by archive.is") |
def cli(url, user_agent):
"""
Archives the provided URL using archive.is.
"""
kwargs = {}
if user_agent:
kwargs['user_agent'] = user_agent
archive_url = capture(url, **kwargs)
click.echo(archive_url) |
def get_channel_image(self, channel, img_size=300, skip_cache=False):
"""Get the logo for a channel"""
from bs4 import BeautifulSoup
from wikipedia.exceptions import PageError
import re
import wikipedia
wikipedia.set_lang('fr')
if not channel:
_LOGGER.error('Channel is not set. Could not retrieve image.')
return
# Check if the image is in cache
if channel in self._cache_channel_img and not skip_cache:
img = self._cache_channel_img[channel]
_LOGGER.debug('Cache hit: %s -> %s', channel, img)
return img
channel_info = self.get_channel_info(channel)
query = channel_info['wiki_page']
if not query:
_LOGGER.debug('Wiki page is not set for channel %s', channel)
return
_LOGGER.debug('Query: %s', query)
# If there is a max image size defined use it.
if 'max_img_size' in channel_info:
if img_size > channel_info['max_img_size']:
_LOGGER.info(
'Requested image size is bigger than the max, '
'setting it to %s', channel_info['max_img_size']
)
img_size = channel_info['max_img_size']
try:
page = wikipedia.page(query)
_LOGGER.debug('Wikipedia article title: %s', page.title)
soup = BeautifulSoup(page.html(), 'html.parser')
images = soup.find_all('img')
img_src = None
        for i in images:
            if i.get('alt', '').startswith('Image illustrative'):
                img_src = re.sub(r'\d+px', '{}px'.format(img_size),
                                 i['src'])
img = 'https:{}'.format(img_src) if img_src else None
# Cache result
self._cache_channel_img[channel] = img
return img
except PageError:
_LOGGER.error('Could not fetch channel image for %s', channel) |
def press_key(self, key, mode=0):
'''
modes:
0 -> simple press
1 -> long press
2 -> release after long press
'''
if isinstance(key, str):
assert key in KEYS, 'No such key: {}'.format(key)
key = KEYS[key]
_LOGGER.info('Press key %s', self.__get_key_name(key))
return self.rq('01', OrderedDict([('key', key), ('mode', mode)])) |
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
for translation in orm['people.PersonTranslation'].objects.all():
if translation.language in ['en', 'de']:
translation.roman_first_name = translation.first_name
translation.roman_last_name = translation.last_name
else:
translation.non_roman_first_name = translation.first_name
translation.non_roman_last_name = translation.last_name
translation.save() |
def forwards(self, orm):
"Write your forwards methods here."
for translation in orm['people.PersonTranslation'].objects.all():
translation.person.roman_first_name = translation.roman_first_name
translation.person.roman_last_name = translation.roman_last_name
translation.person.non_roman_first_name = translation.non_roman_first_name
translation.person.non_roman_last_name = translation.non_roman_last_name
translation.person.save() |
def parse(self, scope):
"""Parse block node.
args:
scope (Scope): Current scope
raises:
SyntaxError
returns:
self
"""
if not self.parsed:
scope.push()
self.name, inner = self.tokens
scope.current = self.name
scope.real.append(self.name)
if not self.name.parsed:
self.name.parse(scope)
if not inner:
inner = []
inner = list(utility.flatten([p.parse(scope) for p in inner if p]))
self.parsed = []
self.inner = []
if not hasattr(self, "inner_media_queries"):
self.inner_media_queries = []
for p in inner:
if p is not None:
if isinstance(p, Block):
if (len(scope) == 2 and p.tokens[1] is not None):
p_is_mediaquery = p.name.tokens[0] == '@media'
# Inner block @media ... { ... } is a nested media
# query. But double-nested media queries have to be
# removed and marked as well. While parsing ".foo",
# both nested "@media print" and double-nested
# "@media all" will be handled as we have to
# re-arrange the scope and block layout quite a bit:
#
# .foo {
# @media print {
# color: blue;
# @media screen { font-size: 12em; }
# }
# }
#
# Expected result:
#
# @media print {
# .foo { color: blue; }
# }
# @media print and screen {
                        #     .foo { font-size: 12em; }
# }
append_list = []
reparse_p = False
for child in p.tokens[1]:
if isinstance(child, Block) and child.name.raw(
).startswith("@media"):
# Remove child from the nested media query, it will be re-added to
# the parent with 'merged' media query (see above example).
p.tokens[1].remove(child)
if p_is_mediaquery: # Media query inside a & block
# Double-nested media query found. We remove it from 'p' and add
# it to this block with a new 'name'.
reparse_p = True
part_a = p.name.tokens[2:][0][0][0]
part_b = child.name.tokens[2:][0][0]
new_ident_tokens = [
'@media', ' ', [
part_a, (' ', 'and', ' '),
part_b
]
]
# Parse child again with new @media $BLA {} part
child.tokens[0] = Identifier(
new_ident_tokens)
child.parsed = None
child = child.parse(scope)
else:
child.block_name = p.name
append_list.append(child)
if reparse_p:
p.parsed = None
p = p.parse(scope)
if not p_is_mediaquery and not append_list:
self.inner.append(p)
else:
append_list.insert(
0, p
                            )  # This media query should occur before its children
for media_query in append_list:
self.inner_media_queries.append(
media_query)
# NOTE(saschpe): The code is not recursive but we hope that people
                            # won't use triple-nested media queries.
else:
self.inner.append(p)
else:
self.parsed.append(p)
if self.inner_media_queries:
# Nested media queries, we have to remove self from scope and
# push all nested @media ... {} blocks.
scope.remove_block(self, index=-2)
for mb in self.inner_media_queries:
# New inner block with current name and media block contents
if hasattr(mb, 'block_name'):
cb_name = mb.block_name
else:
cb_name = self.tokens[0]
cb = Block([cb_name, mb.tokens[1]]).parse(scope)
# Replace inner block contents with new block
new_mb = Block([mb.tokens[0], [cb]]).parse(scope)
self.inner.append(new_mb)
scope.add_block(new_mb)
scope.real.pop()
scope.pop()
return self |
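# Hedged sketch of the nested-media-query rearrangement described in the
# comments above, using lesscpy's documented compile() helper on an in-memory
# string (the expected output is paraphrased in the comments above, not
# asserted here): the inner "@media screen" rule should be lifted out into a
# merged "@media print and screen { .foo { ... } }" block.
import lesscpy
from io import StringIO

less_source = ".foo { @media print { color: blue; @media screen { font-size: 12em; } } }"
print(lesscpy.compile(StringIO(less_source), minify=True)) |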
def raw(self, clean=False):
"""Raw block name
args:
clean (bool): clean name
returns:
str
"""
try:
return self.tokens[0].raw(clean)
except (AttributeError, TypeError):
pass |
def fmt(self, fills):
"""Format block (CSS)
args:
fills (dict): Fill elements
returns:
str (CSS)
"""
f = "%(identifier)s%(ws)s{%(nl)s%(proplist)s}%(eb)s"
out = []
name = self.name.fmt(fills)
if self.parsed and any(
p for p in self.parsed
if str(type(p)) != "<class 'lesscpy.plib.variable.Variable'>"):
fills.update({
'identifier':
name,
'proplist':
''.join([p.fmt(fills) for p in self.parsed if p]),
})
out.append(f % fills)
if hasattr(self, 'inner'):
if self.name.subparse and len(self.inner) > 0: # @media
inner = ''.join([p.fmt(fills) for p in self.inner])
inner = inner.replace(fills['nl'],
fills['nl'] + fills['tab']).rstrip(
fills['tab'])
if not fills['nl']:
inner = inner.strip()
fills.update({
'identifier': name,
'proplist': fills['tab'] + inner
})
out.append(f % fills)
else:
out.append(''.join([p.fmt(fills) for p in self.inner]))
return ''.join(out) |
def copy(self):
""" Return a full copy of self
returns: Block object
"""
name, inner = self.tokens
if inner:
inner = [u.copy() if u else u for u in inner]
if name:
name = name.copy()
return Block([name, inner], 0) |
def copy_inner(self, scope):
"""Copy block contents (properties, inner blocks).
Renames inner block from current scope.
Used for mixins.
args:
scope (Scope): Current scope
returns:
list (block contents)
"""
if self.tokens[1]:
tokens = [u.copy() if u else u for u in self.tokens[1]]
out = [p for p in tokens if p]
utility.rename(out, scope, Block)
return out
return None |
def parse(self, scope):
"""Parse node
args:
scope (Scope): current scope
raises:
SyntaxError
returns:
self
"""
self.parsed = list(utility.flatten(self.tokens))
if self.parsed[0] == '@import':
if len(self.parsed) > 4:
# Media @import
self.parsed.insert(3, ' ')
return self |
def parse(self, scope, error=False, depth=0):
""" Parse function. We search for mixins
first within current scope then fallback
to global scope. The special scope.deferred
is used when local scope mixins are called
within parent mixins.
If nothing is found we fallback to block-mixin
as lessc.js allows calls to blocks and mixins to
    be interchangeable.
clx: This method is a HACK that stems from
poor design elsewhere. I will fix it
when I have more time.
args:
scope (Scope): Current scope
returns:
mixed
"""
res = False
ident, args = self.tokens
ident.parse(scope)
mixins = scope.mixins(ident.raw())
if not mixins:
ident.parse(None)
mixins = scope.mixins(ident.raw())
if depth > 64:
raise SyntaxError('NameError `%s`' % ident.raw(True))
if not mixins:
if scope.deferred:
store = [t for t in scope.deferred.parsed[-1]]
i = 0
while scope.deferred.parsed[-1]:
scope.current = scope.deferred
ident.parse(scope)
mixins = scope.mixins(ident.raw())
scope.current = None
if mixins or i > 64:
break
scope.deferred.parsed[-1].pop()
i += 1
scope.deferred.parsed[-1] = store
if not mixins:
# Fallback to blocks
block = scope.blocks(ident.raw())
if not block:
ident.parse(None)
block = scope.blocks(ident.raw())
if block:
scope.current = scope.real[-1] if scope.real else None
res = block.copy_inner(scope)
scope.current = None
if mixins:
for mixin in mixins:
scope.current = scope.real[-1] if scope.real else None
res = mixin.call(scope, args)
if res:
# Add variables to scope to support
# closures
[scope.add_variable(v) for v in mixin.vars]
scope.deferred = ident
break
if res:
store = [t for t in scope.deferred.parsed[-1]
] if scope.deferred else False
tmp_res = []
for p in res:
if p:
if isinstance(p, Deferred):
tmp_res.append(p.parse(scope, depth=depth + 1))
else:
tmp_res.append(p.parse(scope))
res = tmp_res
#res = [p.parse(scope, depth=depth+1) for p in res if p]
while (any(t for t in res if isinstance(t, Deferred))):
res = [p.parse(scope) for p in res if p]
if store:
scope.deferred.parsed[-1] = store
if error and not res:
raise SyntaxError('NameError `%s`' % ident.raw(True))
return res |
def ldirectory(inpath, outpath, args, scope):
"""Compile all *.less files in directory
Args:
inpath (str): Path to compile
outpath (str): Output directory
args (object): Argparse Object
scope (Scope): Scope object or None
"""
yacctab = 'yacctab' if args.debug else None
if not outpath:
sys.exit("Compile directory option needs -o ...")
else:
if not os.path.isdir(outpath):
if args.verbose:
print("Creating '%s'" % outpath, file=sys.stderr)
if not args.dry_run:
os.mkdir(outpath)
less = glob.glob(os.path.join(inpath, '*.less'))
f = formatter.Formatter(args)
for lf in less:
outf = os.path.splitext(os.path.basename(lf))
minx = '.min' if args.min_ending else ''
outf = "%s/%s%s.css" % (outpath, outf[0], minx)
if not args.force and os.path.exists(outf):
recompile = os.path.getmtime(outf) < os.path.getmtime(lf)
else:
recompile = True
if recompile:
print('%s -> %s' % (lf, outf))
p = parser.LessParser(
yacc_debug=(args.debug),
lex_optimize=True,
yacc_optimize=(not args.debug),
scope=scope,
tabfile=yacctab,
verbose=args.verbose)
p.parse(filename=lf, debuglevel=0)
css = f.format(p)
if not args.dry_run:
with open(outf, 'w') as outfile:
outfile.write(css)
elif args.verbose:
print('skipping %s, not modified' % lf, file=sys.stderr)
sys.stdout.flush()
if args.recurse:
[
ldirectory(
os.path.join(inpath, name), os.path.join(outpath, name), args,
scope) for name in os.listdir(inpath)
if os.path.isdir(os.path.join(inpath, name))
and not name.startswith('.') and not name == outpath
] |
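# Hedged usage sketch: ldirectory() and the Formatter it builds only appear to
# read the flags defined by the CLI below (debug, verbose, dry_run, min_ending,
# force, recurse, plus the formatting options), so a bare argparse.Namespace is
# assumed to be enough to drive it outside the CLI. Paths are placeholders.
import argparse

opts = argparse.Namespace(
    debug=False, verbose=True, dry_run=True, min_ending=False,
    force=False, recurse=False,
    minify=False, xminify=False, tabs=False, spaces=2)
ldirectory('styles/less', 'styles/css', opts, scope=None) |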
def run():
"""Run compiler
"""
aparse = argparse.ArgumentParser(
description='LessCss Compiler', epilog='<< [email protected] @_o >>')
aparse.add_argument(
'-v', '--version', action='version', version=VERSION_STR)
aparse.add_argument(
'-I',
'--include',
action="store",
type=str,
help="Included less-files (comma separated)")
aparse.add_argument(
'-V',
'--verbose',
action="store_true",
default=False,
help="Verbose mode")
aparse.add_argument(
'-C',
'--dont_create_dirs',
action="store_true",
default=False,
help="Creates directories when outputing files (lessc non-compatible)")
fgroup = aparse.add_argument_group('Formatting options')
fgroup.add_argument(
'-x',
'--minify',
action="store_true",
default=False,
help="Minify output")
fgroup.add_argument(
'-X',
'--xminify',
action="store_true",
default=False,
help="Minify output, no end of block newlines")
fgroup.add_argument('-t', '--tabs', help="Use tabs", action="store_true")
fgroup.add_argument(
'-s',
'--spaces',
help="Number of startline spaces (default 2)",
default=2)
dgroup = aparse.add_argument_group(
'Directory options', 'Compiles all *.less files in directory that '
        'have a newer timestamp than its css file.')
dgroup.add_argument('-o', '--out', action="store", help="Output directory")
dgroup.add_argument(
'-r',
'--recurse',
action="store_true",
help="Recursive into subdirectorys")
dgroup.add_argument(
'-f',
'--force',
action="store_true",
help="Force recompile on all files")
dgroup.add_argument(
'-m',
'--min-ending',
action="store_true",
default=False,
help="Add '.min' into output filename. eg, name.min.css")
dgroup.add_argument(
'-D',
'--dry-run',
action="store_true",
default=False,
help="Dry run, do not write files")
group = aparse.add_argument_group('Debugging')
group.add_argument(
'-g',
'--debug',
action="store_true",
default=False,
help="Debugging information")
group.add_argument(
'-S',
'--scopemap',
action="store_true",
default=False,
help="Scopemap")
group.add_argument(
'-L',
'--lex-only',
action="store_true",
default=False,
help="Run lexer on target")
group.add_argument(
'-N',
'--no-css',
action="store_true",
default=False,
help="No css output")
aparse.add_argument('target', help="less file or directory")
aparse.add_argument('output', nargs='?', help="output file path")
args = aparse.parse_args()
try:
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
if args.lex_only:
lex = lexer.LessLexer()
ll = lex.file(args.target)
while True:
tok = ll.token()
if not tok:
break
if hasattr(tok,
"lexer"): # literals don't have the lexer attribute
print(tok, "State:", tok.lexer.lexstate)
else:
print(tok)
print('EOF')
sys.exit()
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
yacctab = 'yacctab' if args.debug else None
scope = None
if args.include:
for u in args.include.split(','):
if os.path.exists(u):
p = parser.LessParser(
yacc_debug=(args.debug),
lex_optimize=True,
yacc_optimize=(not args.debug),
tabfile=yacctab,
verbose=args.verbose)
p.parse(filename=u, debuglevel=args.debug)
if not scope:
scope = p.scope
else:
scope.update(p.scope)
else:
sys.exit('included file `%s` not found ...' % u)
sys.stdout.flush()
p = None
f = formatter.Formatter(args)
if not os.path.exists(args.target):
sys.exit("Target not found '%s' ..." % args.target)
if os.path.isdir(args.target):
ldirectory(args.target, args.out, args, scope)
if args.dry_run:
print('Dry run, nothing done.', file=sys.stderr)
else:
p = parser.LessParser(
yacc_debug=(args.debug),
lex_optimize=True,
yacc_optimize=(not args.debug),
scope=copy.deepcopy(scope),
verbose=args.verbose)
p.parse(filename=args.target, debuglevel=args.debug)
if args.scopemap:
args.no_css = True
p.scopemap()
if not args.no_css and p:
out = f.format(p)
if args.output:
if not args.dont_create_dirs and not os.path.exists(
os.path.dirname(args.output)):
try:
os.makedirs(os.path.dirname(args.output))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(args.output, "w") as f:
f.write(out)
else:
print(out)
except (KeyboardInterrupt, SystemExit, IOError):
sys.exit('\nAborting...') |
def parse(self, scope):
"""Parse node.
args:
scope (Scope): Current scope
raises:
SyntaxError
returns:
self
"""
self.keyframe, = [
e[0] if isinstance(e, tuple) else e for e in self.tokens
if str(e).strip()
]
self.subparse = False
return self |
def t_mediaquery_t_semicolon(self, t):
r';'
# This can happen only as part of a CSS import statement. The
# "mediaquery" state is reused there. Ordinary media queries always
# end at '{', i.e. when a block is opened.
t.lexer.pop_state() # state mediaquery
# We have to pop the 'import' state here because we already ate the
# t_semicolon and won't trigger t_import_t_semicolon.
t.lexer.pop_state() # state import
return t |
def t_less_variable(self, t):
r'@@?[\w-]+|@\{[^@\}]+\}'
v = t.value.lower()
if v in reserved.tokens:
t.type = reserved.tokens[v]
if t.type == "css_media":
t.lexer.push_state("mediaquery")
elif t.type == "css_import":
t.lexer.push_state("import")
return t |
def t_t_eopen(self, t):
r'~"|~\''
if t.value[1] == '"':
t.lexer.push_state('escapequotes')
elif t.value[1] == '\'':
t.lexer.push_state('escapeapostrophe')
return t |
def t_css_string(self, t):
r'"[^"@]*"|\'[^\'@]*\''
t.lexer.lineno += t.value.count('\n')
return t |
def t_t_isopen(self, t):
r'"|\''
if t.value[0] == '"':
t.lexer.push_state('istringquotes')
elif t.value[0] == '\'':
t.lexer.push_state('istringapostrophe')
return t |
def t_istringapostrophe_css_string(self, t):
r'[^\'@]+'
t.lexer.lineno += t.value.count('\n')
return t |
def t_istringquotes_css_string(self, t):
r'[^"@]+'
t.lexer.lineno += t.value.count('\n')
return t |
def file(self, filename):
"""
Lex file.
"""
with open(filename) as f:
self.lexer.input(f.read())
return self |
def input(self, file):
"""
Load lexer with content from `file` which can be a path or a file
like object.
"""
if isinstance(file, string_types):
with open(file) as f:
self.lexer.input(f.read())
else:
self.lexer.input(file.read()) |
def token(self):
"""
Token function. Contains 2 hacks:
1. Injects ';' into blocks where the last property
leaves out the ;
2. Strips out whitespace from nonsignificant locations
to ease parsing.
"""
if self.next_:
t = self.next_
self.next_ = None
return t
while True:
t = self.lexer.token()
if not t:
return t
if t.type == 't_ws' and (
self.pretok or
(self.last and self.last.type not in self.significant_ws)):
continue
self.pretok = False
if t.type == 't_bclose' and self.last and self.last.type not in ['t_bopen', 't_bclose'] and self.last.type != 't_semicolon' \
and not (hasattr(t, 'lexer') and (t.lexer.lexstate == 'escapequotes' or t.lexer.lexstate == 'escapeapostrophe')):
self.next_ = t
tok = lex.LexToken()
tok.type = 't_semicolon'
tok.value = ';'
tok.lineno = t.lineno
tok.lexpos = t.lexpos
self.last = tok
self.lexer.in_property_decl = False
return tok
self.last = t
break
return t |
def parse(self, scope):
"""Parse node. Block identifiers are stored as
strings with spaces replaced with ?
args:
scope (Scope): Current scope
raises:
SyntaxError
returns:
self
"""
names = []
name = []
self._subp = ('@media', '@keyframes', '@-moz-keyframes',
'@-webkit-keyframes', '@-ms-keyframes')
if self.tokens and hasattr(self.tokens, 'parse'):
self.tokens = list(
utility.flatten([
id.split() + [',']
for id in self.tokens.parse(scope).split(',')
]))
self.tokens.pop()
if self.tokens and any(hasattr(t, 'parse') for t in self.tokens):
tmp_tokens = []
for t in self.tokens:
if hasattr(t, 'parse'):
tmp_tokens.append(t.parse(scope))
else:
tmp_tokens.append(t)
self.tokens = list(utility.flatten(tmp_tokens))
if self.tokens and self.tokens[0] in self._subp:
name = list(utility.flatten(self.tokens))
self.subparse = True
else:
self.subparse = False
for n in utility.flatten(self.tokens):
if n == '*':
name.append('* ')
elif n in '>+~':
if name and name[-1] == ' ':
name.pop()
name.append('?%s?' % n)
elif n == ',':
names.append(name)
name = []
else:
name.append(n)
names.append(name)
parsed = self.root(scope, names) if scope else names
# Interpolated selectors need another step, we have to replace variables. Avoid reserved words though
#
# Example: '.@{var}' results in [['.', '@{var}']]
# But: '@media print' results in [['@media', ' ', 'print']]
#
def replace_variables(tokens, scope):
return [
scope.swap(t)
if (utility.is_variable(t) and not t in reserved.tokens) else t
for t in tokens
]
parsed = [
list(utility.flatten(replace_variables(part, scope)))
for part in parsed
]
self.parsed = [[
i for i, j in utility.pairwise(part)
if i != ' ' or (j and '?' not in j)
] for part in parsed]
return self |
def root(self, scope, names):
"""Find root of identifier, from scope
args:
scope (Scope): current scope
names (list): identifier name list (, separated identifiers)
returns:
list
"""
parent = scope.scopename
if parent:
parent = parent[-1]
if parent.parsed:
parsed_names = []
for name in names:
ampersand_count = name.count('&')
if ampersand_count:
filtered_parts = []
for part in parent.parsed:
if part and part[0] not in self._subp:
filtered_parts.append(part)
permutations = list(
utility.permutations_with_replacement(
filtered_parts, ampersand_count))
for permutation in permutations:
parsed = []
for name_part in name:
if name_part == "&":
parent_part = permutation.pop(0)
if parsed and parsed[-1].endswith(']'):
parsed.extend(' ')
if parent_part[-1] == ' ':
parent_part.pop()
parsed.extend(parent_part)
else:
parsed.append(name_part)
parsed_names.append(parsed)
else:
# NOTE(saschpe): Maybe this code can be expressed with permutations too?
for part in parent.parsed:
if part and part[0] not in self._subp:
parsed = []
if name[0] == "@media":
parsed.extend(name)
else:
parsed.extend(part)
if part[-1] != ' ':
parsed.append(' ')
parsed.extend(name)
parsed_names.append(parsed)
else:
parsed_names.append(name)
return parsed_names
return names |
def raw(self, clean=False):
"""Raw identifier.
args:
clean (bool): clean name
returns:
str
"""
if clean:
return ''.join(''.join(p) for p in self.parsed).replace('?', ' ')
return '%'.join('%'.join(p) for p in self.parsed).strip().strip('%') |
def copy(self):
""" Return copy of self
Returns:
Identifier object
"""
tokens = ([t for t in self.tokens]
if isinstance(self.tokens, list) else self.tokens)
return Identifier(tokens, 0) |
def fmt(self, fills):
"""Format identifier
args:
fills (dict): replacements
returns:
str (CSS)
"""
name = ',$$'.join(''.join(p).strip() for p in self.parsed)
    name = re.sub(r'\?(.)\?', '%(ws)s\\1%(ws)s', name) % fills
    return name.replace('$$', fills['nl']).replace('  ', ' ') |
def add_block(self, block):
"""Add block element to scope
Args:
block (Block): Block object
"""
self[-1]['__blocks__'].append(block)
self[-1]['__names__'].append(block.raw()) |
def remove_block(self, block, index="-1"):
"""Remove block element from scope
Args:
block (Block): Block object
"""
self[index]["__blocks__"].remove(block)
self[index]["__names__"].remove(block.raw()) |
def add_mixin(self, mixin):
"""Add mixin to scope
Args:
mixin (Mixin): Mixin object
"""
raw = mixin.tokens[0][0].raw()
if raw in self._mixins:
self._mixins[raw].append(mixin)
else:
self._mixins[raw] = [mixin] |
def variables(self, name):
"""Search for variable by name. Searches scope top down
Args:
name (string): Search term
Returns:
Variable object OR False
"""
if isinstance(name, tuple):
name = name[0]
if name.startswith('@{'):
name = '@' + name[2:-1]
i = len(self)
while i >= 0:
i -= 1
if name in self[i]['__variables__']:
return self[i]['__variables__'][name]
return False |
def mixins(self, name):
""" Search mixins for name.
Allow '>' to be ignored. '.a .b()' == '.a > .b()'
Args:
name (string): Search term
Returns:
Mixin object list OR False
"""
m = self._smixins(name)
if m:
return m
return self._smixins(name.replace('?>?', ' ')) |
def _smixins(self, name):
"""Inner wrapper to search for mixins by name.
"""
return (self._mixins[name] if name in self._mixins else False) |
def blocks(self, name):
"""
Search for defined blocks recursively.
Allow '>' to be ignored. '.a .b' == '.a > .b'
Args:
name (string): Search term
Returns:
Block object OR False
"""
b = self._blocks(name)
if b:
return b
return self._blocks(name.replace('?>?', ' ')) |
def _blocks(self, name):
"""Inner wrapper to search for blocks by name.
"""
i = len(self)
while i >= 0:
i -= 1
if name in self[i]['__names__']:
for b in self[i]['__blocks__']:
r = b.raw()
if r and r == name:
return b
else:
for b in self[i]['__blocks__']:
r = b.raw()
if r and name.startswith(r):
b = utility.blocksearch(b, name)
if b:
return b
return False |
def update(self, scope, at=0):
"""Update scope. Add another scope to this one.
Args:
scope (Scope): Scope object
Kwargs:
at (int): Level to update
"""
if hasattr(scope, '_mixins') and not at:
self._mixins.update(scope._mixins)
self[at]['__variables__'].update(scope[at]['__variables__'])
self[at]['__blocks__'].extend(scope[at]['__blocks__'])
self[at]['__names__'].extend(scope[at]['__names__']) |
def swap(self, name):
""" Swap variable name for variable value
Args:
name (str): Variable name
Returns:
Variable value (Mixed)
"""
if name.startswith('@@'):
var = self.variables(name[1:])
if var is False:
raise SyntaxError('Unknown variable %s' % name)
name = '@' + utility.destring(var.value[0])
var = self.variables(name)
if var is False:
raise SyntaxError('Unknown variable %s' % name)
elif name.startswith('@{'):
var = self.variables('@' + name[2:-1])
if var is False:
raise SyntaxError('Unknown escaped variable %s' % name)
if isinstance(var.value[0], string_types):
var.value[0] = utility.destring(var.value[0])
else:
var = self.variables(name)
if var is False:
raise SyntaxError('Unknown variable %s' % name)
return var.value |
def process(self, tokens, scope):
""" Process tokenslist, flattening and parsing it
args:
tokens (list): tokenlist
scope (Scope): Current scope
returns:
list
"""
while True:
tokens = list(utility.flatten(tokens))
done = True
if any(t for t in tokens if hasattr(t, 'parse')):
tokens = [
t.parse(scope) if hasattr(t, 'parse') else t
for t in tokens
]
done = False
if any(
t for t in tokens
if (utility.is_variable(t)) or str(type(t)) ==
"<class 'lesscpy.plib.variable.Variable'>"):
tokens = self.replace_variables(tokens, scope)
done = False
if done:
break
return tokens |
def replace_variables(self, tokens, scope):
""" Replace variables in tokenlist
args:
tokens (list): tokenlist
scope (Scope): Current scope
returns:
list
"""
    result = []
    for t in tokens:
        if utility.is_variable(t):
            result.append(scope.swap(t))
        elif str(type(t)) == "<class 'lesscpy.plib.variable.Variable'>":
            result.append(scope.swap(t.name))
        else:
            result.append(t)
    return result |
def parse(self, scope):
"""Parse node
args:
scope (Scope): current scope
raises:
SyntaxError
returns:
self
"""
if not self.parsed:
if len(self.tokens) > 2:
property, style, _ = self.tokens
self.important = True
else:
property, style = self.tokens
self.important = False
self.property = ''.join(property)
self.parsed = []
if style:
style = self.preprocess(style)
self.parsed = self.process(style, scope)
return self |
def preprocess(self, style):
"""Hackish preprocessing from font shorthand tags.
Skips expression parse on certain tags.
args:
style (list): .
returns:
list
"""
if self.property == 'font':
style = [
''.join(u.expression()) if hasattr(u, 'expression') else u
for u in style
]
else:
style = [(u, ' ') if hasattr(u, 'expression') else u
for u in style]
return style |
def fmt(self, fills):
""" Format node
args:
fills (dict): replacements
returns:
str
"""
f = "%(tab)s%(property)s:%(ws)s%(style)s%(important)s;%(nl)s"
imp = ' !important' if self.important else ''
if fills['nl']:
self.parsed = [
',%s' % fills['ws'] if p == ',' else p for p in self.parsed
]
style = ''.join([
p.fmt(fills) if hasattr(p, 'fmt') else str(p) for p in self.parsed
])
# IE cannot handle no space after url()
style = re.sub("(url\([^\)]*\))([^\s,])", "\\1 \\2", style)
fills.update({
'property': self.property,
'style': style.strip(),
'important': imp
})
return f % fills |
def parse(self, scope):
"""Parse node
args:
scope (Scope): current scope
raises:
SyntaxError
returns:
self
"""
self.name, args, self.guards = self.tokens[0]
self.args = [a for a in utility.flatten(args) if a]
self.body = Block([None, self.tokens[1]], 0)
self.vars = list(
utility.flatten([
list(v.values()) for v in [s['__variables__'] for s in scope]
]))
return self |
def parse_args(self, args, scope):
"""Parse arguments to mixin. Add them to scope
as variables. Sets upp special variable @arguments
as well.
args:
args (list): arguments
scope (Scope): current scope
raises:
SyntaxError
"""
arguments = list(zip(args,
[' '] * len(args))) if args and args[0] else None
zl = itertools.zip_longest if sys.version_info[
0] == 3 else itertools.izip_longest
if self.args:
parsed = [
v if hasattr(v, 'parse') else v for v in copy.copy(self.args)
]
args = args if isinstance(args, list) else [args]
vars = [
self._parse_arg(var, arg, scope)
for arg, var in zl([a for a in args], parsed)
]
for var in vars:
if var:
var.parse(scope)
if not arguments:
arguments = [v.value for v in vars if v]
if not arguments:
arguments = ''
Variable(['@arguments', None, arguments]).parse(scope) |
def _parse_arg(self, var, arg, scope):
""" Parse a single argument to mixin.
args:
var (Variable object): variable
arg (mixed): argument
scope (Scope object): current scope
returns:
Variable object or None
"""
if isinstance(var, Variable):
# kwarg
if arg:
if utility.is_variable(arg[0]):
tmp = scope.variables(arg[0])
if not tmp:
return None
val = tmp.value
else:
val = arg
var = Variable(var.tokens[:-1] + [val])
else:
# arg
if utility.is_variable(var):
if arg is None:
raise SyntaxError('Missing argument to mixin')
elif utility.is_variable(arg[0]):
tmp = scope.variables(arg[0])
if not tmp:
return None
val = tmp.value
else:
val = arg
var = Variable([var, None, val])
else:
return None
return var |
def parse_guards(self, scope):
"""Parse guards on mixin.
args:
scope (Scope): current scope
raises:
SyntaxError
returns:
bool (passes guards)
"""
if self.guards:
cor = True if ',' in self.guards else False
for g in self.guards:
if isinstance(g, list):
res = (g[0].parse(scope)
if len(g) == 1 else Expression(g).parse(scope))
if cor:
if res:
return True
elif not res:
return False
return True |
def call(self, scope, args=[]):
"""Call mixin. Parses a copy of the mixins body
in the current scope and returns it.
args:
scope (Scope): current scope
args (list): arguments
raises:
SyntaxError
returns:
list or False
"""
ret = False
if args:
args = [[
a.parse(scope) if isinstance(a, Expression) else a for a in arg
] if arg else arg for arg in args]
try:
self.parse_args(args, scope)
except SyntaxError:
pass
else:
if self.parse_guards(scope):
body = self.body.copy()
ret = body.tokens[1]
if ret:
utility.rename(ret, scope, Block)
return ret |
def parse(self, scope):
""" Parse function
args:
scope (Scope): Scope object
returns:
self
"""
self.name, _, self.value = self.tokens
if isinstance(self.name, tuple):
if len(self.name) > 1:
self.name, pad = self.name
self.value.append(pad)
else:
self.name = self.name[0]
scope.add_variable(self)
return self |
def parse(self, scope):
"""Parse Node within scope.
the functions ~( and e( map to self.escape
and %( maps to self.sformat
args:
scope (Scope): Current scope
"""
name = ''.join(self.tokens[0])
parsed = self.process(self.tokens[1:], scope)
if name == '%(':
name = 'sformat'
elif name in ('~', 'e'):
name = 'escape'
color = Color.Color()
args = [
t for t in parsed
if not isinstance(t, string_types) or t not in '(),'
]
if hasattr(self, name):
try:
return getattr(self, name)(*args)
except ValueError:
pass
if hasattr(color, name):
try:
result = getattr(color, name)(*args)
try:
return result + ' '
except TypeError:
return result
except ValueError:
pass
return name + ''.join([p for p in parsed]) |
def sformat(self, string, *args):
""" String format.
args:
string (str): string to format
args (list): format options
returns:
str
"""
format = string
items = []
m = re.findall('(%[asdA])', format)
if m and not args:
raise SyntaxError('Not enough arguments...')
i = 0
for n in m:
v = {
'%A': urlquote,
'%s': utility.destring,
}.get(n, str)(args[i])
items.append(v)
i += 1
format = format.replace('%A', '%s')
format = format.replace('%d', '%s')
return format % tuple(items) |
def isnumber(self, string, *args):
"""Is number
args:
string (str): match
returns:
bool
"""
try:
n, u = utility.analyze_number(string)
except SyntaxError:
return False
return True |
def isurl(self, string, *args):
"""Is url
args:
string (str): match
returns:
bool
"""
arg = utility.destring(string)
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
# localhost...
r'localhost|'
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
# optional port
r'(?::\d+)?'
r'(?:/?|[/?]\S+)$',
re.IGNORECASE)
return regex.match(arg) |
def isstring(self, string, *args):
"""Is string
args:
string (str): match
returns:
bool
"""
regex = re.compile(r'\'[^\']*\'|"[^"]*"')
return regex.match(string) |
def increment(self, value, *args):
""" Increment function
args:
value (str): target
returns:
str
"""
n, u = utility.analyze_number(value)
return utility.with_unit(n + 1, u) |
def add(self, *args):
""" Add integers
args:
args (list): target
returns:
str
"""
if (len(args) <= 1):
return 0
return sum([int(v) for v in args]) |
def round(self, value, *args):
""" Round number
args:
value (str): target
returns:
str
"""
n, u = utility.analyze_number(value)
return utility.with_unit(
int(utility.away_from_zero_round(float(n))), u) |
def ceil(self, value, *args):
""" Ceil number
args:
value (str): target
returns:
str
"""
n, u = utility.analyze_number(value)
return utility.with_unit(int(math.ceil(n)), u) |
def percentage(self, value, *args):
""" Return percentage value
args:
value (str): target
returns:
str
"""
n, u = utility.analyze_number(value)
n = int(n * 100.0)
u = '%'
return utility.with_unit(n, u) |
def process(self, expression):
""" Process color expression
args:
expression (tuple): color expression
returns:
str
"""
a, o, b = expression
c1 = self._hextorgb(a)
c2 = self._hextorgb(b)
r = ['#']
for i in range(3):
v = self.operate(c1[i], c2[i], o)
if v > 0xff:
v = 0xff
if v < 0:
v = 0
r.append("%02x" % int(v))
return ''.join(r) |
def operate(self, left, right, operation):
""" Do operation on colors
args:
left (str): left side
right (str): right side
operation (str): Operation
returns:
str
"""
operation = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv
}.get(operation)
return operation(left, right) |
def rgb(self, *args):
""" Translate rgb(...) to color string
raises:
ValueError
returns:
str
"""
if len(args) == 4:
args = args[:3]
if len(args) == 3:
try:
return self._rgbatohex(list(map(int, args)))
except ValueError:
            if all(a[-1] == '%' and 100 >= int(a[:-1]) >= 0 for a in args):
return self._rgbatohex(
[int(a[:-1]) * 255 / 100.0 for a in args])
raise ValueError('Illegal color values') |
def rgba(self, *args):
""" Translate rgba(...) to color string
raises:
ValueError
returns:
str
"""
if len(args) == 4:
try:
falpha = float(list(args)[3])
if falpha > 1:
args = args[:3]
if falpha == 0:
values = self._rgbatohex_raw(list(map(int, args)))
return "rgba(%s)" % ','.join([str(a) for a in values])
return self._rgbatohex(list(map(int, args)))
except ValueError:
            if all(a[-1] == '%' and 100 >= int(a[:-1]) >= 0 for a in args):
alpha = list(args)[3]
if alpha[-1] == '%' and float(alpha[:-1]) == 0:
values = self._rgbatohex_raw(
[int(a[:-1]) * 255 / 100.0 for a in args])
return "rgba(%s)" % ','.join([str(a) for a in values])
return self._rgbatohex(
[int(a[:-1]) * 255 / 100.0 for a in args])
raise ValueError('Illegal color values') |
def argb(self, *args):
""" Translate argb(...) to color string
Creates a hex representation of a color in #AARRGGBB format (NOT
#RRGGBBAA!). This format is used in Internet Explorer, and .NET
and Android development.
raises:
ValueError
returns:
str
"""
if len(args) == 1 and type(args[0]) is str:
match = re.match(r'rgba\((.*)\)', args[0])
if match:
# NOTE(saschpe): Evil hack to cope with rgba(.., .., .., 0.5) passed through untransformed
rgb = re.sub(r'\s+', '', match.group(1)).split(',')
else:
rgb = list(self._hextorgb(args[0]))
else:
rgb = list(args)
if len(rgb) == 3:
return self._rgbatohex([255] + list(map(int, rgb)))
elif len(rgb) == 4:
rgb = [rgb.pop()] + rgb # Move Alpha to front
try:
fval = float(list(rgb)[0])
if fval > 1:
rgb = [255] + rgb[1:] # Clip invalid integer/float values
elif 1 >= fval >= 0:
rgb = [
fval * 256
] + rgb[1:] # Convert 0-1 to 0-255 range for _rgbatohex
else:
rgb = [0] + rgb[1:] # Clip lower bound
return self._rgbatohex(list(map(int, rgb)))
except ValueError:
            if all(a[-1] == '%' and 100 >= int(a[:-1]) >= 0 for a in rgb):
return self._rgbatohex(
[int(a[:-1]) * 255 / 100.0 for a in rgb])
raise ValueError('Illegal color values') |
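# Hedged usage sketch of the #AARRGGBB layout described above: assuming this
# method lives on lesscpy's color helper (the import path
# lesscpy.lessc.color.Color is an assumption) and that _rgbatohex concatenates
# the channel bytes as two hex digits each, a fully opaque red gets the alpha
# byte 0xff prepended.
from lesscpy.lessc.color import Color

print(Color().argb('#ff0000'))  # expected: something like '#ffff0000' (AARRGGBB) |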
def hsl(self, *args):
""" Translate hsl(...) to color string
raises:
ValueError
returns:
str
"""
if len(args) == 4:
return self.hsla(*args)
elif len(args) == 3:
h, s, l = args
rgb = colorsys.hls_to_rgb(
int(h) / 360.0, utility.pc_or_float(l), utility.pc_or_float(s))
color = (utility.convergent_round(c * 255) for c in rgb)
return self._rgbatohex(color)
raise ValueError('Illegal color values') |
def hsla(self, *args):
""" Translate hsla(...) to color string
raises:
ValueError
returns:
str
"""
if len(args) == 4:
h, s, l, a = args
rgb = colorsys.hls_to_rgb(
int(h) / 360.0, utility.pc_or_float(l), utility.pc_or_float(s))
color = [float(utility.convergent_round(c * 255)) for c in rgb]
color.append(utility.pc_or_float(a))
return "rgba(%s,%s,%s,%s)" % tuple(color)
raise ValueError('Illegal color values') |
def hue(self, color, *args):
""" Return the hue value of a color
args:
color (str): color
raises:
ValueError
returns:
float
"""
if color:
h, l, s = self._hextohls(color)
return utility.convergent_round(h * 360.0, 3)
raise ValueError('Illegal color values') |
def saturation(self, color, *args):
""" Return the saturation value of a color
args:
color (str): color
raises:
ValueError
returns:
float
"""
if color:
h, l, s = self._hextohls(color)
return s * 100.0
raise ValueError('Illegal color values') |
def lighten(self, color, diff, *args):
""" Lighten a color
args:
color (str): color
diff (str): percentage
returns:
str
"""
if color and diff:
return self._ophsl(color, diff, 1, operator.add)
raise ValueError('Illegal color values') |
def darken(self, color, diff, *args):
""" Darken a color
args:
color (str): color
diff (str): percentage
returns:
str
"""
if color and diff:
return self._ophsl(color, diff, 1, operator.sub)
raise ValueError('Illegal color values') |
def spin(self, color, degree, *args):
""" Spin color by degree. (Increase / decrease hue)
args:
color (str): color
degree (str): percentage
raises:
ValueError
returns:
str
"""
if color and degree:
if isinstance(degree, string_types):
degree = float(degree.strip('%'))
h, l, s = self._hextohls(color)
h = ((h * 360.0) + degree) % 360.0
h = 360.0 + h if h < 0 else h
rgb = colorsys.hls_to_rgb(h / 360.0, l, s)
color = (utility.convergent_round(c * 255) for c in rgb)
return self._rgbatohex(color)
raise ValueError('Illegal color values') |
def mix(self, color1, color2, weight=50, *args):
"""This algorithm factors in both the user-provided weight
and the difference between the alpha values of the two colors
to decide how to perform the weighted average of the two RGB values.
It works by first normalizing both parameters to be within [-1, 1],
    where 1 indicates "only use color1", -1 indicates "only use color2",
    and all values in between indicate a proportionately weighted average.
Once we have the normalized variables w and a,
we apply the formula (w + a)/(1 + w*a)
to get the combined weight (in [-1, 1]) of color1.
This formula has two especially nice properties:
* When either w or a are -1 or 1, the combined weight is also that number
(cases where w * a == -1 are undefined, and handled as a special case).
* When a is 0, the combined weight is w, and vice versa
Finally, the weight of color1 is renormalized to be within [0, 1]
and the weight of color2 is given by 1 minus the weight of color1.
Copyright (c) 2006-2009 Hampton Catlin, Nathan Weizenbaum, and Chris Eppstein
http://sass-lang.com
args:
color1 (str): first color
color2 (str): second color
weight (int/str): weight
raises:
ValueError
returns:
str
"""
if color1 and color2:
if isinstance(weight, string_types):
weight = float(weight.strip('%'))
weight = ((weight / 100.0) * 2) - 1
rgb1 = self._hextorgb(color1)
rgb2 = self._hextorgb(color2)
alpha = 0
w1 = (((weight if weight * alpha == -1 else weight + alpha) /
(1 + weight * alpha)) + 1)
w1 = w1 / 2.0
w2 = 1 - w1
rgb = [
rgb1[0] * w1 + rgb2[0] * w2,
rgb1[1] * w1 + rgb2[1] * w2,
rgb1[2] * w1 + rgb2[2] * w2,
]
return self._rgbatohex(rgb)
raise ValueError('Illegal color values') |
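# Worked example of the weighting described above (a sketch, not an assertion
# about the exact hex output): mix('#ff0000', '#0000ff', 50) normalizes the
# weight to w = ((50 / 100) * 2) - 1 = 0 and, with alpha fixed at 0, the
# combined weight is (w + a) / (1 + w * a) = 0, so w1 = (0 + 1) / 2 = 0.5 and
# w2 = 0.5 -- i.e. a plain 50/50 average of the two RGB triples. |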
def fmt(self, color):
""" Format CSS Hex color code.
uppercase becomes lowercase, 3 digit codes expand to 6 digit.
args:
color (str): color
raises:
ValueError
returns:
str
"""
if utility.is_color(color):
color = color.lower().strip('#')
if len(color) in [3, 4]:
color = ''.join([c * 2 for c in color])
return '#%s' % color
raise ValueError('Cannot format non-color') |
def flatten(lst):
"""Flatten list.
Args:
lst (list): List to flatten
Returns:
generator
"""
for elm in lst:
if isinstance(elm, collections.Iterable) and not isinstance(
elm, string_types):
for sub in flatten(elm):
yield sub
else:
yield elm |
def pairwise(lst):
""" yield item i and item i+1 in lst. e.g.
(lst[0], lst[1]), (lst[1], lst[2]), ..., (lst[-1], None)
Args:
lst (list): List to process
Returns:
        generator
"""
if not lst:
return
length = len(lst)
for i in range(length - 1):
yield lst[i], lst[i + 1]
yield lst[-1], None |
def rename(blocks, scope, stype):
""" Rename all sub-blocks moved under another
block. (mixins)
Args:
        blocks (list): block list
        scope (Scope): Scope object
        stype (type): node type to rename (e.g. Block)
"""
for p in blocks:
if isinstance(p, stype):
p.tokens[0].parse(scope)
if p.tokens[1]:
scope.push()
scope.current = p.tokens[0]
rename(p.tokens[1], scope, stype)
scope.pop() |
def blocksearch(block, name):
""" Recursive search for name in block (inner blocks)
Args:
        block (Block): block to search
        name (str): search term
Returns:
Block OR False
"""
if hasattr(block, 'tokens'):
for b in block.tokens[1]:
b = (b if hasattr(b, 'raw') and b.raw() == name else blocksearch(
b, name))
if b:
return b
return False |
def reverse_guard(lst):
""" Reverse guard expression. not
(@a > 5) -> (@a =< 5)
Args:
lst (list): Expression
returns:
list
"""
rev = {'<': '>=', '>': '=<', '>=': '<', '=<': '>'}
return [rev[l] if l in rev else l for l in lst] |
def debug_print(lst, lvl=0):
""" Print scope tree
args:
lst (list): parse result
lvl (int): current nesting level
"""
pad = ''.join(['\t.'] * lvl)
t = type(lst)
if t is list:
for p in lst:
debug_print(p, lvl)
elif hasattr(lst, 'tokens'):
print(pad, t)
debug_print(list(flatten(lst.tokens)), lvl + 1) |
def analyze_number(var, err=''):
""" Analyse number for type and split from unit
    1px -> (1, 'px')
args:
var (str): number string
kwargs:
err (str): Error message
raises:
SyntaxError
returns:
tuple
"""
n, u = split_unit(var)
if not isinstance(var, string_types):
return (var, u)
if is_color(var):
return (var, 'color')
if is_int(n):
n = int(n)
elif is_float(n):
n = float(n)
else:
raise SyntaxError('%s ´%s´' % (err, var))
return (n, u) |
def with_unit(number, unit=None):
""" Return number with unit
args:
number (mixed): Number
unit (str): Unit
returns:
str
"""
if isinstance(number, tuple):
number, unit = number
if number == 0:
return '0'
if unit:
number = str(number)
if number.startswith('.'):
number = '0' + number
return "%s%s" % (number, unit)
return number if isinstance(number, string_types) else str(number) |
def is_color(value):
""" Is string CSS color
args:
value (str): string
returns:
bool
"""
if not value or not isinstance(value, string_types):
return False
if value[0] == '#' and len(value) in [4, 5, 7, 9]:
try:
int(value[1:], 16)
return True
except ValueError:
pass
return False |
def is_variable(value):
""" Check if string is LESS variable
args:
value (str): string
returns:
bool
"""
if isinstance(value, string_types):
return (value.startswith('@') or value.startswith('-@'))
elif isinstance(value, tuple):
value = ''.join(value)
return (value.startswith('@') or value.startswith('-@'))
return False |
def is_float(value):
""" Is value float
args:
value (str): string
returns:
bool
"""
if not is_int(value):
try:
float(str(value))
return True
except (ValueError, TypeError):
pass
return False |
def split_unit(value):
""" Split a number from its unit
    1px -> ('1', 'px')
Args:
value (str): input
returns:
tuple
"""
    r = re.search(r'^(\-?[\d\.]+)(.*)$', str(value))
return r.groups() if r else ('', '') |
def away_from_zero_round(value, ndigits=0):
"""Round half-way away from zero.
Python2's round() method.
"""
if sys.version_info[0] >= 3:
p = 10**ndigits
return float(math.floor((value * p) + math.copysign(0.5, value))) / p
else:
return round(value, ndigits) |
def convergent_round(value, ndigits=0):
"""Convergent rounding.
    Round half to the nearest even value, similar to Python 3's round() method.
"""
if sys.version_info[0] < 3:
if value < 0.0:
            return -convergent_round(-value, ndigits)
epsilon = 0.0000001
integral_part, _ = divmod(value, 1)
if abs(value - (integral_part + 0.5)) < epsilon:
if integral_part % 2.0 < epsilon:
return integral_part
else:
nearest_even = integral_part + 0.5
return math.ceil(nearest_even)
return round(value, ndigits) |
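# Hedged comparison of the two rounding helpers above, called in the same
# module on Python 3: ties go away from zero in one and to the nearest even
# value in the other.
print(away_from_zero_round(2.5), away_from_zero_round(-2.5))   # 3.0 -3.0
print(convergent_round(2.5), convergent_round(-2.5))           # 2.0 -2.0 (ties to even) |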
def pc_or_float(s):
""" Utility function to process strings that contain either percentiles or floats
args:
str: s
returns:
float
"""
if isinstance(s, string_types) and '%' in s:
return float(s.strip('%')) / 100.0
return float(s) |
def permutations_with_replacement(iterable, r=None):
"""Return successive r length permutations of elements in the iterable.
    Similar to itertools.permutations but without filtering out repeated values.
"""
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in itertools.product(range(n), repeat=r):
yield list(pool[i] for i in indices) |
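# Quick illustration: unlike itertools.permutations, repeated selections are
# kept, so two picks from 'ab' give four ordered lists.
print(list(permutations_with_replacement('ab', 2)))
# [['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']] |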