"""ML Fairness gym loan environment.
This environment is meant to be a hello-world example for the gym as well as
to serve as a template for writing future environments.
In each step, the agent decides whether to accept or reject an application.
Applicant features are generated by a mixture model which also determines the
likelihood of defaulting.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import enum
from typing import List, Optional
from absl import logging
import attr
import core
from environments import lending_params
from spaces import multinomial
from gym import spaces
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Used for rendering applicant features.
_MARKERS = matplotlib.markers.MarkerStyle.filled_markers
class LoanDecision(enum.IntEnum):
"""Enum representing possible loan decisions."""
REJECT = 0
ACCEPT = 1
class _CashUpdater(core.StateUpdater):
"""Changes bank_cash as a result of an action."""
def update(self, state, action):
params = state.params
if action == LoanDecision.REJECT:
return
if state.will_default:
state.bank_cash -= params.loan_amount
else:
state.bank_cash += params.loan_amount * params.interest_rate
class _ApplicantSampler(core.StateUpdater):
"""Samples a new applicant."""
def update(self, state, action):
del action # Unused.
params = state.params
new_applicant = params.applicant_distribution.sample(state.rng)
state.applicant_features = np.clip(new_applicant.features,
params.min_observation,
params.max_observation)
state.group = new_applicant.group
state.group_id = np.argmax(new_applicant.group)
state.will_default = new_applicant.will_default
@attr.s(cmp=False) # Use core.State's equality methods.
class State(core.State):
"""State object for lending environments."""
# Random number generator for the simulation.
rng = attr.ib() # type: np.random.RandomState
# State parameters that can evolve over time.
params = attr.ib() # type: lending_params.Params
# Number of loans available for the bank.
bank_cash = attr.ib() # type: float
# Applicant-related attributes are Optional with defaults of None so that the
# object can be initialized in two steps, first with applicant attributes as
# None, then a StateUpdater is used to fill in the applicant features.
applicant_features = attr.ib(default=None) # type: Optional[np.ndarray]
group = attr.ib(default=None) # type: Optional[List[int]]
group_id = attr.ib(default=None) # type: Optional[int]
will_default = attr.ib(default=None) # type: Optional[bool]
class BaseLendingEnv(core.FairnessEnv):
"""Base loan decision environment.
In each step, the agent decides whether to accept or reject an
application.
The base class is abstract.
"""
metadata = {'render.modes': ['human']}
default_param_builder = lending_params.Params
group_membership_var = 'group'
_cash_updater = _CashUpdater()
_parameter_updater = core.NoUpdate()
_applicant_updater = _ApplicantSampler()
def __init__(self, params=None):
params = (
self.default_param_builder() if params is None else params
) # type: lending_params.Params
# The action space of the agent is Accept/Reject.
self.action_space = spaces.Discrete(2)
# Bank's cash is a scalar and cannot be negative.
bank_cash_space = spaces.Box(
low=0, high=params.max_cash, shape=(), dtype=np.float32)
# Two-dimensional observation space describes each loan applicant.
loan_applicant_space = spaces.Box(
params.min_observation,
params.max_observation,
dtype=np.float32,
shape=(params.applicant_distribution.dim,))
group_space = spaces.MultiBinary(params.num_groups)
self.observable_state_vars = {
'bank_cash': bank_cash_space,
'applicant_features': loan_applicant_space,
'group': group_space
}
super(BaseLendingEnv, self).__init__(params)
self._state_init()
def _state_init(self, rng=None):
self.state = State(
# Copy so that if state.params gets mutated, initial_params stays pristine.
params=copy.deepcopy(self.initial_params),
rng=rng or np.random.RandomState(),
bank_cash=self.initial_params.bank_starting_cash)
self._applicant_updater.update(self.state, None)
def reset(self):
"""Resets the environment."""
self._state_init(self.state.rng)
return super(BaseLendingEnv, self).reset()
def _is_done(self):
"""Returns True if the bank cash is less than loan_amount."""
return self.state.bank_cash < self.state.params.loan_amount
def _step_impl(self, state, action):
"""Run one timestep of the environment's dynamics.
In a single step, the agent decides whether to accept or reject an
application.
The potential payoff of a rejected application is always 0.
If an application is accepted, the payoffs are:
-loan_amount if the applicant defaults.
+loan_amount*interest_rate if the applicant successfully pays back.
Args:
state: A `State` object containing the current state.
action: An action in `action_space`.
Returns:
A `State` object containing the updated state.
"""
self._cash_updater.update(self.state, action)
self._parameter_updater.update(self.state, action)
self._applicant_updater.update(self.state, action)
return self.state
def render(self, mode='human'):
"""Renders the history and current state using matplotlib.
Args:
mode: string indicating the rendering mode. The only supported mode is
`human`.
"""
if mode == 'human':
if self.state.params.applicant_distribution.dim != 2:
raise NotImplementedError(
'Cannot render if applicant features are not exactly 2 dimensional. '
'Got %d dimensional applicant features.' %
self.state.params.applicant_distribution.dim)
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.title('Applicant Features')
plt.xticks([], [])
plt.yticks([], [])
for state, action in self.history:
if action == 1:
x, y = state.applicant_features
color = 'r' if state.will_default else 'b'
plt.plot([x], [y], _MARKERS[state.group_id] + color, markersize=12)
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
x, y = self.state.applicant_features
plt.plot([x], [y], _MARKERS[self.state.group_id] + 'k', markersize=15)
plt.subplot(1, 2, 2)
plt.title('Cash')
plt.plot([state.bank_cash for state, _ in self.history] +
[self.state.bank_cash])
plt.ylabel('# loans available')
plt.xlabel('Time')
plt.tight_layout()
else:
super(BaseLendingEnv, self).render(mode) # Raises NotImplementedError
class SimpleLoans(BaseLendingEnv):
"""Simple lending environment.
Applicants have 2D features which can be used to determine whether they have
high or low likelihood of success.
"""
default_param_builder = lending_params.Params
class DifferentialExpressionEnv(BaseLendingEnv):
"""Lending environment with groups that present creditworthiness differently.
Applicants have 2D features which can be used to determine whether they have
high or low likelihood of success, but the mapping is different for the
different groups.
"""
default_param_builder = lending_params.DifferentialExpressionParams
class _CreditShift(core.StateUpdater):
"""Updates the cluster probabilities based on the repayment."""
def update(self, state, action):
"""Updates the cluster probabilities based on the repayment.
Successful repayment raises one's credit score and default lowers one's
credit score. This is expressed by moving a small amount of probability mass
(representing an individual) from one credit-score cluster to an adjacent
one.
This change in credit only happens if the applicant is accepted. Rejected
applicants experience no change in their score.
state.params is mutated in place; nothing is returned.
Args:
state: A core.State object.
action: a `LoanDecision`.
"""
if action == LoanDecision.REJECT:
return
params = state.params
group_id = state.group_id
# Group should always be a one-hot encoding of group_id. This assert tests
# that these two values have not somehow gotten out of sync.
assert state.group_id == np.argmax(
state.group), 'Group id %s. group %s' % (state.group_id,
np.argmax(state.group))
# Cast to list so we can mutate it.
cluster_probs = list(
params.applicant_distribution.components[group_id].weights)
rng = np.random.RandomState()
for _ in range(10):
group = params.applicant_distribution.components[group_id].sample(
rng).group
assert np.array_equal(group, state.group), (
'Sampling from the component that is indexed here does not give '
'members of the group that is intended to be affected. Something is '
'quite wrong. Check that your group ids are in order in the credit '
'cluster spec. sampled group_id %s vs state.group %s. '
'Component[%d]: %s' %
(group, state.group, group_id,
params.applicant_distribution.components[group_id]))
# Assert argmax gives the right index.
for idx, component in enumerate(
params.applicant_distribution.components[group_id].components):
credit_score = component.sample(rng).features
assert np.argmax(credit_score) == idx, '%s vs %s' % (credit_score, idx)
# This applicant has their credit score lowered or raised.
cluster_id = np.argmax(state.applicant_features)
new_cluster = (cluster_id - 1 if state.will_default else cluster_id + 1)
# Prevents falling off the edges of the cluster array.
new_cluster = min(new_cluster, len(cluster_probs) - 1)
new_cluster = max(new_cluster, 0)
# Prevents moving more probability mass than this bucket has.
assert cluster_probs[cluster_id] > 0, (
'This cluster was sampled but has no mass. %d. Full distribution %s' %
(cluster_id, cluster_probs))
mass_to_shift = min(params.cluster_shift_increment,
cluster_probs[cluster_id])
# Mutates params.cluster_probs[group_id].
cluster_probs[cluster_id] -= mass_to_shift
cluster_probs[new_cluster] += mass_to_shift
logging.debug('Group %d: Moving mass %f from %d to %d', group_id,
mass_to_shift, cluster_id, new_cluster)
assert np.abs(np.sum(cluster_probs) -
1) < 1e-6, 'Cluster probs must sum to 1.'
assert all([prob >= 0 for prob in cluster_probs
]), 'Cluster probs must be non-negative'
state.params.applicant_distribution.components[
group_id].weights = cluster_probs
class DelayedImpactEnv(BaseLendingEnv):
"""Lending environment in which outcomes affect future credit.
Each applicant has a credit score which causally determines their likelihood
of success. Applicants who default have their credit lowered while applicants
who pay back have their credit raised.
Based on the environment described in Liu et al's Delayed Impact of Machine
Learning: https://arxiv.org/abs/1803.04383
"""
default_param_builder = lending_params.DelayedImpactParams
_parameter_updater = _CreditShift()
def __init__(self, params=None):
super(DelayedImpactEnv, self).__init__(params)
self.observable_state_vars['applicant_features'] = multinomial.Multinomial(
self.initial_params.applicant_distribution.dim, 1)
self.observation_space = spaces.Dict(self.observable_state_vars)
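# A minimal, illustrative usage sketch (not part of the original module). It
# assumes the standard Gym-style reset()/step() interface that core.FairnessEnv
# is expected to expose (observation, reward, done, info), takes random
# accept/reject actions, and ignores the reward value, since reward computation
# is handled by agents rather than the environment in this framework.
if __name__ == '__main__':
  env = DelayedImpactEnv()
  observation = env.reset()
  for _ in range(100):
    action = env.action_space.sample()  # Randomly ACCEPT or REJECT.
    observation, _, done, _ = env.step(action)
    if done:  # The bank can no longer afford to make another loan.
      break
  print('Final bank cash: %s' % env.state.bank_cash)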
| {
"content_hash": "e928adb0278c34b255ed83bc0f15a166",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 81,
"avg_line_length": 34.69364161849711,
"alnum_prop": 0.6824391869376875,
"repo_name": "google/ml-fairness-gym",
"id": "9600d2fa6a8d1c8fcbe940baa51d9b6063dbd5a2",
"size": "12640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "environments/lending.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "654678"
},
{
"name": "Shell",
"bytes": "1613"
}
],
"symlink_target": ""
} |
import argparse
parser = argparse.ArgumentParser(description="Quick and easy commandline wrapper for converting markdown to word, html, and pdf formats.")
parser.add_argument("infile", help="the markdown file containing the document")
parser.add_argument("type", help="the type of output: html, word, or pdf")
parser.add_argument("-o", "--outfile", nargs='?', default=" ", help="filename of outfile (optional, will use same name as infile with new extension otherwise)")
parser.add_argument("-u", "--unsafe", action="store_true", help="UNSAFE mode: overwrite output file. Off by default, will append numbers to front of duplicates.")
parser.add_argument("-a", "--append", nargs='?', default=" ", help="Optional file to append to header file, for HTML (to add CSS or JS). Ignored otherwise.")
args = parser.parse_args()
validtypes = ['html', 'word', 'pdf']
outputtype = args.type.lower()
if outputtype not in validtypes:
raise ValueError("No output type called %s found. Valid output types are: %s." % (outputtype, ', '.join(validtypes)))
from os.path import isfile
print(' ')
print(' ')
print("CONVERTING MARKDOWN FILE TO %s USING PANDOC" % outputtype.upper())
# validate input file
if args.infile[-3:] != '.md':
print(' ')
print("You gave me an input filename without a .md extension. Appending to file.")
theinfile = args.infile + '.md'
else:
theinfile = args.infile
if not isfile(theinfile):
raise IOError("input file %s not found" % theinfile)
# validate output file
if args.outfile == ' ':
print(' ')
print("You didn't give me an output filename. Using input filename with extension changed to appropriate type.")
if outputtype != 'word':
theoutfile = theinfile[0:-3] + '.' + outputtype
else:
theoutfile = theinfile[0:-3] + '.docx'
elif (args.outfile[-5:] != '.html') and (outputtype == "html"):
print(' ')
print("You gave me an output filename without a .html extension. Appending to file.")
theoutfile = args.outfile + '.html'
elif (args.outfile[-5:] != '.docx') and (outputtype == "word"):
print(' ')
print("You gave me an output filename without a .docx extension. Appending to file.")
theoutfile = args.outfile + '.docx'
elif (args.outfile[-4:] != '.pdf') and (outputtype == "pdf"):
print(' ')
print("You gave me an output filename without a .pdf extension. Appending to file.")
theoutfile = args.outfile + '.pdf'
else:
theoutfile = args.outfile
def lazyiter():
number = 0
while True:
yield number
number += 1
mycounter = lazyiter()
# prevent overwriting of output file
if not args.unsafe:
while isfile(theoutfile):
print(' ')
print("You gave me an output file (%s) that already exists. Appending a number to the front to avoid overwriting." % theoutfile)
theoutfile = str(next(mycounter)) + theoutfile
print(' ')
print('New file is: %s' % theoutfile)
if (outputtype == "html") and (args.append != ' '):
commandstring = 'pandoc -s -H %s %s -o %s' % (args.append, theinfile, theoutfile)
else:
commandstring = 'pandoc -s %s -o %s' % (theinfile, theoutfile)
print(' ')
print('Converting %s to %s in %s format' % (theinfile, theoutfile, outputtype))
print(' ')
print(' ')
from subprocess import call
call(commandstring, shell=True)
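# Illustrative invocations (assuming pandoc is installed and this script is
# saved as pgmd.py; the filenames below are placeholders):
#
#   python pgmd.py notes.md html                # notes.md -> notes.html
#   python pgmd.py notes.md word -o report      # notes.md -> report.docx
#   python pgmd.py notes.md html -a extra.html  # pass extra.html to pandoc -H (extra CSS/JS in the header)
#   python pgmd.py notes.md pdf -u              # overwrite notes.pdf if it already exists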
| {
"content_hash": "90098a2be5f2ec9babe3e754cd58538a",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 162,
"avg_line_length": 41.135802469135804,
"alnum_prop": 0.6668667466986795,
"repo_name": "sahmed95/sahmed95.github.io",
"id": "a35d520fb74099aed97b91b48f978f20d11309d3",
"size": "3332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gwave/pgmd.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "506169"
},
{
"name": "HTML",
"bytes": "7224257"
},
{
"name": "JavaScript",
"bytes": "3105240"
},
{
"name": "Python",
"bytes": "17652"
}
],
"symlink_target": ""
} |
from menpo.groupalign.procrustes import GeneralizedProcrustesAnalysis
| {
"content_hash": "cade5c3c701a929045aed18b12918477",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 69,
"avg_line_length": 70,
"alnum_prop": 0.9142857142857143,
"repo_name": "jabooth/menpo-archive",
"id": "42f76083d7371f7b31f36587b2489dbf08fb27b1",
"size": "70",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "menpo/groupalign/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "101730"
},
{
"name": "C++",
"bytes": "169304"
},
{
"name": "Python",
"bytes": "818217"
},
{
"name": "Shell",
"bytes": "776"
}
],
"symlink_target": ""
} |
"""code generator for GL/GLES extension wrangler."""
import optparse
import os
import collections
import re
import platform
import sys
from subprocess import call
from collections import namedtuple
HEADER_PATHS = [
'../../third_party/khronos',
'../../third_party/mesa/src/include',
'.',
'../../gpu',
]
UNCONDITIONALLY_BOUND_EXTENSIONS = set([
'WGL_ARB_extensions_string',
'WGL_EXT_extensions_string',
'GL_CHROMIUM_gles_depth_binding_hack', # crbug.com/448206
'GL_CHROMIUM_glgetstringi_hack', # crbug.com/470396
'GL_CHROMIUM_egl_khr_fence_sync_hack', # crbug.com/504758
])
"""Function binding conditions can be specified manually by supplying a versions
array instead of the names array. Each version has the following keys:
name: Mandatory. Name of the function. Multiple versions can have the same
name but different conditions.
extensions: Extra Extensions for which the function is bound. Only needed
in some cases where the extension cannot be parsed from the
headers.
is_optional: True if the GetProcAddress can return NULL for the
function. This may happen for example when functions
are added to a new version of an extension, but the
extension string is not modified.
By default, the function gets its name from the first name in its names or
versions array. This can be overridden by supplying a 'known_as' key.
"""
GL_FUNCTIONS = [
{ 'return_type': 'void',
'names': ['glActiveTexture'],
'arguments': 'GLenum texture', },
{ 'return_type': 'void',
'known_as': 'glApplyFramebufferAttachmentCMAAINTEL',
'versions': [{ 'name': 'glApplyFramebufferAttachmentCMAAINTEL',
'extensions': ['GL_INTEL_framebuffer_CMAA'] }],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glAttachShader'],
'arguments': 'GLuint program, GLuint shader', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glBeginQuery' },
{ 'name': 'glBeginQueryARB' },
{ 'name': 'glBeginQueryEXT',
'extensions': ['GL_EXT_occlusion_query_boolean'] }],
'arguments': 'GLenum target, GLuint id', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glBeginTransformFeedback' }],
'arguments': 'GLenum primitiveMode', },
{ 'return_type': 'void',
'names': ['glBindAttribLocation'],
'arguments': 'GLuint program, GLuint index, const char* name', },
{ 'return_type': 'void',
'names': ['glBindBuffer'],
'arguments': 'GLenum target, GLuint buffer', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glBindBufferBase' }],
'arguments': 'GLenum target, GLuint index, GLuint buffer', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glBindBufferRange' }],
'arguments': 'GLenum target, GLuint index, GLuint buffer, GLintptr offset, '
'GLsizeiptr size', },
{ 'return_type': 'void',
'names': ['glBindFragDataLocation'],
'arguments': 'GLuint program, GLuint colorNumber, const char* name', },
{ 'return_type': 'void',
'names': ['glBindFragDataLocationIndexed'],
'arguments':
'GLuint program, GLuint colorNumber, GLuint index, const char* name', },
{ 'return_type': 'void',
'names': ['glBindFramebufferEXT', 'glBindFramebuffer'],
'arguments': 'GLenum target, GLuint framebuffer', },
{ 'return_type': 'void',
'names': ['glBindRenderbufferEXT', 'glBindRenderbuffer'],
'arguments': 'GLenum target, GLuint renderbuffer', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glBindSampler' }],
'arguments': 'GLuint unit, GLuint sampler', },
{ 'return_type': 'void',
'names': ['glBindTexture'],
'arguments': 'GLenum target, GLuint texture', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glBindTransformFeedback' }],
'arguments': 'GLenum target, GLuint id', },
{ 'return_type': 'void',
'known_as': 'glBindVertexArrayOES',
'versions': [{ 'name': 'glBindVertexArray',
'extensions': ['GL_ARB_vertex_array_object'], },
{ 'name': 'glBindVertexArrayOES' },
{ 'name': 'glBindVertexArrayAPPLE',
'extensions': ['GL_APPLE_vertex_array_object'] }],
'arguments': 'GLuint array' },
{ 'return_type': 'void',
'known_as': 'glBlendBarrierKHR',
'versions': [{ 'name': 'glBlendBarrierNV',
'extensions': ['GL_NV_blend_equation_advanced'] },
{ 'name': 'glBlendBarrierKHR',
'extensions': ['GL_KHR_blend_equation_advanced'] }],
'arguments': 'void' },
{ 'return_type': 'void',
'names': ['glBlendColor'],
'arguments': 'GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha', },
{ 'return_type': 'void',
'names': ['glBlendEquation'],
'arguments': ' GLenum mode ', },
{ 'return_type': 'void',
'names': ['glBlendEquationSeparate'],
'arguments': 'GLenum modeRGB, GLenum modeAlpha', },
{ 'return_type': 'void',
'names': ['glBlendFunc'],
'arguments': 'GLenum sfactor, GLenum dfactor', },
{ 'return_type': 'void',
'names': ['glBlendFuncSeparate'],
'arguments':
'GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha', },
{ 'return_type': 'void',
'names': ['glBlitFramebuffer'],
'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, '
'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, '
'GLbitfield mask, GLenum filter', },
{ 'return_type': 'void',
'names': ['glBlitFramebufferANGLE', 'glBlitFramebuffer'],
'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, '
'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, '
'GLbitfield mask, GLenum filter', },
{ 'return_type': 'void',
'names': ['glBlitFramebufferEXT', 'glBlitFramebuffer'],
'arguments': 'GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, '
'GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, '
'GLbitfield mask, GLenum filter', },
{ 'return_type': 'void',
'names': ['glBufferData'],
'arguments':
'GLenum target, GLsizeiptr size, const void* data, GLenum usage', },
{ 'return_type': 'void',
'names': ['glBufferSubData'],
'arguments':
'GLenum target, GLintptr offset, GLsizeiptr size, const void* data', },
{ 'return_type': 'GLenum',
'names': ['glCheckFramebufferStatusEXT',
'glCheckFramebufferStatus'],
'arguments': 'GLenum target',
'logging_code': """
GL_SERVICE_LOG("GL_RESULT: " << GLEnums::GetStringEnum(result));
""", },
{ 'return_type': 'void',
'names': ['glClear'],
'arguments': 'GLbitfield mask', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glClearBufferfi' }],
'arguments': 'GLenum buffer, GLint drawbuffer, const GLfloat depth, '
'GLint stencil', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glClearBufferfv' }],
'arguments': 'GLenum buffer, GLint drawbuffer, const GLfloat* value', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glClearBufferiv' }],
'arguments': 'GLenum buffer, GLint drawbuffer, const GLint* value', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glClearBufferuiv' }],
'arguments': 'GLenum buffer, GLint drawbuffer, const GLuint* value', },
{ 'return_type': 'void',
'names': ['glClearColor'],
'arguments': 'GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glClearDepth',
'extensions': ['GL_CHROMIUM_gles_depth_binding_hack'] }],
'arguments': 'GLclampd depth', },
{ 'return_type': 'void',
'names': ['glClearDepthf'],
'arguments': 'GLclampf depth', },
{ 'return_type': 'void',
'names': ['glClearStencil'],
'arguments': 'GLint s', },
{ 'return_type': 'GLenum',
'versions': [{ 'name': 'glClientWaitSync',
'extensions': ['GL_ARB_sync'] }],
'arguments': 'GLsync sync, GLbitfield flags, GLuint64 timeout', },
{ 'return_type': 'void',
'names': ['glColorMask'],
'arguments':
'GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha', },
{ 'return_type': 'void',
'names': ['glCompileShader'],
'arguments': 'GLuint shader', },
{ 'return_type': 'void',
'names': ['glCompressedTexImage2D'],
'arguments':
'GLenum target, GLint level, GLenum internalformat, GLsizei width, '
'GLsizei height, GLint border, GLsizei imageSize, const void* data', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glCompressedTexImage3D' }],
'arguments':
'GLenum target, GLint level, GLenum internalformat, GLsizei width, '
'GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, '
'const void* data', },
{ 'return_type': 'void',
'names': ['glCompressedTexSubImage2D'],
'arguments':
'GLenum target, GLint level, GLint xoffset, GLint yoffset, '
'GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, '
'const void* data', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glCompressedTexSubImage3D' }],
'arguments':
'GLenum target, GLint level, GLint xoffset, GLint yoffset, '
'GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, '
'GLenum format, GLsizei imageSize, const void* data', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glCopyBufferSubData' }],
'arguments':
'GLenum readTarget, GLenum writeTarget, GLintptr readOffset, '
'GLintptr writeOffset, GLsizeiptr size', },
{ 'return_type': 'void',
'names': ['glCopyTexImage2D'],
'arguments':
'GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, '
'GLsizei width, GLsizei height, GLint border', },
{ 'return_type': 'void',
'names': ['glCopyTexSubImage2D'],
'arguments':
'GLenum target, GLint level, GLint xoffset, '
'GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glCopyTexSubImage3D' }],
'arguments':
'GLenum target, GLint level, GLint xoffset, GLint yoffset, '
'GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glCoverFillPathInstancedNV'],
'arguments': 'GLsizei numPaths, GLenum pathNameType, const void* paths, '
'GLuint pathBase, GLenum coverMode, GLenum transformType, '
'const GLfloat* transformValues' },
{ 'return_type': 'void',
'names': ['glCoverFillPathNV'],
'arguments': 'GLuint path, GLenum coverMode' },
{ 'return_type': 'void',
'names': ['glCoverStrokePathInstancedNV'],
'arguments': 'GLsizei numPaths, GLenum pathNameType, const void* paths, '
'GLuint pathBase, GLenum coverMode, GLenum transformType, '
'const GLfloat* transformValues' },
{ 'return_type': 'void',
'names': ['glCoverStrokePathNV'],
'arguments': 'GLuint name, GLenum coverMode' },
{ 'return_type': 'GLuint',
'names': ['glCreateProgram'],
'arguments': 'void', },
{ 'return_type': 'GLuint',
'names': ['glCreateShader'],
'arguments': 'GLenum type', },
{ 'return_type': 'void',
'names': ['glCullFace'],
'arguments': 'GLenum mode', },
{ 'return_type': 'void',
'names': ['glDeleteBuffers'],
'known_as': 'glDeleteBuffersARB',
'arguments': 'GLsizei n, const GLuint* buffers', },
{ 'return_type': 'void',
'known_as': 'glDeleteFencesAPPLE',
'versions': [{ 'name': 'glDeleteFencesAPPLE',
'extensions': ['GL_APPLE_fence'] }],
'arguments': 'GLsizei n, const GLuint* fences', },
{ 'return_type': 'void',
'names': ['glDeleteFencesNV'],
'arguments': 'GLsizei n, const GLuint* fences', },
{ 'return_type': 'void',
'names': ['glDeleteFramebuffersEXT', 'glDeleteFramebuffers'],
'arguments': 'GLsizei n, const GLuint* framebuffers', },
{ 'return_type': 'void',
'names': ['glDeletePathsNV'],
'arguments': 'GLuint path, GLsizei range' },
{ 'return_type': 'void',
'names': ['glDeleteProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glDeleteQueries' },
{ 'name': 'glDeleteQueriesARB'},
{ 'name': 'glDeleteQueriesEXT',
'extensions': ['GL_EXT_occlusion_query_boolean'] }],
'arguments': 'GLsizei n, const GLuint* ids', },
{ 'return_type': 'void',
'names': ['glDeleteRenderbuffersEXT', 'glDeleteRenderbuffers'],
'arguments': 'GLsizei n, const GLuint* renderbuffers', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glDeleteSamplers' }],
'arguments': 'GLsizei n, const GLuint* samplers', },
{ 'return_type': 'void',
'names': ['glDeleteShader'],
'arguments': 'GLuint shader', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glDeleteSync',
'extensions': ['GL_ARB_sync'] }],
'arguments': 'GLsync sync', },
{ 'return_type': 'void',
'names': ['glDeleteTextures'],
'arguments': 'GLsizei n, const GLuint* textures', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glDeleteTransformFeedbacks' }],
'arguments': 'GLsizei n, const GLuint* ids', },
{ 'return_type': 'void',
'known_as': 'glDeleteVertexArraysOES',
'versions': [{ 'name': 'glDeleteVertexArrays',
'extensions': ['GL_ARB_vertex_array_object'], },
{ 'name': 'glDeleteVertexArraysOES' },
{ 'name': 'glDeleteVertexArraysAPPLE',
'extensions': ['GL_APPLE_vertex_array_object'] }],
'arguments': 'GLsizei n, const GLuint* arrays' },
{ 'return_type': 'void',
'names': ['glDepthFunc'],
'arguments': 'GLenum func', },
{ 'return_type': 'void',
'names': ['glDepthMask'],
'arguments': 'GLboolean flag', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glDepthRange',
'extensions': ['GL_CHROMIUM_gles_depth_binding_hack'] }],
'arguments': 'GLclampd zNear, GLclampd zFar', },
{ 'return_type': 'void',
'names': ['glDepthRangef'],
'arguments': 'GLclampf zNear, GLclampf zFar', },
{ 'return_type': 'void',
'names': ['glDetachShader'],
'arguments': 'GLuint program, GLuint shader', },
{ 'return_type': 'void',
'names': ['glDisable'],
'arguments': 'GLenum cap', },
{ 'return_type': 'void',
'names': ['glDisableVertexAttribArray'],
'arguments': 'GLuint index', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glDiscardFramebufferEXT',
'extensions': ['GL_EXT_discard_framebuffer'] }],
'arguments': 'GLenum target, GLsizei numAttachments, '
'const GLenum* attachments' },
{ 'return_type': 'void',
'names': ['glDrawArrays'],
'arguments': 'GLenum mode, GLint first, GLsizei count', },
{ 'return_type': 'void',
'known_as': 'glDrawArraysInstancedANGLE',
'names': ['glDrawArraysInstancedARB', 'glDrawArraysInstancedANGLE',
'glDrawArraysInstanced'],
'arguments': 'GLenum mode, GLint first, GLsizei count, GLsizei primcount', },
{ 'return_type': 'void',
'names': ['glDrawBuffer'],
'arguments': 'GLenum mode', },
{ 'return_type': 'void',
'names': ['glDrawBuffersARB', 'glDrawBuffersEXT', 'glDrawBuffers'],
'arguments': 'GLsizei n, const GLenum* bufs', },
{ 'return_type': 'void',
'names': ['glDrawElements'],
'arguments':
'GLenum mode, GLsizei count, GLenum type, const void* indices', },
{ 'return_type': 'void',
'known_as': 'glDrawElementsInstancedANGLE',
'names': ['glDrawElementsInstancedARB', 'glDrawElementsInstancedANGLE',
'glDrawElementsInstanced'],
'arguments':
'GLenum mode, GLsizei count, GLenum type, const void* indices, '
'GLsizei primcount', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glDrawRangeElements' }],
'arguments': 'GLenum mode, GLuint start, GLuint end, GLsizei count, '
'GLenum type, const void* indices', },
{ 'return_type': 'void',
'names': ['glEGLImageTargetRenderbufferStorageOES'],
'arguments': 'GLenum target, GLeglImageOES image', },
{ 'return_type': 'void',
'names': ['glEGLImageTargetTexture2DOES'],
'arguments': 'GLenum target, GLeglImageOES image', },
{ 'return_type': 'void',
'names': ['glEnable'],
'arguments': 'GLenum cap', },
{ 'return_type': 'void',
'names': ['glEnableVertexAttribArray'],
'arguments': 'GLuint index', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glEndQuery' },
{ 'name': 'glEndQueryARB' },
{ 'name': 'glEndQueryEXT',
'extensions': ['GL_EXT_occlusion_query_boolean'] }],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glEndTransformFeedback' }],
'arguments': 'void', },
{ 'return_type': 'GLsync',
'versions': [{ 'name': 'glFenceSync',
'extensions': ['GL_ARB_sync'] }],
'arguments': 'GLenum condition, GLbitfield flags', },
{ 'return_type': 'void',
'names': ['glFinish'],
'arguments': 'void', },
{ 'return_type': 'void',
'known_as': 'glFinishFenceAPPLE',
'versions': [{ 'name': 'glFinishFenceAPPLE',
'extensions': ['GL_APPLE_fence'] }],
'arguments': 'GLuint fence', },
{ 'return_type': 'void',
'names': ['glFinishFenceNV'],
'arguments': 'GLuint fence', },
{ 'return_type': 'void',
'names': ['glFlush'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glFlushMappedBufferRange'],
'arguments': 'GLenum target, GLintptr offset, GLsizeiptr length', },
{ 'return_type': 'void',
'names': ['glFramebufferRenderbufferEXT', 'glFramebufferRenderbuffer'],
'arguments':
'GLenum target, GLenum attachment, GLenum renderbuffertarget, '
'GLuint renderbuffer', },
{ 'return_type': 'void',
'names': ['glFramebufferTexture2DEXT', 'glFramebufferTexture2D'],
'arguments':
'GLenum target, GLenum attachment, GLenum textarget, GLuint texture, '
'GLint level', },
{ 'return_type': 'void',
'names': ['glFramebufferTexture2DMultisampleEXT'],
'arguments':
'GLenum target, GLenum attachment, GLenum textarget, GLuint texture, '
'GLint level, GLsizei samples', },
{ 'return_type': 'void',
'names': ['glFramebufferTexture2DMultisampleIMG'],
'arguments':
'GLenum target, GLenum attachment, GLenum textarget, GLuint texture, '
'GLint level, GLsizei samples', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glFramebufferTextureLayer' }],
'arguments': 'GLenum target, GLenum attachment, GLuint texture, GLint level, '
'GLint layer', },
{ 'return_type': 'void',
'names': ['glFrontFace'],
'arguments': 'GLenum mode', },
{ 'return_type': 'void',
'names': ['glGenBuffers'],
'known_as': 'glGenBuffersARB',
'arguments': 'GLsizei n, GLuint* buffers', },
{ 'return_type': 'void',
'names': ['glGenerateMipmapEXT', 'glGenerateMipmap'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'known_as': 'glGenFencesAPPLE',
'versions': [{ 'name': 'glGenFencesAPPLE',
'extensions': ['GL_APPLE_fence'] }],
'arguments': 'GLsizei n, GLuint* fences', },
{ 'return_type': 'void',
'names': ['glGenFencesNV'],
'arguments': 'GLsizei n, GLuint* fences', },
{ 'return_type': 'void',
'names': ['glGenFramebuffersEXT', 'glGenFramebuffers'],
'arguments': 'GLsizei n, GLuint* framebuffers', },
{ 'return_type': 'GLuint',
'names': ['glGenPathsNV'],
'arguments': 'GLsizei range' },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGenQueries' },
{ 'name': 'glGenQueriesARB', },
{ 'name' : 'glGenQueriesEXT',
'extensions': ['GL_EXT_occlusion_query_boolean'] }],
'arguments': 'GLsizei n, GLuint* ids', },
{ 'return_type': 'void',
'names': ['glGenRenderbuffersEXT', 'glGenRenderbuffers'],
'arguments': 'GLsizei n, GLuint* renderbuffers', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGenSamplers' }],
'arguments': 'GLsizei n, GLuint* samplers', },
{ 'return_type': 'void',
'names': ['glGenTextures'],
'arguments': 'GLsizei n, GLuint* textures', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGenTransformFeedbacks' }],
'arguments': 'GLsizei n, GLuint* ids', },
{ 'return_type': 'void',
'known_as': 'glGenVertexArraysOES',
'versions': [{ 'name': 'glGenVertexArrays',
'extensions': ['GL_ARB_vertex_array_object'], },
{ 'name': 'glGenVertexArraysOES' },
{ 'name': 'glGenVertexArraysAPPLE',
'extensions': ['GL_APPLE_vertex_array_object'] }],
'arguments': 'GLsizei n, GLuint* arrays', },
{ 'return_type': 'void',
'names': ['glGetActiveAttrib'],
'arguments':
'GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, '
'GLint* size, GLenum* type, char* name', },
{ 'return_type': 'void',
'names': ['glGetActiveUniform'],
'arguments':
'GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, '
'GLint* size, GLenum* type, char* name', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetActiveUniformBlockiv' }],
'arguments': 'GLuint program, GLuint uniformBlockIndex, GLenum pname, '
'GLint* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetActiveUniformBlockName' }],
'arguments': 'GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, '
'GLsizei* length, char* uniformBlockName', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetActiveUniformsiv' }],
'arguments': 'GLuint program, GLsizei uniformCount, '
'const GLuint* uniformIndices, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetAttachedShaders'],
'arguments':
'GLuint program, GLsizei maxcount, GLsizei* count, GLuint* shaders', },
{ 'return_type': 'GLint',
'names': ['glGetAttribLocation'],
'arguments': 'GLuint program, const char* name', },
{ 'return_type': 'void',
'names': ['glGetBooleanv'],
'arguments': 'GLenum pname, GLboolean* params', },
{ 'return_type': 'void',
'names': ['glGetBufferParameteriv'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'GLenum',
'names': ['glGetError'],
'arguments': 'void',
'logging_code': """
GL_SERVICE_LOG("GL_RESULT: " << GLEnums::GetStringError(result));
""", },
{ 'return_type': 'void',
'names': ['glGetFenceivNV'],
'arguments': 'GLuint fence, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetFloatv'],
'arguments': 'GLenum pname, GLfloat* params', },
{ 'return_type': 'GLint',
'versions': [{ 'name': 'glGetFragDataLocation' }],
'arguments': 'GLuint program, const char* name', },
{ 'return_type': 'void',
'names': ['glGetFramebufferAttachmentParameterivEXT',
'glGetFramebufferAttachmentParameteriv'],
'arguments': 'GLenum target, '
'GLenum attachment, GLenum pname, GLint* params', },
{ 'return_type': 'GLenum',
'names': ['glGetGraphicsResetStatusARB',
'glGetGraphicsResetStatusKHR',
'glGetGraphicsResetStatusEXT',
'glGetGraphicsResetStatus'],
'arguments': 'void', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetInteger64i_v' }],
'arguments': 'GLenum target, GLuint index, GLint64* data', },
{ 'return_type': 'void',
'names': ['glGetInteger64v'],
'arguments': 'GLenum pname, GLint64* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetIntegeri_v' }],
'arguments': 'GLenum target, GLuint index, GLint* data', },
{ 'return_type': 'void',
'names': ['glGetIntegerv'],
'arguments': 'GLenum pname, GLint* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetInternalformativ' }],
'arguments': 'GLenum target, GLenum internalformat, GLenum pname, '
'GLsizei bufSize, GLint* params', },
{ 'return_type': 'void',
'known_as': 'glGetProgramBinary',
'versions': [{ 'name': 'glGetProgramBinaryOES' },
{ 'name': 'glGetProgramBinary',
'extensions': ['GL_ARB_get_program_binary'] }],
'arguments': 'GLuint program, GLsizei bufSize, GLsizei* length, '
'GLenum* binaryFormat, GLvoid* binary' },
{ 'return_type': 'void',
'names': ['glGetProgramInfoLog'],
'arguments':
'GLuint program, GLsizei bufsize, GLsizei* length, char* infolog', },
{ 'return_type': 'void',
'versions': [{'name': 'glGetProgramInterfaceiv',
'extensions': ['GL_ARB_program_interface_query']}],
'arguments': 'GLuint program, GLenum programInterface, GLenum pname, '
'GLint* params'},
{ 'return_type': 'void',
'names': ['glGetProgramiv'],
'arguments': 'GLuint program, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'versions': [{'name': 'glGetProgramResourceiv',
'extensions': ['GL_ARB_program_interface_query']}],
'arguments': 'GLuint program, GLenum programInterface, GLuint index, '
'GLsizei propCount, const GLenum* props, GLsizei bufSize, '
'GLsizei* length, GLint* params'},
{ 'return_type': 'GLint',
'names': ['glGetProgramResourceLocation'],
'arguments': 'GLuint program, GLenum programInterface, const char* name', },
{ 'return_type': 'void',
'versions': [{'name': 'glGetProgramResourceName',
'extensions': ['GL_ARB_program_interface_query']}],
'arguments': 'GLuint program, GLenum programInterface, GLuint index, '
'GLsizei bufSize, GLsizei* length, GLchar* name'},
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetQueryiv' },
{ 'name': 'glGetQueryivARB' },
{ 'name': 'glGetQueryivEXT',
'extensions': ['GL_EXT_occlusion_query_boolean'] }],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetQueryObjecti64v',
'extensions': ['GL_ARB_timer_query'] },
{ 'name': 'glGetQueryObjecti64vEXT' }],
'arguments': 'GLuint id, GLenum pname, GLint64* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetQueryObjectiv' },
{ 'name': 'glGetQueryObjectivARB' },
{ 'name': 'glGetQueryObjectivEXT' }],
'arguments': 'GLuint id, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetQueryObjectui64v',
'extensions': ['GL_ARB_timer_query'] },
{ 'name': 'glGetQueryObjectui64vEXT' }],
'arguments': 'GLuint id, GLenum pname, GLuint64* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetQueryObjectuiv' },
{ 'name': 'glGetQueryObjectuivARB' },
{ 'name': 'glGetQueryObjectuivEXT',
'extensions': ['GL_EXT_occlusion_query_boolean'] }],
'arguments': 'GLuint id, GLenum pname, GLuint* params', },
{ 'return_type': 'void',
'names': ['glGetRenderbufferParameterivEXT', 'glGetRenderbufferParameteriv'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetSamplerParameterfv' }],
'arguments': 'GLuint sampler, GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetSamplerParameteriv' }],
'arguments': 'GLuint sampler, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetShaderInfoLog'],
'arguments':
'GLuint shader, GLsizei bufsize, GLsizei* length, char* infolog', },
{ 'return_type': 'void',
'names': ['glGetShaderiv'],
'arguments': 'GLuint shader, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetShaderPrecisionFormat'],
'arguments': 'GLenum shadertype, GLenum precisiontype, '
'GLint* range, GLint* precision', },
{ 'return_type': 'void',
'names': ['glGetShaderSource'],
'arguments':
'GLuint shader, GLsizei bufsize, GLsizei* length, char* source', },
{ 'return_type': 'const GLubyte*',
'names': ['glGetString'],
'arguments': 'GLenum name', },
{ 'return_type': 'const GLubyte*',
# This is needed for bootstrapping on the desktop GL core profile.
# It won't be called unless the expected GL version is used.
'versions': [{ 'name': 'glGetStringi',
'extensions': ['GL_CHROMIUM_glgetstringi_hack'] }],
'arguments': 'GLenum name, GLuint index', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetSynciv',
'extensions': ['GL_ARB_sync'] }],
'arguments':
'GLsync sync, GLenum pname, GLsizei bufSize, GLsizei* length,'
'GLint* values', },
{ 'return_type': 'void',
'names': ['glGetTexLevelParameterfv'],
'arguments': 'GLenum target, GLint level, GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetTexLevelParameteriv'],
'arguments': 'GLenum target, GLint level, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetTexParameterfv'],
'arguments': 'GLenum target, GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetTexParameteriv'],
'arguments': 'GLenum target, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetTransformFeedbackVarying' }],
'arguments': 'GLuint program, GLuint index, GLsizei bufSize, '
'GLsizei* length, GLsizei* size, GLenum* type, char* name', },
{ 'return_type': 'void',
'names': ['glGetTranslatedShaderSourceANGLE'],
'arguments':
'GLuint shader, GLsizei bufsize, GLsizei* length, char* source', },
{ 'return_type': 'GLuint',
'versions': [{ 'name': 'glGetUniformBlockIndex' }],
'arguments': 'GLuint program, const char* uniformBlockName', },
{ 'return_type': 'void',
'names': ['glGetUniformfv'],
'arguments': 'GLuint program, GLint location, GLfloat* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glGetUniformIndices' }],
'arguments': 'GLuint program, GLsizei uniformCount, '
'const char* const* uniformNames, GLuint* uniformIndices', },
{ 'return_type': 'void',
'names': ['glGetUniformiv'],
'arguments': 'GLuint program, GLint location, GLint* params', },
{ 'return_type': 'GLint',
'names': ['glGetUniformLocation'],
'arguments': 'GLuint program, const char* name', },
{ 'return_type': 'void',
'names': ['glGetUniformuiv'],
'arguments': 'GLuint program, GLint location, GLuint* params', },
{ 'return_type': 'void',
'names': ['glGetVertexAttribfv'],
'arguments': 'GLuint index, GLenum pname, GLfloat* params', },
{ 'return_type': 'void',
'names': ['glGetVertexAttribiv'],
'arguments': 'GLuint index, GLenum pname, GLint* params', },
{ 'return_type': 'void',
'names': ['glGetVertexAttribPointerv'],
'arguments': 'GLuint index, GLenum pname, void** pointer', },
{ 'return_type': 'void',
'names': ['glHint'],
'arguments': 'GLenum target, GLenum mode', },
{ 'return_type': 'void',
'names': ['glInsertEventMarkerEXT'],
'arguments': 'GLsizei length, const char* marker', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glInvalidateFramebuffer' }],
'arguments': 'GLenum target, GLsizei numAttachments, '
'const GLenum* attachments' },
{ 'return_type': 'void',
'versions': [{ 'name': 'glInvalidateSubFramebuffer' }],
'arguments':
'GLenum target, GLsizei numAttachments, const GLenum* attachments, '
'GLint x, GLint y, GLint width, GLint height', },
{ 'return_type': 'GLboolean',
'names': ['glIsBuffer'],
'arguments': 'GLuint buffer', },
{ 'return_type': 'GLboolean',
'names': ['glIsEnabled'],
'arguments': 'GLenum cap', },
{ 'return_type': 'GLboolean',
'known_as': 'glIsFenceAPPLE',
'versions': [{ 'name': 'glIsFenceAPPLE',
'extensions': ['GL_APPLE_fence'] }],
'arguments': 'GLuint fence', },
{ 'return_type': 'GLboolean',
'names': ['glIsFenceNV'],
'arguments': 'GLuint fence', },
{ 'return_type': 'GLboolean',
'names': ['glIsFramebufferEXT', 'glIsFramebuffer'],
'arguments': 'GLuint framebuffer', },
{ 'return_type': 'GLboolean',
'names': ['glIsPathNV'],
'arguments': 'GLuint path' },
{ 'return_type': 'GLboolean',
'names': ['glIsProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'GLboolean',
'versions': [{ 'name': 'glIsQuery' },
{ 'name': 'glIsQueryARB' },
{ 'name': 'glIsQueryEXT',
'extensions': ['GL_EXT_occlusion_query_boolean'] }],
'arguments': 'GLuint query', },
{ 'return_type': 'GLboolean',
'names': ['glIsRenderbufferEXT', 'glIsRenderbuffer'],
'arguments': 'GLuint renderbuffer', },
{ 'return_type': 'GLboolean',
'versions': [{ 'name': 'glIsSampler' }],
'arguments': 'GLuint sampler', },
{ 'return_type': 'GLboolean',
'names': ['glIsShader'],
'arguments': 'GLuint shader', },
{ 'return_type': 'GLboolean',
'versions': [{ 'name': 'glIsSync',
'extensions': ['GL_ARB_sync'] }],
'arguments': 'GLsync sync', },
{ 'return_type': 'GLboolean',
'names': ['glIsTexture'],
'arguments': 'GLuint texture', },
{ 'return_type': 'GLboolean',
'versions': [{ 'name': 'glIsTransformFeedback' }],
'arguments': 'GLuint id', },
{ 'return_type': 'GLboolean',
'known_as': 'glIsVertexArrayOES',
'versions': [{ 'name': 'glIsVertexArray',
'extensions': ['GL_ARB_vertex_array_object'], },
{ 'name': 'glIsVertexArrayOES' },
{ 'name': 'glIsVertexArrayAPPLE',
'extensions': ['GL_APPLE_vertex_array_object'] }],
'arguments': 'GLuint array' },
{ 'return_type': 'void',
'names': ['glLineWidth'],
'arguments': 'GLfloat width', },
{ 'return_type': 'void',
'names': ['glLinkProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void*',
'known_as': 'glMapBuffer',
'names': ['glMapBufferOES', 'glMapBuffer'],
'arguments': 'GLenum target, GLenum access', },
{ 'return_type': 'void*',
'known_as': 'glMapBufferRange',
'versions': [{ 'name': 'glMapBufferRange',
'extensions': ['GL_ARB_map_buffer_range'] },
{ 'name': 'glMapBufferRangeEXT',
'extensions': ['GL_EXT_map_buffer_range'] }],
'arguments':
'GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access', },
{ 'return_type': 'void',
'known_as': 'glMatrixLoadfEXT',
'versions': [{ 'name': 'glMatrixLoadfEXT',
'extensions': ['GL_EXT_direct_state_access',
'GL_NV_path_rendering'] }],
'arguments': 'GLenum matrixMode, const GLfloat* m' },
{ 'return_type': 'void',
'known_as': 'glMatrixLoadIdentityEXT',
'versions': [{ 'name': 'glMatrixLoadIdentityEXT',
'extensions': ['GL_EXT_direct_state_access',
'GL_NV_path_rendering'] },],
'arguments': 'GLenum matrixMode' },
{ 'return_type': 'void',
'names': ['glPathCommandsNV'],
'arguments': 'GLuint path, GLsizei numCommands, const GLubyte* commands, '
'GLsizei numCoords, GLenum coordType, const GLvoid* coords' },
{ 'return_type': 'void',
'names': ['glPathParameterfNV'],
'arguments': 'GLuint path, GLenum pname, GLfloat value' },
{ 'return_type': 'void',
'names': ['glPathParameteriNV'],
'arguments': 'GLuint path, GLenum pname, GLint value' },
{ 'return_type': 'void',
'names': ['glPathStencilFuncNV'],
'arguments': 'GLenum func, GLint ref, GLuint mask' },
{ 'return_type': 'void',
'versions': [{ 'name': 'glPauseTransformFeedback' }],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glPixelStorei'],
'arguments': 'GLenum pname, GLint param', },
{ 'return_type': 'void',
'names': ['glPointParameteri'],
'arguments': 'GLenum pname, GLint param', },
{ 'return_type': 'void',
'names': ['glPolygonOffset'],
'arguments': 'GLfloat factor, GLfloat units', },
{ 'return_type': 'void',
'names': ['glPopGroupMarkerEXT'],
'arguments': 'void', },
{ 'return_type': 'void',
'known_as': 'glProgramBinary',
'versions': [{ 'name': 'glProgramBinaryOES' },
{ 'name': 'glProgramBinary',
'extensions': ['GL_ARB_get_program_binary'] }],
'arguments': 'GLuint program, GLenum binaryFormat, '
'const GLvoid* binary, GLsizei length' },
{ 'return_type': 'void',
'versions': [{ 'name': 'glProgramParameteri',
'extensions': ['GL_ARB_get_program_binary'] }],
'arguments': 'GLuint program, GLenum pname, GLint value' },
{ 'return_type': 'void',
'names': ['glProgramPathFragmentInputGenNV'],
'arguments': 'GLuint program, GLint location, GLenum genMode, '
'GLint components, const GLfloat* coeffs',
'is_optional': True, },
{ 'return_type': 'void',
'names': ['glPushGroupMarkerEXT'],
'arguments': 'GLsizei length, const char* marker', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glQueryCounter',
'extensions': ['GL_ARB_timer_query'] },
{ 'name': 'glQueryCounterEXT' }],
'arguments': 'GLuint id, GLenum target', },
{ 'return_type': 'void',
'names': ['glReadBuffer'],
'arguments': 'GLenum src', },
{ 'return_type': 'void',
'names': ['glReadPixels'],
'arguments':
'GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, '
'GLenum type, void* pixels', },
{ 'return_type': 'void',
'names': ['glReleaseShaderCompiler'],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glRenderbufferStorageEXT', 'glRenderbufferStorage'],
'arguments':
'GLenum target, GLenum internalformat, GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glRenderbufferStorageMultisample'],
'arguments': 'GLenum target, GLsizei samples, GLenum internalformat, '
'GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glRenderbufferStorageMultisampleANGLE'],
'arguments': 'GLenum target, GLsizei samples, GLenum internalformat, '
'GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glRenderbufferStorageMultisampleEXT'],
'arguments': 'GLenum target, GLsizei samples, GLenum internalformat, '
'GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'names': ['glRenderbufferStorageMultisampleIMG'],
'arguments': 'GLenum target, GLsizei samples, GLenum internalformat, '
'GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glResumeTransformFeedback' }],
'arguments': 'void', },
{ 'return_type': 'void',
'names': ['glSampleCoverage'],
'arguments': 'GLclampf value, GLboolean invert', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glSamplerParameterf' }],
'arguments': 'GLuint sampler, GLenum pname, GLfloat param', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glSamplerParameterfv' }],
'arguments': 'GLuint sampler, GLenum pname, const GLfloat* params', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glSamplerParameteri' }],
'arguments': 'GLuint sampler, GLenum pname, GLint param', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glSamplerParameteriv' }],
'arguments': 'GLuint sampler, GLenum pname, const GLint* params', },
{ 'return_type': 'void',
'names': ['glScissor'],
'arguments': 'GLint x, GLint y, GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'known_as': 'glSetFenceAPPLE',
'versions': [{ 'name': 'glSetFenceAPPLE',
'extensions': ['GL_APPLE_fence'] }],
'arguments': 'GLuint fence', },
{ 'return_type': 'void',
'names': ['glSetFenceNV'],
'arguments': 'GLuint fence, GLenum condition', },
{ 'return_type': 'void',
'names': ['glShaderBinary'],
'arguments': 'GLsizei n, const GLuint* shaders, GLenum binaryformat, '
'const void* binary, GLsizei length', },
{ 'return_type': 'void',
'names': ['glShaderSource'],
'arguments': 'GLuint shader, GLsizei count, const char* const* str, '
'const GLint* length',
'logging_code': """
GL_SERVICE_LOG_CODE_BLOCK({
for (GLsizei ii = 0; ii < count; ++ii) {
if (str[ii]) {
if (length && length[ii] >= 0) {
std::string source(str[ii], length[ii]);
GL_SERVICE_LOG(" " << ii << ": ---\\n" << source << "\\n---");
} else {
GL_SERVICE_LOG(" " << ii << ": ---\\n" << str[ii] << "\\n---");
}
} else {
GL_SERVICE_LOG(" " << ii << ": NULL");
}
}
});
""", },
{ 'return_type': 'void',
'names': ['glStencilFillPathInstancedNV'],
'arguments': 'GLsizei numPaths, GLenum pathNameType, const void* paths, '
'GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, '
'const GLfloat* transformValues' },
{ 'return_type': 'void',
'names': ['glStencilFillPathNV'],
'arguments': 'GLuint path, GLenum fillMode, GLuint mask' },
{ 'return_type': 'void',
'names': ['glStencilFunc'],
'arguments': 'GLenum func, GLint ref, GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilFuncSeparate'],
'arguments': 'GLenum face, GLenum func, GLint ref, GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilMask'],
'arguments': 'GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilMaskSeparate'],
'arguments': 'GLenum face, GLuint mask', },
{ 'return_type': 'void',
'names': ['glStencilOp'],
'arguments': 'GLenum fail, GLenum zfail, GLenum zpass', },
{ 'return_type': 'void',
'names': ['glStencilOpSeparate'],
'arguments': 'GLenum face, GLenum fail, GLenum zfail, GLenum zpass', },
{ 'return_type': 'void',
'names': ['glStencilStrokePathInstancedNV'],
'arguments': 'GLsizei numPaths, GLenum pathNameType, const void* paths, '
'GLuint pathBase, GLint ref, GLuint mask, GLenum transformType, '
'const GLfloat* transformValues' },
{ 'return_type': 'void',
'names': ['glStencilStrokePathNV'],
'arguments': 'GLuint path, GLint reference, GLuint mask' },
{ 'return_type': 'void',
'names': ['glStencilThenCoverFillPathInstancedNV'],
'arguments': 'GLsizei numPaths, GLenum pathNameType, const void* paths, '
'GLuint pathBase, GLenum fillMode, GLuint mask, GLenum coverMode, '
'GLenum transformType, const GLfloat* transformValues',
'is_optional': True, },
{ 'return_type': 'void',
'names': ['glStencilThenCoverFillPathNV'],
'arguments': 'GLuint path, GLenum fillMode, GLuint mask, GLenum coverMode',
'is_optional': True, },
{ 'return_type': 'void',
'names': ['glStencilThenCoverStrokePathInstancedNV'],
'arguments': 'GLsizei numPaths, GLenum pathNameType, const void* paths, '
'GLuint pathBase, GLint ref, GLuint mask, GLenum coverMode, '
'GLenum transformType, const GLfloat* transformValues',
'is_optional': True, },
{ 'return_type': 'void',
'names': ['glStencilThenCoverStrokePathNV'],
'arguments': 'GLuint path, GLint reference, GLuint mask, GLenum coverMode',
'is_optional': True, },
{ 'return_type': 'GLboolean',
'known_as': 'glTestFenceAPPLE',
'versions': [{ 'name': 'glTestFenceAPPLE',
'extensions': ['GL_APPLE_fence'] }],
'arguments': 'GLuint fence', },
{ 'return_type': 'GLboolean',
'names': ['glTestFenceNV'],
'arguments': 'GLuint fence', },
{ 'return_type': 'void',
'names': ['glTexImage2D'],
'arguments':
'GLenum target, GLint level, GLint internalformat, GLsizei width, '
'GLsizei height, GLint border, GLenum format, GLenum type, '
'const void* pixels', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glTexImage3D' }],
'arguments':
'GLenum target, GLint level, GLint internalformat, GLsizei width, '
'GLsizei height, GLsizei depth, GLint border, GLenum format, '
'GLenum type, const void* pixels', },
{ 'return_type': 'void',
'names': ['glTexParameterf'],
'arguments': 'GLenum target, GLenum pname, GLfloat param', },
{ 'return_type': 'void',
'names': ['glTexParameterfv'],
'arguments': 'GLenum target, GLenum pname, const GLfloat* params', },
{ 'return_type': 'void',
'names': ['glTexParameteri'],
'arguments': 'GLenum target, GLenum pname, GLint param', },
{ 'return_type': 'void',
'names': ['glTexParameteriv'],
'arguments': 'GLenum target, GLenum pname, const GLint* params', },
{ 'return_type': 'void',
'known_as': 'glTexStorage2DEXT',
'versions': [{ 'name': 'glTexStorage2D',
'extensions': ['GL_ARB_texture_storage'] },
{ 'name': 'glTexStorage2DEXT',
'extensions': ['GL_EXT_texture_storage'] }],
'arguments': 'GLenum target, GLsizei levels, GLenum internalformat, '
'GLsizei width, GLsizei height', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glTexStorage3D' }],
'arguments': 'GLenum target, GLsizei levels, GLenum internalformat, '
'GLsizei width, GLsizei height, GLsizei depth', },
{ 'return_type': 'void',
'names': ['glTexSubImage2D'],
'arguments':
'GLenum target, GLint level, GLint xoffset, GLint yoffset, '
'GLsizei width, GLsizei height, GLenum format, GLenum type, '
'const void* pixels', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glTexSubImage3D' }],
'arguments':
'GLenum target, GLint level, GLint xoffset, GLint yoffset, '
'GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, '
'GLenum format, GLenum type, const void* pixels', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glTransformFeedbackVaryings' }],
'arguments': 'GLuint program, GLsizei count, const char* const* varyings, '
'GLenum bufferMode', },
{ 'return_type': 'void',
'names': ['glUniform1f'],
'arguments': 'GLint location, GLfloat x', },
{ 'return_type': 'void',
'names': ['glUniform1fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform1i'],
'arguments': 'GLint location, GLint x', },
{ 'return_type': 'void',
'names': ['glUniform1iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniform1ui' }],
'arguments': 'GLint location, GLuint v0', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniform1uiv' }],
'arguments': 'GLint location, GLsizei count, const GLuint* v', },
{ 'return_type': 'void',
'names': ['glUniform2f'],
'arguments': 'GLint location, GLfloat x, GLfloat y', },
{ 'return_type': 'void',
'names': ['glUniform2fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform2i'],
'arguments': 'GLint location, GLint x, GLint y', },
{ 'return_type': 'void',
'names': ['glUniform2iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniform2ui' }],
'arguments': 'GLint location, GLuint v0, GLuint v1', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniform2uiv' }],
'arguments': 'GLint location, GLsizei count, const GLuint* v', },
{ 'return_type': 'void',
'names': ['glUniform3f'],
'arguments': 'GLint location, GLfloat x, GLfloat y, GLfloat z', },
{ 'return_type': 'void',
'names': ['glUniform3fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform3i'],
'arguments': 'GLint location, GLint x, GLint y, GLint z', },
{ 'return_type': 'void',
'names': ['glUniform3iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniform3ui' }],
'arguments': 'GLint location, GLuint v0, GLuint v1, GLuint v2', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniform3uiv' }],
'arguments': 'GLint location, GLsizei count, const GLuint* v', },
{ 'return_type': 'void',
'names': ['glUniform4f'],
'arguments': 'GLint location, GLfloat x, GLfloat y, GLfloat z, GLfloat w', },
{ 'return_type': 'void',
'names': ['glUniform4fv'],
'arguments': 'GLint location, GLsizei count, const GLfloat* v', },
{ 'return_type': 'void',
'names': ['glUniform4i'],
'arguments': 'GLint location, GLint x, GLint y, GLint z, GLint w', },
{ 'return_type': 'void',
'names': ['glUniform4iv'],
'arguments': 'GLint location, GLsizei count, const GLint* v', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniform4ui' }],
'arguments': 'GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniform4uiv' }],
'arguments': 'GLint location, GLsizei count, const GLuint* v', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniformBlockBinding' }],
'arguments': 'GLuint program, GLuint uniformBlockIndex, '
'GLuint uniformBlockBinding', },
{ 'return_type': 'void',
'names': ['glUniformMatrix2fv'],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniformMatrix2x3fv' }],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniformMatrix2x4fv' }],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'names': ['glUniformMatrix3fv'],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniformMatrix3x2fv' }],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniformMatrix3x4fv' }],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'names': ['glUniformMatrix4fv'],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniformMatrix4x2fv' }],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glUniformMatrix4x3fv' }],
'arguments': 'GLint location, GLsizei count, '
'GLboolean transpose, const GLfloat* value', },
{ 'return_type': 'GLboolean',
'known_as': 'glUnmapBuffer',
'names': ['glUnmapBufferOES', 'glUnmapBuffer'],
'arguments': 'GLenum target', },
{ 'return_type': 'void',
'names': ['glUseProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void',
'names': ['glValidateProgram'],
'arguments': 'GLuint program', },
{ 'return_type': 'void',
'names': ['glVertexAttrib1f'],
'arguments': 'GLuint indx, GLfloat x', },
{ 'return_type': 'void',
'names': ['glVertexAttrib1fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'names': ['glVertexAttrib2f'],
'arguments': 'GLuint indx, GLfloat x, GLfloat y', },
{ 'return_type': 'void',
'names': ['glVertexAttrib2fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'names': ['glVertexAttrib3f'],
'arguments': 'GLuint indx, GLfloat x, GLfloat y, GLfloat z', },
{ 'return_type': 'void',
'names': ['glVertexAttrib3fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'names': ['glVertexAttrib4f'],
'arguments': 'GLuint indx, GLfloat x, GLfloat y, GLfloat z, GLfloat w', },
{ 'return_type': 'void',
'names': ['glVertexAttrib4fv'],
'arguments': 'GLuint indx, const GLfloat* values', },
{ 'return_type': 'void',
'known_as': 'glVertexAttribDivisorANGLE',
'names': ['glVertexAttribDivisorARB', 'glVertexAttribDivisorANGLE',
'glVertexAttribDivisor'],
'arguments':
'GLuint index, GLuint divisor', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glVertexAttribI4i' }],
'arguments': 'GLuint indx, GLint x, GLint y, GLint z, GLint w', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glVertexAttribI4iv' }],
'arguments': 'GLuint indx, const GLint* values', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glVertexAttribI4ui' }],
'arguments': 'GLuint indx, GLuint x, GLuint y, GLuint z, GLuint w', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glVertexAttribI4uiv' }],
'arguments': 'GLuint indx, const GLuint* values', },
{ 'return_type': 'void',
'versions': [{ 'name': 'glVertexAttribIPointer' }],
'arguments': 'GLuint indx, GLint size, GLenum type, GLsizei stride, '
'const void* ptr', },
{ 'return_type': 'void',
'names': ['glVertexAttribPointer'],
'arguments': 'GLuint indx, GLint size, GLenum type, GLboolean normalized, '
'GLsizei stride, const void* ptr', },
{ 'return_type': 'void',
'names': ['glViewport'],
'arguments': 'GLint x, GLint y, GLsizei width, GLsizei height', },
{ 'return_type': 'GLenum',
'versions': [{ 'name': 'glWaitSync',
'extensions': ['GL_ARB_sync'] }],
'arguments':
'GLsync sync, GLbitfield flags, GLuint64 timeout', },
]
OSMESA_FUNCTIONS = [
{ 'return_type': 'void',
'names': ['OSMesaColorClamp'],
'arguments': 'GLboolean enable', },
{ 'return_type': 'OSMesaContext',
'names': ['OSMesaCreateContext'],
'arguments': 'GLenum format, OSMesaContext sharelist', },
{ 'return_type': 'OSMesaContext',
'names': ['OSMesaCreateContextExt'],
'arguments':
'GLenum format, GLint depthBits, GLint stencilBits, GLint accumBits, '
'OSMesaContext sharelist', },
{ 'return_type': 'void',
'names': ['OSMesaDestroyContext'],
'arguments': 'OSMesaContext ctx', },
{ 'return_type': 'GLboolean',
'names': ['OSMesaGetColorBuffer'],
'arguments': 'OSMesaContext c, GLint* width, GLint* height, GLint* format, '
'void** buffer', },
{ 'return_type': 'OSMesaContext',
'names': ['OSMesaGetCurrentContext'],
'arguments': 'void', },
{ 'return_type': 'GLboolean',
'names': ['OSMesaGetDepthBuffer'],
'arguments':
'OSMesaContext c, GLint* width, GLint* height, GLint* bytesPerValue, '
'void** buffer', },
{ 'return_type': 'void',
'names': ['OSMesaGetIntegerv'],
'arguments': 'GLint pname, GLint* value', },
{ 'return_type': 'OSMESAproc',
'names': ['OSMesaGetProcAddress'],
'arguments': 'const char* funcName', },
{ 'return_type': 'GLboolean',
'names': ['OSMesaMakeCurrent'],
'arguments': 'OSMesaContext ctx, void* buffer, GLenum type, GLsizei width, '
'GLsizei height', },
{ 'return_type': 'void',
'names': ['OSMesaPixelStore'],
'arguments': 'GLint pname, GLint value', },
]
EGL_FUNCTIONS = [
{ 'return_type': 'EGLBoolean',
'names': ['eglBindAPI'],
'arguments': 'EGLenum api', },
{ 'return_type': 'EGLBoolean',
'names': ['eglBindTexImage'],
'arguments': 'EGLDisplay dpy, EGLSurface surface, EGLint buffer', },
{ 'return_type': 'EGLBoolean',
'names': ['eglChooseConfig'],
'arguments': 'EGLDisplay dpy, const EGLint* attrib_list, EGLConfig* configs, '
'EGLint config_size, EGLint* num_config', },
{ 'return_type': 'EGLint',
'versions': [{ 'name': 'eglClientWaitSyncKHR',
'extensions': [
'EGL_KHR_fence_sync',
'GL_CHROMIUM_egl_khr_fence_sync_hack'
] }],
'arguments': 'EGLDisplay dpy, EGLSyncKHR sync, EGLint flags, '
'EGLTimeKHR timeout' },
{ 'return_type': 'EGLBoolean',
'names': ['eglCopyBuffers'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLNativePixmapType target', },
{ 'return_type': 'EGLContext',
'names': ['eglCreateContext'],
'arguments': 'EGLDisplay dpy, EGLConfig config, EGLContext share_context, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLImageKHR',
'versions': [{ 'name': 'eglCreateImageKHR',
'extensions':
['EGL_KHR_image_base', 'EGL_KHR_gl_texture_2D_image'] }],
'arguments':
'EGLDisplay dpy, EGLContext ctx, EGLenum target, EGLClientBuffer buffer, '
'const EGLint* attrib_list' },
{ 'return_type': 'EGLSurface',
'names': ['eglCreatePbufferFromClientBuffer'],
'arguments':
'EGLDisplay dpy, EGLenum buftype, void* buffer, EGLConfig config, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLSurface',
'names': ['eglCreatePbufferSurface'],
'arguments': 'EGLDisplay dpy, EGLConfig config, const EGLint* attrib_list', },
{ 'return_type': 'EGLSurface',
'names': ['eglCreatePixmapSurface'],
'arguments': 'EGLDisplay dpy, EGLConfig config, EGLNativePixmapType pixmap, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLSyncKHR',
'versions': [{ 'name': 'eglCreateSyncKHR',
'extensions': [
'EGL_KHR_fence_sync',
'GL_CHROMIUM_egl_khr_fence_sync_hack'
] }],
'arguments': 'EGLDisplay dpy, EGLenum type, const EGLint* attrib_list' },
{ 'return_type': 'EGLSurface',
'names': ['eglCreateWindowSurface'],
'arguments': 'EGLDisplay dpy, EGLConfig config, EGLNativeWindowType win, '
'const EGLint* attrib_list', },
{ 'return_type': 'EGLBoolean',
'names': ['eglDestroyContext'],
'arguments': 'EGLDisplay dpy, EGLContext ctx', },
{ 'return_type': 'EGLBoolean',
'versions': [{ 'name' : 'eglDestroyImageKHR',
'extensions': ['EGL_KHR_image_base'] }],
'arguments': 'EGLDisplay dpy, EGLImageKHR image' },
{ 'return_type': 'EGLBoolean',
'names': ['eglDestroySurface'],
'arguments': 'EGLDisplay dpy, EGLSurface surface', },
{ 'return_type': 'EGLBoolean',
'versions': [{ 'name': 'eglDestroySyncKHR',
'extensions': [
'EGL_KHR_fence_sync',
'GL_CHROMIUM_egl_khr_fence_sync_hack'
] }],
'arguments': 'EGLDisplay dpy, EGLSyncKHR sync' },
{ 'return_type': 'EGLBoolean',
'names': ['eglGetConfigAttrib'],
'arguments':
'EGLDisplay dpy, EGLConfig config, EGLint attribute, EGLint* value', },
{ 'return_type': 'EGLBoolean',
'names': ['eglGetConfigs'],
'arguments': 'EGLDisplay dpy, EGLConfig* configs, EGLint config_size, '
'EGLint* num_config', },
{ 'return_type': 'EGLContext',
'names': ['eglGetCurrentContext'],
'arguments': 'void', },
{ 'return_type': 'EGLDisplay',
'names': ['eglGetCurrentDisplay'],
'arguments': 'void', },
{ 'return_type': 'EGLSurface',
'names': ['eglGetCurrentSurface'],
'arguments': 'EGLint readdraw', },
{ 'return_type': 'EGLDisplay',
'names': ['eglGetDisplay'],
'arguments': 'EGLNativeDisplayType display_id', },
{ 'return_type': 'EGLint',
'names': ['eglGetError'],
'arguments': 'void', },
{ 'return_type': 'EGLDisplay',
'known_as': 'eglGetPlatformDisplayEXT',
'versions': [{ 'name': 'eglGetPlatformDisplayEXT',
'client_extensions': ['EGL_EXT_platform_base'], }],
'arguments': 'EGLenum platform, void* native_display, '
'const EGLint* attrib_list', },
{ 'return_type': '__eglMustCastToProperFunctionPointerType',
'names': ['eglGetProcAddress'],
'arguments': 'const char* procname', },
{ 'return_type': 'EGLBoolean',
'versions': [{ 'name': 'eglGetSyncAttribKHR',
'extensions': [
'EGL_KHR_fence_sync',
'GL_CHROMIUM_egl_khr_fence_sync_hack'
] }],
'arguments': 'EGLDisplay dpy, EGLSyncKHR sync, EGLint attribute, '
'EGLint* value' },
{ 'return_type': 'EGLBoolean',
'names': ['eglGetSyncValuesCHROMIUM'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, '
'EGLuint64CHROMIUM* ust, EGLuint64CHROMIUM* msc, '
'EGLuint64CHROMIUM* sbc', },
{ 'return_type': 'EGLBoolean',
'names': ['eglInitialize'],
'arguments': 'EGLDisplay dpy, EGLint* major, EGLint* minor', },
{ 'return_type': 'EGLBoolean',
'names': ['eglMakeCurrent'],
'arguments':
'EGLDisplay dpy, EGLSurface draw, EGLSurface read, EGLContext ctx', },
{ 'return_type': 'EGLBoolean',
'names': ['eglPostSubBufferNV'],
'arguments': 'EGLDisplay dpy, EGLSurface surface, '
'EGLint x, EGLint y, EGLint width, EGLint height', },
{ 'return_type': 'EGLenum',
'names': ['eglQueryAPI'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglQueryContext'],
'arguments':
'EGLDisplay dpy, EGLContext ctx, EGLint attribute, EGLint* value', },
{ 'return_type': 'const char*',
'names': ['eglQueryString'],
'arguments': 'EGLDisplay dpy, EGLint name', },
{ 'return_type': 'EGLBoolean',
'names': ['eglQuerySurface'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint* value', },
{ 'return_type': 'EGLBoolean',
'names': ['eglQuerySurfacePointerANGLE'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLint attribute, void** value', },
{ 'return_type': 'EGLBoolean',
'names': ['eglReleaseTexImage'],
'arguments': 'EGLDisplay dpy, EGLSurface surface, EGLint buffer', },
{ 'return_type': 'EGLBoolean',
'names': ['eglReleaseThread'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglSurfaceAttrib'],
'arguments':
'EGLDisplay dpy, EGLSurface surface, EGLint attribute, EGLint value', },
{ 'return_type': 'EGLBoolean',
'names': ['eglSwapBuffers'],
'arguments': 'EGLDisplay dpy, EGLSurface surface', },
{ 'return_type': 'EGLBoolean',
'names': ['eglSwapInterval'],
'arguments': 'EGLDisplay dpy, EGLint interval', },
{ 'return_type': 'EGLBoolean',
'names': ['eglTerminate'],
'arguments': 'EGLDisplay dpy', },
{ 'return_type': 'EGLBoolean',
'names': ['eglWaitClient'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglWaitGL'],
'arguments': 'void', },
{ 'return_type': 'EGLBoolean',
'names': ['eglWaitNative'],
'arguments': 'EGLint engine', },
{ 'return_type': 'EGLint',
'versions': [{ 'name': 'eglWaitSyncKHR',
'extensions': ['EGL_KHR_wait_sync'] }],
'arguments': 'EGLDisplay dpy, EGLSyncKHR sync, EGLint flags' },
]
WGL_FUNCTIONS = [
{ 'return_type': 'BOOL',
'names': ['wglChoosePixelFormatARB'],
'arguments':
'HDC dc, const int* int_attrib_list, const float* float_attrib_list, '
'UINT max_formats, int* formats, UINT* num_formats', },
{ 'return_type': 'BOOL',
'names': ['wglCopyContext'],
'arguments': 'HGLRC hglrcSrc, HGLRC hglrcDst, UINT mask', },
{ 'return_type': 'HGLRC',
'names': ['wglCreateContext'],
'arguments': 'HDC hdc', },
{ 'return_type': 'HGLRC',
'names': ['wglCreateLayerContext'],
'arguments': 'HDC hdc, int iLayerPlane', },
{ 'return_type': 'HPBUFFERARB',
'names': ['wglCreatePbufferARB'],
'arguments': 'HDC hDC, int iPixelFormat, int iWidth, int iHeight, '
'const int* piAttribList', },
{ 'return_type': 'BOOL',
'names': ['wglDeleteContext'],
'arguments': 'HGLRC hglrc', },
{ 'return_type': 'BOOL',
'names': ['wglDestroyPbufferARB'],
'arguments': 'HPBUFFERARB hPbuffer', },
{ 'return_type': 'HGLRC',
'names': ['wglGetCurrentContext'],
'arguments': '', },
{ 'return_type': 'HDC',
'names': ['wglGetCurrentDC'],
'arguments': '', },
{ 'return_type': 'const char*',
'names': ['wglGetExtensionsStringARB'],
'arguments': 'HDC hDC', },
{ 'return_type': 'const char*',
'names': ['wglGetExtensionsStringEXT'],
'arguments': '', },
{ 'return_type': 'HDC',
'names': ['wglGetPbufferDCARB'],
'arguments': 'HPBUFFERARB hPbuffer', },
{ 'return_type': 'BOOL',
'names': ['wglMakeCurrent'],
'arguments': 'HDC hdc, HGLRC hglrc', },
{ 'return_type': 'BOOL',
'names': ['wglQueryPbufferARB'],
'arguments': 'HPBUFFERARB hPbuffer, int iAttribute, int* piValue', },
{ 'return_type': 'int',
'names': ['wglReleasePbufferDCARB'],
'arguments': 'HPBUFFERARB hPbuffer, HDC hDC', },
{ 'return_type': 'BOOL',
'names': ['wglShareLists'],
'arguments': 'HGLRC hglrc1, HGLRC hglrc2', },
{ 'return_type': 'BOOL',
'names': ['wglSwapIntervalEXT'],
'arguments': 'int interval', },
{ 'return_type': 'BOOL',
'names': ['wglSwapLayerBuffers'],
'arguments': 'HDC hdc, UINT fuPlanes', },
]
GLX_FUNCTIONS = [
{ 'return_type': 'void',
'names': ['glXBindTexImageEXT'],
'arguments':
'Display* dpy, GLXDrawable drawable, int buffer, int* attribList', },
{ 'return_type': 'GLXFBConfig*',
'names': ['glXChooseFBConfig'],
'arguments':
'Display* dpy, int screen, const int* attribList, int* nitems', },
{ 'return_type': 'XVisualInfo*',
'names': ['glXChooseVisual'],
'arguments': 'Display* dpy, int screen, int* attribList', },
{ 'return_type': 'void',
'names': ['glXCopyContext'],
'arguments':
'Display* dpy, GLXContext src, GLXContext dst, unsigned long mask', },
{ 'return_type': 'void',
'names': ['glXCopySubBufferMESA'],
'arguments': 'Display* dpy, GLXDrawable drawable, '
'int x, int y, int width, int height', },
{ 'return_type': 'GLXContext',
'names': ['glXCreateContext'],
'arguments':
'Display* dpy, XVisualInfo* vis, GLXContext shareList, int direct', },
{ 'return_type': 'GLXContext',
'names': ['glXCreateContextAttribsARB'],
'arguments':
'Display* dpy, GLXFBConfig config, GLXContext share_context, int direct, '
'const int* attrib_list', },
{ 'return_type': 'GLXPixmap',
'names': ['glXCreateGLXPixmap'],
'arguments': 'Display* dpy, XVisualInfo* visual, Pixmap pixmap', },
{ 'return_type': 'GLXContext',
'names': ['glXCreateNewContext'],
'arguments': 'Display* dpy, GLXFBConfig config, int renderType, '
'GLXContext shareList, int direct', },
{ 'return_type': 'GLXPbuffer',
'names': ['glXCreatePbuffer'],
'arguments': 'Display* dpy, GLXFBConfig config, const int* attribList', },
{ 'return_type': 'GLXPixmap',
'names': ['glXCreatePixmap'],
'arguments': 'Display* dpy, GLXFBConfig config, '
'Pixmap pixmap, const int* attribList', },
{ 'return_type': 'GLXWindow',
'names': ['glXCreateWindow'],
'arguments':
'Display* dpy, GLXFBConfig config, Window win, const int* attribList', },
{ 'return_type': 'void',
'names': ['glXDestroyContext'],
'arguments': 'Display* dpy, GLXContext ctx', },
{ 'return_type': 'void',
'names': ['glXDestroyGLXPixmap'],
'arguments': 'Display* dpy, GLXPixmap pixmap', },
{ 'return_type': 'void',
'names': ['glXDestroyPbuffer'],
'arguments': 'Display* dpy, GLXPbuffer pbuf', },
{ 'return_type': 'void',
'names': ['glXDestroyPixmap'],
'arguments': 'Display* dpy, GLXPixmap pixmap', },
{ 'return_type': 'void',
'names': ['glXDestroyWindow'],
'arguments': 'Display* dpy, GLXWindow window', },
{ 'return_type': 'const char*',
'names': ['glXGetClientString'],
'arguments': 'Display* dpy, int name', },
{ 'return_type': 'int',
'names': ['glXGetConfig'],
'arguments': 'Display* dpy, XVisualInfo* visual, int attrib, int* value', },
{ 'return_type': 'GLXContext',
'names': ['glXGetCurrentContext'],
'arguments': 'void', },
{ 'return_type': 'Display*',
'names': ['glXGetCurrentDisplay'],
'arguments': 'void', },
{ 'return_type': 'GLXDrawable',
'names': ['glXGetCurrentDrawable'],
'arguments': 'void', },
{ 'return_type': 'GLXDrawable',
'names': ['glXGetCurrentReadDrawable'],
'arguments': 'void', },
{ 'return_type': 'int',
'names': ['glXGetFBConfigAttrib'],
'arguments': 'Display* dpy, GLXFBConfig config, int attribute, int* value', },
{ 'return_type': 'GLXFBConfig',
'names': ['glXGetFBConfigFromVisualSGIX'],
'arguments': 'Display* dpy, XVisualInfo* visualInfo', },
{ 'return_type': 'GLXFBConfig*',
'names': ['glXGetFBConfigs'],
'arguments': 'Display* dpy, int screen, int* nelements', },
{ 'return_type': 'bool',
'names': ['glXGetMscRateOML'],
'arguments':
'Display* dpy, GLXDrawable drawable, int32* numerator, '
'int32* denominator' },
{ 'return_type': 'void',
'names': ['glXGetSelectedEvent'],
'arguments': 'Display* dpy, GLXDrawable drawable, unsigned long* mask', },
{ 'return_type': 'bool',
'names': ['glXGetSyncValuesOML'],
'arguments':
'Display* dpy, GLXDrawable drawable, int64* ust, int64* msc, '
'int64* sbc' },
{ 'return_type': 'XVisualInfo*',
'names': ['glXGetVisualFromFBConfig'],
'arguments': 'Display* dpy, GLXFBConfig config', },
{ 'return_type': 'int',
'names': ['glXIsDirect'],
'arguments': 'Display* dpy, GLXContext ctx', },
{ 'return_type': 'int',
'names': ['glXMakeContextCurrent'],
'arguments':
'Display* dpy, GLXDrawable draw, GLXDrawable read, GLXContext ctx', },
{ 'return_type': 'int',
'names': ['glXMakeCurrent'],
'arguments': 'Display* dpy, GLXDrawable drawable, GLXContext ctx', },
{ 'return_type': 'int',
'names': ['glXQueryContext'],
'arguments': 'Display* dpy, GLXContext ctx, int attribute, int* value', },
{ 'return_type': 'void',
'names': ['glXQueryDrawable'],
'arguments':
'Display* dpy, GLXDrawable draw, int attribute, unsigned int* value', },
{ 'return_type': 'int',
'names': ['glXQueryExtension'],
'arguments': 'Display* dpy, int* errorb, int* event', },
{ 'return_type': 'const char*',
'names': ['glXQueryExtensionsString'],
'arguments': 'Display* dpy, int screen', },
{ 'return_type': 'const char*',
'names': ['glXQueryServerString'],
'arguments': 'Display* dpy, int screen, int name', },
{ 'return_type': 'int',
'names': ['glXQueryVersion'],
'arguments': 'Display* dpy, int* maj, int* min', },
{ 'return_type': 'void',
'names': ['glXReleaseTexImageEXT'],
'arguments': 'Display* dpy, GLXDrawable drawable, int buffer', },
{ 'return_type': 'void',
'names': ['glXSelectEvent'],
'arguments': 'Display* dpy, GLXDrawable drawable, unsigned long mask', },
{ 'return_type': 'void',
'names': ['glXSwapBuffers'],
'arguments': 'Display* dpy, GLXDrawable drawable', },
{ 'return_type': 'void',
'names': ['glXSwapIntervalEXT'],
'arguments': 'Display* dpy, GLXDrawable drawable, int interval', },
{ 'return_type': 'void',
'names': ['glXSwapIntervalMESA'],
'arguments': 'unsigned int interval', },
{ 'return_type': 'void',
'names': ['glXUseXFont'],
'arguments': 'Font font, int first, int count, int list', },
{ 'return_type': 'void',
'names': ['glXWaitGL'],
'arguments': 'void', },
{ 'return_type': 'int',
'names': ['glXWaitVideoSyncSGI'],
'arguments': 'int divisor, int remainder, unsigned int* count', },
{ 'return_type': 'void',
'names': ['glXWaitX'],
'arguments': 'void', },
]
FUNCTION_SETS = [
[GL_FUNCTIONS, 'gl', [
'GL/gl.h',
'noninclude/GL/glext.h',
'GLES2/gl2ext.h',
'GLES3/gl3.h',
'GLES3/gl31.h',
# Files below are Chromium-specific and shipped with Chromium sources.
'GL/glextchromium.h',
'GLES2/gl2chromium.h',
'GLES2/gl2extchromium.h'
], []],
[OSMESA_FUNCTIONS, 'osmesa', [], []],
[EGL_FUNCTIONS, 'egl', [
'EGL/eglext.h',
# Files below are Chromium-specific and shipped with Chromium sources.
'EGL/eglextchromium.h',
],
[
'EGL_ANGLE_d3d_share_handle_client_buffer',
'EGL_ANGLE_surface_d3d_texture_2d_share_handle',
],
],
[WGL_FUNCTIONS, 'wgl', ['noninclude/GL/wglext.h'], []],
[GLX_FUNCTIONS, 'glx', ['GL/glx.h', 'noninclude/GL/glxext.h'], []],
]
GLES2_HEADERS_WITH_ENUMS = [
'GLES2/gl2.h',
'GLES2/gl2ext.h',
'GLES2/gl2chromium.h',
'GLES2/gl2extchromium.h',
'GLES3/gl3.h',
]
SELF_LOCATION = os.path.dirname(os.path.abspath(__file__))
LICENSE_AND_HEADER = """\
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// This file is auto-generated from
// ui/gl/generate_bindings.py
// It's formatted by clang-format using chromium coding style:
// clang-format -i -style=chromium filename
// DO NOT EDIT!
"""
GLVersion = namedtuple('GLVersion', 'is_es major_version minor_version')
def GLVersionBindAlways(version):
return version.major_version <= 2
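# Illustrative examples of the rule above: entry points from GL <= 2.x / GLES
# 2.0 are always bound, newer ones are bound dynamically once a context exists:
#   GLVersionBindAlways(GLVersion(True, 2, 0))   # -> True  (GLES 2.0, always bound)
#   GLVersionBindAlways(GLVersion(False, 4, 1))  # -> False (GL 4.1, bound dynamically)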
def GetStaticBinding(func):
"""If this function has a name assigned to it that should be bound always,
then return this name.
This will be the case if either a function name is specified
that depends on an extension from UNCONDITIONALLY_BOUND_EXTENSIONS,
or if the GL version it depends on is assumed to be available (e.g. <=2.1).
There can only be one name that satisfies this condition (or the bindings
would be ambiguous)."""
static_bindings = set([])
for version in func['versions']:
if 'extensions' in version:
extensions = version['extensions']
num_unconditional_extensions = len(
extensions & UNCONDITIONALLY_BOUND_EXTENSIONS)
if num_unconditional_extensions:
static_bindings.add(version['name'])
elif 'gl_versions' in version:
versions = [v for v in version['gl_versions'] if GLVersionBindAlways(v)]
# It's only unconditional if it exists in GL and GLES
if len(versions) == 2:
assert versions[0].is_es != versions[1].is_es
static_bindings.add(version['name'])
else:
static_bindings.add(version['name'])
# Avoid ambiguous bindings (static binding with different names)
assert len(static_bindings) <= 1
if len(static_bindings):
static_name = static_bindings.pop()
# Avoid ambiguous bindings (static and dynamic bindings with
# different names)
assert len([v['name'] for v in func['versions']
if v['name'] != static_name]) == 0, func
return static_name
else:
return None
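# Illustrative example: for an entry whose only version is reachable through
# GL versions that are always bound in both GL and GLES, e.g.
#   { 'known_as': 'glTexImage2D',
#     'versions': [{ 'name': 'glTexImage2D',
#                    'gl_versions': [GLVersion(False, 1, 0),
#                                    GLVersion(True, 2, 0)] }] }
# GetStaticBinding returns 'glTexImage2D'. A function that is only reachable
# through extensions outside UNCONDITIONALLY_BOUND_EXTENSIONS gets no static
# binding and is resolved dynamically instead. (The gl_versions shown here are
# what FillExtensionsFromHeaders would fill in from the headers.)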
def GenerateHeader(file, functions, set_name,
used_extensions, used_client_extensions):
"""Generates gl_bindings_autogen_x.h"""
# Write file header.
file.write(LICENSE_AND_HEADER +
"""
#ifndef UI_GFX_GL_GL_BINDINGS_AUTOGEN_%(name)s_H_
#define UI_GFX_GL_GL_BINDINGS_AUTOGEN_%(name)s_H_
namespace gfx {
class GLContext;
""" % {'name': set_name.upper()})
# Write typedefs for function pointer types. Always use the GL name for the
# typedef.
file.write('\n')
for func in functions:
file.write('typedef %s (GL_BINDING_CALL *%sProc)(%s);\n' %
(func['return_type'], func['known_as'], func['arguments']))
# Write declarations for booleans indicating which extensions are available.
file.write('\n')
file.write("struct Extensions%s {\n" % set_name.upper())
for extension in sorted(used_client_extensions):
file.write(' bool b_%s;\n' % extension)
for extension in sorted(used_extensions):
file.write(' bool b_%s;\n' % extension)
file.write('};\n')
file.write('\n')
# Write Procs struct.
file.write("struct Procs%s {\n" % set_name.upper())
for func in functions:
file.write(' %sProc %sFn;\n' % (func['known_as'], func['known_as']))
file.write('};\n')
file.write('\n')
# Write Api class.
file.write(
"""class GL_EXPORT %(name)sApi {
public:
%(name)sApi();
virtual ~%(name)sApi();
""" % {'name': set_name.upper()})
for func in functions:
file.write(' virtual %s %sFn(%s) = 0;\n' %
(func['return_type'], func['known_as'], func['arguments']))
file.write('};\n')
file.write('\n')
file.write( '} // namespace gfx\n')
# Write macros to invoke function pointers. Always use the GL name for the
# macro.
file.write('\n')
for func in functions:
file.write('#define %s ::gfx::g_current_%s_context->%sFn\n' %
(func['known_as'], set_name.lower(), func['known_as']))
file.write('\n')
file.write('#endif // UI_GFX_GL_GL_BINDINGS_AUTOGEN_%s_H_\n' %
set_name.upper())
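# Illustrative fragment of the header this emits for the 'gl' set, using the
# glViewport entry from GL_FUNCTIONS above:
#   typedef void (GL_BINDING_CALL *glViewportProc)(GLint x, GLint y, GLsizei width, GLsizei height);
#   ...
#   struct ProcsGL {
#     glViewportProc glViewportFn;
#     ...
#   };
#   ...
#   #define glViewport ::gfx::g_current_gl_context->glViewportFn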
def GenerateAPIHeader(file, functions, set_name):
"""Generates gl_bindings_api_autogen_x.h"""
# Write file header.
file.write(LICENSE_AND_HEADER)
# Write API declaration.
for func in functions:
file.write(' %s %sFn(%s) override;\n' %
(func['return_type'], func['known_as'], func['arguments']))
file.write('\n')
def GenerateMockHeader(file, functions, set_name):
"""Generates gl_mock_autogen_x.h"""
# Write file header.
file.write(LICENSE_AND_HEADER)
# Write API declaration.
for func in functions:
args = func['arguments']
if args == 'void':
args = ''
arg_count = 0
if len(args):
arg_count = func['arguments'].count(',') + 1
# TODO(zmo): crbug.com/456340
# For now gmock supports at most 10 args.
if arg_count <= 10:
file.write(' MOCK_METHOD%d(%s, %s(%s));\n' %
(arg_count, func['known_as'][2:], func['return_type'], args))
else:
file.write(' // TODO(zmo): crbug.com/456340\n')
file.write(' // %s cannot be mocked because it has %d args.\n' %
(func['known_as'], arg_count))
file.write('\n')
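# Illustrative fragment of the mock header this emits, again for glViewport
# (4 arguments, so gmock's MOCK_METHOD4 is used):
#   MOCK_METHOD4(Viewport, void(GLint x, GLint y, GLsizei width, GLsizei height));
# Functions whose argument list is 'void' come out as MOCK_METHOD0(Name, ReturnType()).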
def GenerateSource(file, functions, set_name, used_extensions,
used_client_extensions, options):
"""Generates gl_bindings_autogen_x.cc"""
set_header_name = "ui/gl/gl_" + set_name.lower() + "_api_implementation.h"
include_list = [ 'base/trace_event/trace_event.h',
'ui/gl/gl_enums.h',
'ui/gl/gl_bindings.h',
'ui/gl/gl_context.h',
'ui/gl/gl_implementation.h',
'ui/gl/gl_version_info.h',
set_header_name ]
includes_string = "\n".join(["#include \"{0}\"".format(h)
for h in sorted(include_list)])
# Write file header.
file.write(LICENSE_AND_HEADER +
"""
#include <string>
%s
namespace gfx {
""" % includes_string)
file.write('\n')
file.write('static bool g_debugBindingsInitialized;\n')
file.write('Driver%s g_driver_%s;\n' % (set_name.upper(), set_name.lower()))
file.write('\n')
# Write stub functions that take the place of some functions before a context
# is initialized. This is done to provide clear asserts in debug builds and to
# avoid crashing in case of a bug in release builds.
file.write('\n')
num_dynamic = 0
for func in functions:
static_binding = GetStaticBinding(func)
if static_binding:
func['static_binding'] = static_binding
else:
num_dynamic = num_dynamic + 1
print "[%s] %d static bindings, %d dynamic bindings" % (
set_name, len(functions) - num_dynamic, num_dynamic)
# Write the function that initializes the function pointers that are always
# the same, and clears the bindings whose choice depends on the extension
# string or the GL version (those are filled in later by the dynamic/extension
# binding initializers below).
file.write('\n')
file.write('void Driver%s::InitializeStaticBindings() {\n' %
set_name.upper())
def WriteFuncBinding(file, known_as, version_name):
file.write(
' fn.%sFn = reinterpret_cast<%sProc>(GetGLProcAddress("%s"));\n' %
(known_as, known_as, version_name))
for func in functions:
if 'static_binding' in func:
WriteFuncBinding(file, func['known_as'], func['static_binding'])
else:
file.write(' fn.%sFn = 0;\n' % func['known_as'])
def GetGLVersionCondition(gl_version):
if GLVersionBindAlways(gl_version):
if gl_version.is_es:
return 'ver->is_es'
else:
return '!ver->is_es'
elif gl_version.is_es:
return 'ver->IsAtLeastGLES(%du, %du)' % (
gl_version.major_version, gl_version.minor_version)
else:
return 'ver->IsAtLeastGL(%du, %du)' % (
gl_version.major_version, gl_version.minor_version)
def GetBindingCondition(version):
conditions = []
if 'gl_versions' in version:
conditions.extend(
[GetGLVersionCondition(v) for v in version['gl_versions']])
if 'extensions' in version and version['extensions']:
conditions.extend(
['ext.b_%s' % e for e in version['extensions']])
return ' || '.join(conditions)
def WriteConditionalFuncBinding(file, func):
assert len(func['versions']) > 0
known_as = func['known_as']
i = 0
first_version = True
while i < len(func['versions']):
version = func['versions'][i]
cond = GetBindingCondition(version)
if first_version:
file.write(' if (%s) {\n ' % cond)
else:
file.write(' else if (%s) {\n ' % (cond))
WriteFuncBinding(file, known_as, version['name'])
if options.validate_bindings:
if not 'is_optional' in func or not func['is_optional']:
file.write('DCHECK(fn.%sFn);\n' % known_as)
file.write('}\n')
i += 1
first_version = False
# TODO(jmadill): make more robust
def IsClientExtensionFunc(func):
assert len(func['versions']) > 0
if 'client_extensions' in func['versions'][0]:
assert len(func['versions']) == 1
return True
return False
file.write("}\n\n");
if set_name == 'gl':
file.write("""\
void DriverGL::InitializeDynamicBindings(
GLContext* context) {
DCHECK(context && context->IsCurrent(NULL));
const GLVersionInfo* ver = context->GetVersionInfo();
ALLOW_UNUSED_LOCAL(ver);
std::string extensions = context->GetExtensions() + " ";
ALLOW_UNUSED_LOCAL(extensions);
""")
elif set_name == 'egl':
file.write("""\
void DriverEGL::InitializeExtensionBindings() {
std::string client_extensions(GetClientExtensions());
client_extensions += " ";
ALLOW_UNUSED_LOCAL(client_extensions);
""")
else:
file.write("""\
void Driver%s::InitializeExtensionBindings() {
std::string extensions(GetPlatformExtensions());
extensions += " ";
ALLOW_UNUSED_LOCAL(extensions);
""" % (set_name.upper(),))
def OutputExtensionBindings(extension_var, extensions, extension_funcs):
# The extra space at the end of the extension name is intentional; it is
# used as a separator.
for extension in extensions:
file.write(' ext.b_%s = %s.find("%s ") != std::string::npos;\n' %
(extension, extension_var, extension))
for func in extension_funcs:
if not 'static_binding' in func:
file.write('\n')
file.write(' debug_fn.%sFn = 0;\n' % func['known_as'])
WriteConditionalFuncBinding(file, func)
OutputExtensionBindings(
'client_extensions',
sorted(used_client_extensions),
[ f for f in functions if IsClientExtensionFunc(f) ])
if set_name == 'egl':
file.write("""\
std::string extensions(GetPlatformExtensions());
extensions += " ";
ALLOW_UNUSED_LOCAL(extensions);
""")
OutputExtensionBindings(
'extensions',
sorted(used_extensions),
[ f for f in functions if not IsClientExtensionFunc(f) ])
# Some new function pointers have been added, so update them in debug bindings
file.write('\n')
file.write(' if (g_debugBindingsInitialized)\n')
file.write(' InitializeDebugBindings();\n')
file.write('}\n')
file.write('\n')
# Write logging wrappers for each function.
file.write('extern "C" {\n')
for func in functions:
return_type = func['return_type']
arguments = func['arguments']
file.write('\n')
file.write('static %s GL_BINDING_CALL Debug_%s(%s) {\n' %
(return_type, func['known_as'], arguments))
argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', arguments)
argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', argument_names)
log_argument_names = re.sub(
r'const char\* ([a-zA-Z0-9_]+)', r'CONSTCHAR_\1', arguments)
log_argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\* ([a-zA-Z0-9_]+)',
r'CONSTVOID_\2', log_argument_names)
log_argument_names = re.sub(
r'(?<!E)GLenum ([a-zA-Z0-9_]+)', r'GLenum_\1', log_argument_names)
log_argument_names = re.sub(
r'(?<!E)GLboolean ([a-zA-Z0-9_]+)', r'GLboolean_\1', log_argument_names)
log_argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2',
log_argument_names)
log_argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2',
log_argument_names)
log_argument_names = re.sub(
r'CONSTVOID_([a-zA-Z0-9_]+)',
r'static_cast<const void*>(\1)', log_argument_names)
log_argument_names = re.sub(
r'CONSTCHAR_([a-zA-Z0-9_]+)', r'\1', log_argument_names)
log_argument_names = re.sub(
r'GLenum_([a-zA-Z0-9_]+)', r'GLEnums::GetStringEnum(\1)',
log_argument_names)
log_argument_names = re.sub(
r'GLboolean_([a-zA-Z0-9_]+)', r'GLEnums::GetStringBool(\1)',
log_argument_names)
log_argument_names = log_argument_names.replace(',', ' << ", " <<')
if argument_names == 'void' or argument_names == '':
argument_names = ''
log_argument_names = ''
else:
log_argument_names = " << " + log_argument_names
function_name = func['known_as']
if return_type == 'void':
file.write(' GL_SERVICE_LOG("%s" << "(" %s << ")");\n' %
(function_name, log_argument_names))
file.write(' g_driver_%s.debug_fn.%sFn(%s);\n' %
(set_name.lower(), function_name, argument_names))
if 'logging_code' in func:
file.write("%s\n" % func['logging_code'])
if options.generate_dchecks and set_name == 'gl':
file.write(' {\n')
file.write(' GLenum error = g_driver_gl.debug_fn.glGetErrorFn();\n')
file.write(' DCHECK(error == 0);\n')
file.write(' }\n')
else:
file.write(' GL_SERVICE_LOG("%s" << "(" %s << ")");\n' %
(function_name, log_argument_names))
file.write(' %s result = g_driver_%s.debug_fn.%sFn(%s);\n' %
(return_type, set_name.lower(), function_name, argument_names))
if 'logging_code' in func:
file.write("%s\n" % func['logging_code'])
else:
file.write(' GL_SERVICE_LOG("GL_RESULT: " << result);\n')
if options.generate_dchecks and set_name == 'gl':
file.write(' {\n')
file.write(' GLenum _error = g_driver_gl.debug_fn.glGetErrorFn();\n')
file.write(' DCHECK(_error == 0);\n')
file.write(' }\n')
file.write(' return result;\n')
file.write('}\n')
file.write('} // extern "C"\n')
# Write function to initialize the debug function pointers.
file.write('\n')
file.write('void Driver%s::InitializeDebugBindings() {\n' %
set_name.upper())
for func in functions:
first_name = func['known_as']
file.write(' if (!debug_fn.%sFn) {\n' % first_name)
file.write(' debug_fn.%sFn = fn.%sFn;\n' % (first_name, first_name))
file.write(' fn.%sFn = Debug_%s;\n' % (first_name, first_name))
file.write(' }\n')
file.write(' g_debugBindingsInitialized = true;\n')
file.write('}\n')
# Write function to clear all function pointers.
file.write('\n')
file.write("""void Driver%s::ClearBindings() {
memset(this, 0, sizeof(*this));
}
""" % set_name.upper())
def MakeArgNames(arguments):
argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', arguments)
argument_names = re.sub(
r'(const )?[a-zA-Z0-9_]+\** ([a-zA-Z0-9_]+)', r'\2', argument_names)
if argument_names == 'void' or argument_names == '':
argument_names = ''
return argument_names
# Write GLApiBase functions
for func in functions:
function_name = func['known_as']
return_type = func['return_type']
arguments = func['arguments']
file.write('\n')
file.write('%s %sApiBase::%sFn(%s) {\n' %
(return_type, set_name.upper(), function_name, arguments))
argument_names = MakeArgNames(arguments)
if return_type == 'void':
file.write(' driver_->fn.%sFn(%s);\n' %
(function_name, argument_names))
else:
file.write(' return driver_->fn.%sFn(%s);\n' %
(function_name, argument_names))
file.write('}\n')
# Write TraceGLApi functions
for func in functions:
function_name = func['known_as']
return_type = func['return_type']
arguments = func['arguments']
file.write('\n')
file.write('%s Trace%sApi::%sFn(%s) {\n' %
(return_type, set_name.upper(), function_name, arguments))
argument_names = MakeArgNames(arguments)
file.write(' TRACE_EVENT_BINARY_EFFICIENT0("gpu", "TraceGLAPI::%s")\n' %
function_name)
if return_type == 'void':
file.write(' %s_api_->%sFn(%s);\n' %
(set_name.lower(), function_name, argument_names))
else:
file.write(' return %s_api_->%sFn(%s);\n' %
(set_name.lower(), function_name, argument_names))
file.write('}\n')
# Write NoContextGLApi functions
if set_name.upper() == "GL":
for func in functions:
function_name = func['known_as']
return_type = func['return_type']
arguments = func['arguments']
file.write('\n')
file.write('%s NoContextGLApi::%sFn(%s) {\n' %
(return_type, function_name, arguments))
argument_names = MakeArgNames(arguments)
no_context_error = "Trying to call %s() without current GL context" % function_name
file.write(' NOTREACHED() << "%s";\n' % no_context_error)
file.write(' LOG(ERROR) << "%s";\n' % no_context_error)
default_value = { 'GLenum': 'static_cast<GLenum>(0)',
'GLuint': '0U',
'GLint': '0',
'GLboolean': 'GL_FALSE',
'GLbyte': '0',
'GLubyte': '0',
'GLbitfield': '0',
'GLushort': '0',
'GLsizei': '0',
'GLfloat': '0.0f',
'GLdouble': '0.0',
'GLsync': 'NULL'}
if return_type.endswith('*'):
file.write(' return NULL;\n')
elif return_type != 'void':
file.write(' return %s;\n' % default_value[return_type])
file.write('}\n')
file.write('\n')
file.write('} // namespace gfx\n')
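# Illustrative fragment of the logging wrapper GenerateSource emits for
# glViewport in the 'gl' set; GLenum/GLboolean arguments would additionally be
# routed through GLEnums::GetStringEnum()/GetStringBool() by the regexes above:
#   static void GL_BINDING_CALL Debug_glViewport(GLint x, GLint y, GLsizei width, GLsizei height) {
#     GL_SERVICE_LOG("glViewport" << "(" << x << ", " << y << ", " << width << ", " << height << ")");
#     g_driver_gl.debug_fn.glViewportFn(x, y, width, height);
#   }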
def GetUniquelyNamedFunctions(functions):
uniquely_named_functions = {}
for func in functions:
for version in func['versions']:
uniquely_named_functions[version['name']] = ({
'name': version['name'],
'return_type': func['return_type'],
'arguments': func['arguments'],
'known_as': func['known_as']
})
return uniquely_named_functions
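# Illustrative example: the glUnmapBuffer entry above, once expanded to
# 'versions' in main(), yields two entries here, keyed 'glUnmapBufferOES' and
# 'glUnmapBuffer', both with known_as 'glUnmapBuffer'.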
def GenerateMockBindingsHeader(file, functions):
"""Headers for functions that invoke MockGLInterface members"""
file.write(LICENSE_AND_HEADER)
uniquely_named_functions = GetUniquelyNamedFunctions(functions)
for key in sorted(uniquely_named_functions.iterkeys()):
func = uniquely_named_functions[key]
file.write('static %s GL_BINDING_CALL Mock_%s(%s);\n' %
(func['return_type'], func['name'], func['arguments']))
def GenerateMockBindingsSource(file, functions):
"""Generates functions that invoke MockGLInterface members and a
GetGLProcAddress function that returns addresses to those functions."""
file.write(LICENSE_AND_HEADER +
"""
#include <string.h>
#include "ui/gl/gl_mock.h"
namespace gfx {
// This is called mainly to prevent the compiler from combining the code of
// mock functions with identical contents, so that their function pointers
// will be different.
void MakeFunctionUnique(const char *func_name) {
VLOG(2) << "Calling mock " << func_name;
}
""")
# Write functions that trampoline into the set MockGLInterface instance.
uniquely_named_functions = GetUniquelyNamedFunctions(functions)
sorted_function_names = sorted(uniquely_named_functions.iterkeys())
for key in sorted_function_names:
func = uniquely_named_functions[key]
file.write('\n')
file.write('%s GL_BINDING_CALL MockGLInterface::Mock_%s(%s) {\n' %
(func['return_type'], func['name'], func['arguments']))
file.write(' MakeFunctionUnique("%s");\n' % func['name'])
arg_re = r'(const )?[a-zA-Z0-9]+((\s*const\s*)?\*)* ([a-zA-Z0-9]+)'
argument_names = re.sub(arg_re, r'\4', func['arguments'])
if argument_names == 'void':
argument_names = ''
function_name = func['known_as'][2:]
if func['return_type'] == 'void':
file.write(' interface_->%s(%s);\n' %
(function_name, argument_names))
else:
file.write(' return interface_->%s(%s);\n' %
(function_name, argument_names))
file.write('}\n')
# Write an 'invalid' function to catch code calling through uninitialized
# function pointers or trying to interpret the return value of
# GetGLProcAddress().
file.write('\n')
file.write('static void MockInvalidFunction() {\n')
file.write(' NOTREACHED();\n')
file.write('}\n')
# Write a function to lookup a mock GL function based on its name.
file.write('\n')
file.write('void* GL_BINDING_CALL ' +
'MockGLInterface::GetGLProcAddress(const char* name) {\n')
for key in sorted_function_names:
name = uniquely_named_functions[key]['name']
file.write(' if (strcmp(name, "%s") == 0)\n' % name)
file.write(' return reinterpret_cast<void*>(Mock_%s);\n' % name)
# Always return a non-NULL pointer like some EGL implementations do.
file.write(' return reinterpret_cast<void*>(&MockInvalidFunction);\n')
file.write('}\n')
file.write('\n')
file.write('} // namespace gfx\n')
def GenerateEnumUtils(out_file, input_filenames):
enum_re = re.compile(r'\#define\s+(GL_[a-zA-Z0-9_]+)\s+([0-9A-Fa-fx]+)')
dict = {}
for fname in input_filenames:
lines = open(fname).readlines()
for line in lines:
m = enum_re.match(line)
if m:
name = m.group(1)
value = m.group(2)
if len(value) <= 10:
if not value in dict:
dict[value] = name
# Check that our own _CHROMIUM macros do not conflict with the Khronos GL
# headers.
elif dict[value] != name and (name.endswith('_CHROMIUM') or
dict[value].endswith('_CHROMIUM')):
raise RuntimeError("code collision: %s and %s have the same code %s"
% (dict[value], name, value))
out_file.write(LICENSE_AND_HEADER)
out_file.write("static const GLEnums::EnumToString "
"enum_to_string_table[] = {\n")
for value in dict:
out_file.write(' { %s, "%s", },\n' % (value, dict[value]))
out_file.write("""};
const GLEnums::EnumToString* const GLEnums::enum_to_string_table_ =
enum_to_string_table;
const size_t GLEnums::enum_to_string_table_len_ =
sizeof(enum_to_string_table) / sizeof(enum_to_string_table[0]);
""")
def ParseFunctionsFromHeader(header_file, extensions, versions):
"""Parse a C extension header file and return a map from extension names to
a list of functions.
Args:
header_file: Line-iterable C header file.
Returns:
Map of extension name => functions, Map of gl version => functions.
Functions will only be in either one of the two maps.
"""
version_start = re.compile(
r'#ifndef GL_(ES_|)VERSION((?:_[0-9])+)$')
extension_start = re.compile(
r'#ifndef ((?:GL|EGL|WGL|GLX)_[A-Z]+_[a-zA-Z]\w+)')
extension_function = re.compile(r'.+\s+([a-z]+\w+)\s*\(')
typedef = re.compile(r'typedef .*')
macro_start = re.compile(r'^#(if|ifdef|ifndef).*')
macro_end = re.compile(r'^#endif.*')
macro_depth = 0
current_version = None
current_version_depth = 0
current_extension = None
current_extension_depth = 0
# Pick up all core functions here, since some of them are missing in the
# Khronos headers.
hdr = os.path.basename(header_file.name)
if hdr == "gl.h":
current_version = GLVersion(False, 1, 0)
line_num = 1
for line in header_file:
version_match = version_start.match(line)
if macro_start.match(line):
macro_depth += 1
if version_match:
if current_version:
raise RuntimeError('Nested GL version macro in %s at line %d' % (
header_file.name, line_num))
current_version_depth = macro_depth
es = version_match.group(1)
major_version, minor_version =\
version_match.group(2).lstrip('_').split('_')
is_es = len(es) > 0
if (not is_es) and (major_version == '1'):
minor_version = 0
current_version = GLVersion(
is_es, int(major_version), int(minor_version))
elif macro_end.match(line):
macro_depth -= 1
if macro_depth < current_extension_depth:
current_extension = None
if macro_depth < current_version_depth:
current_version = None
match = extension_start.match(line)
if match and not version_match:
if current_version and hdr != "gl.h":
raise RuntimeError('Nested GL version macro in %s at line %d' % (
header_file.name, line_num))
current_extension = match.group(1)
current_extension_depth = macro_depth
match = extension_function.match(line)
if match and not typedef.match(line):
if current_extension:
extensions[current_extension].add(match.group(1))
elif current_version:
versions[current_version].add(match.group(1))
line_num = line_num + 1
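# Illustrative example: given a glext.h fragment such as
#   #ifndef GL_VERSION_3_0
#   GLAPI void APIENTRY glBeginTransformFeedback (GLenum primitiveMode);
#   #endif
#   #ifndef GL_APPLE_fence
#   GLAPI GLboolean APIENTRY glTestFenceAPPLE (GLuint fence);
#   #endif
# glBeginTransformFeedback is recorded under GLVersion(False, 3, 0) in
# 'versions' and glTestFenceAPPLE under 'GL_APPLE_fence' in 'extensions'.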
def GetDynamicFunctions(extension_headers):
"""Parse all optional functions from a list of header files.
Args:
extension_headers: List of header file names.
Returns:
Map of extension name => list of functions,
Map of gl version => list of functions.
"""
extensions = collections.defaultdict(lambda: set([]))
gl_versions = collections.defaultdict(lambda: set([]))
for header in extension_headers:
ParseFunctionsFromHeader(open(header), extensions, gl_versions)
return extensions, gl_versions
def GetFunctionToExtensionsMap(extensions):
"""Construct map from a function names to extensions which define the
function.
Args:
extensions: Map of extension name => functions.
Returns:
Map of function name => extension names.
"""
function_to_extensions = {}
for extension, functions in extensions.items():
for function in functions:
if not function in function_to_extensions:
function_to_extensions[function] = set([])
function_to_extensions[function].add(extension)
return function_to_extensions
def GetFunctionToGLVersionsMap(gl_versions):
"""Construct map from a function names to GL versions which define the
function.
Args:
extensions: Map of gl versions => functions.
Returns:
Map of function name => gl versions.
"""
function_to_gl_versions = {}
for gl_version, functions in gl_versions.items():
for function in functions:
if not function in function_to_gl_versions:
function_to_gl_versions[function] = set([])
function_to_gl_versions[function].add(gl_version)
return function_to_gl_versions
def LooksLikeExtensionFunction(function):
"""Heuristic to see if a function name is consistent with extension function
naming."""
vendor = re.match(r'\w+?([A-Z][A-Z]+)$', function)
return vendor is not None and not vendor.group(1) in ['GL', 'API', 'DC']
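# Illustrative examples:
#   LooksLikeExtensionFunction('glTestFenceNV')     # -> True  (vendor suffix)
#   LooksLikeExtensionFunction('glTexStorage2DEXT') # -> True
#   LooksLikeExtensionFunction('glViewport')        # -> False (no vendor suffix)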
def SortVersions(key):
# Prefer functions from the core for binding
if 'gl_versions' in key:
return 0
else:
return 1
def FillExtensionsFromHeaders(functions, extension_headers, extra_extensions):
"""Determine which functions belong to extensions based on extension headers,
and fill in this information to the functions table for functions that don't
already have the information.
Args:
functions: List of (return type, function versions, arguments).
extension_headers: List of header file names.
extra_extensions: Extensions to add to the list.
Returns:
Tuple of (set of used extensions, set of used client extensions).
"""
# Parse known extensions.
extensions, gl_versions = GetDynamicFunctions(extension_headers)
functions_to_extensions = GetFunctionToExtensionsMap(extensions)
functions_to_gl_versions = GetFunctionToGLVersionsMap(gl_versions)
# Fill in the extension information.
used_extensions = set()
used_client_extensions = set()
used_functions_by_version = collections.defaultdict(lambda: set([]))
for func in functions:
for version in func['versions']:
name = version['name']
# There should only be one version entry per name string.
if len([v for v in func['versions'] if v['name'] == name]) > 1:
raise RuntimeError(
'Duplicate version entries with same name for %s' % name)
# Make sure we know about all extensions and extension functions.
extensions_from_headers = set([])
if name in functions_to_extensions:
extensions_from_headers = set(functions_to_extensions[name])
explicit_extensions = set([])
if 'client_extensions' in version:
assert not 'extensions' in version
version['extensions'] = version['client_extensions']
if 'extensions' in version:
explicit_extensions = set(version['extensions'])
in_both = explicit_extensions.intersection(extensions_from_headers)
if len(in_both):
print "[%s] Specified redundant extensions for binding: %s" % (
name, ', '.join(in_both))
diff = explicit_extensions - extensions_from_headers
if len(diff):
print "[%s] Specified extra extensions for binding: %s" % (
name, ', '.join(diff))
all_extensions = extensions_from_headers.union(explicit_extensions)
if len(all_extensions):
version['extensions'] = all_extensions
if 'extensions' in version:
assert len(version['extensions'])
if 'client_extensions' in version:
used_client_extensions.update(version['extensions'])
else:
used_extensions.update(version['extensions'])
if not 'extensions' in version and LooksLikeExtensionFunction(name):
raise RuntimeError('%s looks like an extension function but does not '
'belong to any of the known extensions.' % name)
if name in functions_to_gl_versions:
assert not 'gl_versions' in version
version['gl_versions'] = functions_to_gl_versions[name]
for v in version['gl_versions']:
used_functions_by_version[v].add(name)
func['versions'] = sorted(func['versions'], key=SortVersions)
# Add extensions that do not have any functions.
used_extensions.update(extra_extensions)
# Print out used function count by GL(ES) version.
for v in sorted([v for v in used_functions_by_version if v.is_es]):
print "OpenGL ES %d.%d: %d used functions" % (
v.major_version, v.minor_version, len(used_functions_by_version[v]))
for v in sorted([v for v in used_functions_by_version if not v.is_es]):
print "OpenGL %d.%d: %d used functions" % (
v.major_version, v.minor_version, len(used_functions_by_version[v]))
return used_extensions, used_client_extensions
def ResolveHeader(header, header_paths):
for path in header_paths:
result = os.path.join(path, header)
if not os.path.isabs(path):
result = os.path.abspath(os.path.join(SELF_LOCATION, result))
if os.path.exists(result):
# Always use forward slashes as path separators. Otherwise backslashes
# may be incorrectly interpreted as escape characters.
return result.replace(os.path.sep, '/')
raise Exception('Header %s not found.' % header)
def main(argv):
"""This is the main function."""
parser = optparse.OptionParser()
parser.add_option('--inputs', action='store_true')
parser.add_option('--verify-order', action='store_true')
parser.add_option('--generate-dchecks', action='store_true',
help='Generates DCHECKs into the logging functions '
'asserting no GL errors (useful for debugging)')
parser.add_option('--validate-bindings', action='store_true',
help='Generate DCHECKs to validate function bindings '
'were correctly supplied (useful for debugging)')
options, args = parser.parse_args(argv)
if options.inputs:
for [_, _, headers, _] in FUNCTION_SETS:
for header in headers:
print ResolveHeader(header, HEADER_PATHS)
return 0
directory = SELF_LOCATION
if len(args) >= 1:
directory = args[0]
def ClangFormat(filename):
formatter = "clang-format"
if platform.system() == "Windows":
formatter += ".bat"
call([formatter, "-i", "-style=chromium", filename])
for [functions, set_name, extension_headers, extensions] in FUNCTION_SETS:
# Function names can be specified in two ways (list of unique names or list
# of versions with different binding conditions). Fill in the 'versions' list
# if it is missing, so that it can be used from here on:
for func in functions:
assert 'versions' in func or 'names' in func, 'Function with no names'
if 'versions' not in func:
func['versions'] = [{'name': n} for n in func['names']]
# Use the first version's name unless otherwise specified
if 'known_as' not in func:
func['known_as'] = func['versions'][0]['name']
# Make sure that 'names' is not accidentally used instead of 'versions'
if 'names' in func:
del func['names']
# Check that the function names in each set are sorted in alphabetical order.
for index in range(len(functions) - 1):
func_name = functions[index]['known_as']
next_func_name = functions[index + 1]['known_as']
if func_name.lower() > next_func_name.lower():
raise Exception(
'function %s is not in alphabetical order' % next_func_name)
if options.verify_order:
continue
extension_headers = [ResolveHeader(h, HEADER_PATHS)
for h in extension_headers]
used_extensions, used_client_extensions = FillExtensionsFromHeaders(
functions, extension_headers, extensions)
header_file = open(
os.path.join(directory, 'gl_bindings_autogen_%s.h' % set_name), 'wb')
GenerateHeader(header_file, functions, set_name,
used_extensions, used_client_extensions)
header_file.close()
ClangFormat(header_file.name)
header_file = open(
os.path.join(directory, 'gl_bindings_api_autogen_%s.h' % set_name),
'wb')
GenerateAPIHeader(header_file, functions, set_name)
header_file.close()
ClangFormat(header_file.name)
source_file = open(
os.path.join(directory, 'gl_bindings_autogen_%s.cc' % set_name), 'wb')
GenerateSource(source_file, functions, set_name,
used_extensions, used_client_extensions, options)
source_file.close()
ClangFormat(source_file.name)
if not options.verify_order:
header_file = open(
os.path.join(directory, 'gl_mock_autogen_gl.h'), 'wb')
GenerateMockHeader(header_file, GL_FUNCTIONS, 'gl')
header_file.close()
ClangFormat(header_file.name)
header_file = open(os.path.join(directory, 'gl_bindings_autogen_mock.h'),
'wb')
GenerateMockBindingsHeader(header_file, GL_FUNCTIONS)
header_file.close()
ClangFormat(header_file.name)
source_file = open(os.path.join(directory, 'gl_bindings_autogen_mock.cc'),
'wb')
GenerateMockBindingsSource(source_file, GL_FUNCTIONS)
source_file.close()
ClangFormat(source_file.name)
enum_header_filenames = [ResolveHeader(h, HEADER_PATHS)
for h in GLES2_HEADERS_WITH_ENUMS]
header_file = open(os.path.join(directory,
'gl_enums_implementation_autogen.h'),
'wb')
GenerateEnumUtils(header_file, enum_header_filenames)
header_file.close()
ClangFormat(header_file.name)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| {
"content_hash": "3645e7e603358a718718fb573adae944",
"timestamp": "",
"source": "github",
"line_count": 2745,
"max_line_length": 89,
"avg_line_length": 38.71876138433515,
"alnum_prop": 0.6234204905770443,
"repo_name": "Workday/OpenFrame",
"id": "d5019b8b8feffdd6aeefc8d63c0b21041ed84b2a",
"size": "106472",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ui/gl/generate_bindings.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import functools
import unittest
from test import test_support
from weakref import proxy
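# PythonPartial is wrapped in staticmethod so that assigning it as a class
# attribute (thetype = PythonPartial in TestPythonPartial below) does not turn
# it into an unbound method; accessing self.thetype then yields the plain
# function.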
@staticmethod
def PythonPartial(func, *args, **keywords):
'Pure Python approximation of partial()'
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
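# Illustrative example: capture simply echoes how it was called, which makes
# it easy to assert on exactly what partial() forwarded, e.g.
#   capture(1, 2, a=3)  ->  ((1, 2), {'a': 3})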
class TestPartial(unittest.TestCase):
thetype = functools.partial
def test_basic_examples(self):
p = self.thetype(capture, 1, 2, a=10, b=20)
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.thetype(map, lambda x: x*10)
self.assertEqual(p([1,2,3,4]), [10, 20, 30, 40])
def test_attributes(self):
p = self.thetype(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
# attributes should not be writable
if not isinstance(self.thetype, type):
return
self.assertRaises(TypeError, setattr, p, 'func', map)
self.assertRaises(TypeError, setattr, p, 'args', (1, 2))
self.assertRaises(TypeError, setattr, p, 'keywords', dict(a=1, b=2))
def test_argument_checking(self):
self.assertRaises(TypeError, self.thetype) # need at least a func arg
try:
self.thetype(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.thetype(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.thetype(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.thetype(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.thetype(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.thetype(capture, a=1)
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.thetype(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.failUnless(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.thetype(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.failUnless(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.thetype(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.failUnless(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.failUnless(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.thetype(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.thetype(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.thetype(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.thetype(f, y=0), 1)
def test_attributes(self):
p = self.thetype(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_weakref(self):
f = self.thetype(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = map(str, range(10))
join = self.thetype(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.thetype(''.join)
self.assertEqual(join(data), '0123456789')
class PartialSubclass(functools.partial):
pass
class TestPartialSubclass(TestPartial):
thetype = PartialSubclass
class TestPythonPartial(TestPartial):
thetype = PythonPartial
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.failUnless(getattr(wrapper, name) is getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
self.failUnless(wrapped_attr[key] is wrapper_attr[key])
def test_default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f)
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__doc__, 'This is a test')
self.assertEqual(wrapper.attr, 'This is also a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.failIf(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assert_(wrapper.__doc__.startswith('max('))
class TestWraps(TestUpdateWrapper):
def test_default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f)
def wrapper():
pass
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__doc__, 'This is a test')
self.assertEqual(wrapper.attr, 'This is also a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.failIf(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_main(verbose=None):
import sys
test_classes = (
TestPartial,
TestPartialSubclass,
TestPythonPartial,
TestUpdateWrapper,
TestWraps
)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == '__main__':
test_main(verbose=True)
| {
"content_hash": "d85751be957dff7402444e24e8886ffc",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 81,
"avg_line_length": 33.641638225255974,
"alnum_prop": 0.5662980622907579,
"repo_name": "TathagataChakraborti/resource-conflicts",
"id": "6012f9f8551803eb4ef7c3c2716c70f51974b457",
"size": "9857",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "PLANROB-2015/seq-sat-lama/py2.5/lib/python2.5/test/test_functools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "Batchfile",
"bytes": "9764"
},
{
"name": "C",
"bytes": "14253103"
},
{
"name": "C++",
"bytes": "754817"
},
{
"name": "CSS",
"bytes": "9779"
},
{
"name": "DIGITAL Command Language",
"bytes": "13234"
},
{
"name": "Emacs Lisp",
"bytes": "174752"
},
{
"name": "Groff",
"bytes": "43625"
},
{
"name": "HTML",
"bytes": "418642"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Makefile",
"bytes": "392287"
},
{
"name": "Matlab",
"bytes": "918"
},
{
"name": "Objective-C",
"bytes": "28604"
},
{
"name": "Perl",
"bytes": "163937"
},
{
"name": "Prolog",
"bytes": "66"
},
{
"name": "Python",
"bytes": "38769203"
},
{
"name": "R",
"bytes": "2349"
},
{
"name": "SAS",
"bytes": "57249"
},
{
"name": "Shell",
"bytes": "173594"
},
{
"name": "TeX",
"bytes": "5169842"
},
{
"name": "VimL",
"bytes": "9563"
},
{
"name": "Visual Basic",
"bytes": "1443"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
import json
Base = declarative_base()
class Puppy(Base):
__tablename__ = 'puppy'
    name = Column(String(80), nullable=False)
id = Column(Integer, primary_key = True)
description = Column(String(250))
    # Add a property decorator to serialize data from the database
@property
def serialize(self):
return {'name':self.name,
'id':self.id,
'description':self.description}
engine = create_engine('sqlite:///puppies.db')
Base.metadata.create_all(engine)
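# Hedged usage sketch (not part of the original starter code): once rows exist,
# the `serialize` property above can feed json.dumps directly. The sessionmaker
# wiring below is an assumption based on the usual SQLAlchemy pattern.
from sqlalchemy.orm import sessionmaker
def serialize_all_puppies():
    session = sessionmaker(bind=engine)()
    return json.dumps([puppy.serialize for puppy in session.query(Puppy).all()])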
| {
"content_hash": "cd272fde42ba5ea8f97a2ddb343b9b47",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 69,
"avg_line_length": 25.68,
"alnum_prop": 0.705607476635514,
"repo_name": "AtmaMani/pyChakras",
"id": "6a2005185efef83321c16bd4d48a21c69ed4f08d",
"size": "642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "udacity_restful_apis/lesson_3/05_serializing_data_from_the_database/starter_code/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "6325099"
},
{
"name": "Jupyter Notebook",
"bytes": "48138435"
},
{
"name": "MATLAB",
"bytes": "155575"
},
{
"name": "Python",
"bytes": "179172"
},
{
"name": "R",
"bytes": "27849"
}
],
"symlink_target": ""
} |
"""
MoinMoin - MoinMoin.userform.admin Tests
@copyright: 2009 MoinMoin:DmitrijsMilajevs
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin.userform.admin import do_user_browser
from MoinMoin.datastruct import ConfigGroups
from MoinMoin.user import User
from MoinMoin.Page import Page
from MoinMoin._tests import nuke_user, become_superuser, wikiconfig
class TestAdmin:
class Config(wikiconfig.Config):
def groups(self, request):
            groups = {'OneGroup': ['TestUser', 'OtherUser'],
'OtherGroup': ['TestUser']}
return ConfigGroups(request, groups)
def setup_class(self):
request = self.request
user_name = 'TestUser'
self.user_name = user_name
become_superuser(request)
User(request, name=user_name, password=user_name).save()
def teardown_class(self):
nuke_user(self.request, self.user_name)
def setup_method(self, method):
self.request.page = Page(self.request, 'SystemAdmin')
def test_do_user_browser(self):
request = self.request
browser = do_user_browser(request)
assert browser
coverage_modules = ['MoinMoin.userform.admin']
| {
"content_hash": "05c252b0bc7d83bd9202d305bce0f50c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 67,
"avg_line_length": 26.893617021276597,
"alnum_prop": 0.6384493670886076,
"repo_name": "Glottotopia/aagd",
"id": "0223168de18bd9b69c30c1e6db1285e1a919835a",
"size": "1294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moin/local/moin/MoinMoin/userform/_tests/test_admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "152885"
},
{
"name": "CSS",
"bytes": "454208"
},
{
"name": "ColdFusion",
"bytes": "438820"
},
{
"name": "HTML",
"bytes": "1998354"
},
{
"name": "Java",
"bytes": "510468"
},
{
"name": "JavaScript",
"bytes": "6505329"
},
{
"name": "Lasso",
"bytes": "72399"
},
{
"name": "Makefile",
"bytes": "10216"
},
{
"name": "PHP",
"bytes": "259528"
},
{
"name": "Perl",
"bytes": "137186"
},
{
"name": "Python",
"bytes": "13713475"
},
{
"name": "Shell",
"bytes": "346"
},
{
"name": "XSLT",
"bytes": "15970"
}
],
"symlink_target": ""
} |
import requests
import json
import io
import sys
from datetime import datetime,timezone
import os
import google_calendar
DATA_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)),'data')
def download_fixtures_file(teamname):
url_file = open(os.path.join(DATA_FOLDER,'metadata.json'),'r')
url_data = json.load(url_file)
try:
url = url_data['url'].replace('id',str(url_data['teams'][teamname]['id']))
except KeyError:
print("Ïnvalid team name")
sys.exit(0)
try:
down_request = requests.get(url)
return io.StringIO(down_request.text)
except requests.ConnectionError:
print("No internet. Retrieving offline data...\n")
return io.StringIO()
def get_fixtures_data(teamname):
team_file = download_fixtures_file(teamname)
fixture_data=[]
for line in team_file:
fixture_data.append(line.strip())
return fixture_data
def get_list(fixture_data,value_to_get):
value_list=[]
for line in fixture_data:
if(line[:len(value_to_get)]==value_to_get):
value_list.append(line[len(value_to_get)+1:])
return value_list
def get_datetime_list(fixture_data,value_to_get):
dt_string_list = get_list(fixture_data,value_to_get)
date_format = '%Y%m%dT%H%M%SZ'
datetime_list=[]
for dt_string in dt_string_list:
datetime_list.append(datetime.strptime(dt_string,date_format).isoformat('T'))
return datetime_list
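# Hedged example (not part of the original script): how a single iCalendar line
# flows through get_list and get_datetime_list. The sample lines are made up.
def _parse_example():
    sample = ['SUMMARY:India vs Australia', 'DTSTART:20240301T143000Z']
    return get_datetime_list(sample, 'DTSTART')  # ['2024-03-01T14:30:00']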
def save_fixtures(teamname,fixtures_json):
event_id_dict = {}
try:
saved_fixtures_json = json.load(open(os.path.join(DATA_FOLDER,teamname+'.json'),'r'))
for saved_fixture in saved_fixtures_json:
if 'event_id' in saved_fixture:
event_id_dict[saved_fixture['Summary']] = saved_fixture['event_id']
except FileNotFoundError:
event_id_dict = {}
for i in range(len(fixtures_json)):
if fixtures_json[i]['Summary'] in event_id_dict:
fixtures_json[i]['event_id'] = event_id_dict[fixtures_json[i]['Summary']]
with open(os.path.join(DATA_FOLDER,teamname+'.json'),'w') as fixtures_file:
json.dump(fixtures_json,fixtures_file)
fixtures_file.close()
return fixtures_json
def get_offline_fixtures(teamname):
try:
fixtures_file = open(os.path.join(DATA_FOLDER,teamname+'.json'),'r')
return json.load(fixtures_file)
except FileNotFoundError:
print("Sorry! No offline data currently available for "+teamname)
sys.exit(0)
def get_fixtures(teamname):
fixture_data = get_fixtures_data(teamname)
if fixture_data:
fixtures = []
summaries = get_list(fixture_data,'SUMMARY')
start_times = get_datetime_list(fixture_data,'DTSTART')
end_times = get_datetime_list(fixture_data,'DTEND')
venues = get_list(fixture_data,'LOCATION')
for i in range(len(summaries)):
fixture = {}
fixture['Summary'] = summaries[i]
fixture['Start Time'] = start_times[i]
fixture['End Time'] = end_times[i]
fixture['Venue'] = venues[i]
fixtures.append(fixture)
fixtures = save_fixtures(teamname,fixtures)
return fixtures
else:
return get_offline_fixtures(teamname)
def print_fixtures(fixtures_json):
try:
if(fixtures_json == []):
print("No data to display")
for fixture in fixtures_json:
print(fixture['Summary'])
date_format = '%Y-%m-%dT%H:%M:%S'
print("Start Time: "+ datetime.strptime(fixture['Start Time'],date_format).replace(tzinfo=timezone.utc).astimezone(tz=None).strftime("%d %B %Y, %I:%M %p"))
print("End Time: "+ datetime.strptime(fixture['End Time'],date_format).replace(tzinfo=timezone.utc).astimezone(tz=None).strftime("%d %B %Y, %I:%M %p"))
print("Venue: "+fixture['Venue']+"\n")
except TypeError:
print("No data to display")
def google_calendar_init(fixtures_json,teamname):
metadata_file = open(os.path.join(DATA_FOLDER, 'metadata.json'), 'r')
metadata = json.load(metadata_file)
fixture_data = {}
fixture_data['colorId'] = metadata['teams'][teamname]['calendar_color']
fixture_data['fixtures'] = fixtures_json
try:
fixture_data['calendarId'] = metadata['calendarId']
google_calendar.get_logged_in_user()
if input('Login as another user?(y/n)').lower() == 'y':
metadata['calendarId'] = google_calendar.get_calendar(True)
fixture_data['calendarId'] = metadata['calendarId']
except KeyError:
metadata['calendarId'] = google_calendar.get_calendar(None)
fixture_data['calendarId'] = metadata['calendarId']
with open(os.path.join(DATA_FOLDER, 'metadata.json'), 'w') as metadata_file:
json.dump(metadata, metadata_file)
return fixture_data
def main():
teamname = input('Enter team name: ').lower()
while(teamname.lower() != 'q'):
fixtures_json = get_fixtures(teamname)
print_fixtures(fixtures_json)
calendar_choice = input('GOOGLE CALENDAR:\n' +
'1. Add to/Update Google Calendar\n' +
'2. Delete from Google Calendar\n' +
'Enter Choice(1,2) or press Enter to continue')
if calendar_choice == '1' or calendar_choice == '2':
events_json = google_calendar_init(fixtures_json,teamname)
if calendar_choice == '1':
google_calendar.create_update_events(events_json)
elif calendar_choice == '2':
google_calendar.delete_events(events_json)
teamname = input('Enter team name(Q to Quit): ').lower()
main()
| {
"content_hash": "ae4d8627265a2fb8ff93892607669edb",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 167,
"avg_line_length": 37.91447368421053,
"alnum_prop": 0.6205101509630401,
"repo_name": "shivendratandon/cricket-fixtures",
"id": "e5861be73c6641a82213bbf5ca90674334112bb2",
"size": "5764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cricket_fixtures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12448"
}
],
"symlink_target": ""
} |
import sys,re,random
class BondMovie:
actor = ''
year = ''
name = ''
watched = 0
def __init__(self,t0 ='',t1 = '',t2 = '', t3 = 0):
if t0 != '' and t1 != '' and t2 != '':
self.name = t0
self.year = t1
self.actor = t2
self.watched = t3
try:
movieNumber = int(sys.argv[1])
if movieNumber != 1 and movieNumber != 2:
raise IndexError
except IndexError:
print 'Usage: python ' + sys.argv[0] + ' <1 or 2>'
sys.exit(0)
myRegex = '^(\*\s)?([\w\s\W]+)\s(\d{4})\s([\w\s\W]+)$'
list1 = []
watchedNum = 0
fh = open('jamesFinal.txt','r+')
for line in fh:
match = re.match(myRegex,line)
watched = 0
try:
if match.group(1) is not None:
watched = 1
watchedNum = watchedNum + 1
temp = BondMovie(match.group(2),match.group(3),match.group(4).rstrip(),watched)
list1.append(temp)
except AttributeError:
print "Regex Didnt Match anything"
fh.close()
sys.exit(0)
firstMovie = random.randint(0,len(list1)-1)
secondMovie = random.randint(0,len(list1)-1)
oneMovie = random.randint(0,len(list1)-1)
bailCount = 0
flag = 0
if watchedNum != len(list1):
if movieNumber == 2:
while list1[firstMovie].watched == 1 or list1[secondMovie].watched == 1 or firstMovie == secondMovie:
if list1[firstMovie].watched == 1:
firstMovie = random.randint(0,len(list1)-1)
elif list1[secondMovie].watched == 1:
secondMovie = random.randint(0,len(list1)-1)
else:
secondMovie = random.randint(0,len(list1)-1)
bailCount = bailCount + 1
if bailCount >= 1000000:
flag = 1
break
if flag != 1:
print 'The first Movie will be ' + list1[firstMovie].name + ' starring ' + list1[firstMovie].actor + ' and filmed in ' + list1[firstMovie].year
print 'The second Movie will be ' + list1[secondMovie].name + ' starring ' + list1[secondMovie].actor + ' and filmed in ' + list1[secondMovie].year
else:
print "since there is an odd number of movies we only have 1 left..."
if list1[secondMovie].watched == 1:
print 'The Movie will be ' + list1[firstMovie].name + ' starring ' + list1[firstMovie].actor + ' and filmed in ' + list1[firstMovie].year
else:
print 'The Movie will be ' + list1[secondMovie].name + ' starring ' + list1[secondMovie].actor + ' and filmed in ' + list1[secondMovie].year
fh.seek(0)
for x in range(len(list1)):
if list1[x].watched == 1 or x == firstMovie or x == secondMovie:
fh.write('* ' + list1[x].name + ' ' + list1[x].year + ' ' + list1[x].actor + '\n')
else:
fh.write(list1[x].name + ' ' + list1[x].year + ' ' + list1[x].actor + '\n')
else:
while list1[oneMovie].watched == 1:
oneMovie = random.randint(0,len(list1)-1)
print 'The Movie will be ' + list1[oneMovie].name + ' starring ' + list1[oneMovie].actor + ' and filmed in ' + list1[oneMovie].year
fh.seek(0)
for x in range(len(list1)):
if list1[x].watched == 1 or x == oneMovie:
fh.write('* ' + list1[x].name + ' ' + list1[x].year + ' ' + list1[x].actor + '\n')
else:
fh.write(list1[x].name + ' ' + list1[x].year + ' ' + list1[x].actor + '\n')
fh.truncate()
else:
print "WE HAVE WATCHED THEM ALL!!!!"
fh.close()
| {
"content_hash": "2c665744560bc74d8d21654dfc7c33b1",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 150,
"avg_line_length": 35.735632183908045,
"alnum_prop": 0.6249597941460276,
"repo_name": "FireElementalNE/jamesbond",
"id": "b5bc5bc0be6b8e71c855a77a0bc345383f9a5fd0",
"size": "3109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jamesFinal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3109"
}
],
"symlink_target": ""
} |
"""Easy to use object-oriented thread pool framework.
A thread pool is an object that maintains a pool of worker threads to perform
time consuming operations in parallel. It assigns jobs to the threads
by putting them in a work request queue, where they are picked up by the
next available thread. This then performs the requested operation in the
background and puts the results in another queue.
The thread pool object can then collect the results from all threads from
this queue as soon as they become available or after all threads have
finished their work. It's also possible, to define callbacks to handle
each result as it comes in.
The basic concept and some code was taken from the book "Python in a Nutshell,
2nd edition" by Alex Martelli, O'Reilly 2006, ISBN 0-596-10046-9, from section
14.5 "Threaded Program Architecture". I wrapped the main program logic in the
ThreadPool class, added the WorkRequest class and the callback system and
tweaked the code here and there. Kudos also to Florent Aide for the exception
handling mechanism.
Basic usage::
>>> pool = ThreadPool(poolsize)
>>> requests = makeRequests(some_callable, list_of_args, callback)
>>> [pool.putRequest(req) for req in requests]
>>> pool.wait()
See the end of the module code for a brief, annotated usage example.
Website : http://chrisarndt.de/projects/threadpool/
"""
__docformat__ = "restructuredtext en"
__all__ = [
'makeRequests',
'NoResultsPending',
'NoWorkersAvailable',
'ThreadPool',
'WorkRequest',
'WorkerThread'
]
__author__ = "Christopher Arndt"
__version__ = '1.2.7'
__revision__ = "$Revision: 416 $"
__date__ = "$Date: 2009-10-07 05:41:27 +0200 (Wed, 07 Oct 2009) $"
__license__ = "MIT license"
# standard library modules
import sys
import threading
import Queue
import traceback
# exceptions
class NoResultsPending(Exception):
"""All work requests have been processed."""
pass
class NoWorkersAvailable(Exception):
"""No worker threads available to process remaining requests."""
pass
# internal module helper functions
def _handle_thread_exception(request, exc_info):
"""Default exception handler callback function.
This just prints the exception info via ``traceback.print_exception``.
"""
traceback.print_exception(*exc_info)
# utility functions
def makeRequests(callable_, args_list, callback=None,
exc_callback=_handle_thread_exception):
"""Create several work requests for same callable with different arguments.
Convenience function for creating several work requests for the same
callable where each invocation of the callable receives different values
for its arguments.
``args_list`` contains the parameters for each invocation of callable.
Each item in ``args_list`` should be either a 2-item tuple of the list of
positional arguments and a dictionary of keyword arguments or a single,
non-tuple argument.
See docstring for ``WorkRequest`` for info on ``callback`` and
``exc_callback``.
"""
requests = []
for item in args_list:
if isinstance(item, tuple):
requests.append(
WorkRequest(callable_, item[0], item[1], callback=callback,
exc_callback=exc_callback)
)
else:
requests.append(
WorkRequest(callable_, [item], None, callback=callback,
exc_callback=exc_callback)
)
return requests
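# Hedged illustration (not part of the original module): the two accepted item
# forms for ``args_list`` described in the docstring above -- a plain non-tuple
# argument versus an (args, kwargs) tuple.
def _make_requests_example():
    def add(a, b=0):
        return a + b
    return makeRequests(add, [5, ((1,), {'b': 2})])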
# classes
class WorkerThread(threading.Thread):
"""Background thread connected to the requests/results queues.
A worker thread sits in the background and picks up work requests from
one queue and puts the results in another until it is dismissed.
"""
def __init__(self, requests_queue, results_queue, poll_timeout=5, **kwds):
"""Set up thread in daemonic mode and start it immediatedly.
``requests_queue`` and ``results_queue`` are instances of
``Queue.Queue`` passed by the ``ThreadPool`` class when it creates a new
worker thread.
"""
threading.Thread.__init__(self, **kwds)
self.setDaemon(1)
self._requests_queue = requests_queue
self._results_queue = results_queue
self._poll_timeout = poll_timeout
self._dismissed = threading.Event()
self.start()
def run(self):
"""Repeatedly process the job queue until told to exit."""
while True:
if self._dismissed.isSet():
# we are dismissed, break out of loop
break
# get next work request. If we don't get a new request from the
            # queue after self._poll_timeout seconds, we jump to the start of
# the while loop again, to give the thread a chance to exit.
try:
request = self._requests_queue.get(True, self._poll_timeout)
except Queue.Empty:
continue
else:
if self._dismissed.isSet():
# we are dismissed, put back request in queue and exit loop
self._requests_queue.put(request)
break
try:
result = request.callable(*request.args, **request.kwds)
self._results_queue.put((request, result))
except:
request.exception = True
self._results_queue.put((request, sys.exc_info()))
def dismiss(self):
"""Sets a flag to tell the thread to exit when done with current job."""
self._dismissed.set()
class WorkRequest:
"""A request to execute a callable for putting in the request queue later.
See the module function ``makeRequests`` for the common case
where you want to build several ``WorkRequest`` objects for the same
callable but with different arguments for each call.
"""
def __init__(self, callable_, args=None, kwds=None, requestID=None,
callback=None, exc_callback=_handle_thread_exception):
"""Create a work request for a callable and attach callbacks.
        A work request consists of a callable to be executed by a
        worker thread, a list of positional arguments, and a dictionary
        of keyword arguments.
A ``callback`` function can be specified, that is called when the
results of the request are picked up from the result queue. It must
accept two anonymous arguments, the ``WorkRequest`` object and the
results of the callable, in that order. If you want to pass additional
information to the callback, just stick it on the request object.
You can also give custom callback for when an exception occurs with
the ``exc_callback`` keyword parameter. It should also accept two
anonymous arguments, the ``WorkRequest`` and a tuple with the exception
details as returned by ``sys.exc_info()``. The default implementation
of this callback just prints the exception info via
``traceback.print_exception``. If you want no exception handler
callback, just pass in ``None``.
``requestID``, if given, must be hashable since it is used by
``ThreadPool`` object to store the results of that work request in a
dictionary. It defaults to the return value of ``id(self)``.
"""
if requestID is None:
self.requestID = id(self)
else:
try:
self.requestID = hash(requestID)
except TypeError:
raise TypeError("requestID must be hashable.")
self.exception = False
self.callback = callback
self.exc_callback = exc_callback
self.callable = callable_
self.args = args or []
self.kwds = kwds or {}
def __str__(self):
return "<WorkRequest id=%s args=%r kwargs=%r exception=%s>" % \
(self.requestID, self.args, self.kwds, self.exception)
class ThreadPool:
"""A thread pool, distributing work requests and collecting results.
See the module docstring for more information.
"""
def __init__(self, num_workers, q_size=0, resq_size=0, poll_timeout=5):
"""Set up the thread pool and start num_workers worker threads.
``num_workers`` is the number of worker threads to start initially.
If ``q_size > 0`` the size of the work *request queue* is limited and
the thread pool blocks when the queue is full and it tries to put
more work requests in it (see ``putRequest`` method), unless you also
use a positive ``timeout`` value for ``putRequest``.
If ``resq_size > 0`` the size of the *results queue* is limited and the
worker threads will block when the queue is full and they try to put
new results in it.
.. warning:
If you set both ``q_size`` and ``resq_size`` to ``!= 0`` there is
            the possibility of a deadlock, when the results queue is not pulled
regularly and too many jobs are put in the work requests queue.
To prevent this, always set ``timeout > 0`` when calling
``ThreadPool.putRequest()`` and catch ``Queue.Full`` exceptions.
"""
self._requests_queue = Queue.Queue(q_size)
self._results_queue = Queue.Queue(resq_size)
self.workers = []
self.dismissedWorkers = []
self.workRequests = {}
self.createWorkers(num_workers, poll_timeout)
def createWorkers(self, num_workers, poll_timeout=5):
"""Add num_workers worker threads to the pool.
        ``poll_timeout`` sets the interval in seconds (int or float) for how
        often threads should check whether they are dismissed, while waiting for
requests.
"""
for i in range(num_workers):
self.workers.append(WorkerThread(self._requests_queue,
self._results_queue, poll_timeout=poll_timeout))
def dismissWorkers(self, num_workers, do_join=False):
"""Tell num_workers worker threads to quit after their current task."""
dismiss_list = []
for i in range(min(num_workers, len(self.workers))):
worker = self.workers.pop()
worker.dismiss()
dismiss_list.append(worker)
if do_join:
for worker in dismiss_list:
worker.join()
else:
self.dismissedWorkers.extend(dismiss_list)
def joinAllDismissedWorkers(self):
"""Perform Thread.join() on all worker threads that have been dismissed.
"""
for worker in self.dismissedWorkers:
worker.join()
self.dismissedWorkers = []
def putRequest(self, request, block=True, timeout=None):
"""Put work request into work queue and save its id for later."""
assert isinstance(request, WorkRequest)
# don't reuse old work requests
assert not getattr(request, 'exception', None)
self._requests_queue.put(request, block, timeout)
self.workRequests[request.requestID] = request
def poll(self, block=False):
"""Process any new results in the queue."""
while True:
# still results pending?
if not self.workRequests:
raise NoResultsPending
# are there still workers to process remaining requests?
elif block and not self.workers:
raise NoWorkersAvailable
try:
# get back next results
request, result = self._results_queue.get(block=block)
                # has an exception occurred?
if request.exception and request.exc_callback:
request.exc_callback(request, result)
# hand results to callback, if any
if request.callback and not \
(request.exception and request.exc_callback):
request.callback(request, result)
del self.workRequests[request.requestID]
except Queue.Empty:
break
def wait(self):
"""Wait for results, blocking until all have arrived."""
while 1:
try:
self.poll(True)
except NoResultsPending:
break
################
# USAGE EXAMPLE
################
if __name__ == '__main__':
import random
import time
# the work the threads will have to do (rather trivial in our example)
def do_something(data):
time.sleep(random.randint(1,5))
result = round(random.random() * data, 5)
# just to show off, we throw an exception once in a while
if result > 5:
raise RuntimeError("Something extraordinary happened!")
return result
# this will be called each time a result is available
def print_result(request, result):
print "**** Result from request #%s: %r" % (request.requestID, result)
# this will be called when an exception occurs within a thread
# this example exception handler does little more than the default handler
def handle_exception(request, exc_info):
return
if not isinstance(exc_info, tuple):
# Something is seriously wrong...
print request
print exc_info
raise SystemExit
print "**** Exception occured in request #%s: %s" % (request.requestID, exc_info)
# assemble the arguments for each job to a list...
data = [1,2,3,4,5,6,7,8,9,0]
# ... and build a WorkRequest object for each item in data
requests = makeRequests(do_something, data, print_result, handle_exception)
# to use the default exception handler, uncomment next line and comment out
# the preceding one.
#requests = makeRequests(do_something, data, print_result)
# or the other form of args_lists accepted by makeRequests: ((,), {})
data = [((random.randint(1,10),), {}) for i in range(20)]
requests.extend(
makeRequests(do_something, data, print_result, handle_exception)
#makeRequests(do_something, data, print_result)
# to use the default exception handler, uncomment next line and comment
# out the preceding one.
)
# we create a pool of 3 worker threads
print "Creating thread pool with 3 worker threads."
main = ThreadPool(3)
# then we put the work requests in the queue...
for req in requests:
main.putRequest(req)
print "Work request #%s added." % req.requestID
# or shorter:
# [main.putRequest(req) for req in requests]
# ...and wait for the results to arrive in the result queue
# by using ThreadPool.wait(). This would block until results for
# all work requests have arrived:
# main.wait()
# instead we can poll for results while doing something else:
i = 0
while True:
try:
time.sleep(0.5)
main.poll()
print "Main thread working...",
print "(active worker threads: %i)" % (threading.activeCount()-1, )
if i == 10:
print "**** Adding 3 more worker threads..."
main.createWorkers(3)
if i == 20:
print "**** Dismissing 2 worker threads..."
main.dismissWorkers(2)
main.joinAllDismissedWorkers()
i += 1
except KeyboardInterrupt:
print "**** Interrupted!"
break
except NoResultsPending:
print "**** No pending results."
break
if main.dismissedWorkers:
print "Joining all dismissed worker threads..."
main.joinAllDismissedWorkers()
print "11111"
| {
"content_hash": "d573f8d9960ca29a2bdc14b8e16098bb",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 89,
"avg_line_length": 37.55847255369928,
"alnum_prop": 0.6269301645802885,
"repo_name": "ptphp/PyLib",
"id": "f49cadcb6daf0a29583d247fc9944be21ba60fd6",
"size": "15761",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/webpy1/src/fetch/threadpool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1523"
},
{
"name": "C++",
"bytes": "7541"
},
{
"name": "CSS",
"bytes": "625731"
},
{
"name": "JavaScript",
"bytes": "4811257"
},
{
"name": "PHP",
"bytes": "34868"
},
{
"name": "Python",
"bytes": "3824172"
},
{
"name": "Ruby",
"bytes": "322"
},
{
"name": "SQL",
"bytes": "685656"
},
{
"name": "Shell",
"bytes": "4143"
}
],
"symlink_target": ""
} |
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_1(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_1(self):
success = True
wd = self.wd
wd.get("https://piexpertonline.power.com/site/login")
wd.find_element_by_id("username-field").click()
wd.find_element_by_id("username-field").clear()
wd.find_element_by_id("username-field").send_keys("[email protected]")
wd.find_element_by_id("password-field").click()
wd.find_element_by_id("password-field").send_keys("\\undefined")
wd.find_element_by_id("login-submit-btn").click()
wd.find_element_by_link_text("Component Library").click()
wd.find_element_by_xpath("//ul[1]/li[2]/span").click()
wd.find_element_by_id("dlgCompSetOk").click()
wd.find_element_by_id("closedlgCancel420").click()
wd.find_element_by_id("logoutLink").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "1d1777c3a2cbf797e7a831db069d15db",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 35.26315789473684,
"alnum_prop": 0.6313432835820896,
"repo_name": "ivanSchistov/Python_tranings_new",
"id": "c0bf76d6ed22d896f6634fbcea34eaaa8308cadb",
"size": "1364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22813"
}
],
"symlink_target": ""
} |
def pr(data, start=0, end=None):
"""
Pretty print a sequence of data items
@param data: the data stream to print
@type data: C{sequence} or C{iterator}
@param start: the start position
@type start: C{int}
@param end: the end position
@type end: C{int}
"""
from pprint import pprint
from itertools import islice
pprint(list(islice(data, start, end)))
def print_string(s, width=70):
"""
Pretty print a string, breaking lines on whitespace
@param s: the string to print, consisting of words and spaces
@type s: C{string}
@param width: the display width
@type width: C{int}
"""
import re
while s:
s = s.strip()
try:
i = s[:width].rindex(' ')
except ValueError:
print s
return
print s[:i]
s = s[i:]
class SortedDict(dict):
"""
    A very rudimentary sorted dictionary, whose main purpose is to
    allow dictionaries to be displayed in a consistent order in
    regression tests. keys(), items(), values(), iter*(), and
    __repr__ all sort their return values before returning them.
    (Note that the sort order for values() does *not* correspond to
    the sort order for keys(); i.e., zip(d.keys(), d.values()) is not
    necessarily equal to d.items().)
"""
def keys(self): return sorted(dict.keys(self))
def items(self): return sorted(dict.items(self))
def values(self): return sorted(dict.values(self))
def iterkeys(self): return iter(sorted(dict.keys(self)))
def iteritems(self): return iter(sorted(dict.items(self)))
def itervalues(self): return iter(sorted(dict.values(self)))
def __iter__(self): return iter(sorted(dict.keys(self)))
    def __repr__(self):
items = ['%s=%s' % t for t in sorted(self.items())]
return '{%s}' % ', '.join(items)
##########################################################################
# EDIT DISTANCE (LEVENSHTEIN)
##########################################################################
def _edit_dist_init(len1, len2):
lev = []
for i in range(len1):
lev.append([0] * len2) # initialize 2-D array to zero
for i in range(len1):
lev[i][0] = i # column 0: 0,1,2,3,4,...
for j in range(len2):
lev[0][j] = j # row 0: 0,1,2,3,4,...
return lev
def _edit_dist_step(lev, i, j, c1, c2):
a = lev[i-1][j ] + 1 # skipping s1[i]
b = lev[i-1][j-1] + (c1 != c2) # matching s1[i] with s2[j]
c = lev[i ][j-1] + 1 # skipping s2[j]
lev[i][j] = min(a,b,c) # pick the cheapest
def edit_dist(s1, s2):
"""
Calculate the Levenshtein edit-distance between two strings.
The edit distance is the number of characters that need to be
substituted, inserted, or deleted, to transform s1 into s2. For
example, transforming "rain" to "shine" requires three steps,
consisting of two substitutions and one insertion:
"rain" -> "sain" -> "shin" -> "shine". These operations could have
been done in other orders, but at least three steps are needed.
@param s1, s2: The strings to be analysed
@type s1, s2: C{string}
@rtype C{int}
"""
# set up a 2-D array
len1 = len(s1); len2 = len(s2)
lev = _edit_dist_init(len1+1, len2+1)
# iterate over the array
for i in range(len1):
for j in range (len2):
_edit_dist_step(lev, i+1, j+1, s1[i], s2[j])
return lev[len1][len2]
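# Hedged self-check (not part of the original module): the "rain" -> "shine"
# walkthrough from the docstring above, plus the trivial identical-string case.
def _edit_dist_example():
    assert edit_dist("rain", "shine") == 3  # two substitutions + one insertion
    assert edit_dist("rain", "rain") == 0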
##########################################################################
# MINIMAL SETS
##########################################################################
class MinimalSet(object):
"""
Find contexts where more than one possible target value can
appear. E.g. if targets are word-initial letters, and contexts
are the remainders of words, then we would like to find cases like
"fat" vs "cat", and "training" vs "draining". If targets are
parts-of-speech and contexts are words, then we would like to find
cases like wind (noun) 'air in rapid motion', vs wind (verb)
'coil, wrap'.
"""
def __init__(self, parameters=None):
"""
Create a new minimal set.
@param parameters: The (context, target, display) tuples for the item
@type parameters: C{list} of C{tuple} of C{string}
"""
self._targets = set() # the contrastive information
self._contexts = set() # what we are controlling for
self._seen = {} # to record what we have seen
self._displays = {} # what we will display
for context, target, display in parameters:
self.add(context, target, display)
def add(self, context, target, display):
"""
Add a new item to the minimal set, having the specified
context, target, and display form.
@param context: The context in which the item of interest appears
@type context: C{string}
@param target: The item of interest
@type target: C{string}
@param display: The information to be reported for each item
@type display: C{string}
"""
# Store the set of targets that occurred in this context
if context not in self._seen:
self._seen[context] = set()
self._seen[context].add(target)
# Keep track of which contexts and targets we have seen
self._contexts.add(context)
self._targets.add(target)
# For a given context and target, store the display form
self._displays[(context, target)] = display
def contexts(self, minimum=2):
"""
Determine which contexts occurred with enough distinct targets.
@param minimum: the minimum number of distinct target forms
        @type minimum: C{int}
        @rtype C{list}
"""
return [c for c in self._contexts if len(self._seen[c]) >= minimum]
def display(self, context, target, default=""):
if self._displays.has_key((context, target)):
return self._displays[(context, target)]
else:
return default
def display_all(self, context):
result = []
for target in self._targets:
x = self.display(context, target)
if x: result.append(x)
return result
def targets(self):
return self._targets
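# Hedged usage sketch (not part of the original module): the word-initial-letter
# example from the MinimalSet docstring, with made-up words.
def _minimal_set_example():
    items = [('at', 'f', 'fat'), ('at', 'c', 'cat'), ('og', 'd', 'dog')]
    contrasts = MinimalSet(items)
    # Only the 'at' context has two distinct targets ('f' and 'c').
    return [contrasts.display_all(c) for c in contrasts.contexts(minimum=2)]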
######################################################################
## Regexp display (thanks to David Mertz)
######################################################################
import re
def re_show(regexp, string):
"""
Search C{string} for substrings matching C{regexp} and wrap
the matches with braces. This is convenient for learning about
regular expressions.
@param regexp: The regular expression.
@param string: The string being matched.
@rtype: C{string}
@return: A string with braces surrounding the matched substrings.
"""
print re.compile(regexp, re.M).sub("{\g<0>}", string.rstrip())
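# Hedged example (not part of the original module): brace-wrapping digit matches.
def _re_show_example():
    re_show(r'\d+', 'area 51, route 66')  # prints: area {51}, route {66}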
##########################################################################
# READ FROM FILE OR STRING
##########################################################################
# recipe from David Mertz
def filestring(f):
if hasattr(f, 'read'):
return f.read()
elif isinstance(f, basestring):
return open(f).read()
else:
raise ValueError, "Must be called with a filename or file-like object"
##########################################################################
# COUNTER, FOR UNIQUE NAMING
##########################################################################
class Counter:
"""
A counter that auto-increments each time its value is read.
"""
def __init__(self, initial_value=0):
self._value = initial_value
def get(self):
self._value += 1
return self._value
| {
"content_hash": "3c25646c5091839d16f745c093c8737e",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 78,
"avg_line_length": 34.51754385964912,
"alnum_prop": 0.5527318932655655,
"repo_name": "rossgoodwin/drgonzo",
"id": "1a5c6a48d04a9cdc9c5cfe03b10e9659337c665b",
"size": "8260",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "en/parser/nltk_lite/utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "167504"
},
{
"name": "CSS",
"bytes": "1694"
},
{
"name": "HTML",
"bytes": "252950"
},
{
"name": "Makefile",
"bytes": "76315"
},
{
"name": "Perl",
"bytes": "265"
},
{
"name": "PostScript",
"bytes": "433017"
},
{
"name": "Python",
"bytes": "1924333"
},
{
"name": "Shell",
"bytes": "95497"
},
{
"name": "XML",
"bytes": "27812"
},
{
"name": "XSLT",
"bytes": "5815"
}
],
"symlink_target": ""
} |
"""Extracts bounding boxes from a list of images, saving them to files.
The images must be in JPG format. The program checks if boxes already
exist, and skips computation for those.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from delf import box_io
from delf import detector
cmd_args = None
# Extension/suffix of produced files.
_BOX_EXT = '.boxes'
_VIZ_SUFFIX = '_viz.jpg'
# Used for plotting boxes.
_BOX_EDGE_COLORS = ['r', 'y', 'b', 'm', 'k', 'g', 'c', 'w']
# Pace to report extraction log.
_STATUS_CHECK_ITERATIONS = 100
def _ReadImageList(list_path):
"""Helper function to read image paths.
Args:
list_path: Path to list of images, one image path per line.
Returns:
image_paths: List of image paths.
"""
with tf.gfile.GFile(list_path, 'r') as f:
image_paths = f.readlines()
image_paths = [entry.rstrip() for entry in image_paths]
return image_paths
def _FilterBoxesByScore(boxes, scores, class_indices, score_threshold):
"""Filter boxes based on detection scores.
Boxes with detection score >= score_threshold are returned.
Args:
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
scores: [N] float array with detection scores.
class_indices: [N] int array with class indices.
score_threshold: Float detection score threshold to use.
Returns:
selected_boxes: selected `boxes`.
selected_scores: selected `scores`.
selected_class_indices: selected `class_indices`.
"""
selected_boxes = []
selected_scores = []
selected_class_indices = []
for i, box in enumerate(boxes):
if scores[i] >= score_threshold:
selected_boxes.append(box)
selected_scores.append(scores[i])
selected_class_indices.append(class_indices[i])
return np.array(selected_boxes), np.array(selected_scores), np.array(
selected_class_indices)
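# Hedged numeric check (not part of the original script): with a 0.5 threshold,
# only the first of these two made-up boxes survives _FilterBoxesByScore.
def _filter_boxes_example():
  boxes = np.array([[0.1, 0.1, 0.5, 0.5], [0.2, 0.2, 0.9, 0.9]])
  scores = np.array([0.9, 0.3])
  class_indices = np.array([1, 2])
  kept_boxes, kept_scores, kept_classes = _FilterBoxesByScore(
      boxes, scores, class_indices, score_threshold=0.5)
  return kept_boxes.shape[0]  # 1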
def _PlotBoxesAndSaveImage(image, boxes, output_path):
"""Plot boxes on image and save to output path.
Args:
image: Numpy array containing image.
boxes: [N, 4] float array denoting bounding box coordinates, in format [top,
left, bottom, right].
output_path: String containing output path.
"""
height = image.shape[0]
width = image.shape[1]
fig, ax = plt.subplots(1)
ax.imshow(image)
for i, box in enumerate(boxes):
scaled_box = [
box[0] * height, box[1] * width, box[2] * height, box[3] * width
]
rect = patches.Rectangle([scaled_box[1], scaled_box[0]],
scaled_box[3] - scaled_box[1],
scaled_box[2] - scaled_box[0],
linewidth=3,
edgecolor=_BOX_EDGE_COLORS[i %
len(_BOX_EDGE_COLORS)],
facecolor='none')
ax.add_patch(rect)
ax.axis('off')
plt.savefig(output_path, bbox_inches='tight')
plt.close(fig)
def main(argv):
if len(argv) > 1:
raise RuntimeError('Too many command-line arguments.')
tf.logging.set_verbosity(tf.logging.INFO)
# Read list of images.
tf.logging.info('Reading list of images...')
image_paths = _ReadImageList(cmd_args.list_images_path)
num_images = len(image_paths)
tf.logging.info('done! Found %d images', num_images)
# Create output directories if necessary.
if not tf.gfile.Exists(cmd_args.output_dir):
tf.gfile.MakeDirs(cmd_args.output_dir)
if cmd_args.output_viz_dir and not tf.gfile.Exists(cmd_args.output_viz_dir):
tf.gfile.MakeDirs(cmd_args.output_viz_dir)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
# Reading list of images.
filename_queue = tf.train.string_input_producer(image_paths, shuffle=False)
reader = tf.WholeFileReader()
_, value = reader.read(filename_queue)
image_tf = tf.image.decode_jpeg(value, channels=3)
image_tf = tf.expand_dims(image_tf, 0)
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
detector_fn = detector.MakeDetector(sess, cmd_args.detector_path)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
start = time.clock()
for i, image_path in enumerate(image_paths):
# Write to log-info once in a while.
if i == 0:
tf.logging.info('Starting to detect objects in images...')
elif i % _STATUS_CHECK_ITERATIONS == 0:
elapsed = (time.clock() - start)
tf.logging.info(
'Processing image %d out of %d, last %d '
'images took %f seconds', i, num_images, _STATUS_CHECK_ITERATIONS,
elapsed)
start = time.clock()
# # Get next image.
im = sess.run(image_tf)
# If descriptor already exists, skip its computation.
base_boxes_filename, _ = os.path.splitext(os.path.basename(image_path))
out_boxes_filename = base_boxes_filename + _BOX_EXT
out_boxes_fullpath = os.path.join(cmd_args.output_dir,
out_boxes_filename)
if tf.gfile.Exists(out_boxes_fullpath):
tf.logging.info('Skipping %s', image_path)
continue
# Extract and save boxes.
(boxes_out, scores_out, class_indices_out) = detector_fn(im)
(selected_boxes, selected_scores,
selected_class_indices) = _FilterBoxesByScore(boxes_out[0],
scores_out[0],
class_indices_out[0],
cmd_args.detector_thresh)
box_io.WriteToFile(out_boxes_fullpath, selected_boxes, selected_scores,
selected_class_indices)
if cmd_args.output_viz_dir:
out_viz_filename = base_boxes_filename + _VIZ_SUFFIX
out_viz_fullpath = os.path.join(cmd_args.output_viz_dir,
out_viz_filename)
_PlotBoxesAndSaveImage(im[0], selected_boxes, out_viz_fullpath)
# Finalize enqueue threads.
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--detector_path',
type=str,
default='/tmp/d2r_frcnn_20190411/',
help="""
Path to exported detector model.
""")
parser.add_argument(
'--detector_thresh',
type=float,
default=.0,
help="""
Detector threshold. Any box with confidence score lower than this is not
returned.
""")
parser.add_argument(
'--list_images_path',
type=str,
default='list_images.txt',
help="""
Path to list of images to undergo object detection.
""")
parser.add_argument(
'--output_dir',
type=str,
default='test_boxes',
help="""
Directory where bounding boxes will be written to. Each image's boxes
will be written to a file with same name, and extension replaced by
.boxes.
""")
parser.add_argument(
'--output_viz_dir',
type=str,
default='',
help="""
Optional. If set, a visualization of the detected boxes overlaid on the
image is produced, and saved to this directory. Each image is saved with
_viz.jpg suffix.
""")
cmd_args, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| {
"content_hash": "f2d80e58c6af91cc5432e932fef5cd7d",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 80,
"avg_line_length": 32.83817427385892,
"alnum_prop": 0.6178923426838514,
"repo_name": "alexgorban/models",
"id": "6e315d178e49141722a3ffe78d6e7e37ce193708",
"size": "8602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/delf/delf/python/examples/extract_boxes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1619012"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "454746"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "16363107"
},
{
"name": "Shell",
"bytes": "144095"
},
{
"name": "Starlark",
"bytes": "148029"
}
],
"symlink_target": ""
} |
'''
Media.py
Calculates the average of two values
Input: two float variables
Output: the average, a float
Author: Fabrício Olivetti de França
Course: Processamento da Informação (Information Processing)
Universidade Federal do ABC
'''
x = float(raw_input("Entre o primeiro valor: "))
y = float(raw_input("Entre o segundo valor: "))
media = (x+y)/2.0
print "A média é: ", media
| {
"content_hash": "3327aa3135f266cda4aa0a2a94a887f0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 48,
"avg_line_length": 22.75,
"alnum_prop": 0.7307692307692307,
"repo_name": "folivetti/PI-UFABC",
"id": "6bbf116bf34d6e72390c278e8f435aea4f1b2c29",
"size": "389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AULA_01/Python/Media.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "15492"
},
{
"name": "Haskell",
"bytes": "17421"
},
{
"name": "Java",
"bytes": "73988"
},
{
"name": "Python",
"bytes": "22639"
}
],
"symlink_target": ""
} |
"""distutils
The main package for the Python Module Distribution Utilities. Normally
used from a setup script as
from distutils.core import setup
setup (...)
"""
# This module should be kept compatible with Python 1.5.2.
__revision__ = "$Id: __init__.py,v 1.23 2002/11/19 13:12:26 akuchling Exp $"
__version__ = "1.0.3"
| {
"content_hash": "5d18013c0d6d6f9e74fa4a537cf991e2",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 76,
"avg_line_length": 22.2,
"alnum_prop": 0.6786786786786787,
"repo_name": "OS2World/APP-INTERNET-torpak_2",
"id": "3a8fab75fce3cb58f5a51122a774535011ab4981",
"size": "333",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Lib/distutils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from barbicanclient import client as barbicanclient
from keystoneauth1 import loading
from oslo_config import cfg
class BarbicanClient(object):
"""Barbican client wrapper so we can encapsulate logic in one place."""
def __init__(self, verify=True):
self._admin_client = None
@property
def admin_client(self):
if not self._admin_client:
# Create connection to API
self._admin_client = self._barbican_admin_init()
return self._admin_client
def _barbican_admin_init(self):
# Import auth_token to have keystone_authtoken settings setup.
auth = loading.load_auth_from_conf_options(
cfg.CONF, 'keystone_authtoken')
sess = loading.load_session_from_conf_options(
cfg.CONF, 'keystone_authtoken', auth=auth)
return barbicanclient.Client(session=sess)
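# Hedged usage sketch (not part of the original module): callers are expected to
# go through the lazily built ``admin_client`` property. The secrets.list() call
# is only an illustration of the python-barbicanclient API.
def _list_secrets_example():
    client = BarbicanClient()
    return client.admin_client.secrets.list()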
| {
"content_hash": "821d132e1934e3e8b22bdd7621581690",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 33.53846153846154,
"alnum_prop": 0.6662844036697247,
"repo_name": "stackforge/solum",
"id": "9c4a4e82d517ddab560d7ec3843b5a2d94bc895a",
"size": "1451",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "solum/common/solum_barbicanclient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "958"
},
{
"name": "Python",
"bytes": "1243294"
},
{
"name": "Shell",
"bytes": "80784"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
import datetime, random, re, os, csv, time
from django.template import Context, Template, loader
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from models import *
from django.forms import ModelForm
from models import *
from ldap_settings import *
from local_ldap import *
from django import forms
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext_lazy as _
attrs_dict={}
from slave_settings import *
from tasks import *
import requests
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def check_ldap(request):
try:
x =get_user_list()
try:
if x[0]!="server_not_found":
return HttpResponse("OK")
else:
raise Http404
except:
raise Http404
except ldap.LDAPError, e:
raise Http404
@csrf_exempt
def api_add_user(request):
if request.method=="POST":
secret_key =request.POST.get("secret_key")
if secret_k ==secret_key:
pass
else:
raise Http404
ip = request.META["REMOTE_ADDR"]
if ip == master_ip:
pass
else:
raise Http404
first_name =request.POST.get("first_name")
last_name = request.POST.get("last_name")
username = request.POST.get("username")
email =request.POST.get("email")
uid = request.POST.get("uid")
if request.POST.get("phone"):
phone = request.POST.get("phone")
else:
phone =" "
password = request.POST.get("password")
x=task_add_user(first_name, last_name, username, email, password, uid, phone)
return HttpResponse("OK")
return HttpResponse("OK")
@csrf_exempt
def api_add_group(request):
if request.method=="POST":
secret_key =request.POST.get("secret_key")
ip = request.META["REMOTE_ADDR"]
print secret_key, ip
if ip == master_ip:
pass
else:
raise Http404
if secret_k == secret_key:
pass
else:
raise Http404
name =request.POST.get("name")
gid =request.POST.get("gid")
print name, gid
task_add_group(name, gid)
return HttpResponse("OK")
return HttpResponse("OK")
@csrf_exempt
def api_delete_user(request):
if request.method=="POST":
secret_key =request.POST.get("secret_key")
ip = request.META["REMOTE_ADDR"]
if ip == master_ip:
pass
else:
raise Http404
if secret_k == secret_key:
pass
else:
raise Http404
user =request.POST.get("user")
print user
task_delete_user(user)
return HttpResponse("OK")
return HttpResponse("OK")
import ast
@csrf_exempt
def api_u2g(request):
    if request.method == "POST":
        secret_key = request.POST.get("secret_key")
        ip = request.META["REMOTE_ADDR"]
        if ip == master_ip:
            pass
        else:
            raise Http404
        if secret_k == secret_key:
            pass
        else:
            raise Http404
        gid = request.POST["gid"]
        usernames = request.POST["usernames"]
        group = LdapGroup.objects.get(gid=int(gid))
        group.usernames = ast.literal_eval(usernames)
        # Persist the updated membership list; without save() the change is lost.
        group.save()
        return HttpResponse("OK")
    return HttpResponse("OK")
@csrf_exempt
def api_delete_group(request):
    if request.method == "POST":
        secret_key = request.POST.get("secret_key")
        ip = request.META["REMOTE_ADDR"]
        if ip == master_ip:
            pass
        else:
            raise Http404
        if secret_k == secret_key:
            pass
        else:
            raise Http404
        group = request.POST.get("group")
        task_delete_group(group)
        return HttpResponse("OK")
    return HttpResponse("OK")
@csrf_exempt
def api_change_password(request):
    if request.method == "POST":
        secret_key = request.POST.get("secret_key")
        ip = request.META["REMOTE_ADDR"]
        if ip == master_ip:
            pass
        else:
            raise Http404
        if secret_k == secret_key:
            pass
        else:
            raise Http404
        user = request.POST.get("user")
        password = request.POST.get("password")
        task_change_password(user, password)
        return HttpResponse("OK")
    return HttpResponse("OK")
@csrf_exempt
def api_upload(request):
    if request.method == "POST":
        secret_k = request.POST.get("secret_k")
        ip = request.META["REMOTE_ADDR"]
        if ip == master_ip:
            pass
        else:
            raise Http404
        handle_uploaded_file(request.FILES['file'])
        return HttpResponse("OK")
    return HttpResponse("OK")
| {
"content_hash": "35744cec4de3160c39ac90be5d5a419a",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 79,
"avg_line_length": 22.41025641025641,
"alnum_prop": 0.6848970251716248,
"repo_name": "netzary/Kaline",
"id": "54ba0c7fe55dfda1f99dc3d6facc0e7c362bd90c",
"size": "4370",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ldapman/api_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4063"
},
{
"name": "HTML",
"bytes": "33085"
},
{
"name": "JavaScript",
"bytes": "496"
},
{
"name": "Python",
"bytes": "110003"
}
],
"symlink_target": ""
} |
import os
import sys
import optparse
import re
import fnmatch
import glob
import subprocess
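# mktypes.py: runs Exuberant Ctags over a project and turns the resulting tag
# list into Vim syntax files (<prefix>_<suffix>.vim, "types_c.vim" by default)
# that declare one ":syntax keyword" group per tag kind, so tagged identifiers
# get their own highlighting. It can optionally build a cscope database too.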
revision = "## RevTag:: r458 ##".strip('# ').replace('RevTag::', 'revision')
field_processor = re.compile(
r'''
^ # Start of the line
(?P<keyword>.*?) # Capture the first field: everything up to the first tab
\t # Field separator: a tab character
.*? # Second field (uncaptured): everything up to the next tab
\t # Field separator: a tab character
(?P<search>.*?) # Any character at all, but as few as necessary (i.e. catch everything up to the ;")
;" # The end of the search specifier (see http://ctags.sourceforge.net/FORMAT)
(?=\t) # There MUST be a tab character after the ;", but we want to match it with zero width
.*\t # There can be other fields before "kind", so catch them here.
# Also catch the tab character from the previous line as there MUST be a tab before the field
(kind:)? # This is the "kind" field; "kind:" is optional
(?P<kind>\w) # The kind is a single character: catch it
(\t|$) # It must be followed either by a tab or by the end of the line
.* # If it is followed by a tab, soak up the rest of the line; replace with the syntax keyword line
''', re.VERBOSE)
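# Illustrative tags line this pattern is written for (tabs shown as <TAB>;
# the optional trailing fields and their order may vary between ctags runs):
#   main<TAB>src/main.c<TAB>/^int main(void)$/;"<TAB>kind:f<TAB>line:10
# Here 'keyword' captures "main", 'search' captures the /^...$/ pattern and
# 'kind' captures the single letter "f".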
field_keyword = re.compile(r'syntax keyword (?P<kind>CTags\w+) (?P<keyword>.*)')
field_const = re.compile(r'\bconst\b')
vim_synkeyword_arguments = [
'contains',
'oneline',
'fold',
'display',
'extend',
'contained',
'containedin',
'nextgroup',
'transparent',
'skipwhite',
'skipnl',
'skipempty'
]
ctags_exe = 'ctags'
cscope_exe = 'cscope'
# Used for timing a function; from http://www.daniweb.com/code/snippet368.html
import time
def print_timing(func):
def wrapper(*arg):
t1 = time.time()
res = func(*arg)
t2 = time.time()
print '%s took %0.3f ms' % (func.func_name, (t2-t1)*1000.0)
return res
return wrapper
def GetCommandArgs(options):
	Configuration = {}
	Configuration['CTAGS_OPTIONS'] = ''
	if options.ctags_file:
		Configuration['CTAGS_OPTIONS'] += ' -f %s ' % options.ctags_file
	if options.recurse:
		Configuration['CTAGS_OPTIONS'] += '--recurse'
		if options.include_locals:
			# Ask ctags to also emit local-variable tags for each language that supports them
			Configuration['CTAGS_OPTIONS'] += ' --c-kinds=+l'
			Configuration['CTAGS_OPTIONS'] += ' --c++-kinds=+l'
			Configuration['CTAGS_OPTIONS'] += ' --c#-kinds=+l'
			Configuration['CTAGS_OPTIONS'] += ' --java-kinds=+l'
		Configuration['CTAGS_FILES'] = ['.']
	else:
		if options.include_locals:
			Configuration['CTAGS_OPTIONS'] += ' --c-kinds=+l'
		Configuration['CTAGS_FILES'] = glob.glob('*')
	if not options.include_docs:
		Configuration['CTAGS_OPTIONS'] += r" --exclude=docs --exclude=Documentation"
	return Configuration
key_regexp = re.compile('^(?P<keyword>.*?)\t(?P<remainder>.*\t(?P<kind>[a-zA-Z])(?:\t|$).*)')
def ctags_key(ctags_line):
match = key_regexp.match(ctags_line)
if match is None:
return ctags_line
return match.group('keyword') + match.group('kind') + match.group('remainder')
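# Example: for the hypothetical line "main<TAB>src/main.c<TAB>/^int main(void)$/;"<TAB>f"
# the sort key becomes "main" + "f" + the remaining fields, so entries end up
# ordered by tag name first, then kind, then the rest of the line (file name,
# search pattern), matching the comment in CreateTagsFile below.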
def CreateCScopeFile(options):
cscope_options = '-b'
run_cscope = False
if options.build_cscopedb:
run_cscope = True
if os.path.exists('cscope.files'):
if options.build_cscopedb_if_file_exists:
run_cscope = True
else:
cscope_options += 'R'
if run_cscope:
print "Spawning cscope"
os.spawnl(os.P_NOWAIT, cscope_exe, 'cscope', cscope_options)
#@print_timing
def CreateTagsFile(config, languages, options):
print "Generating Tags"
ctags_languages = languages[:]
if 'c' in ctags_languages:
ctags_languages.append('c++')
ctags_cmd = '%s %s %s %s' % (ctags_exe, config['CTAGS_OPTIONS'], "--languages=" + ",".join(ctags_languages), " ".join(config['CTAGS_FILES']))
# fh = open('ctags_cmd.txt', 'w')
# fh.write(ctags_cmd)
# fh.write('\n')
# fh.close()
#os.system(ctags_cmd)
subprocess.call(ctags_cmd, shell = (os.name != 'nt'))
tagFile = open(options.ctags_file, 'r')
tagLines = [line.strip() for line in tagFile]
tagFile.close()
# Also sort the file a bit better (tag, then kind, then filename)
tagLines.sort(key=ctags_key)
tagFile = open(options.ctags_file, 'w')
for line in tagLines:
tagFile.write(line + "\n")
tagFile.close()
def GetLanguageParameters(lang):
params = {}
# Default value for iskeyword
params['iskeyword'] = '@,48-57,_,192-255'
if lang == 'c':
params['suffix'] = 'c'
params['name'] = 'c'
params['extensions'] = r'(c|cc|cpp|h|hpp|cxx|hxx)'
elif lang == 'python':
params['suffix'] = 'py'
params['name'] = 'python'
params['extensions'] = r'pyw?'
elif lang == 'ruby':
params['suffix'] = 'ruby'
params['name'] = 'ruby'
params['extensions'] = 'rb'
elif lang == 'java':
params['suffix'] = 'java'
params['name'] = 'java'
params['extensions'] = 'java'
elif lang == 'perl':
params['suffix'] = 'pl'
params['name'] = 'perl'
params['extensions'] = r'p[lm]'
elif lang == 'vhdl':
params['suffix'] = 'vhdl'
params['name'] = 'vhdl'
params['extensions'] = r'vhdl?'
elif lang == 'php':
params['suffix'] = 'php'
params['name'] = 'php'
params['extensions'] = r'php'
elif lang == 'c#':
params['suffix'] = 'cs'
params['name'] = 'c#'
params['extensions'] = 'cs'
else:
raise AttributeError('Language not recognised %s' % lang)
return params
def GenerateValidKeywordRange(iskeyword):
ValidKeywordSets = iskeyword.split(',')
rangeMatcher = re.compile('^(?P<from>(?:\d+|\S))-(?P<to>(?:\d+|\S))$')
	falseRangeMatcher = re.compile(r'^\^(?P<from>(?:\d+|\S))-(?P<to>(?:\d+|\S))$')
validList = []
for valid in ValidKeywordSets:
m = rangeMatcher.match(valid)
fm = falseRangeMatcher.match(valid)
if valid == '@':
for ch in [chr(i) for i in range(0,256)]:
if ch.isalpha():
validList.append(ch)
elif m is not None:
# We have a range of ascii values
if m.group('from').isdigit():
rangeFrom = int(m.group('from'))
else:
rangeFrom = ord(m.group('from'))
if m.group('to').isdigit():
rangeTo = int(m.group('to'))
else:
rangeTo = ord(m.group('to'))
validRange = range(rangeFrom, rangeTo+1)
for ch in [chr(i) for i in validRange]:
validList.append(ch)
elif fm is not None:
# We have a range of ascii values: remove them!
if fm.group('from').isdigit():
rangeFrom = int(fm.group('from'))
else:
rangeFrom = ord(fm.group('from'))
if fm.group('to').isdigit():
rangeTo = int(fm.group('to'))
else:
rangeTo = ord(fm.group('to'))
validRange = range(rangeFrom, rangeTo+1)
for ch in [chr(i) for i in validRange]:
for i in range(validList.count(ch)):
validList.remove(ch)
elif len(valid) == 1:
# Just a char
validList.append(valid)
else:
raise ValueError('Unrecognised iskeyword part: ' + valid)
return validList
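# Example: the default iskeyword value '@,48-57,_,192-255' expands to the
# ASCII letters (the '@' entry, via isalpha in the default locale), the digits
# 0-9 (codes 48-57), the underscore, and the accented Latin-1 characters in
# the 192-255 range; a part such as '^48-57' would remove the digits again.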
def IsValidKeyword(keyword, iskeyword):
for char in keyword:
if not char in iskeyword:
return False
return True
#@print_timing
def CreateTypesFile(config, Parameters, options):
outfile = '%s_%s.vim' % (options.types_prefix, Parameters['suffix'])
print "Generating " + outfile
lineMatcher = re.compile(r'^.*?\t[^\t]*\.(?P<extension>' + Parameters['extensions'] + ')\t')
#p = os.popen(ctags_cmd, "r")
p = open(options.ctags_file, "r")
if options.include_locals:
LocalTagType = ',CTagsLocalVariable'
else:
LocalTagType = ''
KindList = GetKindList()[Parameters['name']]
ctags_entries = []
while 1:
line = p.readline()
if not line:
break
if not lineMatcher.match(line):
continue
m = field_processor.match(line.strip())
if m is not None:
try:
vimmed_line = 'syntax keyword ' + KindList['ctags_' + m.group('kind')] + ' ' + m.group('keyword')
if options.parse_constants and (Parameters['suffix'] == 'c') and (m.group('kind') == 'v'):
if field_const.search(m.group('search')) is not None:
vimmed_line = vimmed_line.replace('CTagsGlobalVariable', 'CTagsConstant')
if Parameters['suffix'] != 'c' or m.group('kind') != 'p':
ctags_entries.append(vimmed_line)
except KeyError:
ctags_entries.append('''" Skipping unrecognised kind '%c' ''' % (m.group('kind'),))
p.close()
# Essentially a uniq() function
ctags_entries = dict.fromkeys(ctags_entries).keys()
# Sort the list
ctags_entries.sort()
if len(ctags_entries) == 0:
print "No tags found"
return
keywordDict = {}
for line in ctags_entries:
m = field_keyword.match(line)
if m is not None:
if not keywordDict.has_key(m.group('kind')):
keywordDict[m.group('kind')] = []
keywordDict[m.group('kind')].append(m.group('keyword'))
if options.check_keywords:
iskeyword = GenerateValidKeywordRange(Parameters['iskeyword'])
matchEntries = []
vimtypes_entries = []
clear_string = 'silent! syn clear '
patternCharacters = "/@#':"
charactersToEscape = '\\' + '~[]*.$^'
if not options.include_locals:
remove_list = []
for key, value in KindList.iteritems():
if value == 'CTagsLocalVariable':
remove_list.append(key)
for key in remove_list:
try:
del(KindList[key])
except KeyError:
pass
UsedTypes = KindList.values()
clear_string += " ".join(UsedTypes)
vimtypes_entries.append(clear_string)
# Specified highest priority first
Priority = [
'CTagsNamespace', 'CTagsClass', 'CTagsDefinedName',
'CTagsType', 'CTagsMethod', 'CTagsFunction',
'CTagsEnumerationValue', 'CTagsEnumeratorName',
'CTagsConstant', 'CTagsGlobalVariable',
'CTagsUnion', 'CTagsProperty', 'CTagsMember',
'CTagsStructure',
]
# Reverse the list as highest priority should be last!
Priority.reverse()
typeList = sorted(keywordDict.keys())
# Reorder type list according to sort order
allTypes = []
for thisType in Priority:
if thisType in typeList:
allTypes.append(thisType)
typeList.remove(thisType)
for thisType in typeList:
allTypes.append(thisType)
#print allTypes
for thisType in allTypes:
if thisType not in UsedTypes:
continue
keystarter = 'syntax keyword ' + thisType
keycommand = keystarter
for keyword in keywordDict[thisType]:
if options.check_keywords:
# In here we should check that the keyword only matches
# vim's \k parameter (which will be different for different
# languages). This is quite slow so is turned off by
# default; however, it is useful for some things where the
# default generated file contains a lot of rubbish. It may
# be worth optimising IsValidKeyword at some point.
if not IsValidKeyword(keyword, iskeyword):
matchDone = False
for patChar in patternCharacters:
if keyword.find(patChar) == -1:
escapedKeyword = keyword
for ch in charactersToEscape:
escapedKeyword = escapedKeyword.replace(ch, '\\' + ch)
if not options.skip_matches:
matchEntries.append('syntax match ' + thisType + ' ' + patChar + escapedKeyword + patChar)
matchDone = True
break
if not matchDone:
print "Skipping keyword '" + keyword + "'"
continue
if keyword.lower() in vim_synkeyword_arguments:
if not options.skip_vimkeywords:
matchEntries.append('syntax match ' + thisType + ' /' + keyword + '/')
continue
temp = keycommand + " " + keyword
if len(temp) >= 512:
vimtypes_entries.append(keycommand)
keycommand = keystarter
keycommand = keycommand + " " + keyword
if keycommand != keystarter:
vimtypes_entries.append(keycommand)
# Essentially a uniq() function
matchEntries = dict.fromkeys(matchEntries).keys()
# Sort the list
matchEntries.sort()
vimtypes_entries.append('')
for thisMatch in matchEntries:
vimtypes_entries.append(thisMatch)
AddList = 'add='
for thisType in allTypes:
if thisType in UsedTypes:
if AddList != 'add=':
AddList += ','
AddList += thisType;
if Parameters['suffix'] in ['c',]:
vimtypes_entries.append('')
vimtypes_entries.append("if exists('b:hlrainbow') && !exists('g:nohlrainbow')")
vimtypes_entries.append('\tsyn cluster cBracketGroup ' + AddList + LocalTagType)
vimtypes_entries.append('\tsyn cluster cCppBracketGroup ' + AddList + LocalTagType)
vimtypes_entries.append('\tsyn cluster cCurlyGroup ' + AddList + LocalTagType)
vimtypes_entries.append('\tsyn cluster cParenGroup ' + AddList + LocalTagType)
vimtypes_entries.append('\tsyn cluster cCppParenGroup ' + AddList + LocalTagType)
vimtypes_entries.append('endif')
if Parameters['suffix'] in ['java',]:
vimtypes_entries.append('')
vimtypes_entries.append('syn cluster javaTop ' + AddList + LocalTagType)
try:
fh = open(outfile, 'wb')
except IOError:
sys.stderr.write("ERROR: Couldn't create %s\n" % (outfile))
sys.exit(1)
try:
for line in vimtypes_entries:
fh.write(line)
fh.write('\n')
except IOError:
sys.stderr.write("ERROR: Couldn't write %s contents\n" % (outfile))
sys.exit(1)
finally:
fh.close()
def main():
parser = optparse.OptionParser(version=("Types File Creator (%%prog) %s" % revision))
parser.add_option('-r','-R','--recurse',
action="store_true",
default=False,
dest="recurse",
help="Recurse into subdirectories")
parser.add_option('--ctags-file',
action='store',
default='tags',
dest='ctags_file',
help='CTAGS output filename')
parser.add_option('--types-prefix',
action='store',
default='types',
dest='types_prefix',
help='Vim Types file prefix')
parser.add_option('--ctags-dir',
action='store',
default=None,
dest='ctags_dir',
type='string',
help='CTAGS Executable Directory')
parser.add_option('--include-docs',
action='store_true',
default=False,
dest='include_docs',
help='Include docs or Documentation directory (stripped by default for speed)')
parser.add_option('--do-not-check-keywords',
action='store_false',
default=True,
dest='check_keywords',
help="Do not check validity of keywords (for speed)")
parser.add_option('--include-invalid-keywords-as-matches',
action='store_false',
default=True,
dest='skip_matches',
help='Include invalid keywords as regular expression matches (may slow it loading)')
parser.add_option('--exclude-vim-keywords',
action='store_true',
default=False,
dest='skip_vimkeywords',
help="Don't include Vim keywords (they have to be matched with regular expression matches, which is slower)")
parser.add_option('--do-not-analyse-constants',
action='store_false',
default=True,
dest='parse_constants',
help="Do not treat constants as separate entries")
parser.add_option('--include-language',
action='append',
dest='languages',
type='string',
default=[],
help='Only include specified languages')
parser.add_option('--build-cscopedb',
action='store_true',
default=False,
dest='build_cscopedb',
help="Also build a cscope database")
parser.add_option('--build-cscopedb-if-cscope-file-exists',
action='store_true',
default=False,
dest='build_cscopedb_if_file_exists',
help="Also build a cscope database if cscope.files exists")
parser.add_option('--cscope-dir',
action='store',
default=None,
dest='cscope_dir',
type='string',
help='CSCOPE Executable Directory')
parser.add_option('--include-locals',
action='store_true',
default=False,
dest='include_locals',
help='Include local variables in the database')
parser.add_option('--use-existing-tagfile',
action='store_true',
default=False,
dest='use_existing_tagfile',
help="Do not generate tags if a tag file already exists")
options, remainder = parser.parse_args()
if options.ctags_dir is not None:
global ctags_exe
ctags_exe = os.path.join(options.ctags_dir, 'ctags')
if options.cscope_dir is not None:
global cscope_exe
cscope_exe = options.cscope_dir + '/' + 'cscope'
Configuration = GetCommandArgs(options)
CreateCScopeFile(options)
full_language_list = ['c', 'java', 'perl', 'python', 'ruby', 'vhdl', 'php', 'c#']
if len(options.languages) == 0:
# Include all languages
language_list = full_language_list
else:
language_list = [i for i in full_language_list if i in options.languages]
if options.use_existing_tagfile and not os.path.exists(options.ctags_file):
options.use_existing_tagfile = False
if not options.use_existing_tagfile:
CreateTagsFile(Configuration, language_list, options)
for language in language_list:
Parameters = GetLanguageParameters(language)
CreateTypesFile(Configuration, Parameters, options)
def GetKindList():
LanguageKinds = {}
LanguageKinds['asm'] = \
{
'ctags_d': 'CTagsDefinedName',
'ctags_l': 'CTagsLabel',
'ctags_m': 'CTagsMacro',
'ctags_t': 'CTagsType',
}
LanguageKinds['asp'] = \
{
'ctags_c': 'CTagsConstant',
'ctags_f': 'CTagsFunction',
'ctags_s': 'CTagsSubroutine',
'ctags_v': 'CTagsVariable',
}
LanguageKinds['awk'] = \
{
'ctags_f': 'CTagsFunction',
}
LanguageKinds['basic'] = \
{
'ctags_c': 'CTagsConstant',
'ctags_f': 'CTagsFunction',
'ctags_l': 'CTagsLabel',
'ctags_t': 'CTagsType',
'ctags_v': 'CTagsVariable',
'ctags_g': 'CTagsEnumeration',
}
LanguageKinds['beta'] = \
{
'ctags_f': 'CTagsFragment',
'ctags_p': 'CTagsPattern',
'ctags_s': 'CTagsSlot',
'ctags_v': 'CTagsVirtualPattern',
}
LanguageKinds['c'] = \
{
'ctags_c': 'CTagsClass',
'ctags_d': 'CTagsDefinedName',
'ctags_e': 'CTagsEnumerationValue',
'ctags_f': 'CTagsFunction',
'ctags_g': 'CTagsEnumeratorName',
'ctags_k': 'CTagsConstant',
'ctags_l': 'CTagsLocalVariable',
'ctags_m': 'CTagsMember',
'ctags_n': 'CTagsNamespace',
'ctags_p': 'CTagsFunction',
'ctags_s': 'CTagsStructure',
'ctags_t': 'CTagsType',
'ctags_u': 'CTagsUnion',
'ctags_v': 'CTagsGlobalVariable',
'ctags_x': 'CTagsExtern',
'ctags_F': 'CTagsFile',
}
LanguageKinds['c++'] = \
{
'ctags_c': 'CTagsClass',
'ctags_d': 'CTagsDefinedName',
'ctags_e': 'CTagsEnumerationValue',
'ctags_f': 'CTagsFunction',
'ctags_g': 'CTagsEnumerationName',
'ctags_k': 'CTagsConstant',
'ctags_l': 'CTagsLocalVariable',
'ctags_m': 'CTagsMember',
'ctags_n': 'CTagsNamespace',
'ctags_p': 'CTagsFunction',
'ctags_s': 'CTagsStructure',
'ctags_t': 'CTagsType',
'ctags_u': 'CTagsUnion',
'ctags_v': 'CTagsGlobalVariable',
'ctags_x': 'CTagsExtern',
'ctags_F': 'CTagsFile',
}
LanguageKinds['c#'] = \
{
'ctags_c': 'CTagsClass',
'ctags_d': 'CTagsDefinedName',
'ctags_e': 'CTagsEnumerationValue',
'ctags_E': 'CTagsEvent',
'ctags_f': 'CTagsField',
'ctags_g': 'CTagsEnumerationName',
'ctags_i': 'CTagsInterface',
'ctags_l': 'CTagsLocalVariable',
'ctags_m': 'CTagsMethod',
'ctags_n': 'CTagsNamespace',
'ctags_p': 'CTagsProperty',
'ctags_s': 'CTagsStructure',
'ctags_t': 'CTagsType',
}
LanguageKinds['cobol'] = \
{
'ctags_d': 'CTagsData',
'ctags_f': 'CTagsFileDescription',
'ctags_g': 'CTagsGroupItem',
'ctags_p': 'CTagsParagraph',
'ctags_P': 'CTagsProgram',
'ctags_s': 'CTagsSection',
}
LanguageKinds['eiffel'] = \
{
'ctags_c': 'CTagsClass',
'ctags_f': 'CTagsFeature',
'ctags_l': 'CTagsEntity',
}
LanguageKinds['erlang'] = \
{
'ctags_d': 'CTagsDefinedName',
'ctags_f': 'CTagsFunction',
'ctags_m': 'CTagsModule',
'ctags_r': 'CTagsRecord',
}
LanguageKinds['fortran'] = \
{
'ctags_b': 'CTagsBlockData',
'ctags_c': 'CTagsCommonBlocks',
'ctags_e': 'CTagsEntryPoint',
'ctags_f': 'CTagsFunction',
'ctags_i': 'CTagsInterfaceComponent',
'ctags_k': 'CTagsTypeComponent',
'ctags_l': 'CTagsLabel',
'ctags_L': 'CTagsLocalVariable',
'ctags_m': 'CTagsModule',
'ctags_n': 'CTagsNamelist',
'ctags_p': 'CTagsProgram',
'ctags_s': 'CTagsSubroutine',
'ctags_t': 'CTagsType',
'ctags_v': 'CTagsGlobalVariable',
}
LanguageKinds['html'] = \
{
'ctags_a': 'CTagsAnchor',
'ctags_f': 'CTagsFunction',
}
LanguageKinds['java'] = \
{
'ctags_c': 'CTagsClass',
'ctags_e': 'CTagsEnumerationValue',
'ctags_f': 'CTagsField',
'ctags_g': 'CTagsEnumeratorName',
'ctags_i': 'CTagsInterface',
'ctags_l': 'CTagsLocalVariable',
'ctags_m': 'CTagsMethod',
'ctags_p': 'CTagsPackage',
}
LanguageKinds['javascript'] = \
{
'ctags_f': 'CTagsFunction',
'ctags_c': 'CTagsClass',
'ctags_m': 'CTagsMethod',
'ctags_p': 'CTagsProperty',
'ctags_v': 'CTagsGlobalVariable',
}
LanguageKinds['lisp'] = \
{
'ctags_f': 'CTagsFunction',
}
LanguageKinds['lua'] = \
{
'ctags_f': 'CTagsFunction',
}
LanguageKinds['make'] = \
{
'ctags_m': 'CTagsFunction',
}
LanguageKinds['pascal'] = \
{
'ctags_f': 'CTagsFunction',
'ctags_p': 'CTagsFunction',
}
LanguageKinds['perl'] = \
{
'ctags_c': 'CTagsGlobalConstant',
'ctags_f': 'CTagsFormat',
'ctags_l': 'CTagsLabel',
'ctags_p': 'CTagsPackage',
'ctags_s': 'CTagsFunction',
'ctags_d': 'CTagsFunction',
}
LanguageKinds['php'] = \
{
'ctags_c': 'CTagsClass',
'ctags_i': 'CTagsInterface',
'ctags_d': 'CTagsGlobalConstant',
'ctags_f': 'CTagsFunction',
'ctags_v': 'CTagsGlobalVariable',
'ctags_j': 'CTagsFunction',
}
LanguageKinds['python'] = \
{
'ctags_c': 'CTagsClass',
'ctags_f': 'CTagsFunction',
'ctags_i': 'CTagsImport',
'ctags_m': 'CTagsMember',
'ctags_v': 'CTagsGlobalVariable',
}
LanguageKinds['rexx'] = \
{
'ctags_s': 'CTagsFunction',
}
LanguageKinds['ruby'] = \
{
'ctags_c': 'CTagsClass',
'ctags_f': 'CTagsMethod',
'ctags_m': 'CTagsModule',
'ctags_F': 'CTagsSingleton',
}
LanguageKinds['scheme'] = \
{
'ctags_f': 'CTagsFunction',
'ctags_s': 'CTagsSet',
}
LanguageKinds['sh'] = \
{
'ctags_f': 'CTagsFunction',
'ctags_F': 'CTagsFile',
}
LanguageKinds['slang'] = \
{
'ctags_f': 'CTagsFunction',
'ctags_n': 'CTagsNamespace',
}
LanguageKinds['sml'] = \
{
'ctags_e': 'CTagsException',
'ctags_f': 'CTagsFunction',
'ctags_c': 'CTagsFunctionObject',
'ctags_s': 'CTagsSignature',
'ctags_r': 'CTagsStructure',
'ctags_t': 'CTagsType',
'ctags_v': 'CTagsGlobalVariable',
}
LanguageKinds['sql'] = \
{
'ctags_c': 'CTagsCursor',
'ctags_d': 'CTagsFunction',
'ctags_f': 'CTagsFunction',
'ctags_F': 'CTagsField',
'ctags_l': 'CTagsLocalVariable',
'ctags_L': 'CTagsLabel',
'ctags_P': 'CTagsPackage',
'ctags_p': 'CTagsFunction',
'ctags_r': 'CTagsRecord',
'ctags_s': 'CTagsType',
'ctags_t': 'CTagsTable',
'ctags_T': 'CTagsTrigger',
'ctags_v': 'CTagsGlobalVariable',
'ctags_i': 'CTagsIndex',
'ctags_e': 'CTagsEvent',
'ctags_U': 'CTagsPublication',
'ctags_R': 'CTagsService',
'ctags_D': 'CTagsDomain',
'ctags_V': 'CTagsView',
'ctags_n': 'CTagsSynonym',
}
LanguageKinds['tcl'] = \
{
'ctags_c': 'CTagsClass',
'ctags_m': 'CTagsMethod',
'ctags_p': 'CTagsFunction',
}
LanguageKinds['vera'] = \
{
'ctags_c': 'CTagsClass',
'ctags_d': 'CTagsDefinedName',
'ctags_e': 'CTagsEnumerationValue',
'ctags_f': 'CTagsFunction',
'ctags_g': 'CTagsEnumeratorName',
'ctags_l': 'CTagsLocalVariable',
'ctags_m': 'CTagsMember',
'ctags_p': 'CTagsProgram',
'ctags_P': 'CTagsFunction',
'ctags_t': 'CTagsTask',
'ctags_T': 'CTagsType',
'ctags_v': 'CTagsGlobalVariable',
'ctags_x': 'CTagsExtern',
}
LanguageKinds['verilog'] = \
{
'ctags_c': 'CTagsGlobalConstant',
'ctags_e': 'CTagsEvent',
'ctags_f': 'CTagsFunction',
'ctags_m': 'CTagsModule',
'ctags_n': 'CTagsNetType',
'ctags_p': 'CTagsPort',
'ctags_r': 'CTagsRegisterType',
'ctags_t': 'CTagsTask',
}
LanguageKinds['vhdl'] = \
{
'ctags_c': 'CTagsGlobalConstant',
'ctags_t': 'CTagsType',
'ctags_T': 'CTagsTypeComponent',
'ctags_r': 'CTagsRecord',
'ctags_e': 'CTagsEntity',
'ctags_C': 'CTagsComponent',
'ctags_d': 'CTagsPrototype',
'ctags_f': 'CTagsFunction',
'ctags_p': 'CTagsFunction',
'ctags_P': 'CTagsPackage',
'ctags_l': 'CTagsLocalVariable',
}
LanguageKinds['vim'] = \
{
'ctags_a': 'CTagsAutoCommand',
'ctags_c': 'CTagsCommand',
'ctags_f': 'CTagsFunction',
'ctags_m': 'CTagsMap',
'ctags_v': 'CTagsGlobalVariable',
}
LanguageKinds['yacc'] = \
{
'ctags_l': 'CTagsLabel',
}
return LanguageKinds
if __name__ == "__main__":
main()
# vim: noet ts=4 sw=4
| {
"content_hash": "c52fade3baccb078aa7314e687af16fc",
"timestamp": "",
"source": "github",
"line_count": 878,
"max_line_length": 142,
"avg_line_length": 27.361047835990888,
"alnum_prop": 0.6497106939183283,
"repo_name": "lcycon/.vim",
"id": "b6ef292730d25ce3350f00e1b73437ba4b63e03b",
"size": "24143",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mktypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "145156"
},
{
"name": "C#",
"bytes": "11528"
},
{
"name": "C++",
"bytes": "31791"
},
{
"name": "Java",
"bytes": "135131"
},
{
"name": "Python",
"bytes": "64099"
},
{
"name": "Ruby",
"bytes": "59796"
},
{
"name": "VimL",
"bytes": "1150713"
}
],
"symlink_target": ""
} |
"""
Simple timer class
"""
class IannaTimer():
def __init__ (self):
self.timer = 0
self.active = False
def activate (self):
self.active = True
def deactivate (self):
self.active = False
def isactive (self):
return self.active
def tick(self):
if self.active:
self.timer = self.timer - 1
if self.timer == 0:
self.deactivate()
def setvalue(self,value):
self.timer = value
def getvalue(self):
return self.timer
def reset(self):
self.timer = 0
global_timer = IannaTimer()
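# Usage sketch (hypothetical values): arm the shared timer for 50 ticks and
# call tick() once per game update; it deactivates itself on reaching zero.
#   global_timer.setvalue(50)
#   global_timer.activate()
#   global_timer.tick()   # called repeatedly by the game loop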
| {
"content_hash": "98321a9796217cd7ccc1ee33523d44e1",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 30,
"avg_line_length": 15.235294117647058,
"alnum_prop": 0.6447876447876448,
"repo_name": "fjpena/sword-of-ianna-zx",
"id": "5a977126ebe0d7fd411e59fa92f21819d6e82959",
"size": "518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_src/timer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2278532"
},
{
"name": "Batchfile",
"bytes": "747"
},
{
"name": "C",
"bytes": "896"
},
{
"name": "Makefile",
"bytes": "15627"
},
{
"name": "Python",
"bytes": "197969"
},
{
"name": "Visual Basic",
"bytes": "240"
}
],
"symlink_target": ""
} |
try:
import ConfigParser as configparser
except ImportError:
# python 3
import configparser
import os.path
from bash import bash
class bash_no_errors(bash):
def bash(self, cmd):
super(bash_no_errors, self).bash(cmd)
if self.stderr:
raise Exception(self.stderr)
return self
def get_config_file():
if os.path.isfile('tox.ini'):
return 'tox.ini'
if os.path.isfile('setup.cfg'):
return 'setup.cfg'
else:
# should never reach here
return 'tox.ini'
def get_files(commit_only=True, copy_dest=None):
"Get copies of files for analysis."
if commit_only:
real_files = bash(
"git diff --cached --name-status | "
"grep -v -E '^D' | "
"awk '{ print ( $(NF) ) }' "
).value().strip()
else:
real_files = bash(
"git ls-tree --name-only --full-tree -r HEAD"
).value().strip()
if real_files:
return create_fake_copies(real_files.split('\n'), copy_dest)
return []
def create_fake_copies(files, destination):
"""
Create copies of the given list of files in the destination given.
Creates copies of the actual files to be committed using
git show :<filename>
Return a list of destination files.
"""
dest_files = []
for filename in files:
leaf_dest_folder = os.path.join(destination, os.path.dirname(filename))
if not os.path.exists(leaf_dest_folder):
os.makedirs(leaf_dest_folder)
dest_file = os.path.join(destination, filename)
bash("git show :{filename} > {dest_file}".format(
filename=filename,
dest_file=dest_file)
)
dest_files.append(os.path.realpath(dest_file))
return dest_files
def filter_python_files(files):
"Get all python files from the list of files."
py_files = []
for f in files:
# If we end in .py, or if we don't have an extension and file says that
# we are a python script, then add us to the list
extension = os.path.splitext(f)[-1]
if extension:
if extension == '.py':
py_files.append(f)
elif 'python' in open(f, 'r').readline():
py_files.append(f)
elif 'python script' in bash('file {}'.format(f)).value().lower():
py_files.append(f)
return py_files
class HookConfig(object):
def __init__(self, config_filename):
self.config_filename = config_filename
self._config = {}
def get_file(self):
return open(self.config_filename)
@property
def config(self):
if not self._config and os.path.exists(self.config_filename):
c = configparser.SafeConfigParser()
c.readfp(self.get_file())
try:
self._config = dict(c.items('captainhook'))
except configparser.NoSectionError:
pass
return self._config
def is_enabled(self, plugin, default='off'):
setting = self.configuration(plugin)[0]
return setting == 'on' or (setting == 'default' and default == 'on')
def arguments(self, plugin):
return self.configuration(plugin)[1].strip()
def configuration(self, plugin):
"""
Get plugin configuration.
Return a tuple of (on|off|default, args)
"""
conf = self.config.get(plugin, "default;").split(';')
if len(conf) == 1:
conf.append('')
return tuple(conf)
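# Example of a hypothetical "captainhook" section in tox.ini or setup.cfg:
#   [captainhook]
#   flake8 = on;--max-line-length=100
# With that in place, HookConfig('tox.ini').configuration('flake8') returns
# ('on', '--max-line-length=100'), is_enabled('flake8') is True and
# arguments('flake8') yields the argument string.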
| {
"content_hash": "a7c424de9e6366b09e1f9eae79ffd5be",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 27.84251968503937,
"alnum_prop": 0.5789027149321267,
"repo_name": "alexcouper/captainhook",
"id": "8e58854c28fc07b306e84c4a1a38c5c1e5e29fdb",
"size": "3620",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "captainhook/checkers/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gherkin",
"bytes": "4161"
},
{
"name": "Python",
"bytes": "36044"
}
],
"symlink_target": ""
} |
"""
Render a Versa model as [Mermaid](https://mermaid-js.github.io/)
Note: you'll probably want something like mermaid-cli
"""
# Need npm to install mermaid-cli, so see: https://nodejs.org/en/
import sys
from slugify import slugify # pip install python-slugify
from amara3 import iri
from versa import I, VERSA_BASEIRI, ORIGIN, RELATIONSHIP, TARGET, VLABEL_REL, VTYPE_REL
from versa.util import all_origins, labels
__all__ = ['parse', 'parse_iter', 'write',
# Non-standard
]
TAG_MAX_STEM_LENGTH = 12
def lookup_tag(obj, tag_map, label, is_node=True):
    '''
    Map an object (resource IRI or literal value) to a short Mermaid tag.
    On first sight of obj a new, de-duplicated tag is recorded in tag_map;
    returns either a Mermaid node declaration (with shape and label) or,
    when is_node is False, a bare edge label.
    '''
stem = tag_map.get(obj)
disambig = ''
if stem is None:
# FIXME: A bit wasteful here. We could just maintain the set after one-time creation
existing_tags = set(tag_map.values())
stem = str(obj).split('/')[-1]
if len(stem) >= TAG_MAX_STEM_LENGTH:
split_point = TAG_MAX_STEM_LENGTH // 2
# Tried using '\u2026' but leads to Mermaid syntax error
stem = stem[:split_point] + '...' + stem[-split_point:]
        disambig = 0
        candidate = stem
        # Bump a numeric suffix until the tag no longer collides with one
        # already handed out (including a bare, unsuffixed stem).
        while candidate in existing_tags:
            disambig += 1
            candidate = f'{stem}-{disambig}'
        disambig = '' if not disambig else str(disambig)
        tag_map[obj] = candidate
asc_stem = slugify(stem)
# Raw node ID
node_id = f'{asc_stem}{disambig}'
# Node label
if label:
# Implies its a resource
if len(label) >= TAG_MAX_STEM_LENGTH:
split_point = TAG_MAX_STEM_LENGTH // 2
# Tried using '\u2026' but leads to Mermaid syntax error
label = label[:split_point] + '...' + label[-split_point:]
return f'{node_id}(fa:fa-tag {label})'
label = f'{stem}{"-" if disambig else ""}{disambig}'
if is_node:
if isinstance(obj, I):
return f'{node_id}({label})'
else:
return f'{node_id}[{label}]'
else:
return label
# TODO: Use stereotype to indicate @type
def write(model, out=sys.stdout):
'''
models - input Versa model from which output is generated.
'''
resource_tags = {}
property_tags = {}
value_tags = {}
out.write('graph TD\n')
for o in all_origins(model):
o_label = next(labels(model, o), None)
o_tag = lookup_tag(o, resource_tags, o_label)
for _, r, t, a in model.match(o):
r_tag = lookup_tag(r, property_tags, None, is_node=False)
if isinstance(t, I):
t_label = next(labels(model, t), None)
t_tag = lookup_tag(t, resource_tags, t_label)
else:
t_tag = lookup_tag(t, value_tags, None)
out.write(f' {o_tag} -->|{r_tag}| {t_tag}\n')
out.write('\n')
return
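# Usage sketch, assuming `m` is an already-populated Versa model object:
#   with open('graph.mmd', 'w') as f:
#       write(m, out=f)
# The resulting file can then be rendered with mermaid-cli, e.g.
# `mmdc -i graph.mmd -o graph.svg`.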
# Intentionally not supporting parse
def parse():
raise NotImplementedError
def parse_iter():
raise NotImplementedError
| {
"content_hash": "75e400cc8ba4cb0e4ee908d7562eb031",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 92,
"avg_line_length": 28.203883495145632,
"alnum_prop": 0.5783132530120482,
"repo_name": "uogbuji/versa",
"id": "8185c34aa16e2bd294b9327eb638eb8d3f18cabb",
"size": "2929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/py/serial/mermaid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "764"
},
{
"name": "Python",
"bytes": "405909"
}
],
"symlink_target": ""
} |
import idaapi
g_description = "plugin that prevents some obsolete opcode to be created as code"
g_comment = "Use on compiled binaries only"
g_bytecodes = [0] * 256
#--------------------------------------------------------------------------
class dumbx86hook(idaapi.IDP_Hooks):
    def __init__(self):
        idaapi.IDP_Hooks.__init__(self)
        self.cmd = idaapi.cmd
    def custom_ana(self):
        # Count how often each opcode byte is seen at an analysis point.
        b = idaapi.get_many_bytes(self.cmd.ea, 1)
        if b != "\x6a":
            pass # print "prout"
        g_bytecodes[ord(b)] += 1
        return False
        # deactivated for now
        return True
#--------------------------------------------------------------------------
class dumbx86_t(idaapi.plugin_t):
# Processor fix plugin module
flags = idaapi.PLUGIN_PROC | idaapi.PLUGIN_HIDE
comment = g_comment
wanted_hotkey = ""
help = g_description
wanted_name = "dumbx86"
def init(self):
self.prochook = None
if idaapi.ph_get_id() != idaapi.PLFM_386:
# print "dumbx86_t.init() skipped!"
return idaapi.PLUGIN_SKIP
self.prochook = dumbx86hook()
self.prochook.hook()
print "dumbx86_t.init() called!"
return idaapi.PLUGIN_KEEP
def run(self, arg):
pass
def term(self):
print "************* dumbx86_t.term() called!"
if self.prochook:
self.prochook.unhook()
#--------------------------------------------------------------------------
def PLUGIN_ENTRY():
return dumbx86_t()
| {
"content_hash": "7b4c9fdebb7e0aa7f779ad6e2ac354a3",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 81,
"avg_line_length": 29.654545454545456,
"alnum_prop": 0.48252605763335377,
"repo_name": "angea/corkami",
"id": "31a6351face0d211ee603d34adbf22a6e2477eb5",
"size": "1684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/python/dumbx86.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "1408739"
},
{
"name": "C",
"bytes": "73175"
},
{
"name": "HTML",
"bytes": "942"
},
{
"name": "Jasmin",
"bytes": "289733"
},
{
"name": "Java",
"bytes": "3524"
},
{
"name": "JavaScript",
"bytes": "8554"
},
{
"name": "Makefile",
"bytes": "26280"
},
{
"name": "Pascal",
"bytes": "1790"
},
{
"name": "Python",
"bytes": "398033"
},
{
"name": "Shell",
"bytes": "24847"
},
{
"name": "TeX",
"bytes": "24841"
},
{
"name": "Visual Basic",
"bytes": "1092"
}
],
"symlink_target": ""
} |
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import socket
import sys
import tensorflow as tf
import numpy as np
from vae.data_loaders.lab_imageloader import lab_imageloader
from vae.arch.vae_skipconn import vae_skipconn as vae
from vae.arch.network import network
from third_party.save_zhang_feats import save_zhang_feats
flags = tf.flags
#Directory params
flags.DEFINE_string("out_dir", "", "")
flags.DEFINE_string("in_dir", "", "")
flags.DEFINE_string("list_dir", "", "")
#Dataset Params
flags.DEFINE_integer("batch_size", 32, "batch size")
flags.DEFINE_integer("updates_per_epoch", 1, "number of updates per epoch")
flags.DEFINE_integer("log_interval", 1, "input image height")
flags.DEFINE_integer("img_width", 64, "input image width")
flags.DEFINE_integer("img_height", 64, "input image height")
#Network Params
flags.DEFINE_boolean("is_only_data", False, "Is training flag")
flags.DEFINE_boolean("is_train", False, "Is training flag")
flags.DEFINE_boolean("is_run_cvae", False, "Is training flag")
flags.DEFINE_integer("hidden_size", 64, "size of the hidden VAE unit")
flags.DEFINE_float("lr_vae", 1e-6, "learning rate for vae")
flags.DEFINE_integer("max_epoch_vae", 10, "max epoch")
flags.DEFINE_integer("pc_comp", 20, "number of principle components")
FLAGS = flags.FLAGS
def main():
FLAGS.log_interval = 1
FLAGS.list_dir = None
FLAGS.in_dir = 'data/testimgs/'
FLAGS.ext = 'JPEG'
data_loader = lab_imageloader(FLAGS.in_dir, \
'data/output/testimgs', listdir=None, ext=FLAGS.ext)
img_fns = data_loader.test_img_fns
if(FLAGS.is_only_data == True):
feats_fns = save_zhang_feats(img_fns, ext=FLAGS.ext)
with open('%s/list.train.txt' % FLAGS.in_dir, 'w') as fp:
for feats_fn in feats_fns:
fp.write('%s\n' % feats_fn)
with open('%s/list.test.txt' % FLAGS.in_dir, 'w') as fp:
for feats_fn in feats_fns:
fp.write('%s\n' % feats_fn)
np.save('%s/lv_color_train.mat.npy' % FLAGS.in_dir, \
np.zeros((len(img_fns), 2*FLAGS.hidden_size)))
np.save('%s/lv_color_test.mat.npy' % FLAGS.in_dir, \
np.zeros((len(img_fns), 2*FLAGS.hidden_size)))
else:
nmix = 8
lv_mdn_test = np.load(os.path.join(FLAGS.in_dir, 'lv_color_mdn_test.mat.npy'))
num_batches = np.int_(np.ceil((lv_mdn_test.shape[0]*1.)/FLAGS.batch_size))
graph_divcolor = tf.Graph()
with graph_divcolor.as_default():
model_colorfield = vae(FLAGS, nch=2, condinference_flag=True)
dnn = network(model_colorfield, data_loader, 2, FLAGS)
dnn.run_divcolor('data/imagenet_models/' , \
lv_mdn_test, num_batches=num_batches)
if __name__ == "__main__":
main()
| {
"content_hash": "26c5af5dda48dc7cac4c806d63a7f49d",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 82,
"avg_line_length": 34.96052631578947,
"alnum_prop": 0.6740684983063605,
"repo_name": "aditya12agd5/divcolor",
"id": "70518d58df001843eef5ac4f4ca993e92f02547d",
"size": "2657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "77055"
},
{
"name": "Shell",
"bytes": "571"
}
],
"symlink_target": ""
} |
"""do2things URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url
from .views import HomePageView, DecohereView, DecoherenceView
urlpatterns = [
url(r'^$', HomePageView.as_view(), name='home'),
url(r'^decohere/', DecohereView.as_view(), name='decohere'),
url(r'^decoherence/', DecoherenceView.as_view(), name='decoherence'),
]
| {
"content_hash": "fbbb05920db3c5cc231faf8e34536324",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 77,
"avg_line_length": 38.583333333333336,
"alnum_prop": 0.6954643628509719,
"repo_name": "tlake/do2things",
"id": "975928e056df70e28e9e06a6da6200ebbd539499",
"size": "926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "do2things/decohere/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5771"
},
{
"name": "HTML",
"bytes": "2736"
},
{
"name": "Python",
"bytes": "8410"
},
{
"name": "Ruby",
"bytes": "750"
}
],
"symlink_target": ""
} |
"""
.. todo::
WRITEME
"""
import logging
import warnings
from .general import is_iterable, contains_nan, contains_inf, isfinite
import theano
from theano.compat.six.moves import input, zip as izip
# Delay import of pylearn2.config.yaml_parse and pylearn2.datasets.control
# to avoid circular imports
yaml_parse = None
control = None
cuda = None
import numpy as np
from theano.compat import six
from functools import partial
from pylearn2.utils.exc import reraise_as
WRAPPER_ASSIGNMENTS = ('__module__', '__name__')
WRAPPER_CONCATENATIONS = ('__doc__',)
WRAPPER_UPDATES = ('__dict__',)
logger = logging.getLogger(__name__)
def make_name(variable, anon="anonymous_variable"):
"""
If variable has a name, returns that name. Otherwise, returns anon.
Parameters
----------
variable : tensor_like
WRITEME
anon : str, optional
WRITEME
Returns
-------
WRITEME
"""
if hasattr(variable, 'name') and variable.name is not None:
return variable.name
return anon
def sharedX(value, name=None, borrow=False, dtype=None):
"""
Transform value into a shared variable of type floatX
Parameters
----------
value : WRITEME
name : WRITEME
borrow : WRITEME
dtype : str, optional
data type. Default value is theano.config.floatX
Returns
-------
WRITEME
"""
if dtype is None:
dtype = theano.config.floatX
return theano.shared(theano._asarray(value, dtype=dtype),
name=name,
borrow=borrow)
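# Example: sharedX(np.zeros((3, 4)), name='W') returns a Theano shared
# variable named 'W', cast to theano.config.floatX and initialised to a
# 3x4 array of zeros.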
def as_floatX(variable):
"""
Casts a given variable into dtype `config.floatX`. Numpy ndarrays will
remain numpy ndarrays, python floats will become 0-D ndarrays and
all other types will be treated as theano tensors
Parameters
----------
variable : WRITEME
Returns
-------
WRITEME
"""
if isinstance(variable, float):
return np.cast[theano.config.floatX](variable)
if isinstance(variable, np.ndarray):
return np.cast[theano.config.floatX](variable)
return theano.tensor.cast(variable, theano.config.floatX)
def constantX(value):
"""
Returns a constant of value `value` with floatX dtype
Parameters
----------
variable : WRITEME
Returns
-------
WRITEME
"""
return theano.tensor.constant(np.asarray(value,
dtype=theano.config.floatX))
def subdict(d, keys):
"""
Create a subdictionary of d with the keys in keys
Parameters
----------
d : WRITEME
keys : WRITEME
Returns
-------
WRITEME
"""
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def safe_update(dict_to, dict_from):
"""
Like dict_to.update(dict_from), except don't overwrite any keys.
Parameters
----------
dict_to : WRITEME
dict_from : WRITEME
Returns
-------
WRITEME
"""
for key, val in six.iteritems(dict_from):
if key in dict_to:
raise KeyError(key)
dict_to[key] = val
return dict_to
class CallbackOp(theano.gof.Op):
"""
A Theano Op that implements the identity transform but also does an
arbitrary (user-specified) side effect.
Parameters
----------
callback : WRITEME
"""
view_map = {0: [0]}
def __init__(self, callback):
self.callback = callback
def make_node(self, xin):
"""
.. todo::
WRITEME
"""
xout = xin.type.make_variable()
return theano.gof.Apply(op=self, inputs=[xin], outputs=[xout])
def perform(self, node, inputs, output_storage):
"""
.. todo::
WRITEME
"""
xin, = inputs
xout, = output_storage
xout[0] = xin
self.callback(xin)
def grad(self, inputs, output_gradients):
"""
.. todo::
WRITEME
"""
return output_gradients
def R_op(self, inputs, eval_points):
"""
.. todo::
WRITEME
"""
return [x for x in eval_points]
def __eq__(self, other):
"""
.. todo::
WRITEME
"""
return type(self) == type(other) and self.callback == other.callback
def hash(self):
"""
.. todo::
WRITEME
"""
return hash(self.callback)
def __hash__(self):
"""
.. todo::
WRITEME
"""
return self.hash()
def get_dataless_dataset(model):
"""
Loads the dataset that model was trained on, without loading data.
This is useful if you just need the dataset's metadata, like for
formatting views of the model's weights.
Parameters
----------
model : Model
Returns
-------
dataset : Dataset
The data-less dataset as described above.
"""
global yaml_parse
global control
if yaml_parse is None:
from pylearn2.config import yaml_parse
if control is None:
from pylearn2.datasets import control
control.push_load_data(False)
try:
rval = yaml_parse.load(model.dataset_yaml_src)
finally:
control.pop_load_data()
return rval
def safe_zip(*args):
"""Like zip, but ensures arguments are of same length"""
base = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != base:
raise ValueError("Argument 0 has length %d but argument %d has "
"length %d" % (base, i+1, len(arg)))
return zip(*args)
def safe_izip(*args):
"""Like izip, but ensures arguments are of same length"""
assert all([len(arg) == len(args[0]) for arg in args])
return izip(*args)
def gpu_mem_free():
"""
Memory free on the GPU
Returns
-------
megs_free : float
Number of megabytes of memory free on the GPU used by Theano
"""
global cuda
if cuda is None:
from theano.sandbox import cuda
return cuda.mem_info()[0]/1024./1024
class _ElemwiseNoGradient(theano.tensor.Elemwise):
"""
A Theano Op that applies an elementwise transformation and reports
having no gradient.
"""
def connection_pattern(self, node):
"""
Report being disconnected to all inputs in order to have no gradient
at all.
Parameters
----------
node : WRITEME
"""
return [[False]]
def grad(self, inputs, output_gradients):
"""
Report being disconnected to all inputs in order to have no gradient
at all.
Parameters
----------
inputs : WRITEME
output_gradients : WRITEME
"""
return [theano.gradient.DisconnectedType()()]
# Call this on a theano variable to make a copy of that variable
# No gradient passes through the copying operation
# This is equivalent to making my_copy = var.copy() and passing
# my_copy in as part of consider_constant to tensor.grad
# However, this version doesn't require as much long range
# communication between parts of the code
block_gradient = _ElemwiseNoGradient(theano.scalar.identity)
def is_block_gradient(op):
"""
Parameters
----------
op : object
Returns
-------
is_block_gradient : bool
True if op is a gradient-blocking op, False otherwise
"""
return isinstance(op, _ElemwiseNoGradient)
def safe_union(a, b):
"""
Does the logic of a union operation without the non-deterministic ordering
of python sets.
Parameters
----------
a : list
b : list
Returns
-------
c : list
A list containing one copy of each element that appears in at
least one of `a` or `b`.
"""
if not isinstance(a, list):
raise TypeError("Expected first argument to be a list, but got " +
str(type(a)))
assert isinstance(b, list)
c = []
for x in a + b:
if x not in c:
c.append(x)
return c
# This was moved to theano, but I include a link to avoid breaking
# old imports
from theano.printing import hex_digest as _hex_digest
def hex_digest(*args, **kwargs):
    warnings.warn("hex_digest has been moved into Theano. "
                  "pylearn2.utils.hex_digest will be removed on or after "
                  "2014-08-26")
    return _hex_digest(*args, **kwargs)
def function(*args, **kwargs):
"""
A wrapper around theano.function that disables the on_unused_input error.
Almost no part of pylearn2 can assume that an unused input is an error, so
the default from theano is inappropriate for this project.
"""
return theano.function(*args, on_unused_input='ignore', **kwargs)
def grad(*args, **kwargs):
"""
A wrapper around theano.gradient.grad that disable the disconnected_inputs
error. Almost no part of pylearn2 can assume that a disconnected input
is an error.
"""
return theano.gradient.grad(*args, disconnected_inputs='ignore', **kwargs)
# Groups of Python types that are often used together in `isinstance`
if six.PY3:
py_integer_types = (int, np.integer)
py_number_types = (int, float, complex, np.number)
else:
py_integer_types = (int, long, np.integer) # noqa
py_number_types = (int, long, float, complex, np.number) # noqa
py_float_types = (float, np.floating)
py_complex_types = (complex, np.complex)
def get_choice(choice_to_explanation):
"""
.. todo::
WRITEME
Parameters
----------
choice_to_explanation : dict
Dictionary mapping possible user responses to strings describing
what that response will cause the script to do
Returns
-------
WRITEME
"""
d = choice_to_explanation
for key in d:
logger.info('\t{0}: {1}'.format(key, d[key]))
prompt = '/'.join(d.keys())+'? '
first = True
choice = ''
while first or choice not in d.keys():
if not first:
warnings.warn('unrecognized choice')
first = False
choice = input(prompt)
return choice
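# Example: get_choice({'y': 'overwrite the existing file', 'n': 'abort'})
# logs both explanations, prompts with "y/n? " (key order may vary) and loops
# until the user types one of the listed keys, which is then returned.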
def float32_floatX(f):
"""
This function changes floatX to float32 for the call to f.
Useful in GPU tests.
Parameters
----------
f : WRITEME
Returns
-------
WRITEME
"""
def new_f(*args, **kwargs):
"""
.. todo::
WRITEME
"""
old_floatX = theano.config.floatX
theano.config.floatX = 'float32'
try:
f(*args, **kwargs)
finally:
theano.config.floatX = old_floatX
    # If we don't do that, the test functions won't be run.
new_f.func_name = f.func_name
return new_f
def update_wrapper(wrapper,
wrapped,
assigned=WRAPPER_ASSIGNMENTS,
concatenated=WRAPPER_CONCATENATIONS,
append=False,
updated=WRAPPER_UPDATES,
replace_before=None):
"""
A Python decorator which acts like `functools.update_wrapper` but
also has the ability to concatenate attributes.
Parameters
----------
wrapper : function
Function to be updated
wrapped : function
Original function
assigned : tuple, optional
Tuple naming the attributes assigned directly from the wrapped
function to the wrapper function.
Defaults to `utils.WRAPPER_ASSIGNMENTS`.
concatenated : tuple, optional
Tuple naming the attributes from the wrapped function
concatenated with the ones from the wrapper function.
Defaults to `utils.WRAPPER_CONCATENATIONS`.
append : bool, optional
If True, appends wrapped attributes to wrapper attributes
instead of prepending them. Defaults to False.
updated : tuple, optional
Tuple naming the attributes of the wrapper that are updated
with the corresponding attribute from the wrapped function.
Defaults to `functools.WRAPPER_UPDATES`.
replace_before : str, optional
If `append` is `False` (meaning we are prepending), delete
docstring lines occurring before the first line equal to this
string (the docstring line is stripped of leading/trailing
whitespace before comparison). The newline of the line preceding
this string is preserved.
Returns
-------
wrapper : function
Updated wrapper function
Notes
-----
This can be used to concatenate the wrapper's docstring with the
    wrapped's docstring and should help reduce the amount of
documentation to write: one can use this decorator on child
classes' functions when their implementation is similar to the one
of the parent class. Conversely, if a function defined in a child
class departs from its parent's implementation, one can simply
explain the differences in a 'Notes' section without re-writing the
whole docstring.
"""
assert not (append and replace_before), ("replace_before cannot "
"be used with append")
for attr in assigned:
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in concatenated:
# Make sure attributes are not None
if getattr(wrapped, attr) is None:
setattr(wrapped, attr, "")
if getattr(wrapper, attr) is None:
setattr(wrapper, attr, "")
if append:
setattr(wrapper,
attr,
getattr(wrapped, attr) + getattr(wrapper, attr))
else:
if replace_before:
assert replace_before.strip() == replace_before, (
'value for replace_before "%s" contains leading/'
'trailing whitespace'
)
split = getattr(wrapped, attr).split("\n")
# Potentially wasting time/memory by stripping everything
# and duplicating it but probably not enough to worry about.
split_stripped = [line.strip() for line in split]
try:
index = split_stripped.index(replace_before.strip())
except ValueError:
reraise_as(ValueError('no line equal to "%s" in wrapped '
'function\'s attribute %s' %
(replace_before, attr)))
wrapped_val = '\n' + '\n'.join(split[index:])
else:
wrapped_val = getattr(wrapped, attr)
setattr(wrapper,
attr,
getattr(wrapper, attr) + wrapped_val)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped,
assigned=WRAPPER_ASSIGNMENTS,
concatenated=WRAPPER_CONCATENATIONS,
append=False,
updated=WRAPPER_UPDATES,
replace_before=None):
"""
Decorator factory to apply `update_wrapper()` to a wrapper function
Returns a decorator that invokes `update_wrapper()` with the decorated
function as the wrapper argument and the arguments to `wraps()` as the
remaining arguments. Default arguments are as for `update_wrapper()`.
This is a convenience function to simplify applying
`functools.partial()` to `update_wrapper()`.
Parameters
----------
wrapped : function
WRITEME
assigned : tuple, optional
WRITEME
concatenated : tuple, optional
WRITEME
append : bool, optional
WRITEME
updated : tuple, optional
WRITEME
Returns
-------
WRITEME
Examples
--------
>>> class Parent(object):
... def f(x):
... '''
... Adds 1 to x
...
... Parameters
... ----------
... x : int
... Variable to increment by 1
...
... Returns
... -------
... rval : int
... x incremented by 1
... '''
... rval = x + 1
... return rval
...
>>> class Child(Parent):
... @wraps(Parent.f)
... def f(x):
... '''
... Notes
... -----
... Also prints the incremented value
... '''
... rval = x + 1
... print rval
... return rval
...
>>> c = Child()
>>> print c.f.__doc__
Adds 1 to x
<BLANKLINE>
Parameters
----------
x : int
Variable to increment by 1
<BLANKLINE>
Returns
-------
rval : int
x incremented by 1
<BLANKLINE>
Notes
-----
Also prints the incremented value
"""
    return partial(update_wrapper, wrapped=wrapped, assigned=assigned,
                   concatenated=concatenated, append=append, updated=updated,
                   replace_before=replace_before)
| {
"content_hash": "b4181c580063526d13f1da7a0a76f30f",
"timestamp": "",
"source": "github",
"line_count": 656,
"max_line_length": 78,
"avg_line_length": 26.0015243902439,
"alnum_prop": 0.5767133728088175,
"repo_name": "JesseLivezey/plankton",
"id": "170b6160265dcd7db106bae9f66c550a574d3d13",
"size": "17057",
"binary": false,
"copies": "5",
"ref": "refs/heads/plankton",
"path": "pylearn2/utils/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42825"
},
{
"name": "C++",
"bytes": "57426"
},
{
"name": "CSS",
"bytes": "10655"
},
{
"name": "Cuda",
"bytes": "1267714"
},
{
"name": "Objective-C",
"bytes": "953"
},
{
"name": "Python",
"bytes": "3899299"
},
{
"name": "Shell",
"bytes": "4610"
}
],
"symlink_target": ""
} |
import numpy
import urllib2
from decimal import getcontext
from sparse.core import Sparse
def download_matrix(url):
"""Download information from url"""
request = urllib2.Request(url)
try:
request_handler = urllib2.urlopen(request)
except urllib2.HTTPError as exc:
print "[x] Fail to download information. {}".format(exc)
return
file_content = request_handler.readlines()
yield file_content[0]
for line in file_content[2:]:
if not line:
continue
yield line.strip()
def parse_information(url):
"""Generate new sparse matrix"""
values = []
matrix = Sparse()
information = download_matrix(url)
size = None
for info in information:
if ',' not in info:
if not size:
size = int(info.strip())
else:
try:
values.append(numpy.float64(info))
except ValueError:
pass
else:
unpack = info.split(',')
if not len(unpack) == 3:
continue
matrix.item(int(unpack[1].strip()), int(unpack[2].strip()),
unpack[0].strip())
matrix.size = (size, size)
matrix.sort()
return matrix, values
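# Expected input layout (assumed from the parsing logic above): the first bare
# number is the matrix size, further bare numbers are free-standing vector
# values, and comma-separated "value, row, column" triples become the
# non-zero entries of the sparse matrix, e.g.
#   3
#   1.5
#   2.0, 0, 1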
if __name__ == "__main__":
print "The module has not been designed to be used in this way."
| {
"content_hash": "0441a161579653e7c36456b8c808868a",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 71,
"avg_line_length": 26.653846153846153,
"alnum_prop": 0.5555555555555556,
"repo_name": "c-square/homework",
"id": "82e764f70577f2d61888aaa0229b1e6ebd1c3ddd",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Licență/Anul III/CN/sparse/sparse/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Apex",
"bytes": "12340"
},
{
"name": "C",
"bytes": "1547"
},
{
"name": "C++",
"bytes": "269593"
},
{
"name": "Haskell",
"bytes": "14450"
},
{
"name": "Python",
"bytes": "151009"
},
{
"name": "R",
"bytes": "16961"
},
{
"name": "TeX",
"bytes": "84806"
}
],
"symlink_target": ""
} |
from .kernel import Kernel
from .linear import Linear
from .mlp import MLP
from .mlpext import MLPext
from .additive import Additive
from .compound import Compound
from .constant import Constant
from .identity import Identity
from .piecewise_linear import PiecewiseLinear
| {
"content_hash": "1e0bf1670190317b3ba08302275f5c8b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 45,
"avg_line_length": 27.3,
"alnum_prop": 0.8278388278388278,
"repo_name": "esiivola/GPYgradients",
"id": "795352afe007d411067caa41e58b4c96efc917ea",
"size": "392",
"binary": false,
"copies": "3",
"ref": "refs/heads/devel",
"path": "GPy/mappings/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2030"
},
{
"name": "C++",
"bytes": "1605"
},
{
"name": "Python",
"bytes": "2052350"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
} |
from .GlobalData import global_data
from .Projects import all_projects
from .Apps import all_apps
from .Pods import all_pods
from .Task import Task
from .Session import Session
from .CustomizedTask import customizedTask
from .CerberusIntegration import cerberusIntegration
from .utils.SlackIntegration import slackIntegration
from concurrent.futures import ThreadPoolExecutor
import logging
import os
import time
import datetime
import sys
class TaskManager:
def __init__(self, cerberus_history_file):
self.logger = logging.getLogger('reliability')
self.time_subs = {}
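        # Built-in interval lengths in seconds; "month" is treated as four weeks.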
self.time_subs["minute"] = 60
self.time_subs["hour"] = 3600
self.time_subs["day"] = 86400
self.time_subs["week"] = 604800
self.time_subs["month"] = 2419200
self.init_timing()
self.cwd = os.getcwd()
self.cerberus_history_file = cerberus_history_file
def init_timing(self):
def parse_time(time_string):
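            # Convert a string such as "30s", "5m" or "2h" into seconds.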
unit = time_string[-1:]
value = int(time_string[:-1])
if unit == "s" :
value = value
elif unit == "m":
value = value * 60
elif unit == "h":
value = value * 3600
return value
time_subs = {}
time_subs = global_data.config['timeSubstitutions']
for unit in time_subs.keys():
self.time_subs[unit] = parse_time(time_subs[unit])
def init_tasks(self):
self.next_execution_time["minute"] = self.time_subs["minute"]
self.next_execution_time["hour"] = self.time_subs["hour"]
self.next_execution_time["day"] = self.time_subs["day"]
self.next_execution_time["week"] = self.time_subs["week"]
self.next_execution_time["month"] = self.time_subs["month"]
self.next_task = {}
def calculate_next_execution(self):
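        # Find the earliest scheduled time and every interval type that shares it.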
next_execution_time = sys.maxsize
next_execution = {}
for interval in self.next_execution_time.keys():
if self.next_execution_time[interval] < next_execution_time:
next_execution_time = self.next_execution_time[interval]
next_execution = {}
next_execution[interval] = next_execution_time
elif self.next_execution_time[interval] == next_execution_time:
next_execution[interval] = self.next_execution_time[interval]
return (next_execution, next_execution_time)
def schedule_next(self,execution_type):
self.next_execution_time[execution_type] += self.time_subs[execution_type]
def start_test(self):
all_pods.init()
all_apps.init()
all_projects.init()
task = Task(global_data.config,{'action': 'create', 'resource': 'projects','quantity': 2})
task.execute()
# task = Task(global_data.config,{'action': 'scaleUp', 'resource': 'apps'})
# task.execute()
# task = Task(global_data.config,{'action': 'scaleDown', 'resource': 'apps'})
# task.execute()
# task = Task(global_data.config,{'action': 'visit', 'resource': 'apps'})
# task.execute()
task = Task(global_data.config,{'action': 'delete', 'resource': 'projects'})
task.execute()
def check_desired_state(self):
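        # Decide whether the test should run, pause or halt based on marker files
        # and, when enabled, the Cerberus health status.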
if os.path.isfile(self.cwd + "/halt"):
slackIntegration.post_message_in_slack("Reliability test is going to halt.")
state = "halt"
self.logger.info("Halt file found, shutting down reliability.")
elif os.path.isfile(self.cwd + "/pause"):
state = "pause"
self.logger.info("Pause file found - pausing.")
else:
state = "run"
if global_data.cerberus_enable:
cerberus_status = cerberusIntegration.get_status(global_data.cerberus_api)
if cerberus_status == "False":
if global_data.cerberus_fail_action == "halt":
state = "halt"
self.logger.warning("Cerberus status is 'False'. Halt reliability test.")
elif global_data.cerberus_fail_action == "pause":
state = "pause"
self.logger.warning("Cerberus status is 'False'. Pause reliability test. Resolve cerberus failure to continue.")
elif global_data.cerberus_fail_action == "continue":
self.logger.warning("Cerberus status is 'False'. Reliability test will continue.")
else:
self.logger.warning(f"Cerberus status is False. cerberus_fail_action '{global_data.cerberus_fail_action}' is not recognized. Reliability test will continue.")
elif cerberus_status == "True":
self.logger.info("Cerberus status is 'True'.")
else:
self.logger.warning(f"Getting Cerberus status failed, response is '{cerberus_status}'.")
cerberusIntegration.save_history(global_data.cerberus_api, self.cerberus_history_file)
return state
    # Re-login all users to avoid expiration of the login session token in the kubeconfig. The default timeout is 1 day.
def relogin(self):
# re-login 23 hours since last login
if time.time() - global_data.last_login_time > 3600*23:
self.logger.info("Re-login for all users to avoid login session token expiration")
login_args = []
for user in global_data.users:
password = global_data.users[user].password
kubeconfig = global_data.kubeconfigs[user]
login_args.append((user, password, kubeconfig))
# login concurrently
with ThreadPoolExecutor(max_workers=51) as executor:
results = executor.map(lambda t: Session().login(*t), login_args)
for result in results:
self.logger.info(result)
global_data.last_login_time = time.time()
def dump_stats(self):
status = []
status.append(f"Total projects: {str(all_projects.total_projects)}")
status.append(f"Failed apps: {str(all_apps.failed_apps)}")
status.append(f"Successful app visits: {str(global_data.app_visit_succeeded)}")
status.append(f"Failed app visits: {str(global_data.app_visit_failed)}")
status.append(f"Total builds: {str(global_data.total_build_count)}")
status.append(f"Successful customized task: {str(customizedTask.customized_task_succeeded)}")
status.append(f"Failed customized task: {str(customizedTask.customized_task_failed)}")
status = "\n".join(status)
self.logger.info("Reliability test status:\n"+ status)
slackIntegration.post_message_in_slack("Reliability test status:\n" + status)
def start(self):
self.logger.info("Task manager started in working directory: " + self.cwd + " at: " + str(datetime.datetime.now()))
self.next_execution_time = {}
self.init_tasks()
(next_execution, next_execution_time) = self.calculate_next_execution()
current_time = 0
all_pods.init()
all_apps.init()
all_projects.init()
max_projects = global_data.maxProjects
# get the projects creation concurrency
projects_create_concurrency = 0
try:
for tasks in global_data.config["tasks"].values():
for task in tasks:
if task["action"] == "create" and task["resource"] == "projects":
projects_create_concurrency = (task["concurrency"] if (task["concurrency"] > projects_create_concurrency) else projects_create_concurrency)
except KeyError as e :
self.logger.warning("KeyError " + str(e))
if projects_create_concurrency != 0:
if max_projects < projects_create_concurrency:
self.logger.warning(f"maxProjects {max_projects} should be larger than the projects create concurrency {projects_create_concurrency}")
# as projects are created concurrently, the next round will not start if the left capacity is less than the concurrency
all_projects.max_projects = max_projects-max_projects%projects_create_concurrency
            self.logger.info(str(all_projects.max_projects) + " is set as the max number of projects given the create concurrency "
                + str(projects_create_concurrency) + ". Original maxProjects is " + str(max_projects))
state = "run"
last_state = "run"
while state == "run" or state == "pause":
self.logger.debug("Current time: " + str(current_time) + " next execution: " + str(next_execution))
state = self.check_desired_state()
if current_time >= next_execution_time and state == "run" :
for execution_type in next_execution.keys():
if execution_type in global_data.config["tasks"]:
tasks = global_data.config["tasks"][execution_type]
for task_to_execute in tasks:
self.relogin()
task = Task(task_to_execute)
task.execute()
self.schedule_next(execution_type)
(next_execution, next_execution_time) = self.calculate_next_execution()
last_state = "run"
# only dump state on the first pause state after run state
if state == "pause" and last_state != "pause":
last_state = "pause"
self.dump_stats()
self.logger.info(f"Sleep '{global_data.sleepTime}' seconds before running next task type (minute/hour/day/week/month).")
time.sleep(global_data.sleepTime)
current_time += global_data.sleepTime
self.dump_stats()
| {
"content_hash": "3a8e6458d400ab50b627046e972d5418",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 182,
"avg_line_length": 48.759803921568626,
"alnum_prop": 0.5961596461244596,
"repo_name": "mffiedler/svt",
"id": "140f73bd49482905dabc92b0fdf15a9c7287e581",
"size": "9947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reliability/tasks/TaskManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "1C Enterprise",
"bytes": "2409"
},
{
"name": "Awk",
"bytes": "6973"
},
{
"name": "DIGITAL Command Language",
"bytes": "1375"
},
{
"name": "Dockerfile",
"bytes": "18989"
},
{
"name": "Go",
"bytes": "3048"
},
{
"name": "Groovy",
"bytes": "1206"
},
{
"name": "Jinja",
"bytes": "12917"
},
{
"name": "Makefile",
"bytes": "328"
},
{
"name": "Python",
"bytes": "361874"
},
{
"name": "Shell",
"bytes": "367619"
}
],
"symlink_target": ""
} |
import datetime
from decimal import Decimal
from typing import Any, Dict
import pytz
from dirtyfields import DirtyFieldsMixin
from django.conf import settings
from django.db import models, transaction
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.forms.models import model_to_dict
def _add_dict_items_if_not_present(
dict_with_precedence: Dict, dict_to_insert: Dict
) -> Dict:
for k, v in dict_to_insert.items():
if k not in dict_with_precedence:
dict_with_precedence[k] = v
return dict_with_precedence
class PostSaveActionsMixin(DirtyFieldsMixin):
"""
If you want to do pre-save signals,
you can do this so you don't have to use DirtyFieldsMixin:
http://stackoverflow.com/a/37265149/333294
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.transaction_dirty_fields = {}
self.new_object = False
# Create a lambda here so that this function is unique to this object,
# as required by on_commit_announce_set.
self.announce_changes_lambda = lambda: self.announce_changes
def _post_creation_saved(self, *args, **kwargs):
"""
Implement this with a method to be run when this model is saved for
the first time - when a new row will be inserted into the database.
"""
raise NotImplementedError(self._post_creation_saved.__doc__) # pragma: no cover
def _post_changes_saved(self, changed_fields, *args, **kwargs):
"""
Implement this with a method to be run when this model is saved
and some attributes have changed.
"""
raise NotImplementedError(self._post_changes_saved.__doc__) # pragma: no cover
def _post_no_changes_saved(self, *args, **kwargs):
"""
Implement this with a method to be run when this model is saved
but NO attributes have been changed.
Most of the time you can just `pass`.
N.B. if foreign keys have data changed, but the FK still points
to the same FK (ie, user #2), this method will be called.
"""
raise NotImplementedError(
self._post_no_changes_saved.__doc__
) # pragma: no cover
def save(self, *args, **kwargs):
self.new_object = (
self.new_object or self.id is None or kwargs.get("force_insert") is True
)
self.transaction_dirty_fields = _add_dict_items_if_not_present(
self.transaction_dirty_fields,
self.get_dirty_fields(check_relationship=True),
)
save_result = super(PostSaveActionsMixin, self).save(*args, **kwargs)
if not self.on_commit_announce_set():
transaction.on_commit(self.announce_changes)
return save_result
def on_commit_announce_set(self) -> bool:
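        # True if this instance's announce hook is already queued on the current
        # transaction's run_on_commit list, so it is not registered twice.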
return any(
[
fun[1] == self.announce_changes_lambda
for fun in transaction.get_connection().run_on_commit
]
)
def announce_changes(self):
if self.new_object:
self._post_creation_saved()
elif self.transaction_dirty_fields:
self._post_changes_saved(self.transaction_dirty_fields)
else:
self._post_no_changes_saved()
self.transaction_dirty_fields = {}
self.new_object = False
| {
"content_hash": "086c822a80b39d44d741f141b2e1ce6e",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 88,
"avg_line_length": 36.11702127659574,
"alnum_prop": 0.6318114874815906,
"repo_name": "networklocum/django-changes-on-save",
"id": "8a4e34542450fbbfeedd7471549681f1272bbd8b",
"size": "3395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "changesonsave/post_save_actions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "32901"
}
],
"symlink_target": ""
} |
import logging
import eventlet
from errors import ExpectedException
try:
import json
except ImportError:
import simplejson as json
import datetime
def get_now():
return datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
class User(object):
logger = logging.getLogger('HookboxUser')
_options = {
'reflective': True,
'moderated_message': True,
'per_connection_subscriptions': False,
'global_unsubscriptions': False,
'auto_subscribe':[]
}
def __init__(self, server, name, **options):
self.server = server
self.name = name
self.connections = []
self.channels = {}
self._temp_cookie = ""
self.update_options(**self._options)
self.update_options(**options)
self._frame_errors = {}
def serialize(self):
return {
'channels': [ chan_name for chan_name in self.channels ],
'connections': [ conn.id for conn in self.connections ],
'name': self.name,
'options': dict([ (key, getattr(self, key)) for key in self._options])
}
def extract_valid_options(self, options):
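        # Keep only known option keys, falling back to the class defaults.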
return dict([ (key, options.get(key, self._options[key])) for key in self._options ])
def update_options(self, **options):
# TODO: this can't remain so generic forever. At some point we need
# better checks on values, such as the list of dictionaries
# for history, or the polling options.
# TODO: add support for lists (we only have dicts now)
# TODO: Probably should make this whole function recursive... though
# we only really have one level of nesting now.
# TODO: most of this function is duplicated from Channel#update_options
# (including the TODOs above), could be a lot DRYer
for key, val in options.items():
if key not in self._options:
raise ValueError("Invalid keyword argument %s" % (key))
default = self._options[key]
cls = default.__class__
if cls in (unicode, str):
cls = basestring
if not isinstance(val, cls):
raise ValueError("Invalid type for %s (should be %s)" % (key, default.__class__))
if key == 'state':
self.state_replace(val)
continue
if isinstance(val, dict):
for _key, _val in val.items():
if _key not in self._options[key]:
raise ValueError("Invalid keyword argument %s" % (_key))
default = self._options[key][_key]
cls = default.__class__
if isinstance(default, float) and isinstance(_val, int):
_val = float(_val)
if cls in (unicode, str):
cls = basestring
if not isinstance(_val, cls):
raise ValueError("%s is Invalid type for %s (should be %s)" % (_val, _key, default.__class__))
# two loops forces exception *before* any of the options are set.
for key, val in options.items():
# this should create copies of any dicts or lists that are options
if isinstance(val, dict) and hasattr(self, key):
getattr(self, key).update(val)
else:
setattr(self, key, val.__class__(val))
def add_connection(self, conn):
self.connections.append(conn)
conn.user = self
# call later...
eventlet.spawn(self._send_initial_subscriptions, conn)
def _send_initial_subscriptions(self, conn):
for (channel_name, channel_connections) in self.channels.items():
if self.server.exists_channel(channel_name):
frame = self.server.get_channel(self, channel_name)._build_subscribe_frame(self)
conn.send_frame('SUBSCRIBE', frame)
def remove_connection(self, conn):
if conn not in self.connections:
return
self.connections.remove(conn)
# Remove the connection from the channels it was subscribed to,
# unsubscribing the user from any channels which they no longer
# have open connections to
for (channel_name, channel_connections) in self.channels.items():
if conn in self.channels[channel_name]:
if self.global_unsubscriptions:
del self.channels[channel_name][:]
else:
self.channels[channel_name].remove(conn)
if (self.per_connection_subscriptions or self.global_unsubscriptions) and not self.channels[channel_name]:
if self.server.exists_channel(channel_name):
self.server.get_channel(self, channel_name).unsubscribe(self, needs_auth=True, force_auth=True)
if not self.connections:
# so the disconnect/unsubscribe callbacks have a cookie
self._temp_cookie = conn.get_cookie()
for (channel_name, connections) in self.channels.items():
if self.server.exists_channel(channel_name):
self.server.get_channel(self, channel_name).unsubscribe(self, needs_auth=True, force_auth=True)
self.server.remove_user(self.name)
def channel_subscribed(self, channel, conn=None):
if channel.name not in self.channels:
self.channels[channel.name] = [ conn ]
elif conn not in self.channels[channel.name]:
self.channels[channel.name].append(conn)
def channel_unsubscribed(self, channel):
if channel.name in self.channels:
del self.channels[channel.name]
def get_name(self):
return self.name
def send_frame(self, name, args={}, omit=None, channel=None):
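        # Deliver the frame to every live connection (or only to connections
        # subscribed to `channel` when per-connection subscriptions are enabled)
        # and drop any connection whose send fails.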
if not self.per_connection_subscriptions:
channel = None
if channel and channel.name not in self.channels:
return
for conn in (self.channels[channel.name] if channel else self.connections)[:]:
if conn is not omit:
                channel_name = channel.name if hasattr(channel, 'name') else '<no channel>'
msg = 'user: %s, conn: %s, channel:%s, frame:%s-%s' % (self.name, conn.id, channel_name, name, args)
if conn.send_frame(name, args) is False:
#log error
self.logger.info('send_frame ( error ): %s' % msg)
self.remove_connection(conn)
# ## Adding for debug purposes
# if name in self._frame_errors:
# error_conns = []
# for conn, e in self._frame_errors[name]:
# if e==args:
# error_conns.append(conn)
#
# if error_conns:
# self.logger.warn('Error sending frame %s for user %s, %s to connections %s' % (name, self.name, args, error_conns))
###############################
## Adding for debug purposes
def add_frame_error(self, conn, name, args):
if name in self._frame_errors:
self._frame_errors[name].append((conn.id, args,))
else:
self._frame_errors[name] = [(conn.id, args,)]
###############################
## Adding for debug purposes
if name in self._frame_errors:
error_conns = []
for conn, e in self._frame_errors[name]:
if e==args:
error_conns.append(conn)
if error_conns:
self.logger.warn('Error sending frame %s, %s to connections %s' % (name, args, error_conns))
###############################
## Adding for debug purposes
def add_frame_error(self, conn, name, args):
if name in self._frame_errors:
self._frame_errors[name].append((conn.id, args,))
else:
self._frame_errors[name] = [(conn.id, args,)]
###############################
def get_cookie(self, conn=None):
if conn:
return conn.get_cookie()
return self._temp_cookie or ""
def send_message(self, recipient_name, payload, conn=None, needs_auth=True):
try:
encoded_payload = json.loads(payload)
except:
raise ExpectedException("Invalid json for payload")
payload = encoded_payload
if needs_auth and self.moderated_message:
form = { 'sender': self.get_name(), 'recipient': recipient_name, 'recipient_exists': self.server.exists_user(recipient_name), 'payload': json.dumps(payload) }
success, options = self.server.http_request('message', self.get_cookie(conn), form, conn=conn)
self.server.maybe_auto_subscribe(self, options, conn=conn)
if not success:
raise ExpectedException(options.get('error', 'Unauthorized'))
payload = options.get('override_payload', payload)
recipient_name = options.get('override_recipient_name', recipient_name)
elif not self.server.exists_user(recipient_name):
raise ExpectedException('Invalid user name')
recipient = self.server.get_user(recipient_name) if self.server.exists_user(recipient_name) else None
frame = {"sender": self.get_name(), "recipient": recipient.get_name() if recipient else "null", "payload": payload, "datetime": get_now()}
if recipient:
recipient.send_frame('MESSAGE', frame)
if self.reflective and (not recipient or recipient.name != self.name):
self.send_frame('MESSAGE', frame)
| {
"content_hash": "75780bc7baf55a7b6939f335bfd8cf7b",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 170,
"avg_line_length": 43.49107142857143,
"alnum_prop": 0.5656949291726545,
"repo_name": "hookbox/hookbox",
"id": "94965606e325df4d997419477932df39a175cbd8",
"size": "9742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hookbox/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "254136"
},
{
"name": "PHP",
"bytes": "11067"
},
{
"name": "Python",
"bytes": "126033"
},
{
"name": "Racket",
"bytes": "106"
},
{
"name": "Shell",
"bytes": "240"
}
],
"symlink_target": ""
} |
import mock
import os
from oslo_config import cfg
from oslo_middleware import request_id
import webob
from senlin.common import context
from senlin.common import exception
from senlin.tests.common import base
policy_path = os.path.dirname(os.path.realpath(__file__)) + "/policy/"
class TestRequestContext(base.SenlinTestCase):
def setUp(self):
self.ctx = {'username': 'mick',
'auth_token': '123',
'auth_token_info': {'123info': 'woop'},
'domain_id': 'this domain',
'project_domain_id': 'a project domain',
'project_id': 'a project',
'is_admin': False,
'user': 'mick',
'user_domain_id': 'user-domain',
'password': 'foo',
'show_deleted': False,
'roles': ['arole', 'notadmin'],
'tenant_id': '456tenant',
'user_id': 'fooUser',
'tenant': 'atenant',
'auth_url': 'http://xyz',
'trusts': None,
'region_name': 'regionOne'}
super(TestRequestContext, self).setUp()
def test_request_context_init(self):
ctx = context.RequestContext(
username=self.ctx.get('username'),
auth_token=self.ctx.get('auth_token'),
auth_token_info=self.ctx.get('auth_token_info'),
domain_id=self.ctx.get('domain_id'),
project_domain_id=self.ctx.get('project_domain_id'),
project_id=self.ctx.get('project_id'),
is_admin=self.ctx.get('is_admin'),
user=self.ctx.get('user'),
user_domain_id=self.ctx.get('user_domain_id'),
password=self.ctx.get('password'),
show_deleted=self.ctx.get('show_deleted'),
roles=self.ctx.get('roles'),
tenant_id=self.ctx.get('tenant_id'),
user_id=self.ctx.get('user_id'),
tenant=self.ctx.get('tenant'),
auth_url=self.ctx.get('auth_url'),
trusts=self.ctx.get('trusts'),
region_name=self.ctx.get('region_name'))
ctx_dict = ctx.to_dict()
del(ctx_dict['request_id'])
self.assertEqual(self.ctx, ctx_dict)
def test_request_context_from_dict(self):
ctx = context.RequestContext.from_dict(self.ctx)
ctx_dict = ctx.to_dict()
del(ctx_dict['request_id'])
self.assertEqual(self.ctx, ctx_dict)
def test_request_context_update(self):
ctx = context.RequestContext.from_dict(self.ctx)
for k in self.ctx:
self.assertEqual(self.ctx.get(k), ctx.to_dict().get(k))
override = '%s_override' % k
setattr(ctx, k, override)
self.assertEqual(override, ctx.to_dict().get(k))
def test_get_admin_context(self):
ctx = context.get_admin_context()
self.assertTrue(ctx.is_admin)
self.assertFalse(ctx.show_deleted)
def test_get_admin_context_show_deleted(self):
ctx = context.get_admin_context(show_deleted=True)
self.assertTrue(ctx.is_admin)
self.assertTrue(ctx.show_deleted)
def test_admin_context_policy_true(self):
policy_check = 'senlin.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = True
ctx = context.RequestContext(roles=['admin'])
self.assertTrue(ctx.is_admin)
def test_admin_context_policy_false(self):
policy_check = 'senlin.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
ctx = context.RequestContext(roles=['notadmin'])
self.assertFalse(ctx.is_admin)
class RequestContextMiddlewareTest(base.SenlinTestCase):
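    # Each scenario provides request headers/environ plus either the expected
    # context contents or the exception the middleware should raise.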
scenarios = [(
'empty_headers',
dict(
environ=None,
headers={},
expected_exception=None,
context_dict={
'auth_token': None,
'auth_token_info': None,
'auth_url': None,
'is_admin': False,
'password': None,
'roles': [],
'show_deleted': False,
'tenant': None,
'tenant_id': None,
'user': None,
'user_id': None,
'username': None
})
), (
'username_password',
dict(
environ=None,
headers={
'X-Auth-User': 'my_username',
'X-Auth-Key': 'my_password',
'X-Auth-EC2-Creds': '{"ec2Credentials": {}}',
'X-User-Id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'X-Auth-Token': 'atoken',
'X-Tenant-Name': 'my_tenant',
'X-Tenant-Id': 'db6808c8-62d0-4d92-898c-d644a6af20e9',
'X-Auth-Url': 'http://192.0.2.1:5000/v1',
'X-Roles': 'role1,role2,role3'
},
expected_exception=None,
context_dict={
'auth_token': 'atoken',
'auth_url': 'http://192.0.2.1:5000/v1',
'is_admin': False,
'password': 'my_password',
'roles': ['role1', 'role2', 'role3'],
'show_deleted': False,
'tenant': 'my_tenant',
'tenant_id': 'db6808c8-62d0-4d92-898c-d644a6af20e9',
'user': 'my_username',
'user_id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'username': 'my_username'
})
), (
'token_creds',
dict(
environ={'keystone.token_info': {'info': 123}},
headers={
'X-User-Id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'X-Auth-Token': 'atoken2',
'X-Tenant-Name': 'my_tenant2',
'X-Tenant-Id': 'bb9108c8-62d0-4d92-898c-d644a6af20e9',
'X-Auth-Url': 'http://192.0.2.1:5000/v1',
'X-Roles': 'role1,role2,role3',
},
expected_exception=None,
context_dict={
'auth_token': 'atoken2',
'auth_token_info': {'info': 123},
'auth_url': 'http://192.0.2.1:5000/v1',
'is_admin': False,
'password': None,
'roles': ['role1', 'role2', 'role3'],
'show_deleted': False,
'tenant': 'my_tenant2',
'tenant_id': 'bb9108c8-62d0-4d92-898c-d644a6af20e9',
'user': None,
'user_id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'username': None
})
), (
'malformed_roles',
dict(
environ=None,
headers={
'X-Roles': [],
},
expected_exception=exception.NotAuthenticated)
)]
def setUp(self):
super(RequestContextMiddlewareTest, self).setUp()
opts = [
cfg.StrOpt('config_dir', default=policy_path),
cfg.StrOpt('config_file', default='foo'),
cfg.StrOpt('project', default='senlin'),
]
cfg.CONF.register_opts(opts)
cfg.CONF.set_override('policy_file', 'check_admin.json')
def test_context_middleware(self):
middleware = context.ContextMiddleware(None, None)
request = webob.Request.blank('/clusters', headers=self.headers,
environ=self.environ)
if self.expected_exception:
self.assertRaises(
self.expected_exception, middleware.process_request, request)
else:
self.assertIsNone(middleware.process_request(request))
ctx = request.context.to_dict()
for k, v in self.context_dict.items():
self.assertEqual(v, ctx[k], 'Key %s values do not match' % k)
self.assertIsNotNone(ctx.get('request_id'))
def test_context_middleware_with_requestid(self):
middleware = context.ContextMiddleware(None, None)
request = webob.Request.blank('/clusters', headers=self.headers,
environ=self.environ)
req_id = 'req-5a63f0d7-1b69-447b-b621-4ea87cc7186d'
request.environ[request_id.ENV_REQUEST_ID] = req_id
if self.expected_exception:
self.assertRaises(
self.expected_exception, middleware.process_request, request)
else:
self.assertIsNone(middleware.process_request(request))
ctx = request.context.to_dict()
for k, v in self.context_dict.items():
self.assertEqual(v, ctx[k], 'Key %s values do not match' % k)
self.assertEqual(
ctx.get('request_id'), req_id,
'Key request_id values do not match')
| {
"content_hash": "3d2140e7f4637de751f129812cae9b52",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 77,
"avg_line_length": 38.58620689655172,
"alnum_prop": 0.5179848078641645,
"repo_name": "tengqm/senlin",
"id": "4636a4639220e087cd4448374c1edb17e1e2d73c",
"size": "9501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "senlin/tests/test_common_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "628"
},
{
"name": "Python",
"bytes": "1060167"
},
{
"name": "Shell",
"bytes": "12694"
}
],
"symlink_target": ""
} |
__author__ = 'mpetyx'
from collections import OrderedDict
import json
import requests
from Parser import Parser
from pyapi.entities import APIRoot, APIResource, APIMethod, APIQueryParameter
class SwaggerParser(Parser):
api = APIRoot(raml_version=str(0.8))
def parse(self, location):
# self.api.g_version = g.g_version
# g = swaggerpy.load_file('test-data/1.1/simple/resources.json')
if "http://" in location:
response = requests.get(location).json()
# import pprint
# pprint.pprint(response)
data = response
else:
with open(location) as data_file:
data = json.load(data_file)
# self.api.title = data['info']['title']
# self.api.title = g.title
# self.api.version = g.version
# try:
# # data['swaggerVersion'] == "1.1"
# return self.version_11(data=data)
# except:
# return self.version_12(data=data)
return self.version_11(data=data)
def version_12(self, data):
# self.api.title = data['info']['title']
# self.api.title = g.title
# self.api.version = g.version
self.api.baseUri = data['basePath']
# self.api.protocols = data['schemes']
# self.api.mediaType = g.mediaType
# self.api.documentation = g.documentation
# self.api.resourceTypes = g.resourceTypes
resources = OrderedDict()
for path in data['paths']:
resource = APIResource()
resource.displayName = str(path)
resource.description = "example of the api"
# Parse methods
methods = OrderedDict()
for operation in data['paths'][path]:
method = APIMethod(notNull=True)
if "summary" in data['paths'][path][operation]:
method.description = data['paths'][path][operation]['summary']
else:
method.description = data['paths'][path][operation]['description']
if "parameters" in data['paths'][path][operation]:
parameters = OrderedDict()
index = 0
while index < len(data['paths'][path][operation]['parameters']):
param = APIQueryParameter()
param.name = data['paths'][path][operation]['parameters'][index]['name']
index = index + 1
parameters[param.name] = param
method.queryParameters = parameters
methods[str(operation)] = method
if len(methods):
resource.methods = methods
resources[str(path)] = resource
        if len(resources) > 0:
self.api.resources = resources
return self.api
def version_11(self, data):
# self.api.title = data['info']['title']
# self.api.title = g.title
# self.api.version = g.version
self.api.baseUri = data['basePath']
# self.api.protocols = data['schemes']
# self.api.mediaType = g.mediaType
# self.api.documentation = g.documentation
# self.api.resourceTypes = g.resourceTypes
resources = OrderedDict()
for api in data['apis']:
path = api['path']
resource = APIResource()
resource.displayName = str(path)
resource.description = str(api['description'])
# Parse methods
methods = OrderedDict()
for operation in api['operations']:
method = APIMethod(notNull=True)
if "summary" in operation:
method.description = operation['summary']
else:
                    method.description = operation['description']
if "parameters" in operation:
parameters = OrderedDict()
index = 0
while index < len(operation['parameters']):
param = APIQueryParameter()
param.name = operation['parameters'][index]['name']
index = index + 1
parameters[param.name] = param
method.queryParameters = parameters
try:
methods[str(operation['httpMethod'])] = method
except:
methods[str(operation['method'])] = method
if len(methods):
resource.methods = methods
resources[str(path)] = resource
        if len(resources) > 0:
self.api.resources = resources
return self.api
| {
"content_hash": "af15254927503aaa413e57646e3a5cf4",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 96,
"avg_line_length": 35.21641791044776,
"alnum_prop": 0.5293494384403475,
"repo_name": "mpetyx/pyapi",
"id": "9350f64f928d854f8bdd3a5ff13ca91bbafaa15e",
"size": "4719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyapi/parsers/SwaggerParser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "443"
},
{
"name": "Python",
"bytes": "147307"
}
],
"symlink_target": ""
} |
"""
Testing for the base mechanics and methods of replicas.
"""
##########################################################################
## Imports
##########################################################################
import unittest
try:
from unittest import mock
except ImportError:
import mock
from cloudscope.config import settings
from cloudscope.replica.base import Replica
from cloudscope.simulation.main import ConsistencySimulation
from cloudscope.replica.base import State, Consistency, Location
##########################################################################
## Base Replica Test Cases
##########################################################################
class BaseReplicaTests(unittest.TestCase):
def setUp(self):
self.sim = ConsistencySimulation()
Replica.counter.reset()
def tearDown(self):
self.sim = None
def test_replica_defaults(self):
"""
Test that a base replica has meaningful defaults
"""
replica = Replica(self.sim)
self.assertIsNotNone(replica.id)
self.assertEqual(replica.type, settings.simulation.default_replica)
self.assertIsNotNone(replica.label)
self.assertEqual(replica.state, State.READY)
self.assertEqual(replica.location, "unknown")
self.assertEqual(
replica.consistency, Consistency.get(settings.simulation.default_consistency)
)
def test_increasing_replica_ids(self):
"""
Test that replicas get an increasing id by default
"""
for idx in xrange(10):
replica = Replica(self.sim)
self.assertEqual(replica.id, "r{}".format(idx+1))
def test_on_state_change_calls(self):
"""
Ensure that on state change event handler is called
"""
replica = Replica(self.sim)
replica.on_state_change = mock.MagicMock()
states = (
State.UNKNOWN, State.LOADING, State.ERRORED,
State.READY, State.FOLLOWER, State.CANDIDATE, State.TAGGING,
State.LEADER, State.OWNER
)
for state in states:
replica.state = state
replica.on_state_change.assert_called_with()
self.assertEqual(replica.on_state_change.call_count, len(states))
def build_neighbors(self):
"""
Helper function to add a bunch of replicas to the simulation for
neighborhood testing that follows below.
"""
# Add a bunch of replicas to the simulation
replicas = [
{"consistency":Consistency.STRONG, "location":Location.ALPHA},
{"consistency":Consistency.EVENTUAL, "location":Location.ALPHA},
{"consistency":Consistency.EVENTUAL, "location":Location.ALPHA},
{"consistency":Consistency.STRONG, "location":Location.BRAVO},
{"consistency":Consistency.EVENTUAL, "location":Location.BRAVO},
{"consistency":Consistency.CAUSAL, "location":Location.BRAVO},
{"consistency":Consistency.STRONG, "location":Location.BRAVO},
{"consistency":Consistency.EVENTUAL, "location":Location.BRAVO},
{"consistency":Consistency.CAUSAL, "location":Location.BRAVO},
{"consistency":Consistency.CAUSAL, "location":Location.CHARLIE},
{"consistency":Consistency.CAUSAL, "location":Location.CHARLIE},
]
# Add replicas to the simulation
for kwargs in replicas:
self.sim.replicas.append(Replica(self.sim, **kwargs))
# Add connections to the simulation (fully connected)
for idx, source in enumerate(self.sim.replicas):
for target in self.sim.replicas[idx+1:]:
self.sim.network.add_connection(source, target, True)
def test_neighbors(self):
"""
Test that the neighbor listing returns all neighbors.
"""
self.build_neighbors()
# Test the neighborhood
for replica in self.sim.replicas:
neighbors = list(replica.neighbors())
self.assertEqual(len(neighbors), len(self.sim.replicas)-1)
def test_neighbor_consistency_filter(self):
"""
Test that the neighbors can be filtered on consistency.
"""
self.build_neighbors()
# Test the neighborhood filtering on a single consistency
for replica in self.sim.replicas:
neighbors = list(replica.neighbors(consistency=Consistency.STRONG))
if replica.consistency == Consistency.STRONG:
self.assertEqual(len(neighbors), 2)
else:
self.assertEqual(len(neighbors), 3)
for neighbor in neighbors:
self.assertEqual(neighbor.consistency, Consistency.STRONG)
# Test the neighborhood filtering on multiple consistencies
for replica in self.sim.replicas:
consistencies = {Consistency.STRONG, Consistency.EVENTUAL}
neighbors = list(replica.neighbors(consistency=consistencies))
if replica.consistency in consistencies:
self.assertEqual(len(neighbors), 6)
else:
self.assertEqual(len(neighbors), 7)
for neighbor in neighbors:
self.assertIn(neighbor.consistency, consistencies)
def test_neighbor_consistency_exclusion_filter(self):
"""
Test that the neighbors can be excluded by consistency.
"""
self.build_neighbors()
# Test the neighborhood filtering on a single consistency
for replica in self.sim.replicas:
neighbors = list(replica.neighbors(consistency=Consistency.STRONG, exclude=True))
if replica.consistency != Consistency.STRONG:
self.assertEqual(len(neighbors), 7)
else:
self.assertEqual(len(neighbors), 8)
for neighbor in neighbors:
self.assertNotEqual(neighbor.consistency, Consistency.STRONG)
# Test the neighborhood filtering on multiple consistencies
for replica in self.sim.replicas:
consistencies = {Consistency.STRONG, Consistency.EVENTUAL}
neighbors = list(replica.neighbors(consistency=consistencies, exclude=True))
if replica.consistency not in consistencies:
self.assertEqual(len(neighbors), 3)
else:
self.assertEqual(len(neighbors), 4)
for neighbor in neighbors:
self.assertNotIn(neighbor.consistency, consistencies)
def test_neighbor_location_filter(self):
"""
Test that the neighbors can be filtered on location.
"""
self.build_neighbors()
# Test the neighborhood filtering on a single location
for replica in self.sim.replicas:
neighbors = list(replica.neighbors(location=Location.ALPHA))
if replica.location == Location.ALPHA:
self.assertEqual(len(neighbors), 2)
else:
self.assertEqual(len(neighbors), 3)
for neighbor in neighbors:
self.assertEqual(neighbor.location, Location.ALPHA)
# Test the neighborhood filtering on multiple locations
for replica in self.sim.replicas:
locations = {Location.ALPHA, Location.CHARLIE}
neighbors = list(replica.neighbors(location=locations))
if replica.location in locations:
self.assertEqual(len(neighbors), 4)
else:
self.assertEqual(len(neighbors), 5)
for neighbor in neighbors:
self.assertIn(neighbor.location, locations)
def test_neighbor_location_exclusion_filter(self):
"""
Test that the neighbors can be excluded by location.
"""
self.build_neighbors()
# Test the neighborhood filtering on a single location
for replica in self.sim.replicas:
neighbors = list(replica.neighbors(location=Location.ALPHA, exclude=True))
if replica.location != Location.ALPHA:
self.assertEqual(len(neighbors), 7)
else:
self.assertEqual(len(neighbors), 8)
for neighbor in neighbors:
self.assertNotEqual(neighbor.location, Location.ALPHA)
# Test the neighborhood filtering on multiple locations
for replica in self.sim.replicas:
locations = {Location.ALPHA, Location.CHARLIE}
neighbors = list(replica.neighbors(location=locations, exclude=True))
if replica.location not in locations:
self.assertEqual(len(neighbors), 5)
else:
self.assertEqual(len(neighbors), 6)
for neighbor in neighbors:
self.assertNotIn(neighbor.location, locations)
def test_neighbor_location_and_consistency_filter(self):
"""
Test that the neighbors can be filtered on both location and consistency.
"""
self.build_neighbors()
# Test the neighborhood filtering on a single location
for replica in self.sim.replicas:
neighbors = list(replica.neighbors(location=Location.BRAVO, consistency=Consistency.STRONG))
if replica.location == Location.BRAVO and replica.consistency == Consistency.STRONG:
self.assertEqual(len(neighbors), 1)
else:
self.assertEqual(len(neighbors), 2)
for neighbor in neighbors:
self.assertEqual(neighbor.location, Location.BRAVO)
self.assertEqual(neighbor.consistency, Consistency.STRONG)
# Test the neighborhood filtering on multiple locations/consistencies
for replica in self.sim.replicas:
locations = {Location.ALPHA, Location.CHARLIE}
consistencies = {Consistency.CAUSAL, Consistency.EVENTUAL}
neighbors = list(replica.neighbors(location=locations, consistency=consistencies))
if replica.location in locations and replica.consistency in consistencies:
self.assertEqual(len(neighbors), 3)
else:
self.assertEqual(len(neighbors), 4)
for neighbor in neighbors:
self.assertIn(neighbor.location, locations)
self.assertIn(neighbor.consistency, consistencies)
def test_neighbor_location_and_consistency_exclusion_filter(self):
"""
Test that the neighbors can be excluded by both location and consistency.
"""
self.build_neighbors()
# Test the neighborhood filtering on a single location
for replica in self.sim.replicas:
neighbors = list(replica.neighbors(location=Location.BRAVO, consistency=Consistency.STRONG, exclude=True))
if replica.location != Location.BRAVO and replica.consistency != Consistency.STRONG:
self.assertEqual(len(neighbors), 3)
else:
self.assertEqual(len(neighbors), 4)
for neighbor in neighbors:
self.assertNotEqual(neighbor.location, Location.BRAVO)
self.assertNotEqual(neighbor.consistency, Consistency.STRONG)
# Test the neighborhood filtering on multiple locations/consistencies
for replica in self.sim.replicas:
locations = {Location.ALPHA, Location.CHARLIE}
consistencies = {Consistency.CAUSAL, Consistency.EVENTUAL}
neighbors = list(replica.neighbors(location=locations, consistency=consistencies, exclude=True))
if replica.location not in locations and replica.consistency not in consistencies:
self.assertEqual(len(neighbors), 1)
else:
self.assertEqual(len(neighbors), 2)
for neighbor in neighbors:
self.assertNotIn(neighbor.location, locations)
self.assertNotIn(neighbor.consistency, consistencies)
| {
"content_hash": "a940db666bb602bdd3e6aae9b0b6e512",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 118,
"avg_line_length": 38.12380952380953,
"alnum_prop": 0.6166208676825714,
"repo_name": "bbengfort/cloudscope",
"id": "467691b5a66b0d7a75209106f7330c901bf1f75d",
"size": "12333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_replica/test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2096"
},
{
"name": "HTML",
"bytes": "14259"
},
{
"name": "JavaScript",
"bytes": "30593"
},
{
"name": "Jupyter Notebook",
"bytes": "22404160"
},
{
"name": "Makefile",
"bytes": "832"
},
{
"name": "Python",
"bytes": "757413"
}
],
"symlink_target": ""
} |
import platform
import mock
from os_brick.initiator import connector
from nova.objects import fields as obj_fields
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import fibrechannel
class LibvirtFibreChannelVolumeDriverTestCase(
test_volume.LibvirtVolumeBaseTestCase):
def test_libvirt_fibrechan_driver(self):
for multipath in (True, False):
self.flags(volume_use_multipath=multipath, group='libvirt')
libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
self.fake_host)
self.assertIsInstance(libvirt_driver.connector,
connector.FibreChannelConnector)
if hasattr(libvirt_driver.connector, 'use_multipath'):
self.assertEqual(
multipath, libvirt_driver.connector.use_multipath)
def _test_libvirt_fibrechan_driver_s390(self):
libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
self.fake_host)
self.assertIsInstance(libvirt_driver.connector,
connector.FibreChannelConnectorS390X)
@mock.patch.object(platform, 'machine',
return_value=obj_fields.Architecture.S390)
def test_libvirt_fibrechan_driver_s390(self, mock_machine):
self._test_libvirt_fibrechan_driver_s390()
@mock.patch.object(platform, 'machine',
return_value=obj_fields.Architecture.S390X)
def test_libvirt_fibrechan_driver_s390x(self, mock_machine):
self._test_libvirt_fibrechan_driver_s390()
def test_libvirt_fibrechan_driver_get_config(self):
libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
self.fake_host)
device_path = '/dev/fake-dev'
connection_info = {'data': {'device_path': device_path}}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertEqual('block', tree.get('type'))
self.assertEqual(device_path, tree.find('./source').get('dev'))
self.assertEqual('raw', tree.find('./driver').get('type'))
self.assertEqual('native', tree.find('./driver').get('io'))
def test_extend_volume(self):
device_path = '/dev/fake-dev'
connection_info = {'data': {'device_path': device_path}}
libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
self.fake_host)
libvirt_driver.connector.extend_volume = mock.MagicMock(return_value=1)
new_size = libvirt_driver.extend_volume(connection_info,
mock.sentinel.instance)
self.assertEqual(1, new_size)
libvirt_driver.connector.extend_volume.assert_called_once_with(
connection_info['data'])
| {
"content_hash": "ad099a2c000d9540ee80c41ed8b8b695",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 79,
"avg_line_length": 44.161764705882355,
"alnum_prop": 0.6177156177156177,
"repo_name": "phenoxim/nova",
"id": "4289ac5aefb6b340c06408e040b25c1f39197e47",
"size": "3576",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16289098"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "282020"
}
],
"symlink_target": ""
} |
"""Setup script for Concurrent_AP, a scalable and concurrent programming
implementation of Affinity Propagation clustering.
Affinity Propagation is a clustering algorithm based on passing
messages between data-points.
Storing and updating matrices of 'affinities', 'responsibilities'
and 'similarities' between samples can be memory-intensive.
We address this issue through the use of an HDF5 data structure,
allowing Affinity Propagation clustering of arbitrarily large data-sets,
where other Python implementations would return a MemoryError
on most machines.
We also significantly speed up the computations by splitting them up
across subprocesses, thereby taking full advantage of the resources
of multi-core processors and bypassing the Global Interpreter Lock
of the standard Python interpreter, CPython.
Reference
---------
Brendan J. Frey and Delbert Dueck., "Clustering by Passing Messages Between Data Points".
In: Science, Vol. 315, no. 5814, pp. 972-976. 2007
"""
from codecs import open
from os import path
from sys import version
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding = 'utf-8') as f:
long_description = f.read()
setup(name = 'Concurrent_AP',
version = '1.4',
description = 'Scalable and parallel programming implementation of Affinity Propagation clustering',
long_description = long_description,
url = 'https://github.com/GGiecold/Concurrent_AP',
download_url = 'https://github.com/GGiecold/Concurrent_AP',
author = 'Gregory Giecold',
author_email = '[email protected]',
maintainer = 'Gregory Giecold',
maintainer_email = '[email protected]',
license = 'MIT License',
packages = find_packages(),
py_modules = ['Concurrent_AP'],
platforms = ('Any',),
install_requires = ['numpy>=1.9.0', 'psutil', 'sklearn', 'setuptools', 'tables'],
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Scientific/Engineering :: Mathematics', ],
keywords = 'parallel multiprocessing machine-learning concurrent clustering',
entry_points = {
'console_scripts': ['Concurrent_AP = Concurrent_AP:main'],
}
)
| {
"content_hash": "e07b6f1038609f1bb89e2e4c347f2213",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 106,
"avg_line_length": 38.025,
"alnum_prop": 0.6314924391847468,
"repo_name": "GGiecold/Concurrent_AP",
"id": "fee98fe2c98d51dd7587aba890b57e490f29ee99",
"size": "3234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45244"
}
],
"symlink_target": ""
} |
import datetime
from django.conf import settings
from rest_framework.settings import APISettings
USER_SETTINGS = getattr(settings, 'JWT_AUTH', None)
DEFAULTS = {
'JWT_ENCODE_HANDLER':
'rest_framework_jwt.utils.jwt_encode_handler',
'JWT_DECODE_HANDLER':
'rest_framework_jwt.utils.jwt_decode_handler',
'JWT_PAYLOAD_HANDLER':
'rest_framework_jwt.utils.jwt_payload_handler',
'JWT_PAYLOAD_GET_USER_ID_HANDLER':
'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',
'JWT_PAYLOAD_GET_USERNAME_HANDLER':
'rest_framework_jwt.utils.jwt_get_username_from_payload_handler',
'JWT_RESPONSE_PAYLOAD_HANDLER':
'rest_framework_jwt.utils.jwt_response_payload_handler',
'JWT_SECRET_KEY': settings.SECRET_KEY,
'JWT_ALGORITHM': 'HS256',
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_LEEWAY': 0,
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=300),
'JWT_AUDIENCE': None,
'JWT_ISSUER': None,
'JWT_ALLOW_REFRESH': False,
'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7),
'JWT_AUTH_HEADER_PREFIX': 'JWT',
'JWT_AUTH_QUERY_STRING_PREFIX': 'JWT',
}
# List of settings that may be in string import notation.
IMPORT_STRINGS = (
'JWT_ENCODE_HANDLER',
'JWT_DECODE_HANDLER',
'JWT_PAYLOAD_HANDLER',
'JWT_PAYLOAD_GET_USER_ID_HANDLER',
'JWT_PAYLOAD_GET_USERNAME_HANDLER',
'JWT_RESPONSE_PAYLOAD_HANDLER',
)
api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)
| {
"content_hash": "391f42561c6d2350f6cb2b54b2d7acfd",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 69,
"avg_line_length": 27.90740740740741,
"alnum_prop": 0.6914399469143995,
"repo_name": "sandipbgt/django-rest-framework-jwt",
"id": "65ca6a80e1c65c61f2fdf4ab75faa06c912e9462",
"size": "1507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_framework_jwt/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57812"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from django.db import migrations
from django.contrib.postgres.operations import TrigramExtension, UnaccentExtension
from django.conf import settings
class Migration(migrations.Migration):
# Install the postgres extensions in dev only
if settings.DEBUG or getattr(settings, 'CREATE_DB_EXTENSIONS', False):
dependencies = [
('organizations', '0002_auto_20160610_1554'),
]
operations = [
UnaccentExtension(),
TrigramExtension()
]
else:
print('Skipping postgresql \'unaccent\' and \'trigram\' extension installation')
| {
"content_hash": "1b9a483f66ed5d680fb69d507051b6d0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 88,
"avg_line_length": 32.57142857142857,
"alnum_prop": 0.685672514619883,
"repo_name": "onepercentclub/bluebottle",
"id": "6a2d748547fb4c5c0dcdc744df63fc45d8488ef9",
"size": "757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/organizations/migrations/0003_auto_20170314_0900.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
import os
import json
class CredentialStore(object):
"""
Load Credentials from local store
"""
creds = None
def __init__(self):
super(CredentialStore, self).__init__()
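        # Lazily load credentials.json (stored next to this module) the first
        # time a CredentialStore is created; later instances reuse the cache.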
if CredentialStore.creds is None:
module_dir = os.path.dirname(__file__)
file_path = os.path.join(module_dir, 'credentials.json')
print("Looking for file ", file_path)
try:
with open(file_path) as f:
CredentialStore.creds = json.loads(f.read())
except FileNotFoundError:
print("Credential File was not found")
def getCreds(self, service):
if CredentialStore.creds:
return CredentialStore.creds.get(service, None)
else:
return None
| {
"content_hash": "d02bd98799fc87b0e0cecd91d02b8dae",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 65,
"avg_line_length": 27,
"alnum_prop": 0.6186556927297668,
"repo_name": "watson-developer-cloud/python-primer-companion-code",
"id": "4c8931efcca6a0b67ccd57a6875aadb7c8144dd5",
"size": "1323",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "episode-6/flask/src/watsonutils/creds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "17961"
},
{
"name": "JavaScript",
"bytes": "12882"
},
{
"name": "Python",
"bytes": "169307"
},
{
"name": "Shell",
"bytes": "6043"
}
],
"symlink_target": ""
} |
from ... colors import gamma
from .. channel_order import ChannelOrder
from . base import SPIBase
class APA102(SPIBase):
"""Driver for APA102/SK9822 based LED strips on devices like
the Raspberry Pi and BeagleBone
Provides the same parameters as
:py:class:`bibliopixel.drivers.SPI.SPIBase`
"""
def __init__(self, num, gamma=gamma.APA102, **kwargs):
super().__init__(num, gamma=gamma, **kwargs)
# APA102/SK9822 requires latch bytes at the end)
# Many thanks to this article for combined APA102/SK9822 protocol
# https://cpldcpu.com/2016/12/13/sk9822-a-clone-of-the-apa102/
self._start_frame = 4 # start frame is [0, 0, 0, 0]
self._pixel_bytes = self.numLEDs * 4 # 4 byte frames [bright, r, g, b]
self._pixel_stop = self._start_frame + self._pixel_bytes
self._reset_frame = 4 # for SK9822 [0, 0, 0, 0]
self._end_frame = (num // 2) + 1
self._packet = self.maker.bytes(self._start_frame + self._pixel_bytes +
self._reset_frame + self._end_frame)
self.set_device_brightness(0xFF) # required to setup _packet
def set_device_brightness(self, val):
"""
APA102 & SK9822 support on-chip brightness control, allowing greater
color depth.
APA102 superimposes a 440Hz PWM on the 19kHz base PWM to control
brightness. SK9822 uses a base 4.7kHz PWM but controls brightness with a
variable current source.
Because of this SK9822 will have much less flicker at lower levels.
Either way, this option is better and faster than scaling in
BiblioPixel.
"""
# bitshift to scale from 8 bit to 5
self._chipset_brightness = (val >> 3)
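        # e.g. val=0x80 becomes 0x10, so each LED frame begins with 0xE0 + 0x10 = 0xF0
        # (the top three bits of the brightness byte are always set).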
self._brightness_list = [0xE0 + self._chipset_brightness] * self.numLEDs
self._packet[self._start_frame:self._pixel_stop:4] = (
self._brightness_list)
def _compute_packet(self):
self._render()
self._packet[self._start_frame + 1:self._pixel_stop:4] = self._buf[0::3]
self._packet[self._start_frame + 2:self._pixel_stop:4] = self._buf[1::3]
self._packet[self._start_frame + 3:self._pixel_stop:4] = self._buf[2::3]
| {
"content_hash": "20b5fcad0f1233d7178f7341c0a750e7",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 80,
"avg_line_length": 42.490566037735846,
"alnum_prop": 0.622113676731794,
"repo_name": "rec/BiblioPixel",
"id": "46e311d92999d21c1733a5ef10e4db77cf646c71",
"size": "2252",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "bibliopixel/drivers/SPI/APA102.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20651"
},
{
"name": "HTML",
"bytes": "3310"
},
{
"name": "JavaScript",
"bytes": "5140"
},
{
"name": "Python",
"bytes": "673520"
},
{
"name": "Shell",
"bytes": "2973"
}
],
"symlink_target": ""
} |
'''
Generate dummy data, and compare output from scikit-learn's DBSCAN.
Example code based on:
http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html#sphx-glr-auto-examples-cluster-plot-dbscan-py
Run with pytest, e.g.:
py.test test.py
'''
import os
import shutil
import subprocess
from sklearn.cluster import DBSCAN
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import pandas as pd
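# DBSCAN parameters shared by both the q and scikit-learn runs.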
EPS = .1
MIN_SAMPLES = 3
def test_compare():
'''Compare result of our DBSCAN to scikit-learn
'''
# Make temp directory for dumping intermediate files
os.mkdir('tmp')
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1], [-3, 3], [3, 3]]
X, _ = make_blobs(
n_samples=1000, centers=centers, cluster_std=0.3, random_state=0)
X = StandardScaler().fit_transform(X)
# Write sample data
pd.DataFrame(X).to_csv('tmp/sample_data.csv', header=None, index=False)
# Compute our DBSCAN
# Run in a python subprocess which sends a few lines of q into the stdin
# of a q interpreter. Assumed to run in same directory as dbscan.q module.
subprocess.run(f'''echo ' \
system "l dbscan.q"; \
t:value each flip `x`y!("FF";",") 0: `$"tmp/sample_data.csv"; \
d:dbscan[t;{MIN_SAMPLES};{EPS}]; \
(`:tmp/q.csv) 0: .h.tx[`csv;flip enlist[`labels]!enlist[d]]' | \
$QHOME/m32/q -q''', shell=True, stdout=subprocess.DEVNULL)
qlabels = pd.read_csv('tmp/q.csv')['labels']
# Compute scikit-learn's DBSCAN
db = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES).fit(X)
pylabels = db.labels_
# Compare
assert (qlabels == pylabels).all()
# Cleanup temp directory
shutil.rmtree('tmp')
| {
"content_hash": "ccc81786388c77ac2b56c714f9af56fc",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 117,
"avg_line_length": 28.21311475409836,
"alnum_prop": 0.6693782684485764,
"repo_name": "jlas/ml.q",
"id": "cb6f44a5d2fe2d527d241196a158c299f2bc82a6",
"size": "1721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbscan/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1721"
},
{
"name": "q",
"bytes": "25386"
}
],
"symlink_target": ""
} |
""" Module for motif activity prediction """
from __future__ import print_function
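# Silence all Python warnings by monkey-patching warnings.warn before the
# heavier imports below.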
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import os
import sys
import shutil
from functools import partial
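# Python 2/3 compatibility: itertools.izip does not exist on Python 3, where the
# built-in zip is already lazy.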
try:
from itertools import izip
except:
izip = zip
import itertools
import logging
from multiprocessing import Pool
import pandas as pd
import numpy as np
from scipy.stats import ks_2samp, hypergeom, mannwhitneyu
from scipy.cluster.hierarchy import linkage, fcluster
from statsmodels.sandbox.stats.multicomp import multipletests
from tqdm import tqdm
# scikit-learn
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import Ridge, MultiTaskLasso, BayesianRidge
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.preprocessing import scale, LabelEncoder
from lightning.classification import CDClassifier
from lightning.regression import CDRegressor
import xgboost
from gimmemotifs import __version__
from gimmemotifs.motif import read_motifs
from gimmemotifs.scanner import Scanner
from gimmemotifs.config import MotifConfig
from gimmemotifs.utils import pwmfile_location
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
logger = logging.getLogger("gimme.maelstrom")
class Moap(object):
"""Moap base class.
Motif activity prediction.
"""
_predictors = {}
name = None
@classmethod
def create(cls, name, ncpus=None):
"""Create a Moap instance based on the predictor name.
Parameters
----------
name : str
            Name of the predictor (e.g. Xgboost, BayesianRidge, ...)
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Returns
-------
moap : Moap instance
moap instance.
"""
try:
return cls._predictors[name.lower()](ncpus=ncpus)
except KeyError:
raise Exception("Unknown class")
@classmethod
def register_predictor(cls, name):
"""Register method to keep list of predictors."""
def decorator(subclass):
"""Register as decorator function."""
cls._predictors[name.lower()] = subclass
subclass.name = name.lower()
return subclass
return decorator
@classmethod
    def list_predictors(cls):
        """List available predictors."""
        return list(cls._predictors.keys())
@classmethod
    def list_classification_predictors(cls):
        """List available classification predictors."""
        preds = [cls.create(x) for x in cls._predictors.keys()]
        return [x.name for x in preds if x.ptype == "classification"]
@classmethod
    def list_regression_predictors(cls):
        """List available regression predictors."""
        preds = [cls.create(x) for x in cls._predictors.keys()]
        return [x.name for x in preds if x.ptype == "regression"]
register_predictor = Moap.register_predictor
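# Editorial sketch (not part of the original module): Moap plus register_predictor
# form a small plugin registry, so a predictor is looked up by lower-cased name
# instead of being imported directly, e.g.:
#
#     clf = Moap.create("bayesianridge", ncpus=2)
#     clf.fit(df_X, df_y)           # df_X: regions x motifs, df_y: regions x experiments
#     activities = clf.act_         # DataFrame of motifs x clusters/experiments
#     print(Moap.list_predictors())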
def br_fit(X, y):
model = BayesianRidge()
model.fit(X, y)
return model.coef_
def br_fit_star(args):
return br_fit(*args)
@register_predictor('BayesianRidge')
class BayesianRidgeMoap(Moap):
def __init__(self, scale=True, ncpus=None):
"""Predict motif activities using Bayesian Ridge Regression.
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification.
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
Coefficients of the regression model.
"""
self.act_description = ("activity values: coefficients of the"
"regression model")
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y):
logger.info("Fitting BayesianRidge")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
logger.debug("Scaling motif scores")
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
#logger.debug("Scaling y")
# Normalize across samples and features
#y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
logger.debug("Fitting model")
pool = Pool(self.ncpus)
coefs = [x for x in tqdm(pool.imap(br_fit_star, izip(itertools.repeat(X), [y[col] for col in y.columns])), total=len(y.columns))]
logger.info("Done")
self.act_ = pd.DataFrame(coefs, columns=X.columns, index=y.columns).T
@register_predictor('Xgboost')
class XgboostRegressionMoap(Moap):
def __init__(self, scale=True, ncpus=None):
"""Predict motif activities using XGBoost.
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
Feature scores.
"""
self.act_description = ("activity values: feature scores from"
"fitted model")
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y):
logger.info("Fitting XGBoostRegression")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
# Normalize across samples and features
#y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
# Define model
xgb = xgboost.XGBRegressor(
n_estimators=100,
learning_rate=0.1,
nthread=self.ncpus,
min_child_weight=2,
max_depth=3,
subsample=0.75,
colsample_bytree=0.75,
objective='reg:linear')
logger.debug("xgb: 0%")
self.act_ = pd.DataFrame(index=X.columns)
# Fit model
for i,col in enumerate(tqdm(y.columns)):
xgb.fit(X, y[col].values)
d = xgb.get_booster().get_fscore()
self.act_[col] = [d.get(m, 0) for m in X.columns]
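            # Editorial note: XGBoost feature scores are non-negative, so the loop
            # below assigns each motif a sign by comparing the mean response of
            # regions in the top quartile of that motif's scores against regions in
            # the bottom quartile; if the low-score group responds more strongly,
            # the score is flipped to negative.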
for motif in self.act_.index:
if self.act_.loc[motif, col] != 0:
high = df_y.loc[df_X[motif] >= df_X[motif].quantile(0.75), col].mean()
low = df_y.loc[df_X[motif] <= df_X[motif].quantile(0.25), col].mean()
if low > high:
self.act_.loc[motif, col] *= -1
logger.debug("..{}%".format(int(float(i + 1)/ len(y.columns) * 100)))
logger.info("Done")
@register_predictor('LightningRegressor')
class LightningRegressionMoap(Moap):
def __init__(self, scale=True, cv=3, ncpus=None):
"""Predict motif activities using lightning CDRegressor
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification
cv : int, optional, default 3
Cross-validation k-fold parameter.
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
fitted coefficients
sig_ : DataFrame, shape (n_motifs,)
            boolean values indicating whether a coefficient is higher/lower than
            the 99th/1st percentile obtained from random permutations
"""
self.act_description = ("activity values: coefficients from "
"fitted model")
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.kfolds = cv
self.scale = scale
self.act_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y, batch_size=50, shuffle=True, tmpdir=None):
logger.info("Fitting LightningRegression")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
# Normalize across samples and features
#y = df_y.apply(scale, 1).apply(scale, 0)
y = df_y
X = df_X.loc[y.index]
if not y.shape[0] == X.shape[0]:
raise ValueError("number of regions is not equal")
# Define model
cd = CDRegressor(penalty="l1/l2", C=1.0/X.shape[0])
parameters = {
"alpha": [np.exp(-x) for x in np.arange(0, 8, 1/2.5)],
}
clf = GridSearchCV(cd, parameters, n_jobs=self.ncpus)
nsplits = int(y.shape[1] / batch_size)
if shuffle:
idx = list(y.sample(y.shape[1], axis=1, random_state=42).columns)
else:
idx = list(y.columns)
if tmpdir:
if not os.path.exists(tmpdir):
os.mkdir(tmpdir)
coefs = pd.DataFrame(index=X.columns)
start_i = 0
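        # Editorial note: when a tmpdir is given, every completed batch of target
        # columns is stored as "<i>.feather" plus an empty "<i>.feather.done"
        # marker; the loop below reloads finished batches and resumes fitting at
        # the first batch whose marker is missing.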
if tmpdir:
for i in range(0, len(idx), batch_size):
fname = os.path.join(tmpdir, "{}.feather".format(i))
if os.path.exists(fname) and os.path.exists(fname + ".done"):
tmp = pd.read_feather(fname)
tmp = tmp.set_index(tmp.columns[0])
coefs = coefs.join(tmp)
else:
logger.info("Resuming at batch {}".format(i))
start_i = i
break
for i in tqdm(range(start_i, len(idx), batch_size)):
split_y = y[idx[i:i+batch_size]]
# Fit model
clf.fit(X.values, split_y.values)
tmp = pd.DataFrame(clf.best_estimator_.coef_.T,
index=X.columns, columns = split_y.columns)
if tmpdir:
fname = os.path.join(tmpdir, "{}.feather".format(i))
tmp.reset_index().rename(columns=str).to_feather(fname)
# Make sure we don't read corrupted files
open(fname + ".done", "a").close()
# Get coefficients
coefs = coefs.join(tmp)
# Get coefficients
self.act_ = coefs[y.columns]
logger.info("Done")
@register_predictor('LightningClassification')
class LightningClassificationMoap(Moap):
def __init__(self, scale=True, permute=False, ncpus=None):
"""Predict motif activities using lightning CDClassifier
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
fitted coefficients
sig_ : DataFrame, shape (n_motifs,)
            boolean values indicating whether a coefficient is higher/lower than
            the 99th/1st percentile obtained from random permutations
"""
self.act_description = ("activity values: coefficients from "
"fitted model")
#self.cdc = CDClassifier(random_state=args.seed)
self.cdc = CDClassifier()
self.parameters = {
"penalty": ["l1/l2"],
"loss": ["squared_hinge"],
"multiclass":[True],
"max_iter":[20],
"alpha": [np.exp(-x) for x in np.arange(0, 10, 1/3.0)],
"C":[0.001, 0.01, 0.1, 0.5, 1.0],
"tol":[1e-3]
}
self.kfolds = 10
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.clf = GridSearchCV(self.cdc, self.parameters,
cv=self.kfolds, n_jobs=ncpus)
self.scale = scale
self.permute = permute
self.act_ = None
self.sig_ = None
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting LightningClassification")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
idx = list(range(df_y.shape[0]))
y = df_y.iloc[idx]
X = df_X.loc[y.index].values
y = y.values.flatten()
# Convert (putative) string labels
l = LabelEncoder()
y = l.fit_transform(y)
# Split data
X_train,X_test,y_train,y_test = train_test_split(X,y)
logger.debug("Setting parameters through cross-validation")
# Determine best parameters based on CV
self.clf.fit(X_train,y_train)
logger.debug("Average score ({} fold CV): {}".format(
self.kfolds,
self.clf.score(X_test, y_test)
))
logger.debug("Estimate coefficients using bootstrapping")
        # Estimate coefficients using bootstrapping
#b = BaggingClassifier(self.clf.best_estimator_,
# max_samples=0.75, n_jobs=-1, random_state=state)
b = BaggingClassifier(self.clf.best_estimator_,
max_samples=0.75, n_jobs=-1)
b.fit(X,y)
# Get mean coefficients
coeffs = np.array([e.coef_ for e in b.estimators_]).mean(axis=0)
# Create dataframe of predicted coefficients
if len(l.classes_) == 2:
self.act_ = pd.DataFrame(np.hstack((-coeffs.T, coeffs.T)))
else:
self.act_ = pd.DataFrame(coeffs.T)
# Convert labels back to original names
self.act_.columns = l.inverse_transform(range(len(l.classes_)))
self.act_.index = df_X.columns
if self.permute:
# Permutations
logger.debug("Permutations")
random_dfs = []
for _ in range(10):
y_random = np.random.permutation(y)
b.fit(X,y_random)
coeffs = np.array([e.coef_ for e in b.estimators_]).mean(axis=0)
if len(l.classes_) == 2:
random_dfs.append(pd.DataFrame(np.hstack((-coeffs.T, coeffs.T))))
else:
random_dfs.append(pd.DataFrame(coeffs.T))
random_df = pd.concat(random_dfs)
# Select cutoff based on percentile
high_cutoffs = random_df.quantile(0.99)
low_cutoffs = random_df.quantile(0.01)
# Set significance
self.sig_ = pd.DataFrame(index=df_X.columns)
self.sig_["sig"] = False
for col,c_high,c_low in zip(
self.act_.columns, high_cutoffs, low_cutoffs):
self.sig_["sig"].loc[self.act_[col] >= c_high] = True
self.sig_["sig"].loc[self.act_[col] <= c_low] = True
logger.info("Done")
@register_predictor('MWU')
class MWUMoap(Moap):
def __init__(self, *args, **kwargs):
"""Predict motif activities using Mann-Whitney U p-value
This method compares the motif score distribution of each
cluster versus the motif score distribution of all other
clusters.
Parameters
----------
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
-log10 of the Mann-Whitney U p-value, corrected for multiple
testing using the Benjamini-Hochberg correction
"""
self.act_ = None
self.act_description = ("activity values: BH-corrected "
"-log10 Mann-Whitney U p-value")
self.pref_table = "score"
self.supported_tables = ["score"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting MWU")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
# calculate Mann-Whitney U p-values
pvals = []
clusters = df_y[df_y.columns[0]].unique()
for cluster in clusters:
pos = df_X[df_y.iloc[:,0] == cluster]
neg = df_X[df_y.iloc[:,0] != cluster]
p = []
for m in pos:
try:
p.append(mannwhitneyu(pos[m], neg[m], alternative="greater")[1])
except Exception as e:
sys.stderr.write(str(e) + "\n")
sys.stderr.write("motif {} failed, setting to p = 1\n".format(m))
p.append(1)
pvals.append(p)
        # correct for multiple testing
pvals = np.array(pvals)
fpr = multipletests(pvals.flatten(),
method="fdr_bh")[1].reshape(pvals.shape)
# create output DataFrame
self.act_ = pd.DataFrame(-np.log10(fpr.T),
columns=clusters, index=df_X.columns)
logger.info("Done")
@register_predictor('Hypergeom')
class HypergeomMoap(Moap):
def __init__(self, *args, **kwargs):
"""Predict motif activities using hypergeometric p-value
Parameters
----------
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
-log10 of the hypergeometric p-value, corrected for multiple
testing using the Benjamini-Hochberg correction
"""
self.act_ = None
self.act_description = ("activity values: BH-corrected "
"hypergeometric p-values")
self.pref_table = "count"
self.supported_tables = ["count"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting Hypergeom")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
if set(df_X.dtypes) != set([np.dtype(int)]):
raise ValueError("need motif counts, not scores")
# calculate hypergeometric p-values
pvals = []
clusters = df_y[df_y.columns[0]].unique()
M = df_X.shape[0]
for cluster in clusters:
pos = df_X[df_y.iloc[:,0] == cluster]
neg = df_X[df_y.iloc[:,0] != cluster]
pos_true = (pos > 0).sum(0)
pos_false = (pos == 0).sum(0)
neg_true = (neg > 0).sum(0)
p = []
for pt, pf, nt in zip(pos_true, pos_false, neg_true):
n = pt + nt
N = pt + pf
x = pt - 1
p.append(hypergeom.sf(x, M, n, N))
pvals.append(p)
        # correct for multiple testing
pvals = np.array(pvals)
fpr = multipletests(pvals.flatten(),
method="fdr_bh")[1].reshape(pvals.shape)
# create output DataFrame
self.act_ = pd.DataFrame(-np.log10(fpr.T),
columns=clusters, index=df_X.columns)
logger.info("Done")
@register_predictor('RF')
class RFMoap(Moap):
def __init__(self, ncpus=None):
"""Predict motif activities using a random forest classifier
Parameters
----------
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
feature importances from the model
"""
self.act_ = None
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
self.act_description = ("activity values: feature importances "
"from fitted Random Forest model")
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "classification"
def fit(self, df_X, df_y):
logger.info("Fitting RF")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if df_y.shape[1] != 1:
raise ValueError("y needs to have 1 label column")
le = LabelEncoder()
y = le.fit_transform(df_y.iloc[:,0].values)
clf = RandomForestClassifier(n_estimators=100, n_jobs=self.ncpus)
# Multiclass
if len(le.classes_) > 2:
orc = OneVsRestClassifier(clf)
orc.fit(df_X.values, y)
importances = np.array([c.feature_importances_ for c in orc.estimators_]).T
else: # Only two classes
clf.fit(df_X.values, y)
importances = np.array([
clf.feature_importances_,
clf.feature_importances_
]).T
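        # Editorial note: random-forest importances are non-negative, so the loop
        # below gives each motif a per-class sign by comparing the 75th percentile
        # of its scores inside the class against outside it; motifs scoring lower
        # inside the class become negative.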
for i,c in enumerate(le.classes_):
diff = df_X.loc[y == i].quantile(q=0.75) - df_X.loc[y != i].quantile(q=0.75)
sign = (diff >= 0) * 2 - 1
importances[:,i] *= sign
# create output DataFrame
self.act_ = pd.DataFrame(importances,
columns=le.inverse_transform(range(len(le.classes_))),
index=df_X.columns)
logger.info("Done")
@register_predictor('Lasso')
class LassoMoap(Moap):
def __init__(self, scale=True, kfolds=4, alpha_stepsize=1.0, ncpus=None):
"""Predict motif activities using Lasso MultiTask regression
Parameters
----------
scale : boolean, optional, default True
If ``True``, the motif scores will be scaled
before classification
        kfolds : integer, optional, default 4
number of kfolds for parameter search
alpha_stepsize : float, optional, default 1.0
stepsize for use in alpha gridsearch
ncpus : int, optional
Number of threads. Default is the number specified in the config.
Attributes
----------
act_ : DataFrame, shape (n_motifs, n_clusters)
fitted motif activities
sig_ : DataFrame, shape (n_motifs,)
            boolean values indicating whether a coefficient is higher/lower than
            the 99th/1st percentile obtained from random permutations
"""
self.kfolds = kfolds
self.act_description = ("activity values: coefficients from "
"fitted model")
self.scale = scale
if ncpus is None:
ncpus = int(MotifConfig().get_default_params().get("ncpus", 2))
self.ncpus = ncpus
# initialize attributes
self.act_ = None
self.sig_ = None
mtk = MultiTaskLasso()
parameters = {
"alpha": [np.exp(-x) for x in np.arange(0, 10, alpha_stepsize)],
}
self.clf = GridSearchCV(mtk, parameters, cv=kfolds, n_jobs=self.ncpus)
self.pref_table = "score"
self.supported_tables = ["score", "count"]
self.ptype = "regression"
def fit(self, df_X, df_y, permute=False):
logger.info("Fitting Lasso")
if not df_y.shape[0] == df_X.shape[0]:
raise ValueError("number of regions is not equal")
if self.scale:
# Scale motif scores
df_X[:] = scale(df_X, axis=0)
idx = list(range(df_y.shape[0]))
y = df_y.iloc[idx]
X = df_X.loc[y.index].values
y = y.values
# fit coefficients
coefs = self._get_coefs(X, y)
self.act_ = pd.DataFrame(coefs.T)
# convert labels back to original names
self.act_.columns = df_y.columns
self.act_.index = df_X.columns
if permute:
# Permutations
logger.info("permutations\n")
random_dfs = []
for _ in range(10):
y_random = y[np.random.permutation(range(y.shape[0]))]
coefs = self._get_coefs(X, y_random)
random_dfs.append(pd.DataFrame(coefs.T))
random_df = pd.concat(random_dfs)
# Select cutoff based on percentile
high_cutoffs = random_df.quantile(0.99)
low_cutoffs = random_df.quantile(0.01)
# Set significance
self.sig_ = pd.DataFrame(index=df_X.columns)
self.sig_["sig"] = False
for col,c_high,c_low in zip(
self.act_.columns, high_cutoffs, low_cutoffs):
self.sig_["sig"].loc[self.act_[col] >= c_high] = True
self.sig_["sig"].loc[self.act_[col] <= c_low] = True
logger.info("Done")
def _get_coefs(self, X, y):
logger.info("set alpha through cross-validation\n")
# Determine best parameters based on CV
self.clf.fit(X, y)
logger.debug("average score ({} fold CV): {}".format(
self.kfolds,
self.clf.best_score_
))
logger.info("Estimate coefficients using bootstrapping\n")
n_samples = 0.75 * X.shape[0]
max_samples = X.shape[0]
m = self.clf.best_estimator_
coefs = []
for _ in range(10):
idx = np.random.randint(0, n_samples, max_samples)
m.fit(X[idx], y[idx])
coefs.append(m.coef_)
coefs = np.array(coefs).mean(axis=0)
return coefs
def moap(inputfile, method="hypergeom", scoring=None, outfile=None, motiffile=None, pwmfile=None, genome=None, fpr=0.01, ncpus=None,
subsample=None):
"""Run a single motif activity prediction algorithm.
Parameters
----------
inputfile : str
        File with regions (chr:start-end) in first column and either cluster
name in second column or a table with values.
method : str, optional
Motif activity method to use. Any of 'hypergeom', 'lasso',
'lightningclassification', 'lightningregressor', 'bayesianridge',
'rf', 'xgboost'. Default is 'hypergeom'.
scoring: str, optional
Either 'score' or 'count'
outfile : str, optional
Name of outputfile to save the fitted activity values.
motiffile : str, optional
Table with motif scan results. First column should be exactly the same
regions as in the inputfile.
pwmfile : str, optional
File with motifs in pwm format. Required when motiffile is not
supplied.
genome : str, optional
Genome name, as indexed by gimme. Required when motiffile is not
supplied
fpr : float, optional
FPR for motif scanning
ncpus : int, optional
Number of threads to use. Default is the number specified in the config.
Returns
-------
pandas DataFrame with motif activity
"""
if scoring and scoring not in ['score', 'count']:
raise ValueError("valid values are 'score' and 'count'")
config = MotifConfig()
if inputfile.endswith("feather"):
df = pd.read_feather(inputfile)
df = df.set_index(df.columns[0])
else:
# read data
df = pd.read_table(inputfile, index_col=0, comment="#")
clf = Moap.create(method, ncpus=ncpus)
if clf.ptype == "classification":
if df.shape[1] != 1:
raise ValueError("1 column expected for {}".format(method))
else:
if np.dtype('object') in set(df.dtypes):
raise ValueError(
"columns should all be numeric for {}".format(method))
if motiffile is None:
if genome is None:
raise ValueError("need a genome")
pwmfile = pwmfile_location(pwmfile)
try:
motifs = read_motifs(pwmfile)
except:
sys.stderr.write("can't read motifs from {}".format(pwmfile))
raise
# initialize scanner
s = Scanner(ncpus=ncpus)
sys.stderr.write(pwmfile + "\n")
s.set_motifs(pwmfile)
s.set_genome(genome)
s.set_background(genome=genome)
# scan for motifs
sys.stderr.write("scanning for motifs\n")
motif_names = [m.id for m in read_motifs(pwmfile)]
scores = []
if method == 'classic' or scoring == "count":
s.set_threshold(fpr=fpr)
for row in s.count(list(df.index)):
scores.append(row)
else:
for row in s.best_score(list(df.index), normalize=True):
scores.append(row)
motifs = pd.DataFrame(scores, index=df.index, columns=motif_names)
else:
motifs = pd.read_table(motiffile, index_col=0, comment="#")
if outfile and os.path.exists(outfile):
out = pd.read_table(outfile, index_col=0, comment="#")
ncols = df.shape[1]
if ncols == 1:
ncols = len(df.iloc[:,0].unique())
if out.shape[0] == motifs.shape[1] and out.shape[1] == ncols:
logger.warn("%s output already exists... skipping", method)
return out
if subsample is not None:
n = int(subsample * df.shape[0])
logger.debug("Subsampling %d regions", n)
df = df.sample(n)
motifs = motifs.loc[df.index]
if method == "lightningregressor":
outdir = os.path.dirname(outfile)
tmpname = os.path.join(outdir, ".lightning.tmp")
clf.fit(motifs, df, tmpdir=tmpname)
shutil.rmtree(tmpname)
else:
clf.fit(motifs, df)
if outfile:
with open(outfile, "w") as f:
f.write("# maelstrom - GimmeMotifs version {}\n".format(__version__))
f.write("# method: {} with motif {}\n".format(method, scoring))
if genome:
f.write("# genome: {}\n".format(genome))
if motiffile:
f.write("# motif table: {}\n".format(motiffile))
f.write("# {}\n".format(clf.act_description))
with open(outfile, "a") as f:
clf.act_.to_csv(f, sep="\t")
return clf.act_
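# Editorial usage sketch (not part of the original module); the input file name,
# genome build and output path below are hypothetical.
if __name__ == "__main__":
    # clusters.txt: regions (chr:start-end) in the first column, a cluster label
    # in the second; motifs are scanned against the genome at the given FPR.
    activities = moap(
        "clusters.txt",
        method="hypergeom",
        scoring="count",
        genome="hg38",
        fpr=0.01,
        outfile="motif_activities.txt",
    )
    print(activities.head())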
| {
"content_hash": "58ec10b65490e38a853623b8b0c31e10",
"timestamp": "",
"source": "github",
"line_count": 950,
"max_line_length": 137,
"avg_line_length": 33.76421052631579,
"alnum_prop": 0.542118718044644,
"repo_name": "simonvh/gimmemotifs",
"id": "0dd892455f97153f28a19ff94a2a702a521d24a2",
"size": "32319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gimmemotifs/moap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1111371"
},
{
"name": "C",
"bytes": "10269329"
},
{
"name": "C++",
"bytes": "81751"
},
{
"name": "CSS",
"bytes": "22883"
},
{
"name": "HTML",
"bytes": "1499341"
},
{
"name": "JavaScript",
"bytes": "173148"
},
{
"name": "M4",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "1192597"
},
{
"name": "Objective-C",
"bytes": "27794"
},
{
"name": "Perl",
"bytes": "455806"
},
{
"name": "PostScript",
"bytes": "116832"
},
{
"name": "Prolog",
"bytes": "16013"
},
{
"name": "Python",
"bytes": "495382"
},
{
"name": "Shell",
"bytes": "470821"
},
{
"name": "XSLT",
"bytes": "437878"
}
],
"symlink_target": ""
} |
from zope.interface import implements
from norm.interface import IAsyncCursor
from norm.orm.base import (classInfo, objectInfo, Converter, BaseOperator)
from norm.orm.props import String, Unicode
from norm.orm.expr import compiler, Compiler
def translateSQL(sql):
# this is naive
return sql.replace('?', '%s')
class PostgresCursorWrapper(object):
implements(IAsyncCursor)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, sql, params=()):
sql = translateSQL(sql)
ret = self.cursor.execute(sql, params)
return ret
def lastRowId(self):
d = self.cursor.execute('select lastval()')
d.addCallback(lambda _: self.cursor.fetchone())
return d.addCallback(lambda row: row[0])
def fetchone(self):
return self.cursor.fetchone()
def fetchall(self):
return self.cursor.fetchall()
def close(self):
return self.cursor.close()
toDB = Converter()
@toDB.when(str)
@toDB.when(String)
def stringToDB(pythonval):
if pythonval is None:
return None
return buffer(pythonval)
fromDB = Converter()
@fromDB.when(String)
def strToString(dbval):
if type(dbval) is unicode:
return dbval.encode('utf-8')
elif type(dbval) is buffer:
return str(dbval)
return dbval
@fromDB.when(Unicode)
def unicodeToString(dbval):
if type(dbval) is unicode:
return dbval
elif type(dbval) is str:
return dbval.decode('utf-8')
elif type(dbval) is buffer:
return str(dbval).decode('utf-8')
return dbval
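# Editorial sketch (not part of the original module): Converter dispatches on the
# property class registered with @when, so a String value round-trips roughly as
#
#     dbval = toDB.convert(String, 'raw bytes')    # -> buffer('raw bytes')
#     pyval = fromDB.convert(String, dbval)        # -> 'raw bytes'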
postgres_compiler = Compiler([compiler])
class PostgresOperator(BaseOperator):
"""
I provide PostgreSQL-specific methods for ORM-based database interactions.
"""
compiler = postgres_compiler
fromDB = fromDB
toDB = toDB
def insert(self, cursor, obj):
"""
Insert a row into the database. This function expects to be run in an
asynchronous interaction.
"""
info = objectInfo(obj)
cls_info = classInfo(obj)
changed = info.changed()
# insert
insert = []
insert_args = []
if not changed:
# no changes
insert = ['INSERT INTO %s DEFAULT VALUES' % (cls_info.table,)]
else:
# changes
columns = []
for prop in changed:
columns.append(prop.column_name)
value = toDB.convert(prop.__class__, prop.toDatabase(obj))
insert_args.append(value)
value_placeholders = ['?'] * len(columns)
insert = ['INSERT INTO %s (%s) VALUES (%s)' % (cls_info.table,
','.join(columns), ','.join(value_placeholders))]
# returning
columns = cls_info.columns.keys()
returning = ['RETURNING %s' % (','.join(columns),)]
sql = ' '.join(insert + returning)
args = tuple(insert_args)
d = cursor.execute(sql, args)
d.addCallback(lambda _: cursor.fetchone())
d.addCallback(self._updateObject, obj)
return d
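# Editorial note (not part of the original module; table and column names below
# are hypothetical): for an object with two changed columns on a table "person",
# insert() above assembles roughly
#
#     INSERT INTO person (name,email) VALUES (?,?) RETURNING id,name,email
#
# and PostgresCursorWrapper.execute rewrites the '?' placeholders to '%s' via
# translateSQL before handing the statement to the driver.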
| {
"content_hash": "5b392babeb962ada882b93d361823eb8",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 78,
"avg_line_length": 22.99264705882353,
"alnum_prop": 0.6002558362647905,
"repo_name": "iffy/norm",
"id": "77330ad59cffe6928cdfd932b962d6e68594381e",
"size": "3185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "norm/postgres.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "141944"
},
{
"name": "Shell",
"bytes": "6697"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Installation()
result.template = "object/installation/battlefield/destructible/shared_small_turret.iff"
result.attribute_template_id = -1
result.stfName("battlefield","small_turret")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "af3c99daa8befcc57b31336fbf03ad55",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 25.23076923076923,
"alnum_prop": 0.7195121951219512,
"repo_name": "anhstudios/swganh",
"id": "90dd2da70b91eb66cc8fc1891f35e1d14adaf731",
"size": "473",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/installation/battlefield/destructible/shared_small_turret.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""
This is the cmdset for Player (OOC) commands. These are
stored on the Player object and should thus be able to handle getting
a Player object as caller rather than a Character.
Note - in order for session-rerouting (in MULTISESSION_MODE=2) to
function, all commands in this cmdset should use the self.msg()
command method rather than caller.msg().
"""
from src.commands.cmdset import CmdSet
from src.commands.default import help, comms, admin, system
from src.commands.default import building, player
class PlayerCmdSet(CmdSet):
"""
Implements the player command set.
"""
key = "DefaultPlayer"
priority = -10
def at_cmdset_creation(self):
"Populates the cmdset"
# Player-specific commands
self.add(player.CmdOOCLook())
self.add(player.CmdIC())
self.add(player.CmdOOC())
self.add(player.CmdCharCreate())
#self.add(player.CmdSessions())
self.add(player.CmdWho())
self.add(player.CmdEncoding())
self.add(player.CmdQuit())
self.add(player.CmdPassword())
self.add(player.CmdColorTest())
self.add(player.CmdQuell())
# testing
self.add(building.CmdExamine())
# Help command
self.add(help.CmdHelp())
# system commands
self.add(system.CmdReload())
self.add(system.CmdReset())
self.add(system.CmdShutdown())
self.add(system.CmdPy())
# Admin commands
self.add(admin.CmdDelPlayer())
self.add(admin.CmdNewPassword())
# Comm commands
self.add(comms.CmdAddCom())
self.add(comms.CmdDelCom())
self.add(comms.CmdAllCom())
self.add(comms.CmdChannels())
self.add(comms.CmdCdestroy())
self.add(comms.CmdChannelCreate())
self.add(comms.CmdClock())
self.add(comms.CmdCBoot())
self.add(comms.CmdCemit())
self.add(comms.CmdCWho())
self.add(comms.CmdCdesc())
self.add(comms.CmdPage())
self.add(comms.CmdIRC2Chan())
self.add(comms.CmdRSS2Chan())
#self.add(comms.CmdIMC2Chan())
#self.add(comms.CmdIMCInfo())
#self.add(comms.CmdIMCTell())
| {
"content_hash": "0ebfbce6123e826eb1d9ec2756418480",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 69,
"avg_line_length": 29.675675675675677,
"alnum_prop": 0.6302367941712204,
"repo_name": "Pathel/deuterium",
"id": "81e1485f3c2920195807f70b3d2b93da7a82e1a7",
"size": "2196",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/commands/default/cmdset_player.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19127"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "JavaScript",
"bytes": "22126"
},
{
"name": "Python",
"bytes": "2117297"
}
],
"symlink_target": ""
} |
import numpy as np
from pandas import date_range
import pandas._testing as tm
class TestSplit:
def test_split_non_utc(self):
# GH#14042
indices = date_range("2016-01-01 00:00:00+0200", freq="S", periods=10)
result = np.split(indices, indices_or_sections=[])[0]
expected = indices._with_freq(None)
tm.assert_index_equal(result, expected)
| {
"content_hash": "f94c1b85345e246bd58b4eb009f9a8a0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 29.53846153846154,
"alnum_prop": 0.6536458333333334,
"repo_name": "gfyoung/pandas",
"id": "301466c0da41c1f369cd735d19dcb7f78f207517",
"size": "384",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "pandas/tests/indexes/datetimes/test_npfuncs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4912"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14336547"
},
{
"name": "Shell",
"bytes": "29174"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
} |
print "Edson Lopes Portal"
print "Tiago"
| {
"content_hash": "d5b9540d5fedcaa252e8f0ae601c0c76",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 26,
"avg_line_length": 20.5,
"alnum_prop": 0.7560975609756098,
"repo_name": "mateusportal/testandogit",
"id": "be52ef5b2b1cab25df88a4177a6011ea72cf3ede",
"size": "42",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edson.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "132"
}
],
"symlink_target": ""
} |
import itertools
import re
from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.sql.expression import select
from nova.openstack.common import log as logging
from oslo.config import cfg
CONF = cfg.CONF
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
_ephemeral = re.compile('^ephemeral(\d|[1-9]\d+)$')
def _is_ephemeral(device_name):
return bool(_ephemeral.match(device_name))
def _is_swap_or_ephemeral(device_name):
return (device_name and
(device_name == 'swap' or _is_ephemeral(device_name)))
_dev = re.compile('^/dev/')
def strip_dev(device_name):
"""remove leading '/dev/'."""
return _dev.sub('', device_name) if device_name else device_name
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
tables = [Table(table, meta, autoload=True)
for table in
('block_device_mapping', 'shadow_block_device_mapping')]
for block_device_mapping in tables:
source_type = Column('source_type', String(255))
destination_type = Column('destination_type', String(255))
guest_format = Column('guest_format', String(255))
device_type = Column('device_type', String(255))
disk_bus = Column('disk_bus', String(255))
boot_index = Column('boot_index', Integer)
image_id = Column('image_id', String(36))
source_type.create(block_device_mapping)
destination_type.create(block_device_mapping)
guest_format.create(block_device_mapping)
device_type.create(block_device_mapping)
disk_bus.create(block_device_mapping)
boot_index.create(block_device_mapping)
image_id.create(block_device_mapping)
device_name = block_device_mapping.c.device_name
device_name.alter(nullable=True)
_upgrade_bdm_v2(meta, *tables)
for block_device_mapping in tables:
virtual_name = block_device_mapping.c.virtual_name
virtual_name.drop()
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
for table in ('block_device_mapping', 'shadow_block_device_mapping'):
block_device_mapping = Table(table, meta, autoload=True)
virtual_name = Column('virtual_name', String(255), nullable=True)
virtual_name.create(block_device_mapping)
_downgrade_bdm_v2(meta, block_device_mapping)
device_name = block_device_mapping.c.device_name
device_name.alter(nullable=True)
block_device_mapping.c.source_type.drop()
block_device_mapping.c.destination_type.drop()
block_device_mapping.c.guest_format.drop()
block_device_mapping.c.device_type.drop()
block_device_mapping.c.disk_bus.drop()
block_device_mapping.c.boot_index.drop()
block_device_mapping.c.image_id.drop()
def _upgrade_bdm_v2(meta, bdm_table, bdm_shadow_table):
# Rows needed to do the upgrade
_bdm_rows_v1 = ('id', 'device_name', 'virtual_name',
'snapshot_id', 'volume_id', 'instance_uuid')
_instance_cols = ('uuid', 'image_ref', 'root_device_name')
def _get_columns(table, names):
return [getattr(table.c, name) for name in names]
def _default_bdm():
# Set some common default values
default = {}
default['destination_type'] = 'local'
default['device_type'] = 'disk'
default['boot_index'] = -1
return default
instance_table = Table('instances', meta, autoload=True)
instance_shadow_table = Table('shadow_instances', meta, autoload=True)
live_q = select(_get_columns(instance_table, _instance_cols) +
_get_columns(bdm_table, _bdm_rows_v1),
from_obj=instance_table.join(bdm_table,
instance_table.c.uuid == bdm_table.c.instance_uuid))
live_on_shadow_q = select(_get_columns(instance_table, _instance_cols) +
_get_columns(bdm_shadow_table, _bdm_rows_v1),
from_obj=instance_table.join(bdm_shadow_table,
instance_table.c.uuid ==
bdm_shadow_table.c.instance_uuid))
shadow_q = select(_get_columns(instance_shadow_table, _instance_cols) +
_get_columns(bdm_shadow_table, _bdm_rows_v1),
from_obj=instance_shadow_table.join(bdm_shadow_table,
instance_shadow_table.c.uuid ==
bdm_shadow_table.c.instance_uuid))
instance_image_dict = {}
for ((instance_uuid, instance_image_ref, instance_root_device,
bdm_id, device_name, virtual_name, snapshot_id, volume_id,
_), is_shadow) in itertools.chain(
((data, False) for data in live_q.execute().fetchall()),
((data, True) for data in live_on_shadow_q.execute().fetchall()),
((data, True) for data in shadow_q.execute().fetchall())):
if instance_image_ref and instance_uuid not in instance_image_dict:
image_bdm = _default_bdm()
image_bdm['source_type'] = 'image'
image_bdm['instance_uuid'] = instance_uuid
image_bdm['image_id'] = instance_image_ref
image_bdm['boot_index'] = 0
instance_image_dict[instance_uuid] = image_bdm
bdm_v2 = _default_bdm()
# Copy over some fields we'll need
bdm_v2['id'] = bdm_id
bdm_v2['device_name'] = device_name
virt_name = virtual_name
if _is_swap_or_ephemeral(virt_name):
bdm_v2['source_type'] = 'blank'
if virt_name == 'swap':
bdm_v2['guest_format'] = 'swap'
else:
bdm_v2['guest_format'] = CONF.default_ephemeral_format
elif snapshot_id:
bdm_v2['source_type'] = 'snapshot'
bdm_v2['destination_type'] = 'volume'
elif volume_id:
bdm_v2['source_type'] = 'volume'
bdm_v2['destination_type'] = 'volume'
        else:  # Log a warning that the bdm is not as expected
            LOG.warn("Got an unexpected block device %s "
                     "that cannot be converted to v2 format", device_name)
# NOTE (ndipanov): Mark only the image or the bootable volume
# with boot index, as we don't support it yet.
# Also, make sure that instances started with
# the old syntax of specifying an image *and*
        #                   a bootable volume still have consistent data.
bootable = ((strip_dev(device_name) ==
strip_dev(instance_root_device))
and bdm_v2['source_type'] != 'blank')
if bootable:
bdm_v2['boot_index'] = 0
if instance_uuid in instance_image_dict:
instance_image_dict[instance_uuid]['boot_index'] = -1
# Update the DB
my_table = bdm_table
if is_shadow:
my_table = bdm_shadow_table
my_table.update().where(
my_table.c.id == bdm_id
).values(**bdm_v2).execute()
# Create image bdms
for instance_uuid, image_bdm in instance_image_dict.iteritems():
bdm_table.insert().values(**image_bdm).execute()
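# Editorial summary (not part of the original migration) of the v1 -> v2 mapping
# applied by _upgrade_bdm_v2 above:
#
#     virtual_name 'swap'        -> source_type 'blank', guest_format 'swap'
#     virtual_name 'ephemeralN'  -> source_type 'blank', guest_format = default_ephemeral_format
#     snapshot_id set            -> source_type 'snapshot', destination_type 'volume'
#     volume_id set              -> source_type 'volume',   destination_type 'volume'
#     instance image_ref         -> one extra 'image' bdm per instance (boot_index 0,
#                                   demoted to -1 if a bootable volume is found)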
def _downgrade_bdm_v2(meta, bdm_table):
# First delete all the image bdms
# NOTE (ndipanov): This will delete all the image bdms, even the ones
    #                  that were potentially created as part of the normal
# operation, not only the upgrade. We have to do it,
# as we have no way of handling them in the old code.
bdm_table.delete().where(bdm_table.c.source_type == 'image').execute()
# NOTE (ndipanov): Set all NULL device_names (if any) to '' and let the
# Nova code deal with that. This is needed so that the
# return of nullable=True does not break, and should
# happen only if there are instances that are just
# starting up when we do the downgrade
bdm_table.update().where(
bdm_table.c.device_name == None
).values(device_name='').execute()
instance = Table('instances', meta, autoload=True)
instance_shadow = Table('shadow_instances', meta, autoload=True)
instance_q = select([instance.c.uuid])
instance_shadow_q = select([instance_shadow.c.uuid])
for instance_uuid, in itertools.chain(
instance_q.execute().fetchall(),
instance_shadow_q.execute().fetchall()):
# Get all the bdms for an instance
bdm_q = select(
[bdm_table.c.id, bdm_table.c.source_type, bdm_table.c.guest_format]
).where(
(bdm_table.c.instance_uuid == instance_uuid) &
(bdm_table.c.source_type == 'blank')
).order_by(bdm_table.c.id.asc())
blanks = [
dict(zip(('id', 'source', 'format'), row))
for row in bdm_q.execute().fetchall()
]
swap = [dev for dev in blanks if dev['format'] == 'swap']
assert len(swap) < 2
ephemerals = [dev for dev in blanks if dev not in swap]
for index, eph in enumerate(ephemerals):
eph['virtual_name'] = 'ephemeral' + str(index)
if swap:
swap[0]['virtual_name'] = 'swap'
for bdm in swap + ephemerals:
bdm_table.update().where(
bdm_table.c.id == bdm['id']
).values(**bdm).execute()
| {
"content_hash": "4c959b085596392506ec86fca2487cbb",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 79,
"avg_line_length": 37.40944881889764,
"alnum_prop": 0.590612502631025,
"repo_name": "DirectXMan12/nova-hacking",
"id": "e8b9a1570f21feb001ccebaedbd6f4156b54c5aa",
"size": "10122",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature_novnc_krb",
"path": "nova/db/sqlalchemy/migrate_repo/versions/186_new_bdm_format.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "10361785"
},
{
"name": "Shell",
"bytes": "17485"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import watdarepo
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'watdarepo'
copyright = u'2013, Daniel Greenfeld'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = watdarepo.__version__
# The full version, including alpha/beta/rc tags.
release = watdarepo.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'watdarepodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'watdarepo.tex', u'watdarepo Documentation',
u'Daniel Greenfeld', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'watdarepo', u'watdarepo Documentation',
[u'Daniel Greenfeld'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'watdarepo', u'watdarepo Documentation',
u'Daniel Greenfeld', 'watdarepo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | {
"content_hash": "5e59afc2dd60eddcb39725a1b257dd8d",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 32.28861788617886,
"alnum_prop": 0.7064081581266524,
"repo_name": "pydanny/watdarepo",
"id": "e19dd6bbcb36e9757d1188716ea755781095cae9",
"size": "8386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18452"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
"""Simple script for creating symbolic links for an arbitrary number of path pairs."""
import argparse
import errno
import json
import os
import sys
def main(args):
parser = argparse.ArgumentParser(description='Create symlinks')
parser.add_argument("--link-json",
help="Simple JSON mapping of a source to a linkname",
required=True)
args = parser.parse_args()
with open(args.link_json, 'r') as f:
links = json.load(f)
made_dirs = set()
def make_parent_dirs(path):
path = os.path.dirname(path)
if path in made_dirs:
return
try:
os.makedirs(path, 0o777)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
while path and path not in made_dirs:
made_dirs.add(path)
path = os.path.dirname(path)
for target, linknames in links.items():
for linkname in linknames:
make_parent_dirs(linkname)
os.symlink(target, linkname)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
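# Editorial example (not part of the original script): the --link-json file maps
# each link target to a list of link names to create, e.g.
#
#     {
#       "/path/to/real/file.txt": ["out/a/file.txt", "out/b/file.txt"]
#     }
#
# which creates out/a/file.txt and out/b/file.txt (parent directories included),
# both pointing at /path/to/real/file.txt.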
| {
"content_hash": "f7133f845b416c64685a4ba5359c8cad",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 86,
"avg_line_length": 26.128205128205128,
"alnum_prop": 0.6359175662414132,
"repo_name": "luci/recipes-py",
"id": "e1b7d3e28799bb1f50535786ea9626f0484baf11",
"size": "1216",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "recipe_modules/file/resources/symlink.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26"
},
{
"name": "Python",
"bytes": "900422"
},
{
"name": "Shell",
"bytes": "5746"
}
],
"symlink_target": ""
} |
import sys
import colorama
from colorama import Fore, Back, Style
mapping = {
'fg_green' : Fore.GREEN,
'fg_red' : Fore.RED,
}
# initialize colorama
def initialize(on=True):
if on:
colorama.init(autoreset=True, convert=None, strip=None, wrap=True)
else:
colorama.init(autoreset=True, convert=None, strip=True, wrap=True)
return
# deinitialize or stop colourama
def deinitialize():
colorama.deinit()
def cprint(msg, hint=None):
if hint:
if mapping.get(hint):
sys.stdout.write(mapping[hint] + msg)
return
sys.stdout.write(msg)
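# Editorial usage sketch (not part of the original module):
if __name__ == '__main__':
    initialize()                       # wrap stdout so ANSI colours work everywhere
    cprint("ok\n", hint="fg_green")    # known hint -> coloured output
    cprint("failed\n", hint="fg_red")
    cprint("plain\n")                  # no or unknown hint -> default colour
    deinitialize()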
| {
"content_hash": "854120b43bf1e4534cdbdbac9b6e102d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 74,
"avg_line_length": 19.375,
"alnum_prop": 0.6370967741935484,
"repo_name": "sharethis-github/sgwarden",
"id": "cb91c5249c233f456b3ff601648fa43744fb8d73",
"size": "620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sgwarden/sgwarden/prettyprint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "58422"
}
],
"symlink_target": ""
} |
__version__=''' $Id$ '''
__doc__="""
The Canvas object is the primary interface for creating PDF files. See
doc/reportlab-userguide.pdf for copious examples.
"""
__all__ = ['Canvas']
ENABLE_TRACKING = 1 # turn this off to do profile testing w/o tracking
import os
import sys
import re
import hashlib
from string import digits
import tempfile
from math import sin, cos, tan, pi, ceil
from reportlab import rl_config
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase import pdfdoc
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import pdfgeom, pathobject, textobject
from reportlab.lib.colors import black, _chooseEnforceColorSpace, Color, CMYKColor, toColor
from reportlab.lib.utils import import_zlib, ImageReader, isSeq, isStr, isUnicode, _digester
from reportlab.lib.rl_accel import fp_str, escapePDF
from reportlab.lib.boxstuff import aspectRatioFix
digitPat = re.compile('\d') #used in decimal alignment
zlib = import_zlib()
# Robert Kern
# Constants for closing paths.
# May be useful if one changes 'arc' and 'rect' to take a
# default argument that tells how to close the path.
# That way we can draw filled shapes.
FILL_EVEN_ODD = 0
FILL_NON_ZERO = 1
#this is used by path-closing routines.
#map stroke, fill, fillmode -> operator
# fillmode: 1 = non-Zero (obviously), 0 = evenOdd
PATH_OPS = {(0, 0, FILL_EVEN_ODD) : 'n', #no op
(0, 0, FILL_NON_ZERO) : 'n', #no op
(1, 0, FILL_EVEN_ODD) : 'S', #stroke only
(1, 0, FILL_NON_ZERO) : 'S', #stroke only
(0, 1, FILL_EVEN_ODD) : 'f*', #Fill only
(0, 1, FILL_NON_ZERO) : 'f', #Fill only
(1, 1, FILL_EVEN_ODD) : 'B*', #Stroke and Fill
(1, 1, FILL_NON_ZERO) : 'B', #Stroke and Fill
}
def _annFormat(D,color,thickness,dashArray,hradius=0,vradius=0):
from reportlab.pdfbase.pdfdoc import PDFArray, PDFDictionary
if color and 'C' not in D:
D["C"] = PDFArray([color.red, color.green, color.blue])
if 'Border' not in D:
border = [hradius,vradius,thickness or 0]
if dashArray:
border.append(PDFArray(dashArray))
D["Border"] = PDFArray(border)
# BS = PDFDictionary()
# bss = 'S'
# if dashArray:
# BS['D'] = PDFArray(dashArray)
# bss = 'D'
# BS['W'] = thickness or 0
# BS['S'] = bss
# D['BS'] = BS
# helpers to guess color space for gradients
def _normalizeColor(aColor):
if isinstance(aColor, CMYKColor):
d = aColor.density
return "DeviceCMYK", tuple(c*d for c in aColor.cmyk())
elif isinstance(aColor, Color):
return "DeviceRGB", aColor.rgb()
elif isinstance(aColor, (tuple, list)):
l = len(aColor)
if l == 3:
return "DeviceRGB", aColor
elif l == 4:
return "DeviceCMYK", aColor
elif isinstance(aColor, str):
return _normalizeColor(toColor(aColor))
raise ValueError("Unknown color %r" % aColor)
def _normalizeColors(colors):
space = None
outcolors = []
for aColor in colors:
nspace, outcolor = _normalizeColor(aColor)
if space is not None and space != nspace:
raise ValueError("Mismatch in color spaces: %s and %s" % (space, nspace))
space = nspace
outcolors.append(outcolor)
return space, outcolors
def _buildColorFunction(colors, positions):
from reportlab.pdfbase.pdfdoc import PDFExponentialFunction, PDFStitchingFunction
if positions is not None and len(positions) != len(colors):
raise ValueError("need to have the same number of colors and positions")
# simplified functions for edge cases
if len(colors) == 1:
# for completeness
return PDFExponentialFunction(N=1, C0=colors[0], C1=colors[0])
if len(colors) == 2:
if positions is None or (positions[0] == 0 and positions[1] == 1):
return PDFExponentialFunction(N=1, C0=colors[0], C1=colors[1])
# equally distribute if positions not specified
if positions is None:
nc = len(colors)
positions = [(1.0*x)/(nc-1) for x in range(nc)]
else:
# sort positions and colors in increasing order
poscolors = list(zip(positions, colors))
poscolors.sort(key=lambda x: x[0])
# add endpoint positions if not already present
if poscolors[0][0] != 0:
poscolors.insert(0, (0.0, poscolors[0][1]))
if poscolors[-1][0] != 1:
poscolors.append((1.0, poscolors[-1][1]))
positions, colors = list(zip(*poscolors)) # unzip
# build stitching function
functions = []
bounds = [pos for pos in positions[1:-1]]
encode = []
lastcolor = colors[0]
for color in colors[1:]:
functions.append(PDFExponentialFunction(N=1, C0=lastcolor, C1=color))
lastcolor = color
encode.append(0.0)
encode.append(1.0)
return PDFStitchingFunction(functions, bounds, encode, Domain="[0.0 1.0]")
class ExtGState:
defaults = dict(
CA=1,
ca=1,
OP=False,
op=False,
OPM=0,
)
def __init__(self):
self._d = {}
self._c = {}
def set(self,canv,a,v):
d = self.defaults[a]
isbool = isinstance(d,bool)
if isbool: v=bool(v)
if v!=self._d.get(a,d) or (a=='op' and self.getValue('OP')!=d):
self._d[a] = v
if isbool: v=str(v).lower()
t = a,v
if t in self._c:
name = self._c[t]
else:
name = 'gRLs'+str(len(self._c))
self._c[t] = name
canv._code.append('/%s gs' % name)
def getValue(self,a):
return self._d.get(a,self.defaults[a])
def getState(self):
S = {}
for t,name in self._c.items():
S[name] = pdfdoc.PDFDictionary(dict((t,)))
return S and pdfdoc.PDFDictionary(S) or None
def pushCopy(self):
'''the states must be shared across push/pop, but the values not'''
x = self.__class__()
x._d = self._d.copy()
x._c = self._c
return x
class Canvas(textobject._PDFColorSetter):
"""This class is the programmer's interface to the PDF file format. Methods
are (or will be) provided here to do just about everything PDF can do.
The underlying model to the canvas concept is that of a graphics state machine
that at any given point in time has a current font, fill color (for figure
interiors), stroke color (for figure borders), line width and geometric transform, among
many other characteristics.
Canvas methods generally either draw something (like canvas.line) using the
current state of the canvas or change some component of the canvas
state (like canvas.setFont). The current state can be saved and restored
using the saveState/restoreState methods.
Objects are "painted" in the order they are drawn so if, for example
two rectangles overlap the last draw will appear "on top". PDF form
objects (supported here) are used to draw complex drawings only once,
for possible repeated use.
There are other features of canvas which are not visible when printed,
such as outlines and bookmarks which are used for navigating a document
in a viewer.
Here is a very silly example usage which generates a Hello World pdf document.
Example::
from reportlab.pdfgen import canvas
c = canvas.Canvas("hello.pdf")
from reportlab.lib.units import inch
# move the origin up and to the left
c.translate(inch,inch)
# define a large font
c.setFont("Helvetica", 80)
# choose some colors
c.setStrokeColorRGB(0.2,0.5,0.3)
c.setFillColorRGB(1,0,1)
# draw a rectangle
c.rect(inch,inch,6*inch,9*inch, fill=1)
# make text go straight up
c.rotate(90)
# change color
c.setFillColorRGB(0,0,0.77)
# say hello (note after rotate the y coord needs to be negative!)
c.drawString(3*inch, -3*inch, "Hello World")
c.showPage()
c.save()
"""
def __init__(self,filename,
pagesize=None,
bottomup = 1,
pageCompression=None,
invariant = None,
verbosity=0,
encrypt=None,
cropMarks=None,
pdfVersion=None,
enforceColorSpace=None,
):
"""Create a canvas of a given size. etc.
You may pass a file-like object to filename as an alternative to
a string.
For more information about the encrypt parameter refer to the setEncrypt method.
Most of the attributes are private - we will use set/get methods
as the preferred interface. Default page size is A4.
cropMarks may be True/False or an object with parameters borderWidth, markColor, markWidth
and markLength
if enforceColorSpace is in ('cmyk', 'rgb', 'sep','sep_black','sep_cmyk') then one of
the standard _PDFColorSetter callables will be used to enforce appropriate color settings.
If it is a callable then that will be used.
"""
if pagesize is None: pagesize = rl_config.defaultPageSize
if invariant is None: invariant = rl_config.invariant
self._filename = filename
self._doc = pdfdoc.PDFDocument(compression=pageCompression,
invariant=invariant, filename=filename,
pdfVersion=pdfVersion or pdfdoc.PDF_VERSION_DEFAULT,
)
self._enforceColorSpace = _chooseEnforceColorSpace(enforceColorSpace)
#this only controls whether it prints 'saved ...' - 0 disables
self._verbosity = verbosity
#this is called each time a page is output if non-null
self._onPage = None
self._cropMarks = cropMarks
self._pagesize = pagesize
self._hanging_pagesize = None
self._pageRotation = 0
#self._currentPageHasImages = 0
self._pageTransition = None
self._pageDuration = None
self._destinations = {} # dictionary of destinations for cross indexing.
self.setPageCompression(pageCompression)
self._pageNumber = 1 # keep a count
# when we create a form we need to save operations not in the form
self._codeStack = []
self._restartAccumulators() # restart all accumulation state (generalized, arw)
self._annotationCount = 0
self._outlines = [] # list for a name tree
self._psCommandsBeforePage = [] #for postscript tray/font commands
self._psCommandsAfterPage = [] #for postscript tray/font commands
#PostScript has the origin at bottom left. It is easy to achieve a top-
#down coord system by translating to the top of the page and setting y
#scale to -1, but then text is inverted. So self.bottomup is used
#to also set the text matrix accordingly. You can now choose your
#drawing coordinates.
self.bottomup = bottomup
self.imageCaching = rl_config.defaultImageCaching
self.init_graphics_state()
self._make_preamble()
self.state_stack = []
self.setEncrypt(encrypt)
def setEncrypt(self, encrypt):
'''
Set the encryption used for the pdf generated by this canvas.
If encrypt is a string object, it is used as the user password for the pdf.
If encrypt is an instance of reportlab.lib.pdfencrypt.StandardEncryption, this object is
used to encrypt the pdf. This allows more finegrained control over the encryption settings.
'''
if encrypt:
from reportlab.lib import pdfencrypt
if isStr(encrypt): #encrypt is the password itself
if isUnicode(encrypt):
encrypt = encrypt.encode('utf-8')
encrypt = pdfencrypt.StandardEncryption(encrypt) #now it's the encrypt object
encrypt.setAllPermissions(1)
elif not isinstance(encrypt, pdfencrypt.StandardEncryption):
raise TypeError('Expected string or instance of reportlab.lib.pdfencrypt.StandardEncryption as encrypt parameter but got %r' % encrypt)
self._doc.encrypt = encrypt
else:
try:
del self._doc.encrypt
except AttributeError:
pass
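# Illustrative usage of setEncrypt (not part of the library; assumes a Canvas
# instance `c`). A plain string becomes the user password; for finer control
# build a StandardEncryption object - the keyword names below follow
# reportlab.lib.pdfencrypt and should be checked against your version.
#
#   c.setEncrypt('s3cret')                    # password-protect with default permissions
#   from reportlab.lib import pdfencrypt
#   enc = pdfencrypt.StandardEncryption('userpass', ownerPassword='ownerpass',
#                                       canPrint=0, canCopy=0)
#   c.setEncrypt(enc)                         # read-only: no printing or copying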
def init_graphics_state(self):
#initial graphics state, never modify any of these in place
self._x = 0
self._y = 0
self._fontname = rl_config.canvas_basefontname
self._fontsize = 12
self._textMode = 0 #track if between BT/ET
self._leading = 14.4
self._currentMatrix = (1., 0., 0., 1., 0., 0.)
self._fillMode = 0 #even-odd
#text state
self._charSpace = 0
self._wordSpace = 0
self._horizScale = 100
self._textRenderMode = 0
self._rise = 0
self._textLineMatrix = (1., 0., 0., 1., 0., 0.)
self._textMatrix = (1., 0., 0., 1., 0., 0.)
# line drawing
self._lineCap = 0
self._lineJoin = 0
self._lineDash = None #not done
self._lineWidth = 1
self._mitreLimit = 0
self._fillColorObj = self._strokeColorObj = rl_config.canvas_baseColor or (0,0,0)
self._extgstate = ExtGState()
def push_state_stack(self):
state = {}
d = self.__dict__
for name in self.STATE_ATTRIBUTES:
state[name] = d[name] #getattr(self, name)
self.state_stack.append(state)
self._extgstate = self._extgstate.pushCopy()
def pop_state_stack(self):
self.__dict__.update(self.state_stack.pop())
STATE_ATTRIBUTES = """_x _y _fontname _fontsize _textMode _leading _currentMatrix _fillMode
_charSpace _wordSpace _horizScale _textRenderMode _rise _textLineMatrix
_textMatrix _lineCap _lineJoin _lineDash _lineWidth _mitreLimit _fillColorObj
_strokeColorObj _extgstate""".split()
STATE_RANGE = list(range(len(STATE_ATTRIBUTES)))
#self._addStandardFonts()
def _make_preamble(self):
P = [].append
if self.bottomup:
P('1 0 0 1 0 0 cm')
else:
P('1 0 0 -1 0 %s cm' % fp_str(self._pagesize[1]))
C = self._code
n = len(C)
if self._fillColorObj != (0,0,0):
self.setFillColor(self._fillColorObj)
if self._strokeColorObj != (0,0,0):
self.setStrokeColor(self._strokeColorObj)
P(' '.join(C[n:]))
del C[n:]
font = pdfmetrics.getFont(self._fontname)
if not font._dynamicFont:
#set an initial font
if font.face.builtIn or not getattr(self,'_drawTextAsPath',False):
P('BT %s 12 Tf 14.4 TL ET' % self._doc.getInternalFontName(self._fontname))
self._preamble = ' '.join(P.__self__)
def _escape(self, s):
return escapePDF(s)
#info functions - non-standard
def setAuthor(self, author):
"""identify the author for invisible embedding inside the PDF document.
the author annotation will appear in the text of the file but will
not automatically be seen when the document is viewed; it is visible
in the document properties etc."""
self._doc.setAuthor(author)
def setDateFormatter(self, dateFormatter):
"""accepts a func(yyyy,mm,dd,hh,m,s) used to create embedded formatted date"""
self._doc.setDateFormatter(dateFormatter)
def addOutlineEntry(self, title, key, level=0, closed=None):
"""Adds a new entry to the outline at given level. If LEVEL not specified,
entry goes at the top level. If level specified, it must be
no more than 1 greater than the outline level in the last call.
The key must be the (unique) name of a bookmark.
the title is the (non-unique) name to be displayed for the entry.
If closed is set then the entry should show no subsections by default
when displayed.
Example::
c.addOutlineEntry("first section", "section1")
c.addOutlineEntry("introduction", "s1s1", 1, closed=1)
c.addOutlineEntry("body", "s1s2", 1)
c.addOutlineEntry("detail1", "s1s2s1", 2)
c.addOutlineEntry("detail2", "s1s2s2", 2)
c.addOutlineEntry("conclusion", "s1s3", 1)
c.addOutlineEntry("further reading", "s1s3s1", 2)
c.addOutlineEntry("second section", "section1")
c.addOutlineEntry("introduction", "s2s1", 1)
c.addOutlineEntry("body", "s2s2", 1, closed=1)
c.addOutlineEntry("detail1", "s2s2s1", 2)
c.addOutlineEntry("detail2", "s2s2s2", 2)
c.addOutlineEntry("conclusion", "s2s3", 1)
c.addOutlineEntry("further reading", "s2s3s1", 2)
generated outline looks like::
- first section
|- introduction
|- body
| |- detail1
| |- detail2
|- conclusion
| |- further reading
- second section
|- introduction
|+ body
|- conclusion
| |- further reading
Note that the second "body" is closed.
Note that you can jump from level 5 to level 3 but not
from 3 to 5: instead you need to provide all intervening
levels going down (4 in this case). Note that titles can
collide but keys cannot.
"""
#to be completed
#self._outlines.append(title)
self._doc.outline.addOutlineEntry(key, level, title, closed=closed)
def setOutlineNames0(self, *nametree): # keep this for now (?)
"""nametree should can be a recursive tree like so::
c.setOutlineNames(
"chapter1dest",
("chapter2dest",
["chapter2section1dest",
"chapter2section2dest",
"chapter2conclusiondest"]
), # end of chapter2 description
"chapter3dest",
("chapter4dest", ["c4s1", "c4s2"])
)
each of the string names inside must be bound to a bookmark
before the document is generated.
"""
self._doc.outline.setNames(*((self,)+nametree))
def setTitle(self, title):
"""write a title into the PDF file that won't automatically display
in the document itself."""
self._doc.setTitle(title)
def setSubject(self, subject):
"""write a subject into the PDF file that won't automatically display
in the document itself."""
self._doc.setSubject(subject)
def setCreator(self, creator):
"""write a creator into the PDF file that won't automatically display
in the document itself. This should be used to name the original app
which is passing data into ReportLab, if you wish to name it."""
self._doc.setCreator(creator)
def setKeywords(self, keywords):
"""write a list of keywords into the PDF file which shows in document properties.
Either submit a single string or a list/tuple"""
if isinstance(keywords,(list,tuple)):
keywords = ', '.join(keywords)
self._doc.setKeywords(keywords)
def pageHasData(self):
"Info function - app can call it after showPage to see if it needs a save"
return len(self._code) == 0
def showOutline(self):
"""Specify that Acrobat Reader should start with the outline tree visible.
showFullScreen() and showOutline() conflict; the one called last
wins."""
self._doc._catalog.showOutline()
def showFullScreen0(self):
"""Specify that Acrobat Reader should start in full screen mode.
showFullScreen() and showOutline() conflict; the one called last
wins."""
self._doc._catalog.showFullScreen()
def _setStrokeAlpha(self,v):
"""
Define the transparency/opacity of strokes. 0 is fully
transparent, 1 is fully opaque.
Note that calling this function will cause a version 1.4 PDF
to be generated (rather than 1.3).
"""
self._doc.ensureMinPdfVersion('transparency')
self._extgstate.set(self,'CA',v)
def _setFillAlpha(self,v):
"""
Define the transparency/opacity of non-strokes. 0 is fully
transparent, 1 is fully opaque.
Note that calling this function will cause a version 1.4 PDF
to be generated (rather than 1.3).
"""
self._doc.ensureMinPdfVersion('transparency')
self._extgstate.set(self,'ca',v)
def _setStrokeOverprint(self,v):
self._extgstate.set(self,'OP',v)
def _setFillOverprint(self,v):
self._extgstate.set(self,'op',v)
def _setOverprintMask(self,v):
self._extgstate.set(self,'OPM',v and 1 or 0)
def _getCmShift(self):
cM = self._cropMarks
if cM:
bleedW = max(0,getattr(cM,'bleedWidth',0))
bw = max(0,getattr(cM,'borderWidth',36))
if bleedW:
bw -= bleedW
return bw
def showPage(self):
"""Close the current page and possibly start on a new page."""
# ensure a space at the end of the stream - Acrobat does
# not mind, but Ghostscript dislikes 'Qendstream' even if
# the length marker finishes after 'Q'
pageWidth = self._pagesize[0]
pageHeight = self._pagesize[1]
cM = self._cropMarks
code = self._code
if cM:
bw = max(0,getattr(cM,'borderWidth',36))
if bw:
markLast = getattr(cM,'markLast',1)
ml = min(bw,max(0,getattr(cM,'markLength',18)))
mw = getattr(cM,'markWidth',0.5)
mc = getattr(cM,'markColor',black)
mg = 2*bw-ml
cx0 = len(code)
if ml and mc:
self.saveState()
self.setStrokeColor(mc)
self.setLineWidth(mw)
self.lines([
(bw,0,bw,ml),
(pageWidth+bw,0,pageWidth+bw,ml),
(bw,pageHeight+mg,bw,pageHeight+2*bw),
(pageWidth+bw,pageHeight+mg,pageWidth+bw,pageHeight+2*bw),
(0,bw,ml,bw),
(pageWidth+mg,bw,pageWidth+2*bw,bw),
(0,pageHeight+bw,ml,pageHeight+bw),
(pageWidth+mg,pageHeight+bw,pageWidth+2*bw,pageHeight+bw),
])
self.restoreState()
if markLast:
#if the marks are to be drawn after the content
#save the code we just drew for later use
L = code[cx0:]
del code[cx0:]
cx0 = len(code)
bleedW = max(0,getattr(cM,'bleedWidth',0))
self.saveState()
self.translate(bw-bleedW,bw-bleedW)
if bleedW:
#scale everything
self.scale(1+(2.0*bleedW)/pageWidth,1+(2.0*bleedW)/pageHeight)
#move our translation/expansion code to the beginning
C = code[cx0:]
del code[cx0:]
code[0:0] = C
self.restoreState()
if markLast:
code.extend(L)
pageWidth = 2*bw + pageWidth
pageHeight = 2*bw + pageHeight
code.append(' ')
page = pdfdoc.PDFPage()
page.pagewidth = pageWidth
page.pageheight = pageHeight
page.Rotate = self._pageRotation
page.hasImages = self._currentPageHasImages
page.setPageTransition(self._pageTransition)
page.setCompression(self._pageCompression)
if self._pageDuration is not None:
page.Dur = self._pageDuration
strm = self._psCommandsBeforePage + [self._preamble] + code + self._psCommandsAfterPage
page.setStream(strm)
self._setColorSpace(page)
self._setExtGState(page)
self._setXObjects(page)
self._setShadingUsed(page)
self._setAnnotations(page)
self._doc.addPage(page)
if self._onPage: self._onPage(self._pageNumber)
self._startPage()
def _startPage(self):
#now get ready for the next one
if self._hanging_pagesize:
self.setPageSize(self._hanging_pagesize)
self._hanging_pagesize = None
self._pageNumber += 1
self._restartAccumulators()
self.init_graphics_state()
self.state_stack = []
def setPageCallBack(self, func):
"""func(pageNum) will be called on each page end.
This is mainly a hook for progress monitoring.
Call setPageCallback(None) to clear a callback."""
self._onPage = func
def _setAnnotations(self,page):
page.Annots = self._annotationrefs
def _setColorSpace(self,obj):
obj._colorsUsed = self._colorsUsed
def _setShadingUsed(self, page):
page._shadingUsed = self._shadingUsed
def _setXObjects(self, thing):
"""for pages and forms, define the XObject dictionary for resources, if needed"""
forms = self._formsinuse
if forms:
xobjectsdict = self._doc.xobjDict(forms)
thing.XObjects = xobjectsdict
else:
thing.XObjects = None
def _bookmarkReference(self, name):
"""get a reference to a (possibly undefined, possibly unbound) bookmark"""
d = self._destinations
try:
return d[name]
except:
result = d[name] = pdfdoc.Destination(name) # newly defined, unbound
return result
def bookmarkPage(self, key,
fit="Fit",
left=None,
top=None,
bottom=None,
right=None,
zoom=None
):
"""
This creates a bookmark to the current page which can
be referred to with the given key elsewhere.
PDF offers very fine grained control over how Acrobat
reader is zoomed when people link to this. The default
is to keep the user's current zoom settings. The remaining
arguments may or may not be needed depending on the
choice of fit type.
Fit types and the other arguments they use are:
- XYZ left top zoom - fine grained control. null
or zero for any of the parameters means 'leave
as is', so "0,0,0" will keep the reader's settings.
NB. Adobe Reader appears to prefer "null" to 0's.
- Fit - entire page fits in window
- FitH top - top coord at top of window, width scaled
to fit.
- FitV left - left coord at left of window, height
scaled to fit
- FitR left bottom right top - scale window to fit
the specified rectangle
(question: do we support /FitB, FitBH and /FitBV
which are hangovers from version 1.1 / Acrobat 3.0?)"""
dest = self._bookmarkReference(key)
self._doc.inPage() # try to enable page-only features
pageref = self._doc.thisPageRef()
#None = "null" for PDF
if left is None:
left = "null"
if top is None:
top = "null"
if bottom is None:
bottom = "null"
if right is None:
right = "null"
if zoom is None:
zoom = "null"
if fit == "XYZ":
dest.xyz(left,top,zoom)
elif fit == "Fit":
dest.fit()
elif fit == "FitH":
dest.fith(top)
elif fit == "FitV":
dest.fitv(left)
elif fit == "FitR":
dest.fitr(left,bottom,right,top)
#Do we need these (version 1.1 / Acrobat 3 versions)?
elif fit == "FitB":
dest.fitb()
elif fit == "FitBH":
dest.fitbh(top)
elif fit == "FitBV":
dest.fitbv(left)
else:
raise ValueError("Unknown Fit type %s" % ascii(fit))
dest.setPage(pageref)
return dest
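# Illustrative usage of bookmarkPage (not part of the library; assumes a Canvas
# instance `c` and hypothetical key names). A bookmark becomes useful once
# something points at it, e.g. an outline entry or a link annotation.
#
#   c.bookmarkPage('chapter1')                       # default: whole page fits the window
#   c.bookmarkPage('table5', fit='FitH', top=680)    # scroll so y=680 is at the top
#   c.addOutlineEntry('Chapter 1', 'chapter1')       # outline entry jumps to the bookmark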
def bookmarkHorizontalAbsolute(self, key, top, left=0, fit='XYZ', **kw):
"""Bind a bookmark (destination) to the current page at a horizontal position.
Note that the position of the bookmark is given with respect to the default
user space (where the origin is at the lower left corner of the page)
and completely ignores any transform (translation, scale, skew, rotation,
etcetera) in effect for the current graphics state. The programmer is
responsible for making sure the bookmark matches an appropriate item on
the page."""
#This method should probably be deprecated since it is just a sub-set of bookmarkPage
return self.bookmarkPage(key, fit=fit, top=top, left=left, zoom=0)
def bookmarkHorizontal(self, key, relativeX, relativeY, **kw):
"""w.r.t. the current transformation, bookmark this horizontal."""
(left, top) = self.absolutePosition(relativeX,relativeY)
self.bookmarkHorizontalAbsolute(key, top, left=left, **kw)
#def _inPage0(self): disallowed!
# """declare a page, enable page features"""
# self._doc.inPage()
#def _inForm0(self):
# "deprecated in favore of beginForm...endForm"
# self._doc.inForm()
def doForm(self, name):
"""use a form XObj in current operation stream.
The form should either have been defined previously using
beginForm ... endForm, or may be defined later. If it is not
defined at save time, an exception will be raised. The form
will be drawn within the context of the current graphics
state."""
self._code.append("/%s Do" % self._doc.getXObjectName(name))
self._formsinuse.append(name)
def hasForm(self, name):
"""Query whether form XObj really exists yet."""
return self._doc.hasForm(name)
######################################################
#
# Image routines
#
######################################################
def drawInlineImage(self, image, x,y, width=None,height=None,
preserveAspectRatio=False,anchor='c'):
"""See drawImage, which should normally be used instead...
drawInlineImage behaves like drawImage, but stores the image content
within the graphics stream for the page. This means that the mask
parameter for transparency is not available. It also means that there
is no saving in file size or time if the same image is reused.
In theory it allows images to be displayed slightly faster; however,
we doubt if the difference is noticeable to any human user these days.
Only use this if you have studied the PDF specification and know the
implications.
"""
self._currentPageHasImages = 1
from reportlab.pdfgen.pdfimages import PDFImage
img_obj = PDFImage(image, x,y, width, height)
img_obj.drawInlineImage(self,
preserveAspectRatio=preserveAspectRatio,
anchor=anchor)
return (img_obj.width, img_obj.height)
def drawImage(self, image, x, y, width=None, height=None, mask=None,
preserveAspectRatio=False, anchor='c'):
"""Draws the image (ImageReader object or filename) as specified.
"image" may be an image filename or an ImageReader object.
x and y define the lower left corner of the image you wish to
draw (or of its bounding box, if using preserveAspectRatio below).
If width and height are not given, the width and height of the
image in pixels is used at a scale of 1 point to 1 pixel.
If width and height are given, the image will be stretched to fill
the given rectangle bounded by (x, y, x+width, y+height).
If you supply negative widths and/or heights, it inverts them and adjusts
x and y accordingly.
The method returns the width and height of the underlying image, since
this is often useful for layout algorithms and saves you work if you have
not specified them yourself.
The mask parameter supports transparent backgrounds. It takes 6 numbers
and defines the range of RGB values which will be masked out or treated
as transparent. For example with [0,2,40,42,136,139], it will mask out
any pixels with a Red value from 0-2, Green from 40-42 and
Blue from 136-139 (on a scale of 0-255).
New post version 2.0: drawImage can center an image in a box you
provide, while preserving its aspect ratio. For example, you might
have a fixed square box in your design, and a collection of photos
which might be landscape or portrait that you want to appear within
the box. If preserveAspectRatio is true, your image will appear within
the box specified.
If preserveAspectRatio is True, the anchor property can be used to
specify how images should fit into the given box. It should
be set to one of the following values, taken from the points of
the compass (plus 'c' for 'centre'):
nw n ne
w c e
sw s se
The default value is 'c' for 'centre'. Thus, if you want your
bitmaps to always be centred and appear at the top of the given box,
set anchor='n'. There are good examples of this in the output
of test_pdfgen_general.py
Unlike drawInlineImage, this creates 'external images' which
are only stored once in the PDF file but can be drawn many times.
If you give it the same filename twice, even at different locations
and sizes, it will reuse the first occurrence, resulting in a saving
in file size and generation time. If you use ImageReader objects,
it tests whether the image content has changed before deciding
whether to reuse it.
In general you should use drawImage in preference to drawInlineImage
unless you have read the PDF Spec and understand the tradeoffs."""
self._currentPageHasImages = 1
# first, generate a unique name/signature for the image. If ANYTHING
# is different, even the mask, this should be different.
if isinstance(image,ImageReader):
rawdata = image.getRGBData()
smask = image._dataA
if mask=='auto' and smask:
mdata = smask.getRGBData()
else:
mdata = str(mask)
if isUnicode(mdata):
mdata = mdata.encode('utf8')
name = _digester(rawdata+mdata)
else:
#filename, use it
s = '%s%s' % (image, mask)
if isUnicode(s):
s = s.encode('utf-8')
name = _digester(s)
# in the pdf document, this will be prefixed with something to
# say it is an XObject. Does it exist yet?
regName = self._doc.getXObjectName(name)
imgObj = self._doc.idToObject.get(regName, None)
if not imgObj:
#first time seen, create and register the PDFImageXobject
imgObj = pdfdoc.PDFImageXObject(name, image, mask=mask)
imgObj.name = name
self._setXObjects(imgObj)
self._doc.Reference(imgObj, regName)
self._doc.addForm(name, imgObj)
smask = getattr(imgObj,'_smask',None)
if smask: #set up the softmask obtained above
mRegName = self._doc.getXObjectName(smask.name)
mImgObj = self._doc.idToObject.get(mRegName, None)
if not mImgObj:
self._setXObjects(smask)
imgObj.smask = self._doc.Reference(smask,mRegName)
else:
imgObj.smask = pdfdoc.PDFObjectReference(mRegName)
del imgObj._smask
# ensure we have a size, as PDF will make it 1x1 pixel otherwise!
x,y,width,height,scaled = aspectRatioFix(preserveAspectRatio,anchor,x,y,width,height,imgObj.width,imgObj.height)
# scale and draw
self.saveState()
self.translate(x, y)
self.scale(width, height)
self._code.append("/%s Do" % regName)
self.restoreState()
# track what's been used on this page
self._formsinuse.append(name)
return (imgObj.width, imgObj.height)
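# Illustrative usage of drawImage (not part of the library; assumes a Canvas
# instance `c` and a hypothetical file 'logo.png').
#
#   # stretch to an exact 2x1 inch box
#   c.drawImage('logo.png', 72, 600, width=144, height=72)
#   # keep the aspect ratio, centre at the top of the box, honour the alpha channel
#   c.drawImage('logo.png', 72, 450, width=144, height=72,
#               preserveAspectRatio=True, anchor='n', mask='auto')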
def _restartAccumulators(self):
if self._codeStack:
# restore the saved code
saved = self._codeStack[-1]
del self._codeStack[-1]
self._code, self._formsinuse, self._annotationrefs, self._formData,self._colorsUsed, self._shadingUsed = saved
else:
self._code = [] # ready for more...
self._psCommandsAfterPage = []
self._currentPageHasImages = 1 # for safety...
self._formsinuse = []
self._annotationrefs = []
self._formData = None
self._colorsUsed = {}
self._shadingUsed = {}
def _pushAccumulators(self):
"when you enter a form, save accumulator info not related to the form for page (if any)"
saved = (self._code, self._formsinuse, self._annotationrefs, self._formData, self._colorsUsed, self._shadingUsed)
self._codeStack.append(saved)
self._code = [] # ready for more...
self._currentPageHasImages = 1 # for safety...
self._formsinuse = []
self._annotationrefs = []
self._formData = None
self._colorsUsed = {}
self._shadingUsed = {}
def _setExtGState(self, obj):
obj.ExtGState = self._extgstate.getState()
def beginForm(self, name, lowerx=0, lowery=0, upperx=None, uppery=None):
"""declare the current graphics stream to be a named form.
A graphics stream can either be a page or a form, not both.
Some operations (like bookmarking) are permitted for pages
but not forms. The form will not automatically be shown in the
document but must be explicitly referenced using doForm in pages
that require the form."""
self.push_state_stack()
self.init_graphics_state()
if self._code or self._formData:
# save the code that is not in the form
self._pushAccumulators()
#self._codeStack.append(self._code)
#self._code = []
self._formData = (name, lowerx, lowery, upperx, uppery)
self._doc.inForm()
#self._inForm0()
def endForm(self,**extra_attributes):
"""emit the current collection of graphics operations as a Form
as declared previously in beginForm."""
(name, lowerx, lowery, upperx, uppery) = self._formData
#self.makeForm0(name, lowerx, lowery, upperx, uppery)
# fall through! makeForm0 disallowed
#def makeForm0(self, name, lowerx=0, lowery=0, upperx=None, uppery=None):
"""Like showpage, but make a form using accumulated operations instead"""
# deprecated in favor of beginForm(...)... endForm()
(w,h) = self._pagesize
if upperx is None: upperx=w
if uppery is None: uppery=h
form = pdfdoc.PDFFormXObject(lowerx=lowerx, lowery=lowery, upperx=upperx, uppery=uppery)
form.compression = self._pageCompression
form.setStreamList([self._preamble] + self._code) # ??? minus preamble (seems to be needed!)
for k, v in extra_attributes.items():
setattr(form,k,v)
self._setColorSpace(form)
self._setExtGState(form)
self._setXObjects(form)
self._setAnnotations(form)
self._doc.addForm(name, form)
self._restartAccumulators()
self.pop_state_stack()
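# Illustrative beginForm/endForm/doForm round trip (not part of the library;
# assumes a Canvas instance `c` and a hypothetical form name).
#
#   c.beginForm('logoForm')
#   c.setFillColorRGB(0.2, 0.4, 0.8)
#   c.rect(0, 0, 120, 40, stroke=0, fill=1)
#   c.endForm()
#   c.doForm('logoForm')      # draw it on this page
#   c.showPage()
#   c.doForm('logoForm')      # reuse on the next page; stored only once in the file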
def addPostScriptCommand(self, command, position=1):
"""Embed literal Postscript in the document.
With position=0, it goes at very beginning of page stream;
with position=1, at current point; and
with position=2, at very end of page stream. What that does
to the resulting Postscript depends on Adobe's header :-)
Use with extreme caution, but sometimes needed for printer tray commands.
Acrobat 4.0 will export Postscript to a printer or file containing
the given commands. Adobe Reader 6.0 no longer does as this feature is
deprecated. 5.0, I don't know about (please let us know!). This was
funded by Bob Marshall of Vector.co.uk and tested on a Lexmark 750.
See test_pdfbase_postscript.py for 2 test cases - one will work on
any Postscript device, the other uses a 'setpapertray' command which
will error in Distiller but work on printers supporting it.
"""
#check if we've done this one already...
if isUnicode(command):
rawName = 'PS' + hashlib.md5(command.encode('utf-8')).hexdigest()
else:
rawName = 'PS' + hashlib.md5(command).hexdigest()
regName = self._doc.getXObjectName(rawName)
psObj = self._doc.idToObject.get(regName, None)
if not psObj:
#first use of this chunk of Postscript, make an object
psObj = pdfdoc.PDFPostScriptXObject(command + '\r\n')
self._setXObjects(psObj)
self._doc.Reference(psObj, regName)
self._doc.addForm(rawName, psObj)
if position == 0:
self._psCommandsBeforePage.append("/%s Do" % regName)
elif position==1:
self._code.append("/%s Do" % regName)
else:
self._psCommandsAfterPage.append("/%s Do" % regName)
self._formsinuse.append(rawName)
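# Illustrative usage of addPostScriptCommand (not part of the library; assumes a
# Canvas instance `c`; the PostScript here is only a placeholder comment - real
# tray or font commands are printer specific, see test_pdfbase_postscript.py).
#
#   c.addPostScriptCommand('% device setup would go here', position=0)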
def _absRect(self,rect,relative=0):
if not rect:
w,h = self._pagesize
rect = (0,0,w,h)
elif relative:
lx, ly, ux, uy = rect
xll,yll = self.absolutePosition(lx,ly)
xur,yur = self.absolutePosition(ux, uy)
xul,yul = self.absolutePosition(lx, uy)
xlr,ylr = self.absolutePosition(ux, ly)
xs = xll, xur, xul, xlr
ys = yll, yur, yul, ylr
xmin, ymin = min(xs), min(ys)
xmax, ymax = max(xs), max(ys)
rect = xmin, ymin, xmax, ymax
bw = self._getCmShift()
if bw:
rect = rect[0]+bw,rect[1]+bw,rect[2]+bw,rect[3]+bw
return rect
def freeTextAnnotation(self, contents, DA, Rect=None, addtopage=1, name=None, relative=0, **kw):
"""DA is the default appearance string???"""
Rect = self._absRect(Rect,relative)
self._addAnnotation(pdfdoc.FreeTextAnnotation(Rect, contents, DA, **kw), name, addtopage)
def textAnnotation(self, contents, Rect=None, addtopage=1, name=None, relative=0, **kw):
"""Experimental, but works.
"""
Rect = self._absRect(Rect,relative)
self._addAnnotation(pdfdoc.TextAnnotation(Rect, contents, **kw), name, addtopage)
textAnnotation0 = textAnnotation #deprecated
def highlightAnnotation(self, contents, Rect, QuadPoints=None, Color=[0.83, 0.89, 0.95], addtopage=1,
name=None, relative=0, **kw):
"""
Allows adding of a highlighted annotation.
Rect: Mouseover area to show contents of annotation
QuadPoints: List of four x/y points [TOP-LEFT, TOP-RIGHT, BOTTOM-LEFT, BOTTOM-RIGHT]
These points outline the areas to highlight.
You can have multiple groups of four to allow multiple highlighted areas.
Is in the format [x1, y1, x2, y2, x3, y3, x4, y4, x1, y1, x2, y2, x3, y3, x4, y4] etc
QuadPoints defaults to be area inside of passed in Rect
Color: The color of the highlighting.
"""
Rect = self._absRect(Rect, relative)
if not QuadPoints:
QuadPoints = pdfdoc.rect_to_quad(Rect)
self._addAnnotation(pdfdoc.HighlightAnnotation(Rect, contents, QuadPoints, Color, **kw), name, addtopage)
def inkAnnotation(self, contents, InkList=None, Rect=None, addtopage=1, name=None, relative=0, **kw):
"Experimental - not implemented yet"
raise NotImplementedError
Rect = self._absRect(Rect,relative)
if not InkList:
w, h = self._pagesize
InkList = ((100,100,100,h-100,w-100,h-100,w-100,100),)
self._addAnnotation(pdfdoc.InkAnnotation(Rect, contents, InkList, **kw), name, addtopage)
inkAnnotation0 = inkAnnotation #deprecated
def linkAbsolute(self, contents, destinationname, Rect=None, addtopage=1, name=None,
thickness=0, color=None, dashArray=None, **kw):
"""rectangular link annotation positioned wrt the default user space.
The identified rectangle on the page becomes a "hot link" which
when clicked will send the viewer to the page and position identified
by the destination.
Rect identifies (lowerx, lowery, upperx, uppery) for lower left
and upperright points of the rectangle. Translations and other transforms
are IGNORED (the rectangular position is given with respect
to the default user space.
destinationname should be the name of a bookmark (which may be defined later
but must be defined before the document is generated).
You may want to use the keyword argument Border='[0 0 0]' to
suppress the visible rectangle around the link during viewing."""
return self.linkRect(contents, destinationname, Rect, addtopage, name, relative=0,
thickness=thickness, color=color, dashArray=dashArray, **kw)
def linkRect(self, contents, destinationname, Rect=None, addtopage=1, name=None, relative=1,
thickness=0, color=None, dashArray=None, **kw):
"""rectangular link annotation w.r.t the current user transform.
if the transform is skewed/rotated the absolute rectangle will use the max/min x/y
"""
destination = self._bookmarkReference(destinationname) # permitted to be undefined... must bind later...
Rect = self._absRect(Rect,relative)
kw["Rect"] = Rect
kw["Contents"] = contents
kw["Destination"] = destination
_annFormat(kw,color,thickness,dashArray)
return self._addAnnotation(pdfdoc.LinkAnnotation(**kw), name, addtopage)
def linkURL(self, url, rect, relative=0, thickness=0, color=None, dashArray=None, kind="URI", **kw):
"""Create a rectangular URL 'hotspot' in the given rectangle.
if relative=1, this is in the current coord system, otherwise
in absolute page space.
The remaining options affect the border appearance; the border is
drawn by Acrobat, not us. Set thickness to zero to hide it.
Any border drawn this way is NOT part of the page stream and
will not show when printed to a Postscript printer or distilled;
it is safest to draw your own."""
from reportlab.pdfbase.pdfdoc import PDFDictionary, PDFName, PDFArray, PDFString
#tried the documented BS element in the pdf spec but it
#does not work, and Acrobat itself does not appear to use it!
ann = PDFDictionary(dict=kw)
ann["Type"] = PDFName("Annot")
ann["Subtype"] = PDFName("Link")
ann["Rect"] = PDFArray(self._absRect(rect,relative)) # the whole page for testing
# the action is a separate dictionary
A = PDFDictionary()
A["Type"] = PDFName("Action") # not needed?
uri = PDFString(url)
A['S'] = PDFName(kind)
if kind=="URI":
A["URI"] = uri
elif kind=='GoToR':
A["F"] = uri
A["D"] = "[ 0 /XYZ null null null ]"
else:
raise ValueError("Unknown linkURI kind '%s'" % kind)
ann["A"] = A
_annFormat(ann,color,thickness,dashArray)
self._addAnnotation(ann)
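# Illustrative link examples (not part of the library; assumes a Canvas instance
# `c`, a bookmark named 'chapter1' defined elsewhere, and colours from reportlab.lib).
#
#   from reportlab.lib import colors
#   c.linkURL('https://www.reportlab.com', (72, 72, 250, 90),
#             relative=0, thickness=1, color=colors.blue)
#   c.linkAbsolute('Jump to chapter 1', 'chapter1', Rect=(72, 100, 250, 118))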
def _addAnnotation(self, annotation, name=None, addtopage=1):
count = self._annotationCount = self._annotationCount+1
if not name: name="NUMBER"+repr(count)
self._doc.addAnnotation(name, annotation)
if addtopage:
self._annotatePage(name)
def _annotatePage(self, name):
ref = self._doc.refAnnotation(name)
self._annotationrefs.append(ref)
def getPageNumber(self):
"get the page number for the current page being generated."
return self._pageNumber
def save(self):
"""Saves and close the PDF document in the file.
If there is current data a ShowPage is executed automatically.
After this operation the canvas must not be used further."""
if len(self._code): self.showPage()
self._doc.SaveToFile(self._filename, self)
def getpdfdata(self):
"""Returns the PDF data that would normally be written to a file.
If there is current data a ShowPage is executed automatically.
After this operation the canvas must not be used further."""
if len(self._code): self.showPage()
s = self._doc.GetPDFData(self)
if isUnicode(s):
s = s.encode('utf-8')
return s
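# Illustrative in-memory generation via getpdfdata (not part of the library).
# The filename passed to Canvas is never written to when getpdfdata is used.
#
#   from reportlab.pdfgen import canvas
#   c = canvas.Canvas('ignored.pdf')
#   c.drawString(72, 720, 'built entirely in memory')
#   pdf_bytes = c.getpdfdata()     # bytes, ready to stream over HTTP etc.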
def setPageSize(self, size):
"""accepts a 2-tuple in points for paper size for this
and subsequent pages"""
self._pagesize = size
self._make_preamble()
def setPageRotation(self, rot):
"""Instruct display device that this page is to be rotated"""
assert rot % 90.0 == 0.0, "Rotation must be a multiple of 90 degrees"
self._pageRotation = rot
def addLiteral(self, s, escaped=1):
"""introduce the literal text of PDF operations s into the current stream.
Only use this if you are an expert in the PDF file format."""
s = str(s) # make sure it's a string
if escaped==0:
s = self._escape(s) # escape PDF special characters for safety
self._code.append(s)
######################################################################
#
# coordinate transformations
#
######################################################################
def resetTransforms(self):
"""I want to draw something (eg, string underlines) w.r.t. the default user space.
Reset the matrix! This should usually be used as follows::
canv.saveState()
canv.resetTransforms()
#...draw some stuff in default space coords...
canv.restoreState() # go back!
"""
# we have to adjoin the inverse, since reset is not a basic operation (without save/restore)
(selfa, selfb, selfc, selfd, selfe, selff) = self._currentMatrix
det = selfa*selfd - selfc*selfb
resulta = selfd/det
resultc = -selfc/det
resulte = (selfc*selff - selfd*selfe)/det
resultd = selfa/det
resultb = -selfb/det
resultf = (selfe*selfb - selff*selfa)/det
self.transform(resulta, resultb, resultc, resultd, resulte, resultf)
def transform(self, a,b,c,d,e,f):
"""adjoin a mathematical transform to the current graphics state matrix.
Not recommended for beginners."""
#How can Python track this?
if ENABLE_TRACKING:
a0,b0,c0,d0,e0,f0 = self._currentMatrix
self._currentMatrix = (a0*a+c0*b, b0*a+d0*b,
a0*c+c0*d, b0*c+d0*d,
a0*e+c0*f+e0, b0*e+d0*f+f0)
if self._code and self._code[-1][-3:]==' cm':
L = self._code[-1].split()
a0, b0, c0, d0, e0, f0 = list(map(float,L[-7:-1]))
s = (' '.join(L[:-7]) + ' %s cm') if len(L)>7 else '%s cm'
self._code[-1] = s % fp_str(a0*a+c0*b,b0*a+d0*b,a0*c+c0*d,b0*c+d0*d,a0*e+c0*f+e0,b0*e+d0*f+f0)
else:
self._code.append('%s cm' % fp_str(a,b,c,d,e,f))
def absolutePosition(self, x, y):
"""return the absolute position of x,y in user space w.r.t. default user space"""
if not ENABLE_TRACKING:
raise ValueError("tracking not enabled! (canvas.ENABLE_TRACKING=0)")
(a,b,c,d,e,f) = self._currentMatrix
xp = a*x + c*y + e
yp = b*x + d*y + f
return (xp, yp)
def translate(self, dx, dy):
"""move the origin from the current (0,0) point to the (dx,dy) point
(with respect to the current graphics state)."""
self.transform(1,0,0,1,dx,dy)
def scale(self, x, y):
"""Scale the horizontal dimension by x and the vertical by y
(with respect to the current graphics state).
For example canvas.scale(2.0, 0.5) will make everything short and fat."""
self.transform(x,0,0,y,0,0)
def rotate(self, theta):
"""Canvas.rotate(theta)
Rotate the canvas by the angle theta (in degrees)."""
c = cos(theta * pi / 180)
s = sin(theta * pi / 180)
self.transform(c, s, -s, c, 0, 0)
def skew(self, alpha, beta):
tanAlpha = tan(alpha * pi / 180)
tanBeta = tan(beta * pi / 180)
self.transform(1, tanAlpha, tanBeta, 1, 0, 0)
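# Illustrative transform composition (not part of the library; assumes a Canvas
# instance `c`). Transforms accumulate, so bracket them with save/restoreState.
#
#   c.saveState()
#   c.translate(200, 400)          # move the origin
#   c.rotate(30)                   # then rotate about the new origin
#   c.scale(2, 1)                  # then stretch horizontally
#   c.drawString(0, 0, 'transformed text')
#   c.restoreState()               # back to the previous coordinate system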
######################################################################
#
# graphics state management
#
######################################################################
def saveState(self):
"""Save the current graphics state to be restored later by restoreState.
For example:
canvas.setFont("Helvetica", 20)
canvas.saveState()
...
canvas.setFont("Courier", 9)
...
canvas.restoreState()
# if the save/restore pairs match then font is Helvetica 20 again.
"""
self.push_state_stack()
self._code.append('q')
def restoreState(self):
"""restore the graphics state to the matching saved state (see saveState)."""
self._code.append('Q')
self.pop_state_stack()
###############################################################
#
# Drawing methods. These draw things directly without
# fiddling around with Path objects. We can add any geometry
# methods we wish as long as their meaning is precise and
# they are of general use.
#
# In general there are two patterns. Closed shapes
# have the pattern shape(self, args, stroke=1, fill=0);
# by default they draw an outline only. Line segments come
# in three flavours: line, bezier, arc (which is a segment
# of an elliptical arc, approximated by up to four bezier
# curves, one for each quadrant).
#
# In the case of lines, we provide a 'plural' to unroll
# the inner loop; it is useful for drawing big grids
################################################################
#--------first the line drawing methods-----------------------
def line(self, x1,y1, x2,y2):
"""draw a line segment from (x1,y1) to (x2,y2) (with color, thickness and
other attributes determined by the current graphics state)."""
self._code.append('n %s m %s l S' % (fp_str(x1, y1), fp_str(x2, y2)))
def lines(self, linelist):
"""Like line(), permits many lines to be drawn in one call.
for example for the figure::
|
-- --
|
crosshairs = [(20,0,20,10), (20,30,20,40), (0,20,10,20), (30,20,40,20)]
canvas.lines(crosshairs)
"""
self._code.append('n')
for (x1,y1,x2,y2) in linelist:
self._code.append('%s m %s l' % (fp_str(x1, y1), fp_str(x2, y2)))
self._code.append('S')
def grid(self, xlist, ylist):
"""Lays out a grid in current line style. Supply list of
x an y positions."""
assert len(xlist) > 1, "x coordinate list must have 2+ items"
assert len(ylist) > 1, "y coordinate list must have 2+ items"
lines = []
y0, y1 = ylist[0], ylist[-1]
x0, x1 = xlist[0], xlist[-1]
for x in xlist:
lines.append((x,y0,x,y1))
for y in ylist:
lines.append((x0,y,x1,y))
self.lines(lines)
def bezier(self, x1, y1, x2, y2, x3, y3, x4, y4):
"Bezier curve with the four given control points"
self._code.append('n %s m %s c S' %
(fp_str(x1, y1), fp_str(x2, y2, x3, y3, x4, y4))
)
def arc(self, x1,y1, x2,y2, startAng=0, extent=90):
"""Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2,
starting at startAng degrees and covering extent degrees. Angles
start with 0 to the right (+x) and increase counter-clockwise.
These should have x1<x2 and y1<y2."""
pathobject.PDFPathObject(code=self._code).arc(x1,y1,x2,y2,startAng,extent)
self._strokeAndFill(1,0)
#--------now the shape drawing methods-----------------------
def rect(self, x, y, width, height, stroke=1, fill=0):
"draws a rectangle with lower left corner at (x,y) and width and height as given."
self._code.append('n %s re ' % fp_str(x, y, width, height)
+ PATH_OPS[stroke, fill, self._fillMode])
def ellipse(self, x1, y1, x2, y2, stroke=1, fill=0):
"""Draw an ellipse defined by an enclosing rectangle.
Note that (x1,y1) and (x2,y2) are the corner points of
the enclosing rectangle.
"""
pathobject.PDFPathObject(code=self._code).ellipse(x1, y1, x2-x1, y2-y1)
self._strokeAndFill(stroke, fill)
def wedge(self, x1,y1, x2,y2, startAng, extent, stroke=1, fill=0):
"""Like arc, but connects to the centre of the ellipse.
Most useful for pie charts and PacMan!"""
p = pathobject.PDFPathObject(code=self._code)
p.moveTo(0.5*(x1+x2),0.5*(y1+y2))
p.arcTo(x1,y1,x2,y2,startAng,extent)
p.close()
self._strokeAndFill(stroke,fill)
def circle(self, x_cen, y_cen, r, stroke=1, fill=0):
"""draw a cirle centered at (x_cen,y_cen) with radius r (special case of ellipse)"""
x1 = x_cen - r
x2 = x_cen + r
y1 = y_cen - r
y2 = y_cen + r
self.ellipse(x1, y1, x2, y2, stroke, fill)
def roundRect(self, x, y, width, height, radius, stroke=1, fill=0):
"""Draws a rectangle with rounded corners. The corners are
approximately quadrants of a circle, with the given radius."""
#make the path operators draw into our code
pathobject.PDFPathObject(code=self._code).roundRect(x, y, width, height, radius)
self._strokeAndFill(stroke,fill)
def _addShading(self, shading):
name = self._doc.addShading(shading)
self._shadingUsed[name] = name
return name
def shade(self, shading):
name = self._addShading(shading)
self._code.append('/%s sh' % name)
def linearGradient(self, x0, y0, x1, y1, colors, positions=None, extend=True):
#this code contributed by Peter Johnson <[email protected]>
from reportlab.pdfbase.pdfdoc import PDFAxialShading
colorSpace, ncolors = _normalizeColors(colors)
fcn = _buildColorFunction(ncolors, positions)
if extend:
extendStr = "[true true]"
else:
extendStr = "[false false]"
shading = PDFAxialShading(x0, y0, x1, y1, Function=fcn,
ColorSpace=colorSpace, Extend=extendStr)
self.shade(shading)
def radialGradient(self, x, y, radius, colors, positions=None, extend=True):
#this code contributed by Peter Johnson <[email protected]>
from reportlab.pdfbase.pdfdoc import PDFRadialShading
colorSpace, ncolors = _normalizeColors(colors)
fcn = _buildColorFunction(ncolors, positions)
if extend:
extendStr = "[true true]"
else:
extendStr = "[false false]"
shading = PDFRadialShading(x, y, 0.0, x, y, radius, Function=fcn,
ColorSpace=colorSpace, Extend=extendStr)
self.shade(shading)
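# Illustrative gradient usage (not part of the library; assumes a Canvas instance
# `c`). A shading paints the whole current clip area, so clip to the target
# rectangle first, then shade, then restore.
#
#   from reportlab.lib.colors import red, yellow, blue
#   c.saveState()
#   p = c.beginPath()
#   p.rect(100, 500, 200, 80)
#   c.clipPath(p, stroke=0, fill=0)
#   c.linearGradient(100, 500, 300, 580, (red, yellow, blue),
#                    positions=(0, 0.5, 1))
#   c.restoreState()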
##################################################
#
# Text methods
#
# As with graphics, a separate object ensures that
# everything is bracketed between text operators.
# The methods below are a high-level convenience.
# use PDFTextObject for multi-line text.
##################################################
def drawString(self, x, y, text, mode=None, charSpace=0):
"""Draws a string in the current text styles."""
if sys.version_info[0] == 3 and not isinstance(text, str):
text = text.decode('utf-8')
#we could inline this for speed if needed
t = self.beginText(x, y)
if mode is not None: t.setTextRenderMode(mode)
if charSpace: t.setCharSpace(charSpace)
t.textLine(text)
if charSpace: t.setCharSpace(0)
if mode is not None: t.setTextRenderMode(0)
self.drawText(t)
def drawRightString(self, x, y, text, mode=None, charSpace=0):
"""Draws a string right-aligned with the x coordinate"""
if sys.version_info[0] == 3 and not isinstance(text, str):
text = text.decode('utf-8')
width = self.stringWidth(text, self._fontname, self._fontsize)
if charSpace: width += (len(text)-1)*charSpace
t = self.beginText(x - width, y)
if mode is not None: t.setTextRenderMode(mode)
if charSpace: t.setCharSpace(charSpace)
t.textLine(text)
if charSpace: t.setCharSpace(0)
if mode is not None: t.setTextRenderMode(0)
self.drawText(t)
def drawCentredString(self, x, y, text, mode=None, charSpace=0):
"""Draws a string centred on the x coordinate.
We're British, dammit, and proud of our spelling!"""
if sys.version_info[0] == 3 and not isinstance(text, str):
text = text.decode('utf-8')
width = self.stringWidth(text, self._fontname, self._fontsize)
if charSpace: width += (len(text)-1)*charSpace
t = self.beginText(x - 0.5*width, y)
if mode is not None: t.setTextRenderMode(mode)
if charSpace: t.setCharSpace(charSpace)
t.textLine(text)
if charSpace: t.setCharSpace(0)
if mode is not None: t.setTextRenderMode(0)
self.drawText(t)
def drawAlignedString(self, x, y, text, pivotChar=rl_config.decimalSymbol, mode=None, charSpace=0):
"""Draws a string aligned on the first '.' (or other pivot character).
The centre position of the pivot character will be used as x.
So, you could draw a straight line down through all the decimals in a
column of numbers, and anything without a decimal should be
optically aligned with those that do.
There is one special rule to help with accounting formatting. Here's
how normal numbers should be aligned on the 'dot'. Look at the
LAST two::
12,345.67
987.15
42
-1,234.56
(456.78)
(456)
27 inches
13cm
Since the last three do not contain a dot, a crude dot-finding
rule would place them wrong. So we test for the special case
where no pivot is found, digits are present, but the last character
is not a digit. We then work back from the end of the string.
This case is a tad slower but hopefully rare.
"""
parts = text.split(pivotChar,1)
pivW = self.stringWidth(pivotChar, self._fontname, self._fontsize)
if len(parts) == 1 and digitPat.search(text) is not None and text[-1] not in digits:
#we have no decimal but it ends in a bracket, or 'in' or something.
#the cut should be after the last digit.
leftText = parts[0][0:-1]
rightText = parts[0][-1]
#any more?
while leftText[-1] not in digits:
rightText = leftText[-1] + rightText
leftText = leftText[0:-1]
self.drawRightString(x-0.5*pivW, y, leftText, mode=mode, charSpace=charSpace)
self.drawString(x-0.5*pivW, y, rightText, mode=mode, charSpace=charSpace)
else:
#normal case
leftText = parts[0]
self.drawRightString(x-0.5*pivW, y, leftText, mode=mode, charSpace=charSpace)
if len(parts) > 1:
rightText = pivotChar + parts[1]
self.drawString(x-0.5*pivW, y, rightText, mode=mode, charSpace=charSpace)
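# Illustrative usage of drawAlignedString (not part of the library; assumes a
# Canvas instance `c`). All the values line up on the pivot character at x=300.
#
#   c.setFont('Helvetica', 10)
#   y = 700
#   for v in ('1,234.56', '987.4', '(456)', '42'):
#       c.drawAlignedString(300, y, v)
#       y -= 14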
def getAvailableFonts(self):
"""Returns the list of PostScript font names available.
Standard set now, but may grow in future with font embedding."""
fontnames = self._doc.getAvailableFonts()
fontnames.sort()
return fontnames
def addFont(self, fontObj):
"add a new font for subsequent use."
self._doc.addFont(fontObj)
def _addStandardFonts(self):
"""Ensures the standard 14 fonts are available in the system encoding.
Called by canvas on initialization"""
for fontName in pdfmetrics.standardFonts:
self.addFont(pdfmetrics.fontsByName[fontName])
def listLoadedFonts0(self):
"Convenience function to list all loaded fonts"
names = list(pdfmetrics.widths.keys())
names.sort()
return names
def setFont(self, psfontname, size, leading = None):
"""Sets the font. If leading not specified, defaults to 1.2 x
font size. Raises a readable exception if an illegal font
is supplied. Font names are case-sensitive! Keeps track
of font name and size for metrics."""
self._fontname = psfontname
self._fontsize = size
if leading is None:
leading = size * 1.2
self._leading = leading
font = pdfmetrics.getFont(self._fontname)
if not font._dynamicFont:
if font.face.builtIn or not getattr(self,'_drawTextAsPath',False):
pdffontname = self._doc.getInternalFontName(psfontname)
self._code.append('BT %s %s Tf %s TL ET' % (pdffontname, fp_str(size), fp_str(leading)))
def setFontSize(self, size=None, leading=None):
'''Sets font size or leading without knowing the font face'''
if size is None: size = self._fontsize
if leading is None: leading = self._leading
self.setFont(self._fontname, size, leading)
def stringWidth(self, text, fontName=None, fontSize=None):
"gets width of a string in the given font and size"
return pdfmetrics.stringWidth(text, fontName or self._fontname,
(fontSize,self._fontsize)[fontSize is None])
# basic graphics modes
def setLineWidth(self, width):
self._lineWidth = width
self._code.append('%s w' % fp_str(width))
def setLineCap(self, mode):
"""0=butt,1=round,2=square"""
assert mode in (0,1,2), "Line caps allowed: 0=butt,1=round,2=square"
self._lineCap = mode
self._code.append('%d J' % mode)
def setLineJoin(self, mode):
"""0=mitre, 1=round, 2=bevel"""
assert mode in (0,1,2), "Line Joins allowed: 0=mitre, 1=round, 2=bevel"
self._lineJoin = mode
self._code.append('%d j' % mode)
def setMiterLimit(self, limit):
self._mitreLimit = limit
self._code.append('%s M' % fp_str(limit))
def setDash(self, array=[], phase=0):
"""Two notations. pass two numbers, or an array and phase"""
if isinstance(array,(int,float)):
self._code.append('[%s %s] 0 d' % (array, phase))
elif isSeq(array):
assert phase >= 0, "phase is a length in user space"
textarray = ' '.join([str(s) for s in array])
self._code.append('[%s] %s d' % (textarray, phase))
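# Illustrative usage of setDash (not part of the library; assumes a Canvas
# instance `c`).
#
#   c.setDash(6, 3)               # two-number form: 6 points on, 3 points off
#   c.line(72, 400, 300, 400)
#   c.setDash([1, 2, 6, 2], 0)    # explicit dash array and phase
#   c.line(72, 380, 300, 380)
#   c.setDash()                   # empty array: back to a solid line
#   c.line(72, 360, 300, 360)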
# path stuff - the separate path object builds it
def beginPath(self):
"""Returns a fresh path object. Paths are used to draw
complex figures. The object returned follows the protocol
for a pathobject.PDFPathObject instance"""
return pathobject.PDFPathObject()
def drawPath(self, aPath, stroke=1, fill=0):
"Draw the path object in the mode indicated"
self._code.append(str(aPath.getCode()))
self._strokeAndFill(stroke,fill)
def _strokeAndFill(self,stroke,fill):
self._code.append(PATH_OPS[stroke, fill, self._fillMode])
def clipPath(self, aPath, stroke=1, fill=0):
"clip as well as drawing"
gc = aPath.getCode(); pathops = PATH_OPS[stroke, fill, self._fillMode]
clip = (self._fillMode == FILL_EVEN_ODD and ' W* ' or ' W ')
item = "%s%s%s" % (gc, clip, pathops) # ensure string conversion
self._code.append(item)
#self._code.append( aPath.getCode()
# + (self._fillMode == FILL_EVEN_ODD and ' W* ' or ' W ')
# + PATH_OPS[stroke,fill,self._fillMode])
def beginText(self, x=0, y=0):
"""Returns a fresh text object. Text objects are used
to add large amounts of text. See textobject.PDFTextObject"""
return textobject.PDFTextObject(self, x, y)
def drawText(self, aTextObject):
"""Draws a text object"""
self._code.append(str(aTextObject.getCode()))
def setPageCompression(self, pageCompression=1):
"""Possible values None, 1 or 0
If None the value from rl_config will be used.
If on, the page data will be compressed, leading to much
smaller files, but takes a little longer to create the files.
This applies to all subsequent pages, or until setPageCompression()
is next called."""
if pageCompression is None: pageCompression = rl_config.pageCompression
if pageCompression and not zlib:
self._pageCompression = 0
else:
self._pageCompression = pageCompression
self._doc.setCompression(self._pageCompression)
def setPageDuration(self, duration=None):
"""Allows hands-off animation of presentations :-)
If this is set to a number, in full screen mode, Acrobat Reader
will advance to the next page after this many seconds. The
duration of the transition itself (fade/flicker etc.) is controlled
by the 'duration' argument to setPageTransition; this controls
the time spent looking at the page. This is effective for all
subsequent pages."""
self._pageDuration = duration
def setPageTransition(self, effectname=None, duration=1,
direction=0,dimension='H',motion='I'):
"""PDF allows page transition effects for use when giving
presentations. There are six possible effects. You can
just give the effect name, or supply more advanced options
to refine the way it works. There are three types of extra
argument permitted, and here are the allowed values::
direction_arg = [0,90,180,270]
dimension_arg = ['H', 'V']
motion_arg = ['I','O'] (start at inside or outside)
This table says which ones take which arguments::
PageTransitionEffects = {
'Split': [direction_arg, motion_arg],
'Blinds': [dimension_arg],
'Box': [motion_arg],
'Wipe' : [direction_arg],
'Dissolve' : [],
'Glitter':[direction_arg]
}
Have fun!
"""
# This builds a Python dictionary with the right arguments
# for the Trans dictionary in the PDFPage object,
# and stores it in the variable _pageTransition.
# showPage later passes this to the setPageTransition method
# of the PDFPage object, which turns it to a PDFDictionary.
self._pageTransition = {}
if not effectname:
return
#first check each optional argument has an allowed value
if direction in [0,90,180,270]:
direction_arg = ('Di', '/%d' % direction)
else:
raise pdfdoc.PDFError(' directions allowed are 0,90,180,270')
if dimension in ['H', 'V']:
dimension_arg = ('Dm', '/' + dimension)
else:
raise pdfdoc.PDFError('dimension values allowed are H and V')
if motion in ['I','O']:
motion_arg = ('M', '/' + motion)
else:
raise pdfdoc.PDFError('motion values allowed are I and O')
# this says which effects require which argument types from above
PageTransitionEffects = {
'Split': [direction_arg, motion_arg],
'Blinds': [dimension_arg],
'Box': [motion_arg],
'Wipe' : [direction_arg],
'Dissolve' : [],
'Glitter':[direction_arg]
}
try:
args = PageTransitionEffects[effectname]
except KeyError:
raise pdfdoc.PDFError('Unknown Effect Name "%s"' % effectname)
# now build the dictionary
transDict = {}
transDict['Type'] = '/Trans'
transDict['D'] = '%d' % duration
transDict['S'] = '/' + effectname
for (key, value) in args:
transDict[key] = value
self._pageTransition = transDict
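# Illustrative self-running presentation setup (not part of the library; assumes
# a Canvas instance `c`). Settings apply to the current and subsequent pages.
#
#   c.setPageDuration(5)                                  # 5 seconds per page
#   c.setPageTransition('Wipe', duration=1, direction=90)
#   c.showPage()
#   c.setPageTransition('Dissolve', duration=2)
#   c.showPage()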
def getCurrentPageContent(self):
"""Return uncompressed contents of current page buffer.
This is useful in creating test cases and assertions of what
got drawn, without necessarily saving pages to disk"""
return '\n'.join(self._code)
def setViewerPreference(self,pref,value):
'''set one of the allowed entries in the document's viewer preferences'''
catalog = self._doc.Catalog
VP = getattr(catalog,'ViewerPreferences',None)
if VP is None:
from reportlab.pdfbase.pdfdoc import ViewerPreferencesPDFDictionary
VP = catalog.ViewerPreferences = ViewerPreferencesPDFDictionary()
VP[pref] = value
def getViewerPreference(self,pref):
'''you'll get an error here if none have been set'''
return self._doc.Catalog.ViewerPreferences[pref]
def delViewerPreference(self,pref):
'''you'll get an error here if none have been set'''
del self._doc.Catalog.ViewerPreferences[pref]
def setCatalogEntry(self,key,value):
from reportlab.pdfbase.pdfdoc import PDFDictionary, PDFArray, PDFString
if isStr(value):
value = PDFString(value)
elif isinstance(value,(list,tuple)):
value = PDFArray(value)
elif isinstance(value,dict):
value = PDFDictionary(value)
setattr(self._doc.Catalog,key,value)
def getCatalogEntry(self,key):
return getattr(self._doc.Catalog,key)
def delCatalogEntry(self,key):
'''you'll get an error here if it's not been set'''
delattr(self._doc.Catalog,key)
def addPageLabel(self, pageNum, style=None, start=None, prefix=None):
'''add a PDFPageLabel for pageNum'''
catalog = self._doc.Catalog
PL = getattr(catalog,'PageLabels',None)
if PL is None:
from reportlab.pdfbase.pdfdoc import PDFPageLabels
PL = catalog.PageLabels = PDFPageLabels()
from reportlab.pdfbase.pdfdoc import PDFPageLabel
PL.addPageLabel(pageNum,PDFPageLabel(style,start,prefix))
if __name__ == '__main__':
print('For test scripts, look in tests')
| {
"content_hash": "3adeca65a0b89d7f4d89a1e425448c0a",
"timestamp": "",
"source": "github",
"line_count": 1863,
"max_line_length": 151,
"avg_line_length": 41.088566827697264,
"alnum_prop": 0.5927653237184511,
"repo_name": "mollstam/UnrealPy",
"id": "f4f59b9801684bc1a0cbf7fe5b3d19a112a0d6d4",
"size": "76628",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/reportlab-3.2.0/src/reportlab/pdfgen/canvas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886305"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925518"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
} |
from django.db import transaction
from django import forms
from registration.forms import RegistrationForm
from cmdrs.models import Commander
class SignupForm(RegistrationForm):
commander_name = forms.CharField(
help_text='This should match your in-game Commander Name (minus the '
'CMDR portion)'
)
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
def save(self):
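        # The atomic block keeps the User and its Commander row consistent: if the
        # Commander cannot be created, the freshly saved User is rolled back too.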
with transaction.atomic():
user = super(SignupForm, self).save()
Commander.objects.create(
user=user,
name=self.cleaned_data['commander_name']
)
return user
| {
"content_hash": "c99a53b8d1347e05e7059196b79d28e5",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 77,
"avg_line_length": 26.96153846153846,
"alnum_prop": 0.6148359486447932,
"repo_name": "toastdriven/eliteracing",
"id": "faf83c58ee3aec7eddd03c85ba93693defc7b095",
"size": "701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7913"
},
{
"name": "HTML",
"bytes": "29062"
},
{
"name": "JavaScript",
"bytes": "11553"
},
{
"name": "Python",
"bytes": "107925"
}
],
"symlink_target": ""
} |
"""
=====================================
Blind source separation using FastICA
=====================================
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 2 instruments playing simultaneously and 2 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.decomposition import FastICA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 10, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
S = np.c_[s1, s2]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1], [0.5, 2]]) # Mixing matrix
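# Each microphone records a different linear mixture of the two sources, so the
# observations are X = S . A^T (rows are time samples, columns are microphones).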
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA()
S_ = ica.fit(X).transform(X) # Get the estimated sources
A_ = ica.get_mixing_matrix() # Get estimated mixing matrix
assert np.allclose(X, np.dot(S_, A_.T))
###############################################################################
# Plot results
pl.figure()
pl.subplot(3, 1, 1)
pl.plot(S)
pl.title('True Sources')
pl.subplot(3, 1, 2)
pl.plot(X)
pl.title('Observations (mixed signal)')
pl.subplot(3, 1, 3)
pl.plot(S_)
pl.title('ICA estimated sources')
pl.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
pl.show()
| {
"content_hash": "49acf8d4a1678a36a3b068034c0cff29",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 29.647058823529413,
"alnum_prop": 0.583994708994709,
"repo_name": "jmargeta/scikit-learn",
"id": "dda2dd2d0ea60e3c3373d376a1a371b7308a42ef",
"size": "1512",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/decomposition/plot_ica_blind_source_separation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import smallsmilhandler
import sys
import json
from urllib.request import urlretrieve
class KaraokeLocal():
def __init__(self, fich):
parser = make_parser()
cHandler = smallsmilhandler.SmallSMILHandler()
parser.setContentHandler(cHandler)
try:
parser.parse(open(fich, 'r'))
except IOError:
sys.exit("Usage: python karaoke.py file.smil")
self.lista = cHandler.get_tags()
def __str__(self):
elemento = ''
for dicc in self.lista:
elemento += dicc['name']
for i in dicc:
if dicc[i] and i != 'name':
elemento += "\t" + i + '="' + dicc[i] + '"'
elemento += '\n'
return elemento
def to_json(self, fich, finalname):
        filen = fich.split('.')[-1]  # take the extension; [-1] stays correct when the path contains other dots
if filen == 'smil':
archivo = open(finalname + '.json', 'w')
contenido = json.dumps(self.lista)
archivo.write(contenido)
def do_local(self):
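        # Download every http:// resource referenced in the SMIL tags and rewrite
        # the attribute value to the bare file name, so the presentation can be
        # played from the local directory.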
for dicc in self.lista:
for etiqueta in dicc:
if dicc[etiqueta].find('http://') == 0:
url = dicc[etiqueta]
elemento = url[url.rfind('/') + 1:]
urlretrieve(url, elemento)
modifica = dicc[etiqueta].split('/')[-1]
dicc[etiqueta] = modifica
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit("Usage: python karaoke.py file.smil")
fich = sys.argv[1]
karaoke = KaraokeLocal(fich)
    print(karaoke)
karaoke.to_json(fich, 'karaoke')
karaoke.do_local()
karaoke.to_json(fich, 'local')
    print(karaoke)
| {
"content_hash": "3e86bba4f6221b34ca5e326ac25f63b6",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 63,
"avg_line_length": 30.79310344827586,
"alnum_prop": 0.5319148936170213,
"repo_name": "Wendycarolina/ptavi-p3",
"id": "64aa833b1269657650c61cd55bc48e5315902faf",
"size": "1830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "karaoke.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4808"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
import settings
from django.contrib import admin
admin.autodiscover()
import views
urlpatterns = patterns('',
url(r'^$', views.index),
url(r'^main/$', views.main),
url(r'^admin/', admin.site.urls),
url(r'^login/$', views.loginUser),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'^register/$', views.register),
url(r'^registerAPI/$', views.registerAPI),
url(r'^changeAPI/$', views.changeAPI),
url(r'^getWinkLogin/$', views.getWinkLogin),
url(r'^settings/$', views.settings),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"content_hash": "8244bb31362f132824bf9dd2d874aa15",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 34.523809523809526,
"alnum_prop": 0.663448275862069,
"repo_name": "odingrey/Django-Wink",
"id": "e6536f268aed2836f647d8d52c405897992e9eba",
"size": "725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wink/wink/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56666"
},
{
"name": "HTML",
"bytes": "7207"
},
{
"name": "JavaScript",
"bytes": "132747"
},
{
"name": "Python",
"bytes": "18374"
}
],
"symlink_target": ""
} |
from parlai.core.teachers import FbDeprecatedDialogTeacher
from .build import build
import copy
import os
def _path(opt, filtered):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
return os.path.join(opt['datapath'], 'SimpleQuestions', 'sq', dt + '.txt')
class DefaultTeacher(FbDeprecatedDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['datafile'] = _path(opt, '')
super().__init__(opt, shared)
| {
"content_hash": "a2f02de7b3b0550f0bd5ecd2c8155dca",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 27.105263157894736,
"alnum_prop": 0.6485436893203883,
"repo_name": "facebookresearch/ParlAI",
"id": "bb9f7964e8445c67989d884e0c5c71761de731d2",
"size": "715",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/tasks/simplequestions/agents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modificado em')),
('first_name', models.CharField(max_length=100, verbose_name='nome')),
('last_name', models.CharField(max_length=100, verbose_name='sobre nome')),
('phone', models.CharField(max_length=11, verbose_name='telefone')),
('cell', models.CharField(blank=True, max_length=12, verbose_name='celular')),
('address', models.CharField(max_length=200, verbose_name='endereço')),
('city', models.CharField(max_length=150, verbose_name='cidade')),
('zip_code', models.CharField(max_length=30, verbose_name='codigo postal')),
('state', models.CharField(choices=[('AC', 'Acre'), ('AL', 'Alagoas'), ('AM', 'Amazonas'), ('AP', 'Amapá'), ('BA', 'Bahia'), ('CE', 'Ceará'), ('DF', 'Brasília'), ('ES', 'Espírito Santo'), ('GO', 'Goiás'), ('MA', 'Maranhão'), ('MG', 'Minas Gerais'), ('MS', 'Mato Grosso do Sul'), ('MT', 'Mato Grosso'), ('PA', 'Pará'), ('PB', 'Paraíba'), ('PE', 'Pernambuco'), ('PI', 'Piauí'), ('PR', 'Paraná'), ('RJ', 'Rio de Janeiro'), ('RN', 'Rio Grande do Norte'), ('RO', 'Rondônia'), ('RR', 'Roraima'), ('RS', 'Rio Grande do Sul'), ('SC', 'Santa Catarina'), ('SE', 'Sergipe'), ('SP', 'São Paulo'), ('TO', 'Tocantins')], max_length=100, verbose_name='estado')),
('cpf', models.CharField(max_length=11, unique=True, verbose_name='cpf')),
],
options={
'verbose_name': 'cliente',
'ordering': ['first_name', 'last_name'],
'verbose_name_plural': 'clientes',
},
),
]
| {
"content_hash": "9664e0ae330777555ec4dc0ea9e70302",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 663,
"avg_line_length": 60.47222222222222,
"alnum_prop": 0.5507579237482775,
"repo_name": "dhelbegor/client-crud",
"id": "0d2bc196969df68c03091fe28d8eae5e64132990",
"size": "2262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/core/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5116"
},
{
"name": "HTML",
"bytes": "8677"
},
{
"name": "JavaScript",
"bytes": "463"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "15016"
}
],
"symlink_target": ""
} |
import boto.ec2


def get_instance_objs (ec2, opts, filterDict={}):
if opts['instance_name']:
filterDict = {'tag:Name': opts['instance_name']}
elif opts['instance_id']:
filterDict = {'instance-id': opts['instance_id']}
try:
reservations = ec2.get_all_instances(filters=filterDict)
except:
print "Error getting all instance data in [%s], bad credentials ?" % __file__
return None
try:
instance = reservations[0].instances[0]
except IndexError:
return None
all_instances = []
for reservation in reservations:
for instance in reservation.instances:
all_instances.append(instance)
return all_instances
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def instance_to_name (ec2, instance_id):
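    # NOTE: returns the integer 1 (not None) when no 'Name' tag matches the
    # instance id, so callers must treat 1 as the "not found" sentinel.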
ins_to_name_dict = {}
for tag in ec2.get_all_tags():
if (tag.res_type == 'instance') and (tag.name == 'Name'):
ins_to_name_dict[tag.res_id] = tag.value
if instance_id in ins_to_name_dict:
return ins_to_name_dict[instance_id]
else:
return 1
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def name_to_instance (ec2, name_to_find):
name_to_ins_dict = {}
for tag in ec2.get_all_tags():
if (tag.res_type == 'instance') and (tag.name == 'Name'):
name_to_ins_dict[tag.value] = tag.res_id
if name_to_find in name_to_ins_dict:
return name_to_ins_dict[name_to_find]
else:
return 1
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def region_instances (ec2):
instance_array = []
try:
reservations = ec2.get_all_instances()
except:
print "Error getting all instance data in [%s], bad credentials ?" % __file__
return None
for reservation in reservations:
for instance in reservation.instances:
instance_array.append(instance)
return instance_array
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def compile_instance_lists():
all_global_instances = []
regions = boto.ec2.regions()
for region in regions:
ec2 = region.connect()
instances = region_instances(ec2)
if instances:
all_global_instances.append(instances)
return all_global_instances
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def check_tags (ec2, opts):
print "Checking all expected tags are present on all instances..."
tag_dict = {}
for tag in ec2.get_all_tags():
if tag.res_type == 'instance':
if tag.res_id in tag_dict:
tag_dict[tag.res_id].append(tag.name)
else:
tag_dict[tag.res_id] = [tag.name]
for ins, tag_list in tag_dict.iteritems():
for tag_in_use in opts['tags_in_use']:
if tag_in_use not in tag_list:
print "Instance: \"%s\" does not have tag \"%s\"" % (instance_to_name(ec2, ins), tag_in_use)
print "Done"
| {
"content_hash": "40320a943dc4648b7033fd270ebdd606",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 100,
"avg_line_length": 30.322580645161292,
"alnum_prop": 0.5648936170212766,
"repo_name": "robertpearce/aws-manager",
"id": "ba099abf6373b038ea24a76154a80c8354528d32",
"size": "2820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awsmanager/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25953"
}
],
"symlink_target": ""
} |
'''@file test.py
this file will run the test script
usage: run test --expdir=<expdir> --recipe=<recipe> --computing=<computing>'''
import sys
import os
sys.path.append(os.getcwd())
import shutil
import subprocess
from six.moves import configparser
import tensorflow as tf
from test import test
def main(expdir, recipe, computing):
'''main function
Args:
- expdir: the training experiments directory
- recipe: the directory containing the recipe config files
        - computing: the computing type, one of (condor, standard)'''
if expdir is None:
        raise Exception('no expdir specified. Command usage: '
                        'run test --expdir=/path/to/expdir '
                        '--recipe=/path/to/recipe')
if not os.path.isdir(expdir):
raise Exception('cannot find expdir %s' % expdir)
if recipe is None:
        raise Exception('no recipe specified. Command usage: '
                        'run test --expdir=/path/to/expdir '
                        '--recipe=/path/to/recipe')
if not os.path.isdir(recipe):
raise Exception('cannot find recipe %s' % recipe)
evaluator_cfg_file = os.path.join(recipe, 'test_evaluator.cfg')
database_cfg_file = os.path.join(recipe, 'database.conf')
#create the testing dir
if os.path.isdir(os.path.join(expdir, 'test')):
shutil.rmtree(os.path.join(expdir, 'test'))
os.makedirs(os.path.join(expdir, 'test'))
#copy the config files
shutil.copyfile(database_cfg_file,
os.path.join(expdir, 'test', 'database.conf'))
shutil.copyfile(evaluator_cfg_file,
os.path.join(expdir, 'test', 'test_evaluator.cfg'))
#create a link to the model
os.symlink(os.path.join(expdir, 'model'),
os.path.join(expdir, 'test', 'model'))
if computing == 'condor':
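        # On condor the evaluation runs as a batch job; the computing config only
        # supplies the resource requirements (e.g. minimum memory) for that job.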
computing_cfg_file = 'config/computing/condor/non_distributed.cfg'
parsed_computing_cfg = configparser.ConfigParser()
parsed_computing_cfg.read(computing_cfg_file)
computing_cfg = dict(parsed_computing_cfg.items('computing'))
if not os.path.isdir(os.path.join(expdir, 'test', 'outputs')):
os.makedirs(os.path.join(expdir, 'test', 'outputs'))
subprocess.call(['condor_submit',
'expdir=%s' % os.path.join(expdir, 'test'),
'script=nabu/scripts/test.py',
'memory=%s' % computing_cfg['minmemory'],
'nabu/computing/condor/non_distributed.job'])
elif computing == 'standard':
test(expdir=os.path.join(expdir, 'test'))
else:
raise Exception('Unknown computing type %s' % computing)
if __name__ == '__main__':
tf.app.flags.DEFINE_string('expdir', None,
'the training experiments directory'
)
tf.app.flags.DEFINE_string('recipe', None,
'the directory containing the recipe config '
'files'
)
tf.app.flags.DEFINE_string('computing', 'standard',
                               'the computing type, one of (condor, standard)'
)
FLAGS = tf.app.flags.FLAGS
main(
expdir=FLAGS.expdir,
recipe=FLAGS.recipe,
computing=FLAGS.computing)
| {
"content_hash": "e7540b6aac3a14a41daca068f999627e",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 79,
"avg_line_length": 35.08247422680412,
"alnum_prop": 0.5780193946517779,
"repo_name": "vrenkens/nabu",
"id": "7e7fa5080bc99e4907009d9a40047491fb28ea71",
"size": "3403",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nabu/scripts/prepare_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "322709"
},
{
"name": "Shell",
"bytes": "5290"
}
],
"symlink_target": ""
} |
"""MySQL to GCS operator."""
import base64
import calendar
from datetime import date, datetime, timedelta
from decimal import Decimal
from typing import Dict
from MySQLdb.constants import FIELD_TYPE
from airflow.providers.google.cloud.transfers.sql_to_gcs import BaseSQLToGCSOperator
from airflow.providers.mysql.hooks.mysql import MySqlHook
class MySQLToGCSOperator(BaseSQLToGCSOperator):
"""Copy data from MySQL to Google Cloud Storage in JSON or CSV format.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:MySQLToGCSOperator`
:param mysql_conn_id: Reference to :ref:`mysql connection id <howto/connection:mysql>`.
:type mysql_conn_id: str
:param ensure_utc: Ensure TIMESTAMP columns exported as UTC. If set to
`False`, TIMESTAMP columns will be exported using the MySQL server's
default timezone.
:type ensure_utc: bool
"""
ui_color = '#a0e08c'
type_map = {
FIELD_TYPE.BIT: 'INTEGER',
FIELD_TYPE.DATETIME: 'TIMESTAMP',
FIELD_TYPE.DATE: 'TIMESTAMP',
FIELD_TYPE.DECIMAL: 'FLOAT',
FIELD_TYPE.NEWDECIMAL: 'FLOAT',
FIELD_TYPE.DOUBLE: 'FLOAT',
FIELD_TYPE.FLOAT: 'FLOAT',
FIELD_TYPE.INT24: 'INTEGER',
FIELD_TYPE.LONG: 'INTEGER',
FIELD_TYPE.LONGLONG: 'INTEGER',
FIELD_TYPE.SHORT: 'INTEGER',
FIELD_TYPE.TIME: 'TIME',
FIELD_TYPE.TIMESTAMP: 'TIMESTAMP',
FIELD_TYPE.TINY: 'INTEGER',
FIELD_TYPE.YEAR: 'INTEGER',
}
def __init__(self, *, mysql_conn_id='mysql_default', ensure_utc=False, **kwargs):
super().__init__(**kwargs)
self.mysql_conn_id = mysql_conn_id
self.ensure_utc = ensure_utc
def query(self):
"""Queries mysql and returns a cursor to the results."""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
if self.ensure_utc:
# Ensure TIMESTAMP results are in UTC
tz_query = "SET time_zone = '+00:00'"
self.log.info('Executing: %s', tz_query)
cursor.execute(tz_query)
self.log.info('Executing: %s', self.sql)
cursor.execute(self.sql)
return cursor
def field_to_bigquery(self, field) -> Dict[str, str]:
field_type = self.type_map.get(field[1], "STRING")
# Always allow TIMESTAMP to be nullable. MySQLdb returns None types
# for required fields because some MySQL timestamps can't be
# represented by Python's datetime (e.g. 0000-00-00 00:00:00).
field_mode = "NULLABLE" if field[6] or field_type == "TIMESTAMP" else "REQUIRED"
return {
'name': field[0],
'type': field_type,
'mode': field_mode,
}
def convert_type(self, value, schema_type: str):
"""
Takes a value from MySQLdb, and converts it to a value that's safe for
JSON/Google Cloud Storage/BigQuery.
* Datetimes are converted to UTC seconds.
* Decimals are converted to floats.
* Dates are converted to ISO formatted string if given schema_type is
DATE, or UTC seconds otherwise.
* Binary type fields are converted to integer if given schema_type is
INTEGER, or encoded with base64 otherwise. Imported BYTES data must
be base64-encoded according to BigQuery documentation:
https://cloud.google.com/bigquery/data-types
:param value: MySQLdb column value
:type value: Any
:param schema_type: BigQuery data type
:type schema_type: str
"""
if value is None:
return value
if isinstance(value, datetime):
value = calendar.timegm(value.timetuple())
elif isinstance(value, timedelta):
value = value.total_seconds()
elif isinstance(value, Decimal):
value = float(value)
elif isinstance(value, date):
if schema_type == "DATE":
value = value.isoformat()
else:
value = calendar.timegm(value.timetuple())
elif isinstance(value, bytes):
if schema_type == "INTEGER":
value = int.from_bytes(value, "big")
else:
value = base64.standard_b64encode(value).decode('ascii')
return value
| {
"content_hash": "4795f194e541832dcb412107006a1e04",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 91,
"avg_line_length": 37.53389830508475,
"alnum_prop": 0.6161661774666968,
"repo_name": "sekikn/incubator-airflow",
"id": "6f5c4c20d6df11c61193d7d959dcd3e92a21a89e",
"size": "5216",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/transfers/mysql_to_gcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "reciprocity.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "ed2aa0db72f71354ac93e17fd6a62d1f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 75,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.7155172413793104,
"repo_name": "TeamReciprocity/reciprocity",
"id": "19faabb92c934947bc8b0d1b5f6afc6a02a2c9be",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reciprocity/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "848"
},
{
"name": "HTML",
"bytes": "12348"
},
{
"name": "JavaScript",
"bytes": "768"
},
{
"name": "Python",
"bytes": "63518"
}
],
"symlink_target": ""
} |
from ..parsed_result import ParsedResult
from ..parsed_result import ParsedComponent
class Refiner(object):
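    # Base refiner: subclasses override refine() to post-process the list of
    # parsed results; this default implementation returns them unchanged.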
def refine(self, results, text, options):
return results
| {
"content_hash": "c4adbc4ad37ed3df88487805f845a4fd",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 45,
"avg_line_length": 23,
"alnum_prop": 0.7228260869565217,
"repo_name": "wanasit/chrono-python",
"id": "eeaa764d516ce72578e829e152dba65d7e7cdc78",
"size": "230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chrono/refiners/refiner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52458"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import authenticate, login, logout
from django.views.generic import View
from django.contrib.auth.mixins import LoginRequiredMixin
class LoginRegister(View):
template_name = 'user_profile/index.html'
def get(self, request, *args, **kwargs):
if request.user.is_authenticated():
return redirect('chat:index')
login_form = AuthenticationForm()
register_form = UserCreationForm()
return render(request, self.template_name,
{'login_form': login_form, 'register_form': register_form})
class Login(View):
template_name = 'user_profile/index.html'
def get(self, request, *args, **kwargs):
if request.user.is_authenticated():
return redirect('chat:index')
login_form = AuthenticationForm()
return render(request, self.template_name,
{'login_form': login_form})
def post(self, request, *args, **kwargs):
if request.user.is_authenticated():
return redirect('chat:index')
login_form = AuthenticationForm(data=request.POST)
if login_form.is_valid():
user = authenticate(username=login_form.cleaned_data['username'],
password=login_form.cleaned_data['password'])
if user.is_active:
login(request, user)
return redirect('chat:index')
return render(request, self.template_name,
{'login_form': login_form})
class Register(View):
template_name = 'user_profile/index.html'
def get(self, request, *args, **kwargs):
if request.user.is_authenticated():
return redirect('chat:index')
register_form = UserCreationForm()
return render(request, self.template_name,
{'register_form': register_form})
def post(self, request, *args, **kwargs):
if request.user.is_authenticated():
return redirect('chat:index')
register_form = UserCreationForm(data=request.POST)
if register_form.is_valid():
new_user = register_form
register_form.save()
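            # authenticate() is called with the raw credentials from the form so
            # that login() can attach an auth backend and start a session for the
            # newly registered user.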
user = authenticate(username=new_user.cleaned_data['username'],
password=new_user.cleaned_data['password1'])
if user.is_active:
login(request, user)
return redirect('chat:index')
return render(request, self.template_name,
{'register_form': register_form})
class Logout(LoginRequiredMixin, View):
login_url = 'user-profile:login-register'
def get(self, request, *args, **kwargs):
logout(request)
return redirect('user-profile:login-register')
| {
"content_hash": "7254e6e97572f132526854ad91be2a77",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 81,
"avg_line_length": 33.241379310344826,
"alnum_prop": 0.6123789764868603,
"repo_name": "the-kid89/LearningWithWebsockets",
"id": "dce416a272e61e2d899cbbc005b1e642ea04f607",
"size": "2892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_profile/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "7427"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "14850"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from core.models import Game, Company
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('game')
parser.add_argument('name')
parser.add_argument('--cash', type=int, default=0)
parser.add_argument('--shares', dest='share_count', type=int,
default=10)
parser.add_argument('--ipo', dest='ipo_shares', type=int, default=10)
parser.add_argument('--bank', dest='bank_shares', type=int, default=0)
parser.add_argument('--text', dest='text_color', default='black')
parser.add_argument('--background', dest='background_color',
default='white')
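    # Example invocation (illustrative; option names come from add_arguments above):
    #   python manage.py createcompany <game-uuid> "PRR" --cash 600 --shares 10
    # On success the new company's primary key is written to stdout.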
def handle(self, *args, **options):
try:
game = Game.objects.get(pk=options['game'])
except Game.DoesNotExist:
raise CommandError('This is not a valid UUID')
company = Company.objects.create(game=game, name=options['name'],
cash=options['cash'], share_count=options['share_count'],
ipo_shares=options['ipo_shares'],
bank_shares=options['bank_shares'],
text_color=options['text_color'],
background_color=options['background_color'])
return str(company.pk)
| {
"content_hash": "4bb6c72768a6a4246c3329f984dbe5e1",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 46.42857142857143,
"alnum_prop": 0.6230769230769231,
"repo_name": "XeryusTC/18xx-accountant",
"id": "83655d9e2e13e7cae25b913e2d45caa084e70bfa",
"size": "1324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accountant/core/management/commands/createcompany.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56853"
},
{
"name": "HTML",
"bytes": "38371"
},
{
"name": "JavaScript",
"bytes": "1864"
},
{
"name": "Python",
"bytes": "595122"
},
{
"name": "Shell",
"bytes": "2580"
},
{
"name": "TypeScript",
"bytes": "163804"
}
],
"symlink_target": ""
} |
import argparse
import brightnessmonitorclient
from brightnessmonitorclient import __version__
from brightnessmonitorclient.raspberry.main import start
def get_parser():
"""
Creates a new argument parser.
"""
parser = argparse.ArgumentParser('BrightnessMonitorClient')
version = '%(prog)s ' + __version__
parser.add_argument('--version', '-v', action='version', version=version)
return parser
def main(args=None):
"""
Main entry point for your project.
Args:
args : list
        A list of arguments, as if they were input on the command line. Leave it
None to use sys.argv.
"""
parser = get_parser()
args = parser.parse_args(args)
# start raspberry.main:start
start()
if __name__ == '__main__':
main() | {
"content_hash": "1076b1155f061efb3000b5f9ff3b444c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 23.264705882352942,
"alnum_prop": 0.6422250316055625,
"repo_name": "BrightnessMonitor/BrightnessMonitorClient",
"id": "3a025f3f68e7c4035ed0b1d1ce0375b40bd9ff53",
"size": "791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/brightnessmonitorclient/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25477"
},
{
"name": "Shell",
"bytes": "2819"
}
],
"symlink_target": ""
} |
import os
import time
import pytest
from mapproxy.client.http import HTTPClient, HTTPClientError, supports_ssl_default_context
from mapproxy.client.tile import TileClient, TileURLTemplate
from mapproxy.client.wms import WMSClient, WMSInfoClient
from mapproxy.grid import tile_grid
from mapproxy.layer import MapQuery, InfoQuery
from mapproxy.request.wms import (
WMS111MapRequest,
WMS100MapRequest,
WMS130MapRequest,
WMS111FeatureInfoRequest,
)
from mapproxy.source import SourceError
from mapproxy.srs import SRS, SupportedSRS
from mapproxy.test.helper import assert_re, TempFile
from mapproxy.test.http import mock_httpd, query_eq, assert_query_eq, wms_query_eq
from mapproxy.test.unit.test_cache import MockHTTPClient
TESTSERVER_ADDRESS = ('127.0.0.1', 56413)
TESTSERVER_URL = 'http://%s:%s' % TESTSERVER_ADDRESS
class TestHTTPClient(object):
def setup(self):
self.client = HTTPClient()
def test_post(self):
with mock_httpd(TESTSERVER_ADDRESS, [({'path': '/service?foo=bar', 'method': 'POST'},
{'status': '200', 'body': b''})]):
self.client.open(TESTSERVER_URL + '/service', data=b"foo=bar")
def test_internal_error_response(self):
try:
with mock_httpd(TESTSERVER_ADDRESS, [({'path': '/'},
{'status': '500', 'body': b''})]):
self.client.open(TESTSERVER_URL + '/')
except HTTPClientError as e:
assert_re(e.args[0], r'HTTP Error ".*": 500')
else:
assert False, 'expected HTTPClientError'
def test_invalid_url_type(self):
try:
self.client.open('htp://example.org')
except HTTPClientError as e:
assert_re(e.args[0], r'No response .* "htp://example.*": unknown url type')
else:
assert False, 'expected HTTPClientError'
def test_invalid_url(self):
try:
self.client.open('this is not a url')
except HTTPClientError as e:
assert_re(e.args[0], r'URL not correct "this is not.*": unknown url type')
else:
assert False, 'expected HTTPClientError'
def test_unknown_host(self):
try:
self.client.open('http://thishostshouldnotexist000136really42.org')
except HTTPClientError as e:
assert_re(e.args[0], r'No response .* "http://thishost.*": .*')
else:
assert False, 'expected HTTPClientError'
def test_no_connect(self):
try:
self.client.open('http://localhost:53871')
except HTTPClientError as e:
assert_re(e.args[0], r'No response .* "http://localhost.*": Connection refused')
else:
assert False, 'expected HTTPClientError'
def test_internal_error_hide_error_details(self):
try:
with mock_httpd(TESTSERVER_ADDRESS, [({'path': '/'},
{'status': '500', 'body': b''})]):
HTTPClient(hide_error_details=True).open(TESTSERVER_URL + '/')
except HTTPClientError as e:
assert_re(e.args[0], r'HTTP Error \(see logs for URL and reason\).')
else:
assert False, 'expected HTTPClientError'
@pytest.mark.online
def test_https_untrusted_root(self):
if not supports_ssl_default_context:
raise pytest.skip("old python versions require ssl_ca_certs")
self.client = HTTPClient('https://untrusted-root.badssl.com/')
try:
self.client.open('https://untrusted-root.badssl.com/')
except HTTPClientError as e:
assert_re(e.args[0], r'Could not verify connection to URL')
@pytest.mark.online
def test_https_insecure(self):
self.client = HTTPClient(
'https://untrusted-root.badssl.com/', insecure=True)
self.client.open('https://untrusted-root.badssl.com/')
@pytest.mark.online
def test_https_valid_ca_cert_file(self):
# verify with fixed ca_certs file
cert_file = '/etc/ssl/certs/ca-certificates.crt'
if os.path.exists(cert_file):
self.client = HTTPClient('https://www.google.com/', ssl_ca_certs=cert_file)
self.client.open('https://www.google.com/')
else:
with TempFile() as tmp:
with open(tmp, 'wb') as f:
f.write(GOOGLE_ROOT_CERT)
self.client = HTTPClient('https://www.google.com/', ssl_ca_certs=tmp)
self.client.open('https://www.google.com/')
@pytest.mark.online
def test_https_valid_default_cert(self):
# modern python should verify by default
if not supports_ssl_default_context:
raise pytest.skip("old python versions require ssl_ca_certs")
self.client = HTTPClient('https://www.google.com/')
self.client.open('https://www.google.com/')
@pytest.mark.online
def test_https_invalid_cert(self):
# load 'wrong' root cert
with TempFile() as tmp:
with open(tmp, 'wb') as f:
f.write(GOOGLE_ROOT_CERT)
self.client = HTTPClient(
'https://untrusted-root.badssl.com/', ssl_ca_certs=tmp)
try:
self.client.open('https://untrusted-root.badssl.com/')
except HTTPClientError as e:
assert_re(e.args[0], r'Could not verify connection to URL')
def test_timeouts(self):
test_req = ({'path': '/', 'req_assert_function': lambda x: time.sleep(0.9) or True},
{'body': b'nothing'})
import mapproxy.client.http
client1 = HTTPClient(timeout=0.1)
client2 = HTTPClient(timeout=0.5)
with mock_httpd(TESTSERVER_ADDRESS, [test_req]):
try:
start = time.time()
client1.open(TESTSERVER_URL + '/')
except HTTPClientError as ex:
assert 'timed out' in ex.args[0]
else:
assert False, 'HTTPClientError expected'
duration1 = time.time() - start
with mock_httpd(TESTSERVER_ADDRESS, [test_req]):
try:
start = time.time()
client2.open(TESTSERVER_URL + '/')
except HTTPClientError as ex:
assert 'timed out' in ex.args[0]
else:
assert False, 'HTTPClientError expected'
duration2 = time.time() - start
# check individual timeouts
assert 0.1 <= duration1 < 0.5, duration1
assert 0.5 <= duration2 < 0.9, duration2
def test_manage_cookies_off(self):
"""
Test the behavior when manage_cookies is off (the default). Cookies shouldn't be sent
"""
self.client = HTTPClient()
def assert_no_cookie(req_handler):
return 'Cookie' not in req_handler.headers
test_requests = [
(
{'path': '/', 'req_assert_function': assert_no_cookie},
{'body': b'nothing', 'headers': {'Set-Cookie': "testcookie=42"}}
),
(
{'path': '/', 'req_assert_function': assert_no_cookie},
{'body': b'nothing'}
)
]
with mock_httpd(TESTSERVER_ADDRESS, test_requests):
self.client.open(TESTSERVER_URL + '/')
self.client.open(TESTSERVER_URL + '/')
def test_manage_cookies_on(self):
"""
Test behavior of manage_cookies=True. Once the remote server sends a cookie back, it should
be included in future requests
"""
self.client = HTTPClient(manage_cookies=True)
def assert_no_cookie(req_handler):
return 'Cookie' not in req_handler.headers
def assert_cookie(req_handler):
assert 'Cookie' in req_handler.headers
cookie_name, cookie_val = req_handler.headers['Cookie'].split(';')[0].split('=')
assert cookie_name == 'testcookie'
assert cookie_val == '42'
return True
test_requests = [
(
{'path': '/', 'req_assert_function': assert_no_cookie},
{'body': b'nothing', 'headers': {'Set-Cookie': "testcookie=42"}}
),
(
{'path': '/', 'req_assert_function': assert_cookie},
{'body': b'nothing'}
)
]
with mock_httpd(TESTSERVER_ADDRESS, test_requests):
self.client.open(TESTSERVER_URL + '/')
self.client.open(TESTSERVER_URL + '/')
# root certificates for google.com, if no ca-certificates.crt
# file is found
GOOGLE_ROOT_CERT = b"""
-----BEGIN CERTIFICATE-----
MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT
MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i
YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG
EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg
R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9
9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq
fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv
iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU
1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+
bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW
MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA
ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l
uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn
Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS
tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF
PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un
hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV
5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV
UzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy
dGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1
MVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx
dWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B
AQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f
BeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A
cJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC
AwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ
MA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm
aWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw
ODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj
IBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF
MAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA
A4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y
7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh
1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4
-----END CERTIFICATE-----
"""
class TestTileClient(object):
def test_tc_path(self):
template = TileURLTemplate(TESTSERVER_URL + '/%(tc_path)s.png')
client = TileClient(template)
with mock_httpd(TESTSERVER_ADDRESS, [({'path': '/09/000/000/005/000/000/013.png'},
{'body': b'tile',
'headers': {'content-type': 'image/png'}})]):
resp = client.get_tile((5, 13, 9)).source.read()
assert resp == b'tile'
def test_quadkey(self):
template = TileURLTemplate(TESTSERVER_URL + '/key=%(quadkey)s&format=%(format)s')
client = TileClient(template)
with mock_httpd(TESTSERVER_ADDRESS, [({'path': '/key=000002303&format=png'},
{'body': b'tile',
'headers': {'content-type': 'image/png'}})]):
resp = client.get_tile((5, 13, 9)).source.read()
assert resp == b'tile'
def test_xyz(self):
template = TileURLTemplate(TESTSERVER_URL + '/x=%(x)s&y=%(y)s&z=%(z)s&format=%(format)s')
client = TileClient(template)
with mock_httpd(TESTSERVER_ADDRESS, [({'path': '/x=5&y=13&z=9&format=png'},
{'body': b'tile',
'headers': {'content-type': 'image/png'}})]):
resp = client.get_tile((5, 13, 9)).source.read()
assert resp == b'tile'
def test_arcgiscache_path(self):
template = TileURLTemplate(TESTSERVER_URL + '/%(arcgiscache_path)s.png')
client = TileClient(template)
with mock_httpd(TESTSERVER_ADDRESS, [({'path': '/L09/R0000000d/C00000005.png'},
{'body': b'tile',
'headers': {'content-type': 'image/png'}})]):
resp = client.get_tile((5, 13, 9)).source.read()
assert resp == b'tile'
def test_bbox(self):
grid = tile_grid(4326)
template = TileURLTemplate(TESTSERVER_URL + '/service?BBOX=%(bbox)s')
client = TileClient(template, grid=grid)
with mock_httpd(TESTSERVER_ADDRESS, [({'path': '/service?BBOX=-180.00000000,0.00000000,-90.00000000,90.00000000'},
{'body': b'tile',
'headers': {'content-type': 'image/png'}})]):
resp = client.get_tile((0, 1, 2)).source.read()
assert resp == b'tile'
class TestWMSClient(object):
def test_no_image(self, caplog):
try:
with mock_httpd(TESTSERVER_ADDRESS, [({'path': '/service?map=foo&layers=foo&transparent=true&bbox=-200000,-200000,200000,200000&width=512&height=512&srs=EPSG%3A900913&format=image%2Fpng&request=GetMap&version=1.1.1&service=WMS&styles='},
{'status': '200', 'body': b'x' * 1000,
'headers': {'content-type': 'application/foo'},
})]):
req = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo',
param={'layers':'foo', 'transparent': 'true'})
query = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(900913), 'png')
wms = WMSClient(req).retrieve(query, 'png')
except SourceError:
assert len(caplog.record_tuples) == 1
assert ("'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx' [output truncated]"
in caplog.record_tuples[0][2])
else:
assert False, 'expected no image returned error'
class TestCombinedWMSClient(object):
def setup(self):
self.http = MockHTTPClient()
def test_combine(self):
req1 = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo',
param={'layers':'foo', 'transparent': 'true'})
wms1 = WMSClient(req1, http_client=self.http)
req2 = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo',
param={'layers':'bar', 'transparent': 'true'})
wms2 = WMSClient(req2, http_client=self.http)
req = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(900913), 'png')
combined = wms1.combined_client(wms2, req)
assert combined.request_template.params.layers == ['foo', 'bar']
assert combined.request_template.url == TESTSERVER_URL + '/service?map=foo'
def test_combine_different_url(self):
req1 = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=bar',
param={'layers':'foo', 'transparent': 'true'})
wms1 = WMSClient(req1, http_client=self.http)
req2 = WMS111MapRequest(url=TESTSERVER_URL + '/service?map=foo',
param={'layers':'bar', 'transparent': 'true'})
wms2 = WMSClient(req2, http_client=self.http)
req = MapQuery((-200000, -200000, 200000, 200000), (512, 512), SRS(900913), 'png')
combined = wms1.combined_client(wms2, req)
assert combined is None
class TestWMSInfoClient(object):
def test_transform_fi_request_supported_srs(self):
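        # The client only supports EPSG:25832, so the EPSG:4326 query (and its
        # pixel coordinates) must be reprojected before the request is sent.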
req = WMS111FeatureInfoRequest(url=TESTSERVER_URL + '/service?map=foo', param={'layers':'foo'})
http = MockHTTPClient()
wms = WMSInfoClient(req, http_client=http, supported_srs=SupportedSRS([SRS(25832)]))
fi_req = InfoQuery((8, 50, 9, 51), (512, 512),
SRS(4326), (128, 64), 'text/plain')
wms.get_info(fi_req)
assert wms_query_eq(http.requested[0],
TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng'
'&REQUEST=GetFeatureInfo&SRS=EPSG%3A25832&info_format=text/plain'
'&query_layers=foo'
'&VERSION=1.1.1&WIDTH=512&HEIGHT=797&STYLES=&x=135&y=101'
'&BBOX=428333.552496,5538630.70275,500000.0,5650300.78652'), http.requested[0]
def test_transform_fi_request(self):
req = WMS111FeatureInfoRequest(url=TESTSERVER_URL + '/service?map=foo', param={'layers':'foo', 'srs': 'EPSG:25832'})
http = MockHTTPClient()
wms = WMSInfoClient(req, http_client=http)
fi_req = InfoQuery((8, 50, 9, 51), (512, 512),
SRS(4326), (128, 64), 'text/plain')
wms.get_info(fi_req)
assert wms_query_eq(http.requested[0],
TESTSERVER_URL+'/service?map=foo&LAYERS=foo&SERVICE=WMS&FORMAT=image%2Fpng'
'&REQUEST=GetFeatureInfo&SRS=EPSG%3A25832&info_format=text/plain'
'&query_layers=foo'
'&VERSION=1.1.1&WIDTH=512&HEIGHT=797&STYLES=&x=135&y=101'
'&BBOX=428333.552496,5538630.70275,500000.0,5650300.78652'), http.requested[0]
class TestWMSMapRequest100(object):
def setup(self):
self.r = WMS100MapRequest(param=dict(layers='foo', version='1.1.1', request='GetMap'))
self.r.params = self.r.adapt_params_to_version()
def test_version(self):
assert self.r.params['WMTVER'] == '1.0.0'
assert 'VERSION' not in self.r.params
def test_service(self):
assert 'SERVICE' not in self.r.params
def test_request(self):
assert self.r.params['request'] == 'map'
def test_str(self):
assert_query_eq(str(self.r.params), 'layers=foo&styles=&request=map&wmtver=1.0.0')
class TestWMSMapRequest130(object):
def setup(self):
self.r = WMS130MapRequest(param=dict(layers='foo', WMTVER='1.0.0'))
self.r.params = self.r.adapt_params_to_version()
def test_version(self):
assert self.r.params['version'] == '1.3.0'
assert 'WMTVER' not in self.r.params
def test_service(self):
assert self.r.params['service'] == 'WMS'
def test_request(self):
assert self.r.params['request'] == 'GetMap'
def test_str(self):
query_eq(str(self.r.params), 'layers=foo&styles=&service=WMS&request=GetMap&version=1.3.0')
class TestWMSMapRequest111(object):
def setup(self):
self.r = WMS111MapRequest(param=dict(layers='foo', WMTVER='1.0.0'))
self.r.params = self.r.adapt_params_to_version()
def test_version(self):
assert self.r.params['version'] == '1.1.1'
assert 'WMTVER' not in self.r.params
| {
"content_hash": "8e7c38cd64a7191f429b6d4d00024a47",
"timestamp": "",
"source": "github",
"line_count": 456,
"max_line_length": 249,
"avg_line_length": 46.125,
"alnum_prop": 0.6311510483525888,
"repo_name": "camptocamp/mapproxy",
"id": "8a35be718abac9ef4e2245c764e99b518183be5f",
"size": "21683",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mapproxy/test/unit/test_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1326087"
},
{
"name": "Shell",
"bytes": "324"
}
],
"symlink_target": ""
} |
import unittest
from hashlib import sha1
from magicfolder.server import calculate_merge, FileItem
def make_file_item(file_path, file_data):
return FileItem(file_path, sha1(file_data).hexdigest(), len(file_data), 0)
f1 = make_file_item('file_1', 'some data')
f2 = make_file_item('file_2', 'more data')
f3 = make_file_item('file_3', 'other data')
f2a = make_file_item('file_2', 'more data, changed')
f2b = make_file_item('file_2', 'more data, modified')
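# The cases below exercise calculate_merge(base, client, server). It returns a
# mapping of path -> FileItem for the merged tree plus a set of server-side
# items flagged as conflicts; when both sides touch the same path, the client's
# version wins and the server's version ends up in the conflict set.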
class MergeTest(unittest.TestCase):
def test_blank(self):
new_tree, conflict = calculate_merge(set(), set(), set())
self.assertEqual(new_tree, {})
self.assertEqual(conflict, set())
def test_only_client(self):
new_tree, conflict = calculate_merge(set(), set([f1, f2]), set())
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2})
self.assertEqual(conflict, set())
def test_only_server(self):
new_tree, conflict = calculate_merge(set(), set(), set([f1, f2]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2})
self.assertEqual(conflict, set())
def test_no_changes(self):
new_tree, conflict = calculate_merge(
set([f1]), set([f1]), set([f1]))
self.assertEqual(new_tree, {'file_1': f1})
self.assertEqual(conflict, set())
def test_add_client(self):
new_tree, conflict = calculate_merge(
set([f1]), set([f1, f2]), set([f1]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2})
self.assertEqual(conflict, set())
def test_add_server(self):
new_tree, conflict = calculate_merge(
set([f1]), set([f1]), set([f1, f2]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2})
self.assertEqual(conflict, set())
def test_add_both(self):
new_tree, conflict = calculate_merge(
set([f1]), set([f1, f2]), set([f1, f3]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2, 'file_3': f3})
self.assertEqual(conflict, set())
def test_add_conflict(self):
new_tree, conflict = calculate_merge(
set([f1]), set([f1, f2]), set([f1, f2a]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2})
self.assertEqual(conflict, set([f2a]))
def test_add_conflict_but_identical(self):
new_tree, conflict = calculate_merge(
set([f1]), set([f1, f2a]), set([f1, f2a]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2a})
self.assertEqual(conflict, set([f2a]))
def test_remove_client(self):
new_tree, conflict = calculate_merge(
set([f1, f2]), set([f1]), set([f1, f2]))
self.assertEqual(new_tree, {'file_1': f1})
self.assertEqual(conflict, set())
def test_remove_server(self):
new_tree, conflict = calculate_merge(
set([f1, f2]), set([f1, f2]), set([f1]))
self.assertEqual(new_tree, {'file_1': f1})
self.assertEqual(conflict, set())
def test_remove_both(self):
new_tree, conflict = calculate_merge(
set([f1, f2]), set([f1]), set([f1]))
self.assertEqual(new_tree, {'file_1': f1})
self.assertEqual(conflict, set())
def test_remove_client_and_change_server(self):
new_tree, conflict = calculate_merge(
set([f1, f2]), set([f1]), set([f1, f2a]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2a})
self.assertEqual(conflict, set())
def test_remove_server_and_change_client(self):
new_tree, conflict = calculate_merge(
set([f1, f2]), set([f1, f2a]), set([f1]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2a})
self.assertEqual(conflict, set())
def test_change_client(self):
new_tree, conflict = calculate_merge(
set([f1, f2]), set([f1, f2a]), set([f1, f2]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2a})
self.assertEqual(conflict, set())
def test_change_server(self):
new_tree, conflict = calculate_merge(
set([f1, f2]), set([f1, f2]), set([f1, f2a]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2a})
self.assertEqual(conflict, set())
def test_change_both(self):
new_tree, conflict = calculate_merge(
set([f1, f2]), set([f1, f2a]), set([f1, f2b]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2a})
self.assertEqual(conflict, set([f2b]))
def test_change_both_but_identical(self):
new_tree, conflict = calculate_merge(
set([f1, f2]), set([f1, f2a]), set([f1, f2a]))
self.assertEqual(new_tree, {'file_1': f1, 'file_2': f2a})
self.assertEqual(conflict, set([f2a]))
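# A minimal sketch of the three-way merge behaviour the tests above pin down,
# keyed by file path. It is illustrative only and is NOT the real
# magicfolder.server.calculate_merge; it assumes FileItem exposes a `path`
# attribute and compares equal by value (both are assumptions, not facts
# established by this file).
def calculate_merge_sketch(base, client, server):
    index = lambda items: dict((item.path, item) for item in items)
    b, c, s = index(base), index(client), index(server)
    new_tree, conflict = {}, set()
    for path in set(b) | set(c) | set(s):
        bi, ci, si = b.get(path), c.get(path), s.get(path)
        client_changed, server_changed = ci != bi, si != bi
        if client_changed and server_changed:
            if ci is None:
                result = si          # client removed, server changed: keep server's copy
            elif si is None:
                result = ci          # server removed, client changed: keep client's copy
            else:
                result = ci          # both changed: client wins, server's copy is flagged
                conflict.add(si)
        elif client_changed:
            result = ci
        else:
            result = si              # server changed, or nothing changed at all
        if result is not None:
            new_tree[path] = result
    return new_tree, conflict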
| {
"content_hash": "24bb3348bb5dd80304ea564ae3684d72",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 78,
"avg_line_length": 40.55833333333333,
"alnum_prop": 0.5627696733100472,
"repo_name": "mgax/MagicFolder",
"id": "fc0a8939b4ebea8b933163344c7a789e315af44a",
"size": "4867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magicfolder/tests/test_merge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61086"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.contrib import admin
from symposion.schedule.models import Schedule, Day, Room, SlotKind, Slot, SlotRoom, Presentation, Session, SessionRole, Track
class DayInline(admin.StackedInline):
model = Day
extra = 2
class SlotKindInline(admin.StackedInline):
model = SlotKind
class ScheduleAdmin(admin.ModelAdmin):
model = Schedule
inlines = [DayInline, SlotKindInline, ]
class SlotRoomInline(admin.TabularInline):
model = SlotRoom
extra = 1
class SlotAdmin(admin.ModelAdmin):
list_filter = ("day", "kind")
list_display = ("day", "start", "end", "kind", "content_override")
inlines = [SlotRoomInline]
class RoomAdmin(admin.ModelAdmin):
list_display = ["name", "order", "schedule"]
list_filter = ["schedule"]
inlines = [SlotRoomInline]
class PresentationAdmin(admin.ModelAdmin):
model = Presentation
list_filter = ("section", "cancelled", "slot")
admin.site.register(Day)
admin.site.register(
SlotKind,
list_display=["label", "schedule"],
)
admin.site.register(
SlotRoom,
list_display=["slot", "room"]
)
admin.site.register(Schedule, ScheduleAdmin)
admin.site.register(Room, RoomAdmin)
admin.site.register(Slot, SlotAdmin)
admin.site.register(Session)
admin.site.register(SessionRole)
admin.site.register(Presentation, PresentationAdmin)
admin.site.register(Track)
| {
"content_hash": "8e00a19979ca453514babb86c5fc7606",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 126,
"avg_line_length": 24.155172413793103,
"alnum_prop": 0.7201998572448252,
"repo_name": "pyconau2017/symposion",
"id": "7cf59ecbd5beaf068758a3a5e0afa5b5b80ec2e8",
"size": "1401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "symposion/schedule/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "95579"
},
{
"name": "Python",
"bytes": "270259"
}
],
"symlink_target": ""
} |
"""
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
import mock
from testtools import matchers
import webob
import webob.exc
from cinder.api import common
from cinder import test
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
class LimiterTest(test.TestCase):
"""Unit tests for the `cinder.api.common.limited` method.
This method takes in a list of items and, depending on the 'offset'
and 'limit' GET params, returns a subset or complete set of the given
items.
"""
def setUp(self):
"""Run before each test."""
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
self.medium = range(1000)
self.large = range(10000)
def test_limiter_offset_zero(self):
"""Test offset key works with 0."""
req = webob.Request.blank('/?offset=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
"""Test offset key works with a medium sized number."""
req = webob.Request.blank('/?offset=10')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), self.small[10:])
self.assertEqual(common.limited(self.medium, req), self.medium[10:])
self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
"""Test offset key works with a number over 1000 (max_limit)."""
req = webob.Request.blank('/?offset=1001')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), [])
self.assertEqual(common.limited(self.medium, req), [])
self.assertEqual(
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
"""Test offset key works with a blank offset."""
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
"""Test offset key works with a BAD offset."""
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
"""Test request with no offset or limit."""
req = webob.Request.blank('/')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
"""Test limit of zero."""
req = webob.Request.blank('/?limit=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_bad(self):
"""Test with a bad limit."""
req = webob.Request.blank(u'/?limit=hello')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_limit_medium(self):
"""Test limit of 10."""
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium[:10])
self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
"""Test limit of 3000."""
req = webob.Request.blank('/?limit=3000')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
self.assertEqual(common.limited(self.medium, req), self.medium)
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
"""Test request with both limit and offset."""
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(common.limited(items, req), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3&limit=1500')
self.assertEqual(common.limited(items, req), items[3:1003])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
"""Test a max_limit other than 1000."""
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3&limit=2500')
self.assertEqual(
common.limited(items, req, max_limit=2000), items[3:])
req = webob.Request.blank('/?offset=3000&limit=10')
self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
"""Test a negative limit."""
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
"""Test a negative offset."""
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
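# Illustrative sketch of the offset/limit clamping behaviour exercised above.
# This is not cinder's actual implementation of `common.limited`; it only
# mirrors what these tests assert (default max_limit of 1000, blank, bad or
# negative values rejected with HTTPBadRequest).
def _limited_sketch(items, req, max_limit=1000):
    try:
        offset = int(req.GET.get('offset', 0))
        limit = int(req.GET.get('limit', 0))
    except ValueError:
        raise webob.exc.HTTPBadRequest()
    if offset < 0 or limit < 0:
        raise webob.exc.HTTPBadRequest()
    # limit=0 (or no limit param) means "no explicit limit"; max_limit still applies.
    limit = min(limit, max_limit) or max_limit
    return items[offset:offset + limit]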
class PaginationParamsTest(test.TestCase):
"""Unit tests for `cinder.api.common.get_pagination_params` method.
This method takes in a request object and returns 'marker' and 'limit'
GET params.
"""
def test_nonnumerical_limit(self):
"""Test nonnumerical limit param."""
req = webob.Request.blank('/?limit=hello')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_no_params(self):
"""Test no params."""
req = webob.Request.blank('/')
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
"""Test valid marker param."""
req = webob.Request.blank(
'/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def test_valid_limit(self):
"""Test valid limit param."""
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
"""Test invalid limit param."""
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
"""Test valid limit and marker parameters."""
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
self.assertEqual(common.get_pagination_params(req),
{'marker': marker, 'limit': 20})
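# Rough sketch of the marker/limit extraction checked above; the real
# cinder.api.common.get_pagination_params may do more validation, but these
# tests only require the behaviour below.
def _pagination_params_sketch(req):
    params = {}
    if 'marker' in req.GET:
        params['marker'] = req.GET['marker']
    if 'limit' in req.GET:
        try:
            limit = int(req.GET['limit'])
        except ValueError:
            raise webob.exc.HTTPBadRequest()
        if limit < 0:
            raise webob.exc.HTTPBadRequest()
        params['limit'] = limit
    return params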
class SortParamUtilsTest(test.TestCase):
def test_get_sort_params_defaults(self):
'''Verifies the default sort key and direction.'''
sort_keys, sort_dirs = common.get_sort_params({})
self.assertEqual(['created_at'], sort_keys)
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_override_defaults(self):
        '''Verifies that the defaults can be overridden.'''
sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1',
default_dir='dir1')
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_value_sort_param(self):
'''Verifies a single sort key and direction.'''
params = {'sort': 'key1:dir1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_value_old_params(self):
'''Verifies a single sort key and direction.'''
params = {'sort_key': 'key1', 'sort_dir': 'dir1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
self.assertEqual(['dir1'], sort_dirs)
def test_get_sort_params_single_with_default_sort_param(self):
'''Verifies a single sort value with a default direction.'''
params = {'sort': 'key1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
# Direction should be defaulted
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_single_with_default_old_params(self):
'''Verifies a single sort value with a default direction.'''
params = {'sort_key': 'key1'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1'], sort_keys)
# Direction should be defaulted
self.assertEqual(['desc'], sort_dirs)
def test_get_sort_params_multiple_values(self):
'''Verifies multiple sort parameter values.'''
params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs)
def test_get_sort_params_multiple_not_all_dirs(self):
'''Verifies multiple sort keys without all directions.'''
params = {'sort': 'key1:dir1,key2,key3:dir3'}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
# Second key is missing the direction, should be defaulted
self.assertEqual(['dir1', 'desc', 'dir3'], sort_dirs)
def test_get_sort_params_multiple_override_default_dir(self):
'''Verifies multiple sort keys and overriding default direction.'''
params = {'sort': 'key1:dir1,key2,key3'}
sort_keys, sort_dirs = common.get_sort_params(params,
default_dir='foo')
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'foo', 'foo'], sort_dirs)
def test_get_sort_params_params_modified(self):
        '''Verifies that the input sort parameters are modified.'''
params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'}
common.get_sort_params(params)
self.assertEqual({}, params)
        params = {'sort_key': 'key1', 'sort_dir': 'dir1'}
common.get_sort_params(params)
self.assertEqual({}, params)
def test_get_sort_params_random_spaces(self):
'''Verifies that leading and trailing spaces are removed.'''
params = {'sort': ' key1 : dir1,key2: dir2 , key3 '}
sort_keys, sort_dirs = common.get_sort_params(params)
self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
self.assertEqual(['dir1', 'dir2', 'desc'], sort_dirs)
def test_get_params_mix_sort_and_old_params(self):
'''An exception is raised if both types of sorting params are given.'''
for params in ({'sort': 'k1', 'sort_key': 'k1'},
{'sort': 'k1', 'sort_dir': 'd1'},
{'sort': 'k1', 'sort_key': 'k1', 'sort_dir': 'd2'}):
self.assertRaises(webob.exc.HTTPBadRequest,
common.get_sort_params,
params)
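# Sketch of the sort-parameter parsing these tests describe (not the actual
# cinder implementation). Keys are popped so the caller's dict ends up empty,
# matching test_get_sort_params_params_modified; mixing 'sort' with the old
# 'sort_key'/'sort_dir' parameters is rejected.
def _sort_params_sketch(params, default_key='created_at', default_dir='desc'):
    if 'sort' in params and ('sort_key' in params or 'sort_dir' in params):
        raise webob.exc.HTTPBadRequest()
    if 'sort' in params:
        sort_keys, sort_dirs = [], []
        for entry in params.pop('sort').split(','):
            key, _, direction = entry.partition(':')
            sort_keys.append(key.strip())
            sort_dirs.append(direction.strip() or default_dir)
        return sort_keys, sort_dirs
    return ([params.pop('sort_key', default_key)],
            [params.pop('sort_dir', default_dir)])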
class MiscFunctionsTest(test.TestCase):
def test_remove_major_version_from_href(self):
fixture = 'http://www.testsite.com/v1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1/images'
expected = 'http://www.testsite.com/images'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_2(self):
fixture = 'http://www.testsite.com/v1.1/'
expected = 'http://www.testsite.com/'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_3(self):
fixture = 'http://www.testsite.com/v10.10'
expected = 'http://www.testsite.com'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_4(self):
fixture = 'http://www.testsite.com/v1.1/images/v10.5'
expected = 'http://www.testsite.com/images/v10.5'
actual = common.remove_version_from_href(fixture)
self.assertEqual(actual, expected)
def test_remove_version_from_href_bad_request(self):
fixture = 'http://www.testsite.com/1.1/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_2(self):
fixture = 'http://www.testsite.com/v/images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
def test_remove_version_from_href_bad_request_3(self):
fixture = 'http://www.testsite.com/v1.1images'
self.assertRaises(ValueError,
common.remove_version_from_href,
fixture)
class TestCollectionLinks(test.TestCase):
"""Tests the _get_collection_links method."""
def _validate_next_link(self, href_link_mock, item_count,
osapi_max_limit, limit, should_link_exist):
req = mock.MagicMock()
href_link_mock.return_value = [{"rel": "next",
"href": "fake_link"}]
self.flags(osapi_max_limit=osapi_max_limit)
if limit is None:
params = mock.PropertyMock(return_value=dict())
limited_list_size = min(item_count, osapi_max_limit)
else:
params = mock.PropertyMock(return_value=dict(limit=limit))
limited_list_size = min(item_count, osapi_max_limit,
limit)
limited_list = [{"uuid": str(i)} for i in range(limited_list_size)]
type(req).params = params
builder = common.ViewBuilder()
results = builder._get_collection_links(req, limited_list,
mock.sentinel.coll_key,
item_count, "uuid")
if should_link_exist:
href_link_mock.assert_called_once_with(limited_list, "uuid",
req,
mock.sentinel.coll_key)
self.assertThat(results, matchers.HasLength(1))
else:
self.assertFalse(href_link_mock.called)
self.assertThat(results, matchers.HasLength(0))
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_equals_osapi_max_no_limit(self, href_link_mock):
item_count = 5
osapi_max_limit = 5
limit = None
should_link_exist = False
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_equals_osapi_max_greater_than_limit(self,
href_link_mock):
item_count = 5
osapi_max_limit = 5
limit = 4
should_link_exist = True
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_equals_osapi_max_equals_limit(self, href_link_mock):
item_count = 5
osapi_max_limit = 5
limit = 5
should_link_exist = True
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_equals_osapi_max_less_than_limit(self, href_link_mock):
item_count = 5
osapi_max_limit = 5
limit = 6
should_link_exist = False
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_less_than_osapi_max_no_limit(self, href_link_mock):
item_count = 5
osapi_max_limit = 7
limit = None
should_link_exist = False
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_limit_less_than_items_less_than_osapi_max(self, href_link_mock):
item_count = 5
osapi_max_limit = 7
limit = 4
should_link_exist = True
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_limit_equals_items_less_than_osapi_max(self, href_link_mock):
item_count = 5
osapi_max_limit = 7
limit = 5
should_link_exist = True
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_less_than_limit_less_than_osapi_max(self, href_link_mock):
item_count = 5
osapi_max_limit = 7
limit = 6
should_link_exist = False
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_less_than_osapi_max_equals_limit(self, href_link_mock):
item_count = 5
osapi_max_limit = 7
limit = 7
should_link_exist = False
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_less_than_osapi_max_less_than_limit(self, href_link_mock):
item_count = 5
osapi_max_limit = 7
limit = 8
should_link_exist = False
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_greater_than_osapi_max_no_limit(self, href_link_mock):
item_count = 5
osapi_max_limit = 3
limit = None
should_link_exist = True
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_limit_less_than_items_greater_than_osapi_max(self,
href_link_mock):
item_count = 5
osapi_max_limit = 3
limit = 2
should_link_exist = True
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_greater_than_osapi_max_equals_limit(self,
href_link_mock):
item_count = 5
osapi_max_limit = 3
limit = 3
should_link_exist = True
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_greater_than_limit_greater_than_osapi_max(self,
href_link_mock):
item_count = 5
osapi_max_limit = 3
limit = 4
should_link_exist = True
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_items_equals_limit_greater_than_osapi_max(self,
href_link_mock):
item_count = 5
osapi_max_limit = 3
limit = 5
should_link_exist = True
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
@mock.patch('cinder.api.common.ViewBuilder._generate_next_link')
def test_limit_greater_than_items_greater_than_osapi_max(self,
href_link_mock):
item_count = 5
osapi_max_limit = 3
limit = 6
should_link_exist = True
self._validate_next_link(href_link_mock, item_count,
osapi_max_limit,
limit, should_link_exist)
| {
"content_hash": "f841b7a41d31d4607ddec277481bdeec",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 79,
"avg_line_length": 43.254681647940075,
"alnum_prop": 0.5831673737985973,
"repo_name": "yanheven/cinder",
"id": "75ef32894a800a3279596249d6cd58e1eef5fd47",
"size": "23734",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/tests/api/test_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10655225"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
from typing import Any
from typing import Generic
from typing import TypeVar
import attr
from _pytest.compat import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Type # noqa: F401 (used in type string)
class PytestWarning(UserWarning):
"""Base class for all warnings emitted by pytest."""
__module__ = "pytest"
class PytestAssertRewriteWarning(PytestWarning):
"""Warning emitted by the pytest assert rewrite module."""
__module__ = "pytest"
class PytestCacheWarning(PytestWarning):
"""Warning emitted by the cache plugin in various situations."""
__module__ = "pytest"
class PytestConfigWarning(PytestWarning):
"""Warning emitted for configuration issues."""
__module__ = "pytest"
class PytestCollectionWarning(PytestWarning):
"""Warning emitted when pytest is not able to collect a file or symbol in a module."""
__module__ = "pytest"
class PytestDeprecationWarning(PytestWarning, DeprecationWarning):
"""Warning class for features that will be removed in a future version."""
__module__ = "pytest"
class PytestExperimentalApiWarning(PytestWarning, FutureWarning):
"""Warning category used to denote experiments in pytest.
Use sparingly as the API might change or even be removed completely in a
future version.
"""
__module__ = "pytest"
@classmethod
def simple(cls, apiname: str) -> "PytestExperimentalApiWarning":
return cls(
"{apiname} is an experimental api that may change over time".format(
apiname=apiname
)
)
class PytestUnhandledCoroutineWarning(PytestWarning):
"""Warning emitted for an unhandled coroutine.
A coroutine was encountered when collecting test functions, but was not
handled by any async-aware plugin.
Coroutine test functions are not natively supported.
"""
__module__ = "pytest"
class PytestUnknownMarkWarning(PytestWarning):
"""Warning emitted on use of unknown markers.
See https://docs.pytest.org/en/latest/mark.html for details.
"""
__module__ = "pytest"
_W = TypeVar("_W", bound=PytestWarning)
@attr.s
class UnformattedWarning(Generic[_W]):
"""A warning meant to be formatted during runtime.
This is used to hold warnings that need to format their message at runtime,
as opposed to a direct message.
"""
category = attr.ib(type="Type[_W]")
template = attr.ib(type=str)
def format(self, **kwargs: Any) -> _W:
"""Returns an instance of the warning category, formatted with given kwargs"""
return self.category(self.template.format(**kwargs))
PYTESTER_COPY_EXAMPLE = PytestExperimentalApiWarning.simple("testdir.copy_example")
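# Illustrative use of UnformattedWarning (the warning constant and message
# below are hypothetical, not part of pytest):
#
#   _EXAMPLE = UnformattedWarning(
#       PytestDeprecationWarning, "{name} is deprecated, use {alt} instead"
#   )
#   warnings.warn(_EXAMPLE.format(name="old_fixture", alt="new_fixture"))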
| {
"content_hash": "a380902b3f795ee26f84ff806a7b1f90",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 90,
"avg_line_length": 25.69811320754717,
"alnum_prop": 0.6956681350954479,
"repo_name": "alfredodeza/pytest",
"id": "ee437cc9746ad6002b5095fd9c8eb45f784af9c3",
"size": "2724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/_pytest/warning_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "837013"
}
],
"symlink_target": ""
} |
from ..excel_comparison_test import ExcelComparisonTest
from datetime import datetime
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('default_date_format01.xlsx')
def test_create_file_user_date_format(self):
"""Test write_datetime with explicit date format."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
format1 = workbook.add_format({'num_format': 'yyyy\\-mm\\-dd'})
date1 = datetime.strptime('2013-07-25', "%Y-%m-%d")
worksheet.write_datetime(0, 0, date1, format1)
workbook.close()
self.assertExcelEqual()
def test_create_file_default_date_format(self):
"""Test write_datetime with default date format."""
workbook = Workbook(self.got_filename, {'default_date_format': 'yyyy\\-mm\\-dd'})
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
date1 = datetime.strptime('2013-07-25', "%Y-%m-%d")
worksheet.write_datetime(0, 0, date1)
workbook.close()
self.assertExcelEqual()
def test_create_file_default_date_format_write(self):
"""Test write_datetime with default date format."""
workbook = Workbook(self.got_filename, {'default_date_format': 'yyyy\\-mm\\-dd'})
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
date1 = datetime.strptime('2013-07-25', "%Y-%m-%d")
worksheet.write('A1', date1)
workbook.close()
self.assertExcelEqual()
def test_create_file_default_date_format_write_row(self):
"""Test write_row with default date format."""
workbook = Workbook(self.got_filename, {'default_date_format': 'yyyy\\-mm\\-dd'})
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
date1 = datetime.strptime('2013-07-25', "%Y-%m-%d")
worksheet.write_row('A1', [date1])
workbook.close()
self.assertExcelEqual()
def test_create_file_default_date_format_write_column(self):
"""Test write_column with default date format."""
workbook = Workbook(self.got_filename, {'default_date_format': 'yyyy\\-mm\\-dd'})
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 12)
date1 = datetime.strptime('2013-07-25', "%Y-%m-%d")
worksheet.write_column(0, 0, [date1])
workbook.close()
self.assertExcelEqual()
| {
"content_hash": "c6f9d4f9e93068eb0de06a28544d7907",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 89,
"avg_line_length": 26.405940594059405,
"alnum_prop": 0.6205474315710536,
"repo_name": "jmcnamara/XlsxWriter",
"id": "52c19d0c6d235f948aeeed8980318b722304dd7c",
"size": "2880",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/comparison/test_default_date_format01.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
} |
from monthlyReportNew import getData
import nose.tools as nTools
import warnings
warnings.filterwarnings("ignore")
# nose.tools.TimeExpired nose.tools.assert_items_equal nose.tools.assert_set_equal
# nose.tools.assert_almost_equal nose.tools.assert_less nose.tools.assert_true
# nose.tools.assert_almost_equals nose.tools.assert_less_equal nose.tools.assert_tuple_equal
# nose.tools.assert_dict_contains_subset nose.tools.assert_list_equal nose.tools.eq_
# nose.tools.assert_dict_equal nose.tools.assert_multi_line_equal nose.tools.istest
# nose.tools.assert_equal nose.tools.assert_not_almost_equal nose.tools.make_decorator
# nose.tools.assert_equals nose.tools.assert_not_almost_equals nose.tools.nontrivial
# nose.tools.assert_false nose.tools.assert_not_equal nose.tools.nontrivial_all
# nose.tools.assert_greater nose.tools.assert_not_equals nose.tools.nottest
# nose.tools.assert_greater_equal nose.tools.assert_not_in nose.tools.ok_
# nose.tools.assert_in nose.tools.assert_not_is_instance nose.tools.raises
# nose.tools.assert_is nose.tools.assert_not_regexp_matches nose.tools.set_trace
# nose.tools.assert_is_instance nose.tools.assert_raises nose.tools.timed
# nose.tools.assert_is_none nose.tools.assert_raises_regexp nose.tools.trivial
# nose.tools.assert_is_not nose.tools.assert_regexp_matches nose.tools.trivial_all
# nose.tools.assert_is_not_none nose.tools.assert_sequence_equal nose.tools.with_setup
def getEmails():
'''
    Check that the Hello World!!! example is working properly.
    This function just makes sure that the basic function is
    working properly. This is a simple assert_greater statement.
This is not intended to be run by itself, but as a part of
the nose unit testing facility ...
'''
nTools.assert_greater(len(getData.getEmails()), 0)
# nTools.assert_greater(len(getData.getEmails()), 1e6) # Make sure that this is working
def testMain():
getEmails() | {
"content_hash": "6f90f802eb7357403954639eed6389fe",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 111,
"avg_line_length": 56.625,
"alnum_prop": 0.6582781456953642,
"repo_name": "Holmusk/glycoleapMonthlyReportNew",
"id": "fd1a65a4bbaa60c7f41e2a6972556047939dd270",
"size": "2265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monthlyReportNew/tests/testGetUser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GCC Machine Description",
"bytes": "1"
},
{
"name": "HTML",
"bytes": "2635230"
},
{
"name": "Python",
"bytes": "86660"
},
{
"name": "Shell",
"bytes": "13187"
}
],
"symlink_target": ""
} |
a = """
multiline "unicode" string \
\xf1 \u1234aaaa \U1234aaaa
\N{BLACK SPADE SUIT}
"""
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
""" : punctuation.definition.string.begin.python, source.python, string.quoted.multi.python
multiline "unicode" string : source.python, string.quoted.multi.python
\ : constant.language.python, source.python, string.quoted.multi.python
: source.python, string.quoted.multi.python
: source.python, string.quoted.multi.python
\xf1 : constant.character.escape.python, source.python, string.quoted.multi.python
: source.python, string.quoted.multi.python
\u1234 : constant.character.escape.python, source.python, string.quoted.multi.python
aaaa : source.python, string.quoted.multi.python
\U1234aaaa : constant.character.escape.python, source.python, string.quoted.multi.python
: source.python, string.quoted.multi.python
: source.python, string.quoted.multi.python
\N{BLACK SPADE SUIT} : constant.character.escape.python, source.python, string.quoted.multi.python
""" : punctuation.definition.string.end.python, source.python, string.quoted.multi.python
| {
"content_hash": "ae154d21a914e4f1d14dec168cfe7459",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 101,
"avg_line_length": 47.82142857142857,
"alnum_prop": 0.671396564600448,
"repo_name": "MagicStack/MagicPython",
"id": "985bb4cfe447ee61bdacebd18c43e754af4c41fb",
"size": "1339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/strings/unicode1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6854"
},
{
"name": "JavaScript",
"bytes": "1338031"
},
{
"name": "Makefile",
"bytes": "2083"
},
{
"name": "Python",
"bytes": "666648"
},
{
"name": "Reason",
"bytes": "11395"
}
],
"symlink_target": ""
} |
"""
os-release - File ``/etc/os-release``
=====================================
This module provides plugins access to file ``/etc/os-release``.
Typical content of file ``/etc/os-release`` is::
NAME="Red Hat Enterprise Linux Server"
VERSION="7.2 (Maipo)"
ID="rhel"
ID_LIKE="fedora"
VERSION_ID="7.2"
PRETTY_NAME="Employee SKU"
ANSI_COLOR="0;31"
CPE_NAME="cpe:/o:redhat:enterprise_linux:7.2:GA:server"
HOME_URL="https://www.redhat.com/"
BUG_REPORT_URL="https://bugzilla.redhat.com/"
REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
REDHAT_BUGZILLA_PRODUCT_VERSION=7.2
REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
REDHAT_SUPPORT_PRODUCT_VERSION="7.2"
Note:
    The ``/etc/os-release`` file does not exist in RHEL6 and prior versions.
This module parses the file content and stores it as a `dict` in the `data`
attribute.
Examples:
>>> os_rls_content = '''
    ... NAME="Red Hat Enterprise Linux Server"
    ... VERSION="7.2 (Maipo)"
    ... ID="rhel"
... '''.strip()
>>> from insights.tests import context_wrap
>>> shared = {OsRelease: OsRelease(context_wrap(os_rls_content))}
>>> rls = shared[OsRelease]
>>> data = rls.data
>>> assert data.get("VARIANT_ID") is None
>>> assert data.get("VERSION") == "7.2 (Maipo)"
"""
from .. import Parser, parser, get_active_lines, LegacyItemAccess
@parser("os-release")
class OsRelease(LegacyItemAccess, Parser):
"""Parses the content of file ``/etc/os-release``."""
def parse_content(self, content):
data = {}
for line in get_active_lines(content):
k, _, v = line.partition("=")
if _ == "=" and k:
data[k] = v.strip('"') if v else None
self.data = data
| {
"content_hash": "aeceb6723f0b9e162b3a579e0964d24d",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 75,
"avg_line_length": 31.29090909090909,
"alnum_prop": 0.6153399186519466,
"repo_name": "PaulWay/insights-core",
"id": "ecf52786400cbdd8823b21514f6ac635e66bf4cf",
"size": "1721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/parsers/os_release.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "19339"
},
{
"name": "Python",
"bytes": "2479830"
},
{
"name": "Shell",
"bytes": "892"
}
],
"symlink_target": ""
} |
"""Test that forward declaration of a data structure gets resolved correctly."""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
class ForwardDeclarationTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def do_test(self, dictionary=None):
"""Display *bar_ptr when stopped on a function with forward declaration of struct bar."""
self.build(dictionary=dictionary)
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the foo function which takes a bar_ptr argument.
lldbutil.run_break_set_by_symbol(
self, "foo", num_expected_locations=1, sym_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# This should display correctly.
# Note that the member fields of a = 1 and b = 2 is by design.
self.expect(
"frame variable --show-types *bar_ptr",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
'(bar) *bar_ptr = ',
'(int) a = 1',
'(int) b = 2'])
# And so should this.
self.expect(
"expression --show-types -- *bar_ptr",
VARIABLES_DISPLAYED_CORRECTLY,
substrs=[
'(bar)',
'(int) a = 1',
'(int) b = 2'])
def test(self):
self.do_test()
@no_debug_info_test
@skipIfDarwin
@skipIf(compiler=no_match("clang"))
@skipIf(compiler_version=["<", "7.0"])
def test_debug_names(self):
"""Test that we are able to find complete types when using DWARF v5
accelerator tables"""
self.do_test(dict(CFLAGS_EXTRAS="-gdwarf-5 -gpubnames"))
| {
"content_hash": "f1c082d816fd2c6f5a9d36ec57967b28",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 97,
"avg_line_length": 33.83076923076923,
"alnum_prop": 0.5802637562528422,
"repo_name": "endlessm/chromium-browser",
"id": "f955d013bc900e5cb926700a7652b4d6add8d11b",
"size": "2199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/test/API/lang/c/forward/TestForwardDeclaration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import logging
from maintain.release.base import Releaser
from maintain.release.hooks import HookReleaser
from maintain.release.version_file import VersionFileReleaser
from maintain.release.python import PythonReleaser
from maintain.release.cocoapods import CocoaPodsReleaser
from maintain.release.npm import NPMReleaser
from maintain.release.c import CReleaser
from maintain.release.changelog import ChangelogReleaser
from maintain.release.git_releaser import GitReleaser
from maintain.release.github import GitHubReleaser
logger = logging.getLogger(__name__)
class AggregateReleaser(Releaser):
@classmethod
def releasers(cls):
"""
Returns all of the supported releasers.
"""
return [
HookReleaser,
VersionFileReleaser,
PythonReleaser,
CocoaPodsReleaser,
NPMReleaser,
CReleaser,
ChangelogReleaser,
GitHubReleaser,
GitReleaser,
]
@classmethod
def detected_releasers(cls, config):
"""
Returns all of the releasers that are compatible with the project.
"""
def get_config(releaser):
if config:
return config.get(releaser.config_name(), {})
return {}
releasers = []
for releaser_cls in cls.releasers():
releaser_config = get_config(releaser_cls)
if releaser_config.get('disabled', False):
continue
if releaser_cls.detect():
logger.info('Enabled Releaser: {}'.format(releaser_cls.name))
releasers.append(releaser_cls(releaser_config))
return releasers
@classmethod
def detect(cls):
return len(cls.detected_releasers()) > 0
def __init__(self, config=None, releasers=None):
self.releasers = releasers or self.detected_releasers(config)
self.check_version_consistency()
def check_version_consistency(self):
"""
Determine if any releasers have inconsistent versions
"""
version = None
releaser_name = None
for releaser in self.releasers:
try:
next_version = releaser.determine_current_version()
except NotImplementedError:
continue
if next_version and version and version != next_version:
raise Exception('Inconsistent versions, {} is at {} but {} is at {}.'.format(
releaser_name, version, releaser.name, next_version))
version = next_version
releaser_name = releaser.name
def determine_current_version(self):
for releaser in self.releasers:
try:
return releaser.determine_current_version()
except NotImplementedError:
continue
def determine_next_version(self):
version = None
releaser_name = None
for releaser in self.releasers:
next_version = releaser.determine_next_version()
if not next_version:
continue
if version and version != next_version:
raise Exception('Inconsistent next versions, {} is at {} but {} is at {}.'.format(
releaser_name, version, releaser.name, next_version))
version = next_version
releaser_name = releaser.name
return version
def bump(self, new_version):
for releaser in self.releasers:
releaser.pre_bump(new_version)
for releaser in self.releasers:
releaser.bump(new_version)
for releaser in self.releasers:
releaser.post_bump(new_version)
def release(self, new_version):
for releaser in reversed(self.releasers):
releaser.pre_release(new_version)
for releaser in reversed(self.releasers):
releaser.release(new_version)
for releaser in reversed(self.releasers):
releaser.post_release(new_version)
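# Illustrative usage of the aggregate (assumes it runs inside a project where
# at least one releaser's detect() returns True; not part of this module's API):
#
#   releaser = AggregateReleaser(config={})
#   next_version = releaser.determine_next_version()
#   if next_version:
#       releaser.bump(next_version)
#       releaser.release(next_version)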
| {
"content_hash": "b7432a1a8acca32ad849c21dbea6534a",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 98,
"avg_line_length": 30.22222222222222,
"alnum_prop": 0.6088235294117647,
"repo_name": "kylef/maintain",
"id": "614f3ab5f28ac7548b1a797be4f928765a33f72d",
"size": "4080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maintain/release/aggregate.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "376"
},
{
"name": "Python",
"bytes": "91811"
},
{
"name": "Ruby",
"bytes": "2450"
}
],
"symlink_target": ""
} |
import sys
import requests
import json
import sets
from urlparse import urlparse
from prettytable import PrettyTable
import os
import urllib
import datetime, time
import pprint
from parse import compile
from gremlin import ApplicationGraph, A8FailureGenerator, A8AssertionChecker
def passOrfail(result):
if result:
return "PASS"
else:
return "FAIL"
def a8_get(url, token, headers={'Accept': 'application/json'}, showcurl=False, extra_headers={}):
if token != "" :
headers['Authorization'] = "Bearer " + token
if extra_headers:
headers=dict(headers.items() + extra_headers.items())
if showcurl:
curl_headers = ' '.join(["-H '{0}: {1}'".format(key, value) for key, value in headers.iteritems()])
print "curl", curl_headers, url
try:
r = requests.get(url, headers=headers)
except Exception, e:
sys.stderr.write("Could not contact {0}".format(url))
sys.stderr.write("\n")
sys.stderr.write(str(e))
sys.stderr.write("\n")
sys.exit(2)
if showcurl:
print r.text
return r
def a8_post(url, token, body, headers={'Accept': 'application/json', 'Content-type': 'application/json'}, showcurl=False, extra_headers={}):
"""
@type body: str
"""
if token != "" :
headers['Authorization'] = "Bearer " + token
if extra_headers:
headers=dict(headers.items() + extra_headers.items())
if showcurl:
curl_headers = ' '.join(["-H '{0}: {1}'".format(key, value) for key, value in headers.iteritems()])
print "REQ:", "curl -i -X POST", url, curl_headers, "--data", '\'{0}\''.format(body.replace('\'', '\\\''))
try:
r = requests.post(url, headers=headers, data=body)
except Exception, e:
sys.stderr.write("Could not POST to {0}".format(url))
sys.stderr.write("\n")
sys.stderr.write(str(e))
sys.stderr.write("\n")
sys.exit(2)
if showcurl:
print "RESP: [{0}]".format(r.status_code), r.headers
print "RESP BODY:", r.text
return r
def a8_put(url, token, body, headers={'Accept': 'application/json', 'Content-type': 'application/json'}, showcurl=False, extra_headers={}):
"""
@type body: str
"""
if token != "" :
headers['Authorization'] = "Bearer " + token
if extra_headers:
headers=dict(headers.items() + extra_headers.items())
if showcurl:
curl_headers = ' '.join(["-H '{0}: {1}'".format(key, value) for key, value in headers.iteritems()])
print "REQ:", "curl -i -X PUT", url, curl_headers, "--data", '\'{0}\''.format(body.replace('\'', '\\\''))
try:
r = requests.put(url, headers=headers, data=body)
except Exception, e:
sys.stderr.write("Could not PUT to {0}".format(url))
sys.stderr.write("\n")
sys.stderr.write(str(e))
sys.stderr.write("\n")
sys.exit(2)
if showcurl:
print "RESP: [{0}]".format(r.status_code), r.headers
print "RESP BODY:", r.text
return r
def a8_delete(url, token, headers={'Accept': 'application/json'}, showcurl=False, extra_headers={}):
if token != "" :
headers['Authorization'] = "Bearer " + token
if extra_headers:
headers=dict(headers.items() + extra_headers.items())
if showcurl:
curl_headers = ' '.join(["-H '{0}: {1}'".format(key, value) for key, value in headers.iteritems()])
print "curl -X DELETE", curl_headers, url
try:
r = requests.delete(url, headers=headers)
except Exception, e:
sys.stderr.write("Could not DELETE {0}".format(url))
sys.stderr.write("\n")
sys.stderr.write(str(e))
sys.stderr.write("\n")
sys.exit(2)
return r
def get_field(d, key):
if key not in d:
return '***MISSING***'
return d[key]
def fail_unless(response, code_or_codes):
if not isinstance(code_or_codes, list):
code_or_codes = [code_or_codes]
if response.status_code not in code_or_codes:
print response
print response.text
sys.exit(3)
def is_active(service, default_version, registry_url, registry_token, debug=False):
r = a8_get('{0}/api/v1/services/{1}'.format(registry_url, service), registry_token, showcurl=debug)
if r.status_code == 200:
instance_list = r.json()["instances"]
for instance in instance_list:
            version = tags_to_version(instance.get("tags"))
if version == default_version:
return True
return False
def base_route_rule(destination, version, priority):
rule = {
"destination": destination,
"priority": priority,
"route": {
"backends": [
{ "tags": [ version ] }
]
}
}
return rule
def weight_rule(destination, default_version, weighted_versions=[], priority=1, source=None):
    rule = base_route_rule(destination, default_version, priority)
    for version, weight in weighted_versions:
rule["route"]["backends"].insert(0, { "tags": [ version ], "weight": weight })
if source:
source_name, source_version = split_service(source)
rule["match"] = {
"source": {
"name": source_name
}
}
if source_version:
rule["match"]["source"]["tags"] = version_to_tags(source_version)
return rule
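# For example (illustrative values), weight_rule("reviews", "v1", [("v2", 0.25)])
# produces:
#   {"destination": "reviews", "priority": 1,
#    "route": {"backends": [{"tags": ["v2"], "weight": 0.25}, {"tags": ["v1"]}]}}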
def header_rule(destination, version, header, pattern, priority, source=None):
rule = base_route_rule(destination, version, priority)
rule["match"] = {
"headers": {
header: pattern
}
}
if source:
source_name, source_version = split_service(source)
rule["match"]["source"] = { "name": source_name }
if source_version:
rule["match"]["source"]["tags"] = version_to_tags(source_version)
return rule
def fault_rule(source, destination_name, destination_version, header, pattern, priority, delay=None, delay_probability=None, abort=None, abort_probability=None):
rule = {
"destination": destination_name,
"priority": priority,
"match": {
"headers": {
header: pattern
}
},
"actions" : []
}
if source:
source_name, source_version = split_service(source)
rule["match"]["source"] = { "name": source_name }
if source_version:
rule["match"]["source"]["tags"] = version_to_tags(source_version)
if delay_probability:
action = {
"action" : "delay",
"probability" : delay_probability,
"duration": delay
}
if destination_version:
action["tags"] = version_to_tags(destination_version)
rule["actions"].append(action)
if abort_probability:
action = {
"action" : "abort",
"probability" : abort_probability,
"return_code": abort
}
if destination_version:
action["tags"] = version_to_tags(destination_version)
rule["actions"].append(action)
return rule
def action_rule(source, destination, headers, priority, actions):
rule = {
"destination": destination,
"priority": priority,
"match": {
},
"actions" : actions
}
if source:
source_name, source_version = split_service(source)
rule["match"]["source"] = { "name": source_name }
if source_version:
rule["match"]["source"]["tags"] = version_to_tags(source_version)
if headers:
rule["match"]["headers"] = headers
return rule
def split_service(input):
colon = input.rfind(':')
if colon != -1:
service = input[:colon]
version = input[colon+1:]
else:
service = input
version = None
return service, version
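# e.g. split_service("reviews:v2") -> ("reviews", "v2");
#      split_service("reviews")    -> ("reviews", None)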
def tags_to_version(tags):
#TODO: what about order of tags? need to be sorted?
return ",".join(tags) if tags else NO_VERSION
def version_to_tags(version):
return version.split(",")
def versioned_service_name(name, tags):
service = name
if tags:
service += ":" + tags_to_version(tags)
return service
def get_match_selector(version, match, weight=None):
selector = version + "("
if "source" in match:
selector += "source=" + versioned_service_name(match["source"]["name"], match["source"].get("tags"))
if "headers" in match:
for header, value in match["headers"].items():
if selector[-1:] != "(":
selector += ","
if header == "Cookie" and value.startswith(".*?user="):
selector += 'user="%s"' % value[len(".*?user="):]
else:
selector += 'header="%s:%s"' % (header, value)
if weight:
selector += ',weight=%s' % weight
selector += ")"
return selector
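# e.g. get_match_selector("v2", {"source": {"name": "reviews"}}) -> "v2(source=reviews)"
#      get_match_selector("v2", {"headers": {"Cookie": ".*?user=alice"}}) -> 'v2(user="alice")'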
def add_rule(sorted_rules, rule):
for i in range(0, len(sorted_rules)):
if sorted_rules[i]["priority"] < rule["priority"]:
sorted_rules.insert(i, rule)
return
sorted_rules.append(rule)
def sort_rules(rule_list):
sorted_rules = []
for rule in rule_list:
add_rule(sorted_rules, rule)
return sorted_rules
def get_routes(routing_rules):
default = None
selectors = []
routing_rules = sort_rules(routing_rules)
for rule in routing_rules:
route = rule["route"]
match = rule.get("match")
if match:
if len(route["backends"]) == 1 and "weight" not in route["backends"][0]:
version = tags_to_version(route["backends"][0]["tags"])
selectors.append(get_match_selector(version, match))
else:
for backend in route["backends"]:
version = tags_to_version(backend["tags"])
selectors.append(get_match_selector(version, match, backend.get("weight")))
else:
for backend in route["backends"]:
version = tags_to_version(backend["tags"])
if "weight" in backend:
selectors.append("%s(weight=%s)" % (version, backend["weight"]))
else:
default = version
return default, selectors
NO_VERSION = "-untagged-"
SELECTOR_PARSER = compile("{version}({rule})")
ACTION_PARSER = compile("{version}({weight}->{action}={value})")
############################################
# CLI Commands
############################################
def service_list(args):
registry_url, registry_token = args.a8_registry_url, args.a8_registry_token
r = a8_get('{0}/api/v1/services'.format(registry_url), registry_token, showcurl=args.debug)
fail_unless(r, 200)
service_list = r.json()["services"]
result_list = []
for service in service_list:
r = a8_get('{0}/api/v1/services/{1}'.format(registry_url, service), registry_token, showcurl=args.debug)
fail_unless(r, 200)
instance_list = r.json()["instances"]
version_counts = {}
for instance in instance_list:
version = tags_to_version(instance.get("tags"))
version_counts[version] = version_counts.get(version, 0) + 1
result_instances = []
for version, count in version_counts.iteritems():
result_instances.append("%s(%s)" % (version, count))
result_list.append({"service": service, "instances": result_instances})
if args.json:
print json.dumps(result_list, indent=2)
else:
x = PrettyTable(["Service", "Instances"])
x.align = "l"
for entry in result_list:
service = entry["service"]
versions = ", ".join(entry["instances"])
x.add_row([service, versions])
print x
def service_routing(args):
r = a8_get('{0}/v1/rules/routes'.format(args.a8_controller_url),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
service_rules = r.json()["services"]
registry_url, registry_token = args.a8_registry_url, args.a8_registry_token
r = a8_get('{0}/api/v1/services'.format(registry_url), registry_token, showcurl=args.debug)
fail_unless(r, 200)
service_list = r.json()["services"]
#service_rules = { "reviews": test_routing_rules, "ratings": test_routing_rules2 } #FB TEMP
#service_list = [ "foo" ] #FB TEMP
result_list = []
for service, routing_rules in service_rules.iteritems():
if service in service_list:
service_list.remove(service)
default, selectors = get_routes(routing_rules)
if selectors:
result_list.append({"service": service, "default": default, "selectors": selectors})
else:
result_list.append({"service": service, "default": default})
for service in service_list:
result_list.append({"service": service})
if args.json:
print json.dumps(result_list, indent=2)
else:
x = PrettyTable(["Service", "Default Version", "Version Selectors"])
x.align = "l"
for entry in result_list:
x.add_row([entry["service"],
entry["default"] if "default" in entry else "",
", ".join(entry["selectors"]) if "selectors" in entry else ""
])
print x
def set_routing(args):
if not args.default:
print "You must specify --default"
sys.exit(4)
weight_list = []
header_list = []
if args.selector:
for selector in args.selector:
r = SELECTOR_PARSER.parse(selector)
if not r:
print "Invalid --selector value: %s" % selector
sys.exit(5)
version = r['version'].strip()
rule = r['rule'].strip()
key, sep, value = rule.partition('=')
kind = key.strip()
if kind == 'weight':
weight = float(value.strip())
weight_list.insert(0, (version, weight))
elif kind == 'user':
user = value.strip(' "')
header_list.insert(0, (version, "Cookie", ".*?user=" + user))
elif kind == 'header':
header, sep, pattern = value.strip(' "').partition(':')
header_list.insert(0, (version, header, pattern))
else:
print "Unrecognized --selector key (%s) in selector: %s" % (kind, selector)
sys.exit(6)
priority = 1
if args.source:
routing_request = { "rules": [ weight_rule(args.service, args.default, [], priority) ] }
priority += 1
routing_request["rules"].insert(0, weight_rule(args.service, args.default, weight_list, priority, args.source))
else:
routing_request = { "rules": [ weight_rule(args.service, args.default, weight_list, priority) ] }
for version, header, pattern in header_list:
priority += 1
routing_request["rules"].insert(0, header_rule(args.service, version, header, pattern, priority, args.source))
#print json.dumps(routing_request, indent=2)
r = a8_put('{0}/v1/rules/routes/{1}'.format(args.a8_controller_url, args.service),
args.a8_controller_token,
json.dumps(routing_request),
showcurl=args.debug)
fail_unless(r, [200,201])
print 'Set routing rules for microservice', args.service
def delete_routing(args):
r = a8_delete('{0}/v1/rules/routes/{1}'.format(args.a8_controller_url, args.service),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
print 'Deleted routing rules for microservice', args.service
def rules_list(args):
sys.stderr.write("WARNING: deprecated command. Will be removed in the future. Use action-list instead.\n")
r = a8_get('{0}/v1/rules/actions'.format(args.a8_controller_url),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
service_rules = r.json()["services"]
#service_rules = { "ratings": test_fault_rules } #FB TEMP
result_list = []
for action_rules in service_rules.itervalues():
for rule in sort_rules(action_rules):
action_entry = {
"id": rule["id"],
"priority": rule["priority"]
}
if "match" in rule:
match = rule["match"]
if "source" in match:
action_entry["source"] = versioned_service_name(match["source"]["name"], match["source"].get("tags"))
if "headers" in match:
for header, pattern in match["headers"].iteritems():
action_entry["header"] = header
action_entry["header_pattern"] = pattern
break # Ignore more than one header
tagged_destinations = set()
delay_set = False
abort_set = False
for action in rule["actions"]:
if action["action"] == "delay":
if delay_set: continue # Ignore all but the first one.
action_entry["delay"] = action["duration"]
action_entry["delay_probability"] = action["probability"]
tagged_destinations.add(versioned_service_name(rule["destination"], action.get("tags")))
delay_set = True
elif action["action"] == "abort":
if abort_set: continue # Ignore all but the first one.
action_entry["abort_code"] = action["return_code"]
action_entry["abort_probability"] = action["probability"]
tagged_destinations.add(versioned_service_name(rule["destination"], action.get("tags")))
abort_set = True
elif action["action"] == "trace":
tagged_destinations.add(versioned_service_name(rule["destination"], action.get("tags")))
action_entry["destination"] = ",".join(tagged_destinations)
result_list.append(action_entry)
if args.json:
print json.dumps(result_list, indent=2)
else:
x = PrettyTable(["Source", "Destination", "Header", "Header Pattern", "Delay Probability", "Delay", "Abort Probability", "Abort Code", "Rule Id"])
x.align = "l"
for entry in result_list:
x.add_row([entry.get("source", ""),
entry["destination"],
entry.get("header", ""),
entry.get("header_pattern", ""),
entry.get("delay_probability", ""),
entry.get("delay", ""),
entry.get("abort_probability", ""),
entry.get("abort_code", ""),
entry["id"]
])
print x
def set_rule(args):
sys.stderr.write("WARNING: deprecated command. Will be removed in the future. Use action-add instead.\n")
if not args.source or not args.destination or not args.header:
print "You must specify --source, --destination, and --header"
sys.exit(4)
if not (args.delay > 0 and args.delay_probability > 0.0) and not (args.abort_code and args.abort_probability > 0.0):
print "You must specify either a valid delay with non-zero delay_probability or a valid abort-code with non-zero abort-probability"
sys.exit(5)
destination_name, destination_version = split_service(args.destination)
r = a8_get('{}/v1/rules/actions/{}'.format(args.a8_controller_url, destination_name),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
current_rules = r.json()["rules"]
pattern = '.*?'+args.pattern if args.pattern else '.*'
delay_probability = args.delay_probability if args.delay_probability > 0 else None
abort_probability = args.abort_probability if args.abort_probability > 0 else None
priority = 10
for rule in current_rules:
if rule["priority"] >= priority:
priority = rule["priority"] + 10
rule = fault_rule(args.source,
destination_name,
destination_version,
args.header, pattern,
priority,
delay=args.delay,
delay_probability=delay_probability,
abort=args.abort_code,
abort_probability=abort_probability)
current_rules.append(rule)
payload = { "rules": current_rules }
#print json.dumps(payload, indent=2)
r = a8_put('{}/v1/rules/actions/{}'.format(args.a8_controller_url, destination_name),
args.a8_controller_token,
json.dumps(payload),
showcurl=args.debug)
fail_unless(r, 201)
print 'Set fault injection rule between %s and %s' % (args.source, args.destination)
def clear_rules(args):
sys.stderr.write("WARNING: deprecated command. Will be removed in the future. Use rule-delete instead.\n")
r = a8_get('{0}/v1/rules/actions'.format(args.a8_controller_url),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
service_rules = r.json()["services"]
#service_rules = { "ratings": test_fault_rules } #FB TEMP
for destination in service_rules:
r = a8_delete('{0}/v1/rules/actions/{1}'.format(args.a8_controller_url, destination),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
print 'Cleared fault injection rules from all microservices'
def action_list(args):
r = a8_get('{0}/v1/rules/actions'.format(args.a8_controller_url),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
service_rules = r.json()["services"]
#service_rules = { "ratings": test_fault_rules } #FB TEMP
result_list = []
for action_rules in service_rules.itervalues():
for rule in sort_rules(action_rules):
action_entry = {
"id": rule["id"],
"destination": rule["destination"],
"priority": rule["priority"],
"actions": []
}
if "match" in rule:
match = rule["match"]
if "source" in match:
action_entry["source"] = versioned_service_name(match["source"]["name"], match["source"].get("tags"))
if "headers" in match:
action_entry["headers"] = []
for header, pattern in match["headers"].iteritems():
action_entry["headers"].append(header + ":" + pattern)
for action in rule["actions"]:
version = tags_to_version(action.get("tags"))
if action["action"] == "delay":
action_entry["actions"].append("%s(%s->delay=%s)" % (version, action["probability"], action["duration"]))
elif action["action"] == "abort":
action_entry["actions"].append("%s(%s->abort=%s)" % (version, action["probability"], action["return_code"]))
elif action["action"] == "trace":
action_entry["actions"].append("%s(trace)" % (version)) #, action["log_key"], action["log_value"]))
result_list.append(action_entry)
if args.json:
print json.dumps(result_list, indent=2)
else:
x = PrettyTable(["Destination", "Source", "Headers", "Priority", "Actions", "Rule Id"])
x.align = "l"
for entry in result_list:
x.add_row([entry["destination"],
entry.get("source", ""),
", ".join(entry.get("headers", [])),
entry["priority"],
", ".join(entry["actions"]),
entry["id"]
])
print x
def add_action(args):
if not args.destination or not (args.source or args.header or args.cookie):
print "You must specify --destination, and at least one --source, --header, or --cookie parameter"
sys.exit(4)
if not args.action:
print "You must specify at least one --action parameter"
sys.exit(5)
r = a8_get('{}/v1/rules/actions/{}'.format(args.a8_controller_url, args.destination),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
current_rules = r.json()["rules"]
if args.priority:
priority = int(args.priority)
else:
priority = 10
for rule in current_rules:
if rule["priority"] >= priority:
priority = rule["priority"] + 10
if args.header or args.cookie:
headers = {}
if args.header:
for header in args.header:
key, sep, value = header.partition(':')
headers[key] = value
if args.cookie:
for cookie in args.cookie:
headers['Cookie'] = '.*?'+cookie
else:
headers = None
actions = []
for action in args.action:
r = ACTION_PARSER.parse(action)
if not r:
print "Invalid --action value: %s" % action
sys.exit(6)
version = r['version'].strip()
weight = float(r['weight'].strip())
action_type = r['action'].strip()
value = r['value'].strip()
if action_type == 'delay':
rule_action = {
"action" : "delay",
"probability" : weight,
"duration": float(value),
"tags": version_to_tags(version)
}
actions.append(rule_action)
elif action_type == 'abort':
rule_action = {
"action" : "abort",
"probability" : weight,
"return_code": int(value),
"tags": version_to_tags(version)
}
actions.append(rule_action)
else:
print "Invalid --action type: %s" % action
sys.exit(7)
rule = action_rule(args.source,
args.destination,
headers,
priority,
actions)
current_rules.append(rule)
payload = { "rules": current_rules }
#print json.dumps(payload, indent=2)
r = a8_put('{}/v1/rules/actions/{}'.format(args.a8_controller_url, args.destination),
args.a8_controller_token,
json.dumps(payload),
showcurl=args.debug)
fail_unless(r, 201)
print 'Set action rule for destination %s' % args.destination
def delete_rule(args):
r = a8_delete('{}/v1/rules?id={}'.format(args.a8_controller_url, args.id),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
print 'Deleted rule with id: %s' % args.id
def _print_assertion_results(results):
x = PrettyTable(["AssertionName", "Source", "Destination", "Result", "ErrorMsg"])
x.align = "l"
newlist={}
for res in results:
res['result']=passOrfail(res['result'])
#pprint.pprint(results)
for check in results:
x.add_row([get_field(check, 'name'),
get_field(check, 'source'),
get_field(check, 'dest'),
get_field(check, 'result'),
get_field(check, 'errormsg')
])
print x
def run_recipe(args):
if not args.topology or not args.scenarios:
print "You must specify --topology and --scenarios"
sys.exit(4)
if args.header:
header = args.header
else:
header = "X-Request-ID"
if args.pattern:
pattern = args.pattern
else:
pattern = '*'
if not os.path.isfile(args.topology):
print u"Topology file {} not found".format(args.topology)
sys.exit(4)
if not os.path.isfile(args.scenarios):
print u"Failure scenarios file {} not found".format(args.scenarios)
sys.exit(4)
if args.checks and not os.path.isfile(args.checks):
print u"Checklist file {} not found".format(args.checks)
sys.exit(4)
with open(args.topology) as fp:
app = json.load(fp)
topology = ApplicationGraph(app)
if args.debug:
print "Using topology:\n", topology
with open(args.scenarios) as fp:
scenarios = json.load(fp)
if args.checks:
with open(args.checks) as fp:
checklist = json.load(fp)
fg = A8FailureGenerator(topology, a8_controller_url='{0}/v1/rules'.format(args.a8_controller_url), a8_controller_token=args.a8_controller_token,
header=header, pattern='.*?'+pattern, debug=args.debug)
fg.setup_failures(scenarios)
start_time = datetime.datetime.utcnow().isoformat()
#print start_time
if args.checks:
if args.run_load_script:
import subprocess
#print ">>>", args.run_load_script
retcode = subprocess.call([args.run_load_script])
if retcode: #load injection failed. Do not run assertions
sys.exit(retcode)
else:
print 'Inject test requests with HTTP header %s matching the pattern %s' % (header, pattern)
print ('When done, press Enter key to continue to validation phase')
a = sys.stdin.read(1)
#sleep for 3sec to make sure all logs reach elasticsearch
time.sleep(3)
end_time=datetime.datetime.utcnow().isoformat()
#print end_time
#sleep for some more time to make sure all logs have been flushed
time.sleep(5)
log_server = checklist.get('log_server', args.a8_log_server)
# TODO: Obtain the logstash index as user input or use logstash-YYYY.MM.DD with current date and time.
ac = A8AssertionChecker(es_host=log_server, trace_log_value=fg.get_id(),
index=["_all"], debug=args.debug)
results = ac.check_assertions(checklist, continue_on_error=True)
if args.json:
print json.dumps(results, indent=2)
else:
_print_assertion_results(results)
clear_rules(args)
def traffic_start(args):
if args.amount < 0 or args.amount > 100:
print "--amount must be between 0 and 100"
sys.exit(4)
r = a8_get('{0}/v1/rules/routes/{1}'.format(args.a8_controller_url, args.service),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
service_info = r.json()
#service_info = {"rules": test_routing_rules2} #FB TEMP
routing_rules = sort_rules(service_info["rules"])
weight_rule = routing_rules[-1]
backends = weight_rule["route"]["backends"]
if len(backends) != 1 or "weight" in backends[0]:
print "Invalid state for start operation: service \"%s\" traffic is already being split" % args.service
sys.exit(5)
default_version = tags_to_version(backends[0]["tags"])
registry_url, registry_token = args.a8_registry_url, args.a8_registry_token
if not is_active(args.service, default_version, registry_url, registry_token, args.debug):
print "Invalid state for start operation: service \"%s\" is not currently receiving traffic" % args.service
sys.exit(6)
if not is_active(args.service, args.version, registry_url, registry_token, args.debug):
print "Invalid state for start operation: service \"%s\" does not have active instances of version \"%s\"" % (args.service, args.version)
sys.exit(7)
if args.amount == 100:
backends[0]["tags"] = version_to_tags(args.version)
else:
backends.insert(0, {"tags": version_to_tags(args.version), "weight": float(args.amount)/100})
#print json.dumps(weight_rule, indent=2)
r = a8_put('{0}/v1/rules'.format(args.a8_controller_url, args.service),
args.a8_controller_token,
json.dumps({"rules": [ weight_rule ]}),
showcurl=args.debug)
fail_unless(r, 200)
if args.amount == 100:
print 'Transfer complete for {}: sending {}% of traffic to {}'.format(args.service, args.amount, args.version)
else:
print 'Transfer starting for {}: diverting {}% of traffic from {} to {}'.format(args.service, args.amount, default_version, args.version)
def traffic_step(args):
r = a8_get('{0}/v1/rules/routes/{1}'.format(args.a8_controller_url, args.service),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
service_info = r.json()
#service_info = {"rules": test_routing_rules2} #FB TEMP
routing_rules = sort_rules(service_info["rules"])
weight_rule = routing_rules[-1]
backends = weight_rule["route"]["backends"]
if len(backends) != 2 or "weight" not in backends[0] or "weight" in backends[1]:
print "Invalid state for step operation"
sys.exit(5)
traffic_version = tags_to_version(backends[0]["tags"])
default_version = tags_to_version(backends[1]["tags"])
current_weight = backends[0]["weight"]
if not args.amount:
new_amount = int(current_weight * 100) + 10
else:
if args.amount < 0 or args.amount > 100:
print "--amount must be between 0 and 100"
sys.exit(4)
new_amount = args.amount
if new_amount < 100:
backends[0]["weight"] = float(new_amount)/100
else:
del backends[0]["weight"]
del backends[1]
#print json.dumps(weight_rule, indent=2)
r = a8_put('{0}/v1/rules'.format(args.a8_controller_url, args.service),
args.a8_controller_token,
json.dumps({"rules": [ weight_rule ]}),
showcurl=args.debug)
fail_unless(r, 200)
if new_amount == 100:
print 'Transfer complete for {}: sending {}% of traffic to {}'.format(args.service, new_amount, traffic_version)
else:
print 'Transfer step for {}: diverting {}% of traffic from {} to {}'.format(args.service, new_amount, default_version, traffic_version)
def traffic_abort(args):
r = a8_get('{0}/v1/rules/routes/{1}'.format(args.a8_controller_url, args.service),
args.a8_controller_token,
showcurl=args.debug)
fail_unless(r, 200)
service_info = r.json()
#service_info = {"rules": test_routing_rules} #FB TEMP
routing_rules = sort_rules(service_info["rules"])
weight_rule = routing_rules[-1]
backends = weight_rule["route"]["backends"]
if len(backends) != 2 or "weight" not in backends[0] or "weight" in backends[1]:
print "Invalid state for step operation"
sys.exit(5)
default_version = tags_to_version(backends[1]["tags"])
del backends[0]
#print json.dumps(weight_rule, indent=2)
r = a8_put('{0}/v1/rules'.format(args.a8_controller_url, args.service),
args.a8_controller_token,
json.dumps({"rules": [ weight_rule ]}),
showcurl=args.debug)
fail_unless(r, 200)
print 'Transfer aborted for {}: all traffic reverted to {}'.format(args.service, default_version)
'''
test_routing_rules = json.loads("""
[
{
"priority": 1,
"route": {
"backends": [
{
"weight": 0.25,
"tags": [
"v2"
]
},
{
"tags": [
"v1"
]
}
]
},
"destination": "reviews"
},
{
"priority": 2,
"route": {
"backends": [
{
"tags": [
"v3"
]
}
]
},
"destination": "reviews",
"match": {
"headers": {
"Cookie": ".*?user=shriram"
}
}
},
{
"priority": 3,
"route": {
"backends": [
{
"tags": [
"v4"
]
}
]
},
"destination": "reviews",
"match": {
"headers": {
"Foo": "bar"
}
}
}
]
""")
test_routing_rules2 = json.loads("""
[
{
"priority": 1,
"route": {
"backends": [
{
"tags": [
"v1"
]
}
]
},
"destination": "ratings"
}
]""")
test_fault_rules = json.loads("""
[
{
"destination": "ratings",
"id": "action123",
"priority": 5,
"match": {
"source": {
"name": "reviews",
"tags": [ "v2" ]
},
"headers": {
"Cookie": ".*?user=jason"
}
},
"actions": [
{
"action": "delay",
"probability": 1,
"tags": [ "v1" ],
"duration": 7
}
]
},
{
"destination": "ratings",
"id": "action345",
"priority": 10,
"match": {
"source": {
"name": "bar",
"tags": [ "v1" ]
},
"headers": {
"Foo": "bar"
}
},
"actions": [
{
"action": "delay",
"probability": 0.5,
"tags": [ "v1" ],
"duration": 2
},
{
"action": "abort",
"probability": 0.25,
"tags": [ "v1" ],
"return_code": 400
}
]
}
]""")
'''
| {
"content_hash": "e6ea75b4bfbc4b4d95cb9be719c296ad",
"timestamp": "",
"source": "github",
"line_count": 1054,
"max_line_length": 236,
"avg_line_length": 35.63377609108159,
"alnum_prop": 0.5522658288513765,
"repo_name": "amalgam8/a8ctl",
"id": "0619aea724f3b5938abc82d9f0f3a40fd16d2e10",
"size": "38200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "a8ctl/v1/commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "131713"
},
{
"name": "Shell",
"bytes": "1156"
}
],
"symlink_target": ""
} |
import os
import pyinotify
from ConfigParser import SafeConfigParser, NoOptionError, NoSectionError
from pipelines.logger import PipelineJobLogger
from pipelines.paths import *
class PipelineConfigError(Exception): # TODO: implement
def __init__(self, msg):
super(PipelineConfigError, self).__init__()
self.msg = msg
class PipelineConfig(SafeConfigParser, object):
def __init__(self, from_file=True, path=None, project_id=None, zones=None, scopes=None, service_account_email=None, max_running_jobs=None, autorestart_preempted=None):
super(PipelineConfig, self).__init__()
self._configParams = {
"project_id": {
"section": "gcp",
"required": True,
"default": None
},
"zones": {
"section": "gcp",
"required": True,
"default": "us-central1-a,us-central1-b,us-central1-c,us-central1-f,us-east1-b,us-east1-c,us-east1-d"
},
"scopes": {
"section": "gcp",
"required": True,
"default": "https://www.googleapis.com/auth/genomics,https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/devstorage.full_control"
},
"service_account_email": {
"section": "gcp",
"required": True,
"default": "default"
},
"max_running_jobs": {
"section": "pipelines",
"required": True,
"default": 2000
},
"autorestart_preempted": {
"section": "pipelines",
"required": True,
"default": False
}
}
if path is None:
self.path = SERVER_CONFIG_PATH
else:
self.path = path
if from_file:
self._from_file = True
try:
os.makedirs(os.path.dirname(self.path))
except OSError:
pass
self.refresh()
else:
self._from_file = False
try:
self.project_id = project_id
except AttributeError as e:
raise PipelineConfigError("Couldn't create config: {reason}".format(reason=e))
else:
for o in self._configParams.keys():
s = self._configParams[o]["section"]
if locals()[o] is None:
v = self._configParams[o]["default"]
else:
v = locals()[o]
self.update(s, o, v)
def update(self, section, option, value, first_time=False):
if option not in self._configParams.keys():
raise PipelineConfigError("unrecognized option {s}/{o}".format(s=section, o=option))
else:
if self._configParams[option]["section"] != section:
raise PipelineConfigError("unrecognized section {s}".format(s=section))
if not self.has_section(section):
self.add_section(section)
self.set(section, str(option), str(value))
if self.path is not None:
with open(self.path, 'w') as f:
self.write(f)
self.refresh()
def watch(self):
# watch changes to the config file -- needs to be run in a separate thread
configStatusManager = pyinotify.WatchManager()
configStatusNotifier = pyinotify.Notifier(configStatusManager)
configStatusManager.add_watch(self.path, pyinotify.IN_CLOSE_WRITE, proc_fun=PipelineConfigUpdateHandler(config=self))
configStatusNotifier.loop()
def refresh(self):
self.__dict__.update(self._verify())
def _verify(self):
try:
if self._from_file:
self.read(self.path)
except IOError as e:
raise PipelineConfigError("Couldn't open {path}: {reason}".format(path=self.path, reason=e))
else:
d = {}
for name, attrs in self._configParams.iteritems():
if attrs["required"]:
if not self.has_section(attrs["section"]):
raise PipelineConfigError("missing required section {s} in the configuration!\nRUN `isb-cgc-pipelines config` to correct the configuration".format(s=attrs["section"]))
if not self.has_option(attrs["section"], name):
raise PipelineConfigError("missing required option {o} in section {s}!\nRun `isb-cgc-pipelines config` to correct the configuration".format(s=attrs["section"], o=name))
try:
d[name] = self.get(attrs["section"], name)
except NoOptionError:
pass
except NoSectionError:
pass
return d
class PipelineConfigUpdateHandler(pyinotify.ProcessEvent):
def my_init(self, config=None): # config -> PipelineConfig
self._config = config
def process_IN_CLOSE_WRITE(self, event):
PipelineJobLogger.writeStdout("Refreshing configuration ...")
self._config.refresh()
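# --- Illustrative sketch, not part of the original module ---
# The _configParams table above maps every option to an INI section, so a
# configuration file consumed by this class looks roughly like the sample
# written below. The path and project id are made-up example values.
if __name__ == "__main__":
    _sample = ("[gcp]\n"
               "project_id = my-example-project\n"
               "zones = us-central1-a,us-central1-b\n"
               "scopes = https://www.googleapis.com/auth/genomics\n"
               "service_account_email = default\n"
               "[pipelines]\n"
               "max_running_jobs = 2000\n"
               "autorestart_preempted = False\n")
    with open("/tmp/example-pipelines.conf", "w") as fh:
        fh.write(_sample)
    cfg = PipelineConfig(path="/tmp/example-pipelines.conf")
    print cfg.project_id, cfg.max_running_jobs  # -> my-example-project 2000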
| {
"content_hash": "ee8555cc7468f25ae9462a2e77528bc0",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 174,
"avg_line_length": 28.06040268456376,
"alnum_prop": 0.6792633341305908,
"repo_name": "isb-cgc/ISB-CGC-pipelines",
"id": "c65e29b85e80d607ab882dec9b9c60f36c4daf1b",
"size": "4181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/pipelines/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "144267"
},
{
"name": "Shell",
"bytes": "8430"
}
],
"symlink_target": ""
} |
"""
Copyright 2017 Pani Networks Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# A monitor plugin for checking instance health with a TCP connection
# establishment attempt.
#
import logging
import socket
import threading
from vpcrouter.errors import ArgsError
from vpcrouter.monitor import common
class Tcp(common.MonitorPlugin):
"""
A health monitor plugin, which uses TCP connection attempts to check
instances for health.
"""
def __init__(self, conf):
super(Tcp, self).__init__(conf, "TcpHealth")
def _do_tcp_check(self, ip, results):
"""
Attempt to establish a TCP connection.
If not successful, record the IP in the results dict.
Always closes the connection at the end.
"""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.connect((ip, self.conf['tcp_check_port']))
except:
# Any problem during the connection attempt? We won't diagnose it,
# we just indicate failure by adding the IP to the list
results.append(ip)
finally:
sock.close()
def get_monitor_interval(self):
"""
Return the sleep time between monitoring intervals.
"""
return self.conf['tcp_check_interval']
def do_health_checks(self, list_of_ips):
"""
Perform a health check on a list of IP addresses.
Each check (we use a TCP connection attempt) is run in its own thread.
Gather up the results and return the list of those addresses that
failed the test and the list of questionable IPs.
TODO: Currently, this starts a thread for every single address we want
to check. That's probably not a good idea if we have thousands of
addresses. Therefore, we should implement some batching for large
sets.
"""
threads = []
results = []
# Start the thread for each IP we wish to check.
for count, ip in enumerate(list_of_ips):
thread = threading.Thread(
target = self._do_tcp_check,
name = "%s:%s" % (self.thread_name, ip),
args = (ip, results))
thread.start()
threads.append(thread)
# ... make sure all threads are done...
for thread in threads:
thread.join()
# ... and send back all the failed IPs.
return results, [] # return empty list for questionable IPs
def start(self):
"""
Start the monitoring thread of the plugin.
"""
logging.info("TCP health monitor plugin: Starting to watch "
"instances.")
self.monitor_thread = threading.Thread(target = self.start_monitoring,
name = self.thread_name)
self.monitor_thread.daemon = True
self.monitor_thread.start()
def stop(self):
"""
Stop the monitoring thread of the plugin.
The super-class will send the stop signal on the monitor-IP queue,
which prompts the loop to stop.
"""
super(Tcp, self).stop()
self.monitor_thread.join()
logging.info("TCP health monitor plugin: Stopped")
def get_info(self):
"""
Return plugin information.
"""
return {
self.get_plugin_name() : {
"version" : self.get_version(),
"params" : {
"tcp_check_interval" : self.conf['tcp_check_interval'],
"tcp_check_port" : self.conf['tcp_check_port']
}
}
}
@classmethod
def add_arguments(cls, parser, sys_arg_list=None):
"""
Arguments for the TCP health monitor plugin.
"""
parser.add_argument('--tcp_check_interval',
dest='tcp_check_interval',
required=False, default=2, type=float,
help="TCP health-test interval in seconds, "
"default 2 "
"(only for 'tcp' health monitor plugin)")
parser.add_argument('--tcp_check_port',
dest='tcp_check_port',
required=False, default=22, type=int,
help="Port for TCP health-test, default 22 "
"(only for 'tcp' health monitor plugin)")
return ["tcp_check_interval", "tcp_check_port"]
@classmethod
def check_arguments(cls, conf):
"""
Sanity check plugin options values.
As a side effect, it also converts the specified interval and port
to an integer.
"""
# Checking the interval
if not conf['tcp_check_interval']:
raise ArgsError("A TCP health-test interval needs to be "
"specified (--tcp_check_interval).")
if not (1 <= conf['tcp_check_interval'] <= 3600):
raise ArgsError("Specified TCP health-test interval must be "
"between 1 and 3600 seconds")
# Checking the port
if not conf['tcp_check_port']:
raise ArgsError("A port for the TCP health-test needs to be "
"specified (--tcp_check_port).")
if not (1 <= conf['tcp_check_port'] <= 65535):
raise ArgsError("Specified port for TCP health-test must be "
"between 1 and 65535")
| {
"content_hash": "53dd101142e54194a6131dd828f1e317",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 78,
"avg_line_length": 33.3027027027027,
"alnum_prop": 0.5624086998863821,
"repo_name": "romana/vpc-router",
"id": "8cd689d298e6c149f36a0aa2537754d3cd359316",
"size": "6161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vpcrouter/monitor/plugins/tcp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "212538"
},
{
"name": "Shell",
"bytes": "761"
}
],
"symlink_target": ""
} |
'''
Created on Nov 15, 2018
'''
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from pprint import pprint
from weblyzard_api.client import OGER_API_URL
from weblyzard_api.client.ontogene import OgerClient
from weblyzard_api.client.recognize import Recognize
from weblyzard_api.client.jeremia import Jeremia
class TestOGER(unittest.TestCase):
def setUp(self):
url = OGER_API_URL
print(url)
self.client = OgerClient(url)
def test_raise_exception_if_service_urls_is_array(self):
with self.assertRaises(Exception) as context:
OgerClient(['http://localhost:8080', 'http://localhost:8081'])
self.assertTrue('Oger url cannot be an array' in str(context.exception))
def test_status(self):
self.assertTrue(self.client.status())
def test_annotate_text(self):
docid='99999999'
#doctext='Cancer, also called malignancy, is an abnormal growth of cells.'
doctext='Alzheimer\'s disease (AD), also referred to simply as Alzheimer\'s, is a chronic neurodegenerative disease that usually starts slowly and worsens over time.'
response = self.client.annotate_text(docid, doctext)
assert len(response), 'No items found for {}'.format(docid)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "553d415cea603b8dd2599847fec3a191",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 174,
"avg_line_length": 34.075,
"alnum_prop": 0.6845194424064563,
"repo_name": "weblyzard/weblyzard_api",
"id": "861f8e256135e78c1f55674f1a379b4dcd46d136",
"size": "1405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/weblyzard_api/tests/client/test_oger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "145466"
},
{
"name": "Python",
"bytes": "742504"
},
{
"name": "Shell",
"bytes": "97"
}
],
"symlink_target": ""
} |
import io
import numpy as np
import requests
from PIL import Image
from bokeh.plotting import *
class MapArea:
def __init__(self, image, min_lon, max_lon, min_lat, max_lat):
self.image = image
self.min_lon = min_lon
self.min_lat = min_lat
self.max_lon = max_lon
self.max_lat = max_lat
self.range_lon = max_lon - min_lon
self.range_lat = max_lat - min_lat
def rgba_to_array2d(image):
arr = np.array(image)
image_2d = np.empty(arr.shape[0:2], dtype=np.uint32)
view = image_2d.view(dtype=np.uint8).reshape(arr.shape)
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
for k in range(arr.shape[2]):
view[i, j, k] = arr[i,j,k]
return image_2d
def add_maparea_to_plot(p, maparea):
image_array = rgba_to_array2d(maparea.image)
rotated_2d = np.rot90(np.transpose(image_array))
p.image_rgba(image=[rotated_2d], x=[maparea.min_lon], y=[maparea.min_lat], dw=[maparea.range_lon], dh=[maparea.range_lat])
class MapTile(MapArea):
def __init__(self, image, x, y, zoom):
self.image = image
self.x = x
self.y = y
self.zoom = zoom
self.min_lon, self.max_lat = tileaddress_to_lonlat(x, y, zoom)
self.max_lon, self.min_lat = tileaddress_to_lonlat(x+1, y+1, zoom)
self.range_lon = self.max_lon - self.min_lon
self.range_lat = self.max_lat - self.min_lat
def get_maptile(x, y, zoom, file_extension, url_format = "http://tile.stamen.com/toner/{0}/{1}/{2}.{3}"):
url = url_format.format(zoom, x, y, file_extension)
print(url)
response = requests.get(url)
with io.BytesIO(response.content) as response_io:
image = Image.open(response_io).convert("RGBA") # in watercolor example jpg image was RGB. Convert all to RGBA so we're dealing w/ a standard array shape
return MapTile(image, x, y, zoom)
def get_stamen_maptile(x, y, zoom, type = "watercolor", file_extension = "jpg"):
# TODO: make it convenient to access other stamen tile types
return get_maptile(x, y, zoom, file_extension, url_format = "http://tile.stamen.com/watercolor/{0}/{1}/{2}.{3}")
def get_google_maptile(x, y, zoom):
return get_maptile(x, y, zoom, "", url_format="http://mt0.google.com/vt/lyrs=m@169000000&hl=en&x={1}&y={2}&z={0}&s=Ga")
# adapted from XYtoLatLon in ggmap package
def tileaddress_to_lonlat(tileaddress_x, tileaddress_y, zoom):
n = 2**zoom
lon = tileaddress_x / n * 360.0 - 180.0
lat = (180/np.pi) * np.arcsin(np.tanh(np.pi * (1 - 2 * tileaddress_y / n)))
# TODO: shift so that -pi/2 < lat <= pi/2
print(lon,lat)
return lon, lat
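# Worked example (illustrative, not part of the original module): at zoom 0 a
# single tile covers the whole Web-Mercator world, so its corners map to
# longitude +/-180 and latitude ~ +/-85.0511 degrees (arcsin(tanh(pi))):
#   tileaddress_to_lonlat(0, 0, 0) -> (-180.0, ~85.0511)
#   tileaddress_to_lonlat(1, 1, 0) -> (180.0, ~-85.0511)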
# img = Image.open("images/tile.png").convert('RGBA')
# print(type(img))
#
# img_bw = img.convert('L')
#
# print(type(img_bw))
#
# arr = np.array(img)
#
# im2 = Image.fromarray(arr, "RGBA")
# print(im2)
if __name__=='__main__':
output_file("png_to_bokeh_image.html")
maparea1 = get_stamen_maptile(7700, 13550, 15, "png")
maparea2 = get_stamen_maptile(7700, 13551, 15, "png")
p = figure(tools = "pan, box_zoom, reset, wheel_zoom", width=500, height=500,
x_range=[maparea1.min_lon, maparea1.max_lon],
y_range = [maparea1.min_lat,maparea1.max_lat])
add_maparea_to_plot(p, maparea1)
add_maparea_to_plot(p, maparea2)
show(p)
#img = Image.open("images/tile.jpg").convert('RGBA')
#arr = np.array(img)
#print(arr.shape)
print(maparea2.min_lon)
print(maparea2.max_lon)
tile = get_stamen_maptile(7700, 13550, 15, "png")
#image = tile.image.convert("RGBA")
#arr = np.array(image)
#print(arr.shape)
#
#
#print(tileaddress_to_lonlat(7700, 13550, 15))
| {
"content_hash": "b515cc0e89c9b4dde6652a66d7c75696",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 161,
"avg_line_length": 34.324074074074076,
"alnum_prop": 0.6215268411114109,
"repo_name": "dchudz/bokeh-maps",
"id": "9035eefb7fbfcc5e444001e1baf323c98c228339",
"size": "3707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MapArea.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7504"
}
],
"symlink_target": ""
} |
"""
Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations:
get and set.
get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
set(key, value) - Set or insert the value if the key is not already present. When the cache reached its capacity, it should invalidate the least recently used item before inserting a new item.
"""
class Node:
def __init__(self, key, val, prev=None, next=None):
self.val = val
self.key = key
self.prev = prev
self.next = next
class LinkedList:
def __init__(self):
self.size = 0
self.head = Node(None, None)
self.tail = Node(None, None)
self.head.next = self.tail
self.tail.prev = self.head
def push(self, n):
# link between n and head
next = self.head.next
self.head.next = n
n.prev = self.head
# link between n and next
n.next = next
next.prev = n
#update size
self.size = self.size + 1
return n
def pop(self):
last = self.tail.prev
prelast = last.prev
prelast.next = self.tail
self.tail.prev = prelast
# update size
self.size = self.size - 1
return last
def remove(self, node):
prev = node.prev
next = node.next
prev.next = next
next.prev = prev
# update size
self.size = self.size - 1
def printList(self):
pointer = self.head.next
s = ""
while(pointer != self.tail):
s = s + str(pointer.val) + " , "
pointer = pointer.next
print s
class LRUCache:
# @param capacity, an integer
def __init__(self, capacity):
self.memory = LinkedList()
self.capacity = capacity
self.keys = {}
# @return an integer
def get(self, key):
if self.keys.has_key(key):
# add key to the start of the list
return self.refreshKey(key).val
else:
return -1
# @param key, an integer
# @param value, an integer
# @return nothing
def set(self, key, value):
if self.keys.has_key(key):
oldNode = self.refreshKey(key)
oldNode.val = value
else:
if self.memory.size == self.capacity:
invalidatedNode = self.memory.pop()
invalidatedkey = invalidatedNode.key
self.keys.pop(invalidatedkey, None)
# insert new key/value
node = Node(key, value)
self.memory.push(node)
self.keys[key] = node
def refreshKey(self, key):
node = self.keys[key]
self.memory.remove(node)
self.memory.push(node)
return node
l = LRUCache(1)
l.set(2,1)
print l.get(2)
l.set(3,2)
print l.get(2)
print l.get(3)
| {
"content_hash": "0177c8b2a0cf69fe8ed225ccc80f4200",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 192,
"avg_line_length": 20.59016393442623,
"alnum_prop": 0.6564490445859873,
"repo_name": "Ahmed--Mohsen/leetcode",
"id": "2d2a95cd469923bc21eea75c86eaa96113423d61",
"size": "2512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lru_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "317482"
}
],
"symlink_target": ""
} |
"""A few things that didn't seem to fit anywhere else."""
import os
import os.path
import pwd
import tempfile
import fcntl
import errno
import threading
import subprocess
import shutil
import sys
import signal
import logger
PID_FILE = '/var/run/nodemanager.pid'
####################
def get_default_if():
interface = get_if_from_hwaddr(get_hwaddr_from_plnode())
if not interface:
interface = "eth0"
return interface
def get_hwaddr_from_plnode():
try:
for line in open("/usr/boot/plnode.txt", 'r').readlines():
if line.startswith("NET_DEVICE"):
return line.split("=")[1].strip().strip('"')
except:
pass
return None
def get_if_from_hwaddr(hwaddr):
import sioc
devs = sioc.gifconf()
for dev in devs:
dev_hwaddr = sioc.gifhwaddr(dev)
if dev_hwaddr == hwaddr:
return dev
return None
####################
# daemonizing
def as_daemon_thread(run):
"""
Call function <run> with no arguments in its own thread.
"""
thr = threading.Thread(target=run)
thr.setDaemon(True)
thr.start()
def close_nonstandard_fds():
"""
Close all open file descriptors other than 0, 1, and 2.
"""
_SC_OPEN_MAX = 4
for fd in range(3, os.sysconf(_SC_OPEN_MAX)):
try:
os.close(fd)
except OSError:
pass # most likely an fd that isn't open
# after http://www.erlenstar.demon.co.uk/unix/faq_2.html
def daemon():
"""
Daemonize the current process.
"""
if os.fork() != 0:
os._exit(0)
os.setsid()
if os.fork() != 0:
os._exit(0)
os.chdir('/')
os.umask(0o022)
devnull = os.open(os.devnull, os.O_RDWR)
os.dup2(devnull, 0)
# xxx fixme - this is just to make sure that nothing gets stupidly lost - should use devnull
crashlog = os.open('/var/log/nodemanager.daemon',
os.O_RDWR | os.O_APPEND | os.O_CREAT, 0o644)
os.dup2(crashlog, 1)
os.dup2(crashlog, 2)
def fork_as(su, function, *args):
"""
fork(), cd / to avoid keeping unused directories open, close all nonstandard
file descriptors (to avoid capturing open sockets), fork() again (to avoid
zombies) and call <function> with arguments <args> in the grandchild
process. If <su> is not None, set our group and user ids appropriately in
the child process.
"""
child_pid = os.fork()
if child_pid == 0:
try:
os.chdir('/')
close_nonstandard_fds()
if su:
pw_ent = pwd.getpwnam(su)
os.setegid(pw_ent[3])
os.seteuid(pw_ent[2])
child_pid = os.fork()
if child_pid == 0:
function(*args)
except:
os.seteuid(os.getuid()) # undo su so we can write the log file
os.setegid(os.getgid())
logger.log_exc("tools: fork_as")
os._exit(0)
else:
os.waitpid(child_pid, 0)
####################
# manage files
def pid_file():
"""
We use a pid file to ensure that only one copy of NM is running at a given
time. If successful, this function will write a pid file containing the pid
of the current process. The return value is the pid of the other running
process, or None otherwise.
"""
other_pid = None
if os.access(PID_FILE, os.F_OK): # check for a pid file
handle = open(PID_FILE) # pid file exists, read it
other_pid = int(handle.read())
handle.close()
# check for a process with that pid by sending signal 0
try:
os.kill(other_pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
other_pid = None # doesn't exist
else:
raise # who knows
if other_pid == None:
# write a new pid file
write_file(PID_FILE, lambda f: f.write(str(os.getpid())))
return other_pid
def write_file(filename, do_write, **kw_args):
"""
Write file <filename> atomically by opening a temporary file,
using <do_write> to write that file, and then renaming the temporary file.
"""
shutil.move(write_temp_file(do_write, **kw_args), filename)
def write_temp_file(do_write, mode=None, uidgid=None, binary=False):
fd, temporary_filename = tempfile.mkstemp()
if mode:
os.chmod(temporary_filename, mode)
if uidgid:
os.chown(temporary_filename, *uidgid)
open_mode = 'wb' if binary else 'w'
f = os.fdopen(fd, open_mode)
try:
do_write(f)
finally:
f.close()
return temporary_filename
def replace_file_with_string(target, new_contents,
chmod=None, remove_if_empty=False):
"""
Replace a target file with a new contents checks for changes: does not do
anything if previous state was already right can handle chmod if requested
can also remove resulting file if contents are void, if requested performs
atomically: writes in a tmp file, which is then renamed (from sliverauth
originally) returns True if a change occurred, or the file is deleted
"""
try:
with open(target) as feed:
current = feed.read()
except:
current = ""
if current == new_contents:
# if turns out to be an empty string, and remove_if_empty is set,
# then make sure to trash the file if it exists
if remove_if_empty and not new_contents and os.path.isfile(target):
logger.verbose(
"tools.replace_file_with_string: removing file {}".format(target))
try:
os.unlink(target)
finally:
return True
return False
# overwrite target file: create a temp in the same directory
path = os.path.dirname(target) or '.'
fd, name = tempfile.mkstemp('', 'repl', path)
os.write(fd, new_contents.encode())
os.close(fd)
if os.path.exists(target):
os.unlink(target)
shutil.move(name, target)
if chmod:
os.chmod(target, chmod)
return True
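# Illustrative usage (not part of the original module); the path and contents
# are made-up examples:
#   changed = replace_file_with_string('/tmp/example.conf',
#                                      'interval = 30\n', chmod=0o644)
# `changed` is True when the file was actually rewritten and False when the
# existing contents already matched.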
####################
# utilities functions to get (cached) information from the node
# get node_id from /etc/planetlab/node_id and cache it
_node_id = None
def node_id():
global _node_id
if _node_id is None:
try:
with open("/etc/planetlab/node_id") as f:
_node_id = int(f.read())
except:
_node_id = ""
return _node_id
_root_context_arch = None
def root_context_arch():
global _root_context_arch
if not _root_context_arch:
sp = subprocess.Popen(["uname", "-i"], stdout=subprocess.PIPE)
(_root_context_arch, _) = sp.communicate()
_root_context_arch = _root_context_arch.strip()
return _root_context_arch
####################
class NMLock:
def __init__(self, file):
logger.log("tools: Lock {} initialized.".format(file), 2)
self.fd = os.open(file, os.O_RDWR | os.O_CREAT, 0o600)
flags = fcntl.fcntl(self.fd, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fd, fcntl.F_SETFD, flags)
def __del__(self):
os.close(self.fd)
def acquire(self):
logger.log("tools: Lock acquired.", 2)
fcntl.lockf(self.fd, fcntl.LOCK_SH)
def release(self):
logger.log("tools: Lock released.", 2)
fcntl.lockf(self.fd, fcntl.LOCK_UN)
####################
# Utilities for getting the IP address of a LXC/Openvswitch slice. Do this by
# running ifconfig inside of the slice's context.
def get_sliver_process(slice_name, process_cmdline):
"""
Utility function to find a process inside of an LXC sliver. Returns
(cgroup_fn, pid). cgroup_fn is the filename of the cgroup file for
the process, for example /proc/2592/cgroup. Pid is the process id of
the process. If the process is not found then (None, None) is returned.
"""
try:
cmd = 'grep {} /proc/*/cgroup | grep freezer'.format(slice_name)
output = os.popen(cmd).readlines()
except:
# the slice couldn't be found
logger.log(
"get_sliver_process: couldn't find slice {}".format(slice_name))
return (None, None)
cgroup_fn = None
pid = None
for e in output:
try:
l = e.rstrip()
path = l.split(':')[0]
comp = l.rsplit(':')[-1]
slice_name_check = comp.rsplit('/')[-1]
# the lines below were added by Guilherme <[email protected]>
# due to the LXC requirements
# What we have to consider here is that libvirt on Fedora 18
# uses the following line:
# /proc/1253/cgroup:6:freezer:/machine.slice/auto_sirius.libvirt-lxc
# While the libvirt on Fedora 20 and 21 uses the following line:
# /proc/1253/cgroup:6:freezer:/machine.slice/machine-lxc\x2del_sirius.scope
# Further documentation on:
# https://libvirt.org/cgroups.html#systemdScope
virt = get_node_virt()
if virt == 'lxc':
# This is for Fedora 20 or later
regexf20orlater = re.compile(r'machine-lxc\\x2d(.+).scope')
isf20orlater = regexf20orlater.search(slice_name_check)
if isf20orlater:
slice_name_check = isf20orlater.group(1)
else:
# This is for Fedora 18
slice_name_check = slice_name_check.rsplit('.')[0]
if (slice_name_check == slice_name):
slice_path = path
pid = slice_path.split('/')[2]
with open('/proc/{}/cmdline'.format(pid)) as cmdfile:
cmdline = cmdfile.read().rstrip('\n\x00')
if (cmdline == process_cmdline):
cgroup_fn = slice_path
break
except:
break
if (not cgroup_fn) or (not pid):
logger.log("get_sliver_process: process {} not running in slice {}"
.format(process_cmdline, slice_name))
return (None, None)
return (cgroup_fn, pid)
###################################################
# Added by Guilherme Sperb Machado <[email protected]>
###################################################
try:
import re
import socket
import fileinput
except:
logger.log("Could not import 're', 'socket', or 'fileinput' python packages.")
# TODO: is there anything better to do if the "libvirt", "sliver_libvirt",
# and "sliver_lxc" are not in place?
try:
import libvirt
from sliver_libvirt import Sliver_Libvirt
import sliver_lxc
except:
logger.log("Could not import 'sliver_lxc' or 'libvirt' or 'sliver_libvirt'.")
###################################################
def get_sliver_ifconfig(slice_name, device="eth0"):
"""
return the output of "ifconfig" run from inside the sliver.
side effects: adds "/usr/sbin" to sys.path
"""
# See if setns is installed. If it's not then we're probably not running
# LXC.
if not os.path.exists("/usr/sbin/setns.so"):
return None
# setns is part of lxcsu and is installed to /usr/sbin
if not "/usr/sbin" in sys.path:
sys.path.append("/usr/sbin")
import setns
(cgroup_fn, pid) = get_sliver_process(slice_name, "/sbin/init")
if (not cgroup_fn) or (not pid):
return None
path = '/proc/{}/ns/net'.format(pid)
result = None
try:
setns.chcontext(path)
args = ["/sbin/ifconfig", device]
sub = subprocess.Popen(
args, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
sub.wait()
if (sub.returncode != 0):
logger.log("get_slice_ifconfig: error in ifconfig: {}".format(
sub.stderr.read()))
result = sub.stdout.read()
finally:
setns.chcontext("/proc/1/ns/net")
return result
def get_sliver_ip(slice_name):
ifconfig = get_sliver_ifconfig(slice_name)
if not ifconfig:
return None
for line in ifconfig.split("\n"):
if "inet addr:" in line:
# example: ' inet addr:192.168.122.189 Bcast:192.168.122.255 Mask:255.255.255.0'
parts = line.strip().split()
if len(parts) >= 2 and parts[1].startswith("addr:"):
return parts[1].split(":")[1]
return None
###################################################
# Author: Guilherme Sperb Machado <[email protected]>
###################################################
# Get the slice ipv6 address
# Only for LXC!
###################################################
def get_sliver_ipv6(slice_name):
ifconfig = get_sliver_ifconfig(slice_name)
if not ifconfig:
return None, None
# example: 'inet6 2001:67c:16dc:1302:5054:ff:fea7:7882 prefixlen 64 scopeid 0x0<global>'
prog = re.compile(
r'inet6\s+(.*)\s+prefixlen\s+(\d+)\s+scopeid\s+(.+)<global>')
for line in ifconfig.split("\n"):
search = prog.search(line)
if search:
ipv6addr = search.group(1)
prefixlen = search.group(2)
return (ipv6addr, prefixlen)
return None, None
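# Illustrative example (not part of the original module): for an ifconfig
# line like the one quoted in the comment above,
#   'inet6 2001:67c:16dc:1302:5054:ff:fea7:7882 prefixlen 64 scopeid 0x0<global>'
# the regular expression yields
#   ipv6addr = '2001:67c:16dc:1302:5054:ff:fea7:7882' and prefixlen = '64'.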
###################################################
# Author: Guilherme Sperb Machado <[email protected]>
###################################################
# Check if the address is a AF_INET6 family address
###################################################
def is_valid_ipv6(ipv6addr):
try:
socket.inet_pton(socket.AF_INET6, ipv6addr)
except socket.error:
return False
return True
# this returns the kind of virtualization on the node
# either 'vs' or 'lxc'
# also caches it in /etc/planetlab/virt for next calls
# could be promoted to core nm if need be
virt_stamp = "/etc/planetlab/virt"
def get_node_virt():
try:
with open(virt_stamp) as f:
return f.read().strip()
except:
pass
logger.log("Computing virt..")
try:
virt = 'vs' if subprocess.call(['vserver', '--help']) == 0 else 'lxc'
except:
virt = 'lxc'
with open(virt_stamp, "w") as f:
f.write(virt)
return virt
# this return True or False to indicate that systemctl is present on that box
# cache result in memory as _has_systemctl
_has_systemctl = None
def has_systemctl():
global _has_systemctl
if _has_systemctl is None:
_has_systemctl = (subprocess.call(['systemctl', '--help']) == 0)
return _has_systemctl
###################################################
# Author: Guilherme Sperb Machado <[email protected]>
###################################################
# This method was developed to support the ipv6 plugin
# Only for LXC!
###################################################
def reboot_slivers():
type = 'sliver.LXC'
# connecting to the libvirtd
connLibvirt = Sliver_Libvirt.getConnection(type)
domains = connLibvirt.listAllDomains()
for domain in domains:
try:
# set the flag VIR_DOMAIN_REBOOT_INITCTL, which uses "initctl"
result = domain.reboot(0x04)
if result == 0:
logger.log("tools: REBOOT {}".format(domain.name()))
else:
raise Exception()
except Exception as e:
logger.log("tools: FAILED to reboot {} ({})".format(
domain.name(), e))
logger.log(
"tools: Trying to DESTROY/CREATE {} instead...".format(domain.name()))
try:
result = domain.destroy()
if result == 0:
logger.log("tools: DESTROYED {}".format(domain.name()))
else:
logger.log(
"tools: FAILED in the DESTROY call of {}".format(domain.name()))
result = domain.create()
if result == 0:
logger.log("tools: CREATED {}".format(domain.name()))
else:
logger.log(
"tools: FAILED in the CREATE call of {}".format(domain.name()))
except Exception as e:
logger.log(
"tools: FAILED to DESTROY/CREATE {} ({})".format(domain.name(), e))
###################################################
# Author: Guilherme Sperb Machado <[email protected]>
###################################################
# Get the /etc/hosts file path
###################################################
def get_hosts_file_path(slicename):
containerDir = os.path.join(sliver_lxc.Sliver_LXC.CON_BASE_DIR, slicename)
return os.path.join(containerDir, 'etc', 'hosts')
###################################################
# Author: Guilherme Sperb Machado <[email protected]>
###################################################
# Search if there is a specific ipv6 address in the
# /etc/hosts file of a given slice
# If the parameter 'ipv6addr' is None, then search
# for any ipv6 address
###################################################
def search_ipv6addr_hosts(slicename, ipv6addr):
hostsFilePath = get_hosts_file_path(slicename)
found = False
try:
for line in fileinput.input(r'{}'.format(hostsFilePath)):
if ipv6addr is not None:
if re.search(r'{}'.format(ipv6addr), line):
found = True
else:
search = re.search(r'^(.*)\s+.*$', line)
if search:
ipv6candidate = search.group(1)
ipv6candidatestrip = ipv6candidate.strip()
valid = is_valid_ipv6(ipv6candidatestrip)
if valid:
found = True
fileinput.close()
return found
except:
logger.log("tools: FAILED to search {} in /etc/hosts file of slice={}"
.format(ipv6addr, slicename))
###################################################
# Author: Guilherme Sperb Machado <[email protected]>
###################################################
# Removes all ipv6 addresses from the /etc/hosts
# file of a given slice
###################################################
def remove_all_ipv6addr_hosts(slicename, node):
hostsFilePath = get_hosts_file_path(slicename)
try:
for line in fileinput.input(r'{}'.format(hostsFilePath), inplace=True):
search = re.search(
r'^(.*)\s+({}|{})$'.format(node, 'localhost'), line)
if search:
ipv6candidate = search.group(1)
ipv6candidatestrip = ipv6candidate.strip()
valid = is_valid_ipv6(ipv6candidatestrip)
if not valid:
print(line, end=' ')
fileinput.close()
logger.log("tools: REMOVED IPv6 address from /etc/hosts file of slice={}"
.format(slicename))
except:
logger.log("tools: FAILED to remove the IPv6 address from /etc/hosts file of slice={}"
.format(slicename))
###################################################
# Author: Guilherme Sperb Machado <[email protected]>
###################################################
# Adds an ipv6 address to the /etc/hosts file within a slice
###################################################
def add_ipv6addr_hosts_line(slicename, node, ipv6addr):
hostsFilePath = get_hosts_file_path(slicename)
logger.log("tools: {}".format(hostsFilePath))
# debugging purposes:
#string = "127.0.0.1\tlocalhost\n192.168.100.179\tmyplc-node1-vm.mgmt.local\n"
#string = "127.0.0.1\tlocalhost\n"
try:
with open(hostsFilePath, "a") as file:
file.write(ipv6addr + " " + node + "\n")
file.close()
logger.log("tools: ADDED IPv6 address to /etc/hosts file of slice={}"
.format(slicename))
except:
logger.log("tools: FAILED to add the IPv6 address to /etc/hosts file of slice={}"
.format(slicename))
# how to run a command in a slice
# now this is a painful matter
# the problem is with capsh that forces a bash command to be injected in its exec'ed command
# so because lxcsu uses capsh, you cannot exec anything else than bash
# bottom line is, what actually needs to be called is
# vs: vserver exec slicename command and its arguments
# lxc: lxcsu slicename "command and its arguments"
# which, OK, is no big deal as long as the command is simple enough,
# but do not stretch it with arguments that have spaces or need quoting as that will become a nightmare
def command_in_slice(slicename, argv):
virt = get_node_virt()
if virt == 'vs':
return ['vserver', slicename, 'exec', ] + argv
elif virt == 'lxc':
# wrap up argv in a single string for -c
return ['lxcsu', slicename, ] + [" ".join(argv)]
logger.log("command_in_slice: WARNING: could not find a valid virt")
return argv
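# Illustrative example (not part of the original module); 'myslice' is a
# made-up slice name. Under 'lxc' virtualization:
#   command_in_slice('myslice', ['ls', '-l', '/'])
#       -> ['lxcsu', 'myslice', 'ls -l /']
# and under 'vs' virtualization:
#   command_in_slice('myslice', ['ls', '-l', '/'])
#       -> ['vserver', 'myslice', 'exec', 'ls', '-l', '/']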
####################
def init_signals():
def handler(signum, frame):
logger.log("Received signal {} - exiting".format(signum))
os._exit(1)
signal.signal(signal.SIGHUP, handler)
signal.signal(signal.SIGQUIT, handler)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
| {
"content_hash": "3459c2b54d33e46501bb8188b508a780",
"timestamp": "",
"source": "github",
"line_count": 650,
"max_line_length": 103,
"avg_line_length": 32.78153846153846,
"alnum_prop": 0.5556598460672048,
"repo_name": "dreibh/planetlab-lxc-nodemanager",
"id": "ba6e44a33b9ba8962f0174b2713a755a15286223",
"size": "21336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3519"
},
{
"name": "Makefile",
"bytes": "4704"
},
{
"name": "Python",
"bytes": "314431"
},
{
"name": "Shell",
"bytes": "2429"
}
],
"symlink_target": ""
} |
"""Collection of classifiers intended to work with text data."""
import numpy as np
from sklearn.base import BaseEstimator
from dstoolbox.data import load_w2v_format
from dstoolbox.utils import normalize_matrix
from dstoolbox.utils import fast_argsort
class W2VClassifier(BaseEstimator):
"""Word2Vec classifier that requires pre-trained word vectors.
This classifier implements the `kneighbors` interface from
scikit-learn so that it can be used similarly to a
KNeighborsClassifier & Co. It also partly re-implements some of
the `gensim.models.Word2Vec` method.
Parameters
----------
path_w2v : str
Filename of w2v data exported with gensim (first row dimensons
of data in file, each row space-separated word vectors with word
as string at the beginning.)
topn : int (default=10)
The number of similar words to return by default
"""
def __init__(self, path_w2v, topn=10):
self.fname = path_w2v
self.topn = topn
# pylint: disable=attribute-defined-outside-init,unused-argument
def fit(self, X=None, y=None):
"""Load word2vec data.
Parameters
----------
Requires no parameters.
Attributes
----------
word2idx_, dict{str: int}
A dictionary to look up the row number of the
representation vector for a given word.
syn0_: numpy.array 2d
Contains the vector represantation for words in every row. The
row number for a given word can be found with `word2idx`.
classes_: numpy.array 2d
Holds the label for each class.
"""
word2idx, syn0 = load_w2v_format(self.fname)
self.word2idx_ = word2idx
self.classes_ = np.array(sorted(word2idx, key=word2idx.get))
self.syn0_ = normalize_matrix(syn0)
return self
def _get_vector_from_word(self, word):
# return word2vec embedding for a given word
return self.syn0_[self.word2idx_[word]]
def _update_vocabulary(self, word, vector):
# add a new word and embedding to existing ones
self.word2idx_[word] = self.syn0_.shape[0]
self.classes_ = np.array(self.classes_.tolist() + [word])
if vector.ndim == 1:
vector = vector.reshape(1, -1)
self.syn0_ = np.vstack((self.syn0_, vector))
def _add_word_vectors(self, positive):
# compute the normalized mean of several word embeddings
vectors = [self._get_vector_from_word(word) for word in positive]
vectors = np.mean(vectors, axis=0).reshape(1, -1)
normalized = normalize_matrix(vectors)
return normalized
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each
point.
Parameters
----------
X: numpy.array 1D
Array of strings containing the words whose neighbors should
be determined.
n_neighbors: int
Number of neighbors to get (default is the value passed to
the constructor).
return_distance: boolean (default=True)
If False, distances will not be returned.
Returns
-------
neighbors: numpy.array
Indices of the nearest points in the population matrix.
distances: numpy.array (optional)
Array representing the lengths to points, only present if
`return_distance=True`.
"""
n_neighbors = n_neighbors or self.topn
n_neighbors = min(n_neighbors, self.syn0_.shape[0] - 1)
neighbors, similarities = [], []
for x in X:
xvec = self._get_vector_from_word(x)
similarity = np.dot(self.syn0_, xvec.T)
# Throw away the smallest index, since it is the initial
# word itself.
# pylint: disable=invalid-unary-operand-type
neighbor_indices = fast_argsort(-similarity, n_neighbors + 1)[1:]
neighbors.append(neighbor_indices)
similarities.append(similarity[neighbor_indices])
neighbors = np.vstack(neighbors)
if not return_distance:
return neighbors
# normalize distances to [0, 1]
distances = (np.vstack(similarities) - 1) / -2.
return neighbors, distances
# pylint: disable=dangerous-default-value
def most_similar(self, positive=[], negative=[], topn=10):
"""Find the top-N most similar words.
(verbatim from gensim)
This method computes cosine similarity between a simple mean
of the projection weight vectors of the given words and the
vectors for each word in the model. The method corresponds to
the `word-analogy` and `distance` scripts in the original
word2vec implementation.
Parameters
----------
positive: str or list of str (default=[])
Word(s) whose embedding(s) contribute(s) positively.
negative: str or list of str (default=[])
Word(s) whose embedding(s) contribute(s) negatively. It is
currently NOT IMPLEMENTED.
topn: int (default=10)
Number of similar words to return.
Returns
-------
results: list of tuples (str, float)
The `topn` most similar words in a list with corresponding
similarity measure.
"""
if negative:
raise NotImplementedError(
"The `negative` parameter is not yet supported.")
if not positive:
raise ValueError("No words provided to compute similarity.")
if isinstance(positive, str):
positive = [positive]
if len(positive) > 1:
joined = ' '.join(positive)
if joined not in self.word2idx_:
# update vocabulary to contain composite word
vector = self._add_word_vectors(positive)
self._update_vocabulary(joined, vector)
positive = [joined]
neighbor_indices, distances = self.kneighbors(positive, topn)
neighbors = self.classes_[neighbor_indices]
similarities = 1.0 - distances
results = list(zip(neighbors[0], similarities[0]))
return results
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X: numpy.array 1d
Array containing word for each sample.
Returns
-------
y_pred: numpy.array 1d
Array containing the class labels for each sample.
"""
neighbors = self.kneighbors(X, n_neighbors=1, return_distance=False)
y_pred = neighbors.flatten()
return y_pred
def predict_proba(self, X):
"""This method is not implemented."""
raise NotImplementedError("`predict_proba` does not exist for this "
"classifier.")
| {
"content_hash": "804094141eba3994bdb256b3baeca0ec",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 77,
"avg_line_length": 33.55980861244019,
"alnum_prop": 0.6099230111206159,
"repo_name": "ottogroup/dstoolbox",
"id": "f8c11d0ff59f10e6f8508f8aec74f657873be73f",
"size": "7014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dstoolbox/models/text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "452043"
},
{
"name": "Python",
"bytes": "158090"
},
{
"name": "Shell",
"bytes": "440"
}
],
"symlink_target": ""
} |
import flask_login
from flask_mail import Mail
from flask import current_app
from flask_migrate import Migrate
from flask.ext.sqlalchemy import SQLAlchemy
from celery import Celery
from celery import task as ctask
from flask_assets import Environment
from app.config import task
login_manager = flask_login.LoginManager()
sqlalchemy = SQLAlchemy()
migrate = Migrate()
mail = Mail()
assets = Environment()
task_server = Celery(__name__, broker=task.CELERY_BROKER_URL)
task_server.config_from_object(task)
| {
"content_hash": "41d440e65da471271ef531d651283ed4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 61,
"avg_line_length": 23.136363636363637,
"alnum_prop": 0.7917485265225933,
"repo_name": "atulmishra-one/dairy_manager",
"id": "f9cd3bf3cb2f23def83fc80af84e8557de92490f",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/services/extension.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28650"
},
{
"name": "HTML",
"bytes": "50034"
},
{
"name": "JavaScript",
"bytes": "170466"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "35873"
}
],
"symlink_target": ""
} |
def capitals_first(string):
return ' '.join(sorted((a for a in string.split() if a[0].isalpha()),
key=str.islower))
| {
"content_hash": "8440e0052d654e7dab7bff6a40926e9a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 73,
"avg_line_length": 49,
"alnum_prop": 0.54421768707483,
"repo_name": "the-zebulan/CodeWars",
"id": "c3af4fa1ff425215b5c6ddb6ffb96d3f93c8950c",
"size": "147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/beta/capitals_first.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
} |
import warnings
warnings.filterwarnings("ignore")
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import sys,os
import subprocess
import numpy as np
import random
import time
import cv2 as cv
import pyfits
from pyfits import getheader
import multiprocessing, Queue
import ctypes
class Worker(multiprocessing.Process):
def __init__(self, work_queue, result_queue):
# base class initialization
multiprocessing.Process.__init__(self)
# job management stuff
self.work_queue = work_queue
self.result_queue = result_queue
self.kill_received = False
def run(self):
while not self.kill_received:
# get a task
try:
i_range, psf_file = self.work_queue.get_nowait()
except Queue.Empty:
break
# the actual processing
print "Adding artificial stars - index range=", i_range
radius=16
x_c,y_c=( (psf_size[1]-1)/2, (psf_size[2]-1)/2 )
x,y=np.meshgrid(np.arange(psf_size[1])-x_c,np.arange(psf_size[2])-y_c)
distance = np.sqrt(x**2 + y**2)
for i in range(i_range[0],i_range[1]):
psf_xy=np.zeros(psf_size[1:3], dtype=float)
j=0
for i_order in range(psf_order+1):
j_order=0
while (i_order+j_order < psf_order+1):
psf_xy += psf_data[j,:,:] * ((mock_y[i]-psf_offset[1])/psf_scale[1])**i_order * ((mock_x[i]-psf_offset[0])/psf_scale[0])**j_order
j_order+=1
j+=1
psf_factor=10.**( (30.-mock_mag[i])/2.5)/np.sum(psf_xy)
psf_xy *= psf_factor
npsf_xy=cv.resize(psf_xy,(npsf_size[0],npsf_size[1]),interpolation=cv.INTER_LANCZOS4)
npsf_factor=10.**( (30.-mock_mag[i])/2.5)/np.sum(npsf_xy)
npsf_xy *= npsf_factor
im_rangex=[max(mock_x[i]-npsf_size[1]/2,0), min(mock_x[i]-npsf_size[1]/2+npsf_size[1], im_size[1])]
im_rangey=[max(mock_y[i]-npsf_size[0]/2,0), min(mock_y[i]-npsf_size[0]/2+npsf_size[0], im_size[0])]
npsf_rangex=[max(-1*(mock_x[i]-npsf_size[1]/2),0), min(-1*(mock_x[i]-npsf_size[1]/2-im_size[1]),npsf_size[1])]
npsf_rangey=[max(-1*(mock_y[i]-npsf_size[0]/2),0), min(-1*(mock_y[i]-npsf_size[0]/2-im_size[0]),npsf_size[0])]
im_data[im_rangey[0]:im_rangey[1], im_rangex[0]:im_rangex[1]] += npsf_xy[npsf_rangey[0]:npsf_rangey[1], npsf_rangex[0]:npsf_rangex[1]]
print 'Done'
self.result_queue.put(id)
# store the result
class Worker_sex(multiprocessing.Process):
def __init__(self, work_queue, result_queue):
# base class initialization
multiprocessing.Process.__init__(self)
# job management stuff
self.work_queue = work_queue
self.result_queue = result_queue
self.kill_received = False
def run(self):
while not self.kill_received:
# get a task
try:
i_thread, i_range = self.work_queue.get_nowait()
except Queue.Empty:
break
fwhm=1.0
pixel_scale=0.263
weight_type='MAP_WEIGHT'
checkimage_type='NONE'
checkimage_file='NONE'
satur_level=4.3e5
analysis_thresh=2.0
detect_minarea=3
detect_thresh=1.4
phot_apertures=",".join(["%.2f" % x for x in 2*np.array((0.5,1.,1.5,2.,2.5,3.,3.5,4.,4.5,5.))*fwhm/pixel_scale])
filter_name='sex_config/gauss_3.0_7x7.conv'
xml_name='survey_sex.xml'
# the actual processing
log_file="survey_completeness_sex_thread%d.log" % i_thread
for i in range(i_range[0],i_range[1]):
command = "sex %s -c sex_config/ctio_decam.sex -PARAMETERS_NAME sex_config/ctio_decam_psfmodel.param -CATALOG_TYPE FITS_LDAC -CATALOG_NAME %s -SEEING_FWHM %.2f -WEIGHT_TYPE %s -WEIGHT_THRESH 0. -WEIGHT_IMAGE %s -CHECKIMAGE_TYPE %s -CHECKIMAGE_NAME %s -SATUR_LEVEL %d -BACKPHOTO_TYPE LOCAL -BACKPHOTO_THICK 30 -BACK_SIZE 250 -BACK_FILTERSIZE 3 -MASK_TYPE CORRECT -ANALYSIS_THRESH %.2f -DETECT_MINAREA %d -DETECT_THRESH %.2f -DEBLEND_MINCONT 0.0000001 -INTERP_TYPE ALL -INTERP_MAXXLAG 1 -INTERP_MAXYLAG 1 -FLAG_TYPE OR -FLAG_IMAGE %s -PHOT_AUTOPARAMS 2.3,4.0 -PHOT_FLUXFRAC 0.5 -PHOT_APERTURES %s -PIXEL_SCALE %.4f -FILTER Y -FILTER_NAME %s -WRITE_XML Y -XML_NAME %s -PSF_NAME %s -PSF_NMAX 1" % (mock_im_file[i], sex_cat_file[i], fwhm, weight_type, weight_file[i], checkimage_type, checkimage_file, satur_level, analysis_thresh, detect_minarea, detect_thresh, flag_file[i], phot_apertures, pixel_scale, filter_name, xml_name, psf_file[i] )
print command
with open(log_file, "a") as log:
result=subprocess.call(command, stderr=log, stdout=log, shell=True)
log.close()
print "SExtractor thread: %d - iteration: %d is done!" % (i_thread, i)
self.result_queue.put(id)
if __name__ == "__main__":
n_cpu=2
n_core=6
n_processes=n_cpu*n_core*1
input_mock_file=sys.argv[1]
input_data_dtype=np.dtype({'names':['im_file','weight_file','flag_file','psf_file','mock_mag_file','mock_im_file','sex_cat_file'],'formats':['S200','S200','S200','S200','S200','S200','S200']})
input_data=np.loadtxt(input_mock_file, skiprows=1, dtype=input_data_dtype)
im_file=input_data['im_file']
weight_file=input_data['weight_file']
flag_file=input_data['flag_file']
psf_file=input_data['psf_file']
mock_mag_file=input_data['mock_mag_file']
mock_im_file=input_data['mock_im_file']
sex_cat_file=input_data['sex_cat_file']
input_n=im_file.size
n_processes=np.minimum(n_processes,input_n)
print n_processes
for i in range(input_n):
if os.path.exists(mock_im_file[i]):
print "Removing file ", mock_im_file[i]
os.remove(mock_im_file[i])
if os.path.exists(sex_cat_file[i]):
print "Removing file ", sex_cat_file[i]
os.remove(sex_cat_file[i])
# First, add artificial stars
for i in range(0,input_n):
hdulist = pyfits.open(psf_file[i])
psf_h = hdulist[1].header
psf_data = (hdulist[1].data)[0][0]
hdulist.close()
psf_order=psf_h['POLDEG1']
psf_offset=[psf_h['POLZERO1'],psf_h['POLZERO2']]
psf_scale=[psf_h['POLSCAL1'],psf_h['POLSCAL2']]
psf_pixstep=psf_h['PSF_SAMP']
psf_size=psf_data.shape
npsf_size=(np.array(psf_size[1:3])*psf_pixstep).astype(int)
mock_data=np.loadtxt(mock_mag_file[i], skiprows=1)
mock_n=mock_data[:,0].size
mock_sort=np.argsort(mock_data[:,1])
mock_x=mock_data[mock_sort,0]
mock_y=mock_data[mock_sort,1]
mock_mag=mock_data[mock_sort,2]
print "Reading file ", im_file[i]
hdu=pyfits.open(im_file[i])
data=hdu[0].data
im_size=data.shape
im_data_base = multiprocessing.Array(ctypes.c_float, im_size[0]*im_size[1])
im_data = np.ctypeslib.as_array(im_data_base.get_obj())
im_data = im_data.reshape(im_size[0], im_size[1])
im_data[:] = data
data=0
assert im_data.base.base is im_data_base.get_obj()
# run
# load up work queue
tic=time.time()
j_step=np.int(np.ceil( mock_n*1./n_processes ))
j_range=range(0,mock_n,j_step)
j_range.append(mock_n)
work_queue = multiprocessing.Queue()
for j in range(np.size(j_range)-1):
if work_queue.full():
print "Oh no! Queue is full after only %d iterations" % j
work_queue.put( (j_range[j:j+2], psf_file[i]) )
# create a queue to pass to workers to store the results
result_queue = multiprocessing.Queue()
procs=[]
# spawn workers
for j in range(n_processes):
worker = Worker(work_queue, result_queue)
procs.append(worker)
worker.start()
# collect the results off the queue
for j in range(n_processes):
result_queue.get()
for p in procs:
p.join()
print 'Final Done'
print "Writing file ", mock_im_file[i]
hdu[0].data=im_data
hdu.writeto(mock_im_file[i])
print "%f s for parallel computation." % (time.time() - tic)
# Second, run Sextractor
n_processes=n_cpu*n_core
n_processes=np.minimum(n_processes,input_n)
tic=time.time()
j_step=np.int(np.ceil( input_n*1./n_processes ))
j_range=range(0,input_n,j_step)
j_range.append(input_n)
work_queue = multiprocessing.Queue()
for j in range(np.size(j_range)-1):
if work_queue.full():
print "Oh no! Queue is full after only %d iterations" % j
work_queue.put( (j+1, j_range[j:j+2]) )
# create a queue to pass to workers to store the results
result_queue = multiprocessing.Queue()
procs=[]
# spawn workers
for j in range(n_processes):
worker = Worker_sex(work_queue, result_queue)
procs.append(worker)
worker.start()
time.sleep(30)
# collect the results off the queue
for j in range(n_processes):
result_queue.get()
for p in procs:
p.join()
| {
"content_hash": "ec23fb9c5228aaf9cd5fc2318c706e7b",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 941,
"avg_line_length": 32.27906976744186,
"alnum_prop": 0.6605427473583093,
"repo_name": "rpmunoz/DECam",
"id": "01ce327943c642863fe63a20c68d2e128741080c",
"size": "8352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "completeness/compute_completeness.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Prolog",
"bytes": "44607"
},
{
"name": "Python",
"bytes": "26306"
}
],
"symlink_target": ""
} |
"""
Created on Sun Jun 07 22:15:15 2015
@author: Paco
"""
from api import API
class Instagram(API):
_class_name = 'Instagram'
_category = 'Picture'
_help_url = 'https://instagram.com/developer/endpoints/'
_version = '1'
_api_url = 'https://api.instagram.com/v' + _version + '/'
def __init__(self,apikey):
self._api_key = apikey
def _parsing_data(self,data):
res = {'latitude':list(),'longitude':list(),'name':list(),'count_com':list(),'com':list(),'count_like':list(),'title':list(),'url':list()}
for d in data['data']:
res['latitude'].append(self._tools.key_test('latitude',d['location'],'float'))
res['longitude'].append(self._tools.key_test('longitude',d['location'],'float'))
res['name'].append(self._tools.key_test('name',d['location']))
res['count_com'].append(self._tools.key_test('count',d['comments'],'int'))
res['com'].append(self._tools.key_test('data',d['comments'],'list'))
res['count_like'].append(self._tools.key_test('count',d['likes'],'int'))
res['title'].append(self._tools.key_test('text',d['caption']))
res['url'].append(self._tools.key_test('link',d))
return res
def _parsing_data2(self,data):
res = {'count':list(),'name':list()}
for d in data['data']:
res['name'].append(self._tools.key_test('name',d))
res['count'].append(self._tools.key_test('media_count',d,'int'))
return res
def _parsing_data3(self,data):
res = {'latitude':list(),'longitude':list(),'name':list()}
for d in data['data']:
res['name'].append(self._tools.key_test('name',d))
res['latitude'].append(self._tools.key_test('latitude',d,'float'))
res['longitude'].append(self._tools.key_test('longitude',d,'float'))
return res
def _parsing_data4(self,data):
res = {'username':list(),'name':list(),'id':list(),'pic':list()}
for d in data['data']:
res['name'].append(self._tools.key_test('full_name',d))
res['username'].append(self._tools.key_test('username',d))
res['id'].append(self._tools.key_test('id',d,'int'))
res['pic'].append(self._tools.key_test('profile_picture',d))
return res
def get_popular(self,limit=10):
url = self._api_url+'media/popular?count='+str(limit)+'&client_id='+self._api_key
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data(data)
def search_by_coordinates(self,lat=48.858844,lon=2.294351, radius=2, limit=10):
url = self._api_url+'media/search?lat='+str(lat)+'&lng='+str(lon)+'&distance='+str(radius*1000)+'&count='+str(limit)+'&client_id='+self._api_key
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data(data)
def get_tags(self,text=''):
text = text.replace(' ','+')
url = self._api_url+'tags/search?q='+text+'&client_id='+self._api_key
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data2(data)
def search_locations(self,lat=48.858844,lon=2.294351,limit=10):
url = self._api_url+'locations/search?lat='+str(lat)+'&lng='+str(lon)+'&count='+str(limit)+'&client_id='+self._api_key
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data3(data)
def search_users(self,text='',limit=10):
text = text.replace(' ','+')
url = self._api_url+'users/search?q='+text+'&count='+str(limit)+'&client_id='+self._api_key
data = self._tools.data_from_url(url)
self._increment_nb_call()
return self._parsing_data4(data)
| {
"content_hash": "d0cbb6d388157c9d41b18f3402e33b3a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 152,
"avg_line_length": 43.95402298850575,
"alnum_prop": 0.5800209205020921,
"repo_name": "franblas/pyAPI",
"id": "c409704f4d77e250513b0fce59691a39c2752f20",
"size": "3848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyapi/instagram.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76708"
}
],
"symlink_target": ""
} |
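A minimal usage sketch for the Instagram wrapper above. The client id is a placeholder, and the calls target the legacy v1 endpoints hard-coded in `_api_url`, so they only work with credentials valid for that API version.

api = Instagram('YOUR_CLIENT_ID')
popular = api.get_popular(limit=5)
nearby = api.search_by_coordinates(lat=48.858844, lon=2.294351, radius=2, limit=5)
tags = api.get_tags('eiffel tower')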
from __future__ import print_function
import tensorflow as tf
import argparse
from antk.core import config
from antk.core import generic_model
from antk.core import loader
from antk.models import dsaddmodel
def return_parser():
parser = argparse.ArgumentParser(description="For testing")
parser.add_argument("datadir", metavar="DATA_DIRECTORY", type=str,
help="The directory where train, dev, and test data resides. ")
parser.add_argument("config", metavar="CONFIG", type=str,
help="The config file for building the ant architecture.")
parser.add_argument("initrange", metavar="INITRANGE", type=float,
help="A value determining the initial size of the weights.")
parser.add_argument("kfactors", metavar="KFACTORS", type=int,
help="The rank of the low rank factorization.")
parser.add_argument("lamb", metavar="LAMBDA", type=float,
help="The coefficient for l2 regularization")
parser.add_argument("mb", metavar="MINIBATCH", type=int,
help="The size of minibatches for stochastic gradient descent.")
parser.add_argument("learnrate", metavar="LEARNRATE", type=float,
help="The stepsize for gradient descent.")
parser.add_argument("verbose", metavar="VERBOSE", type=bool,
help="Whether or not to print dev evaluations during training.")
parser.add_argument("maxbadcount", metavar="MAXBADCOUNT", type=int,
help="The threshold for early stopping.")
parser.add_argument("epochs", metavar="EPOCHS", type=int,
help="The maximum number of epochs to train for.")
parser.add_argument("modelID", metavar="MODEL_ID", type=int,
help="A unique integer for saving model results during distributed runs model parameters.")
parser.add_argument("random_seed", metavar="RANDOM_SEED", type=int,
help="For reproducible results.")
return parser
if __name__ == '__main__':
args = return_parser().parse_args()
data = loader.read_data_sets(args.datadir,
folders=['train', 'test', 'dev', 'user', 'item'])
data.train.labels['ratings'] = loader.center(data.train.labels['ratings'])
data.dev.labels['ratings'] = loader.center(data.dev.labels['ratings'])
data.user.features['age'] = loader.center(data.user.features['age'])
data.item.features['year'] = loader.center(data.item.features['year'])
data.user.features['age'] = loader.maxnormalize(data.user.features['age'])
data.item.features['year'] = loader.maxnormalize(data.item.features['year'])
x = dsaddmodel.dsadd(data, args.config,
initrange=args.initrange,
kfactors=args.kfactors,
lamb =args.lamb,
mb=args.mb,
learnrate=args.learnrate,
verbose=args.verbose,
maxbadcount=args.maxbadcount,
epochs=args.epochs,
random_seed=args.random_seed,
eval_rate=args.eval_rate)
#print stuff here to file.
| {
"content_hash": "75b246e65e5fad1d37c453f709bea524",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 115,
"avg_line_length": 53.73770491803279,
"alnum_prop": 0.6070774862721171,
"repo_name": "aarontuor/antk",
"id": "f2d3b15359c9e4be4fc5349693b1aa5149a219e2",
"size": "3278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/modelwrappers/dsadd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "47995"
},
{
"name": "Python",
"bytes": "332504"
}
],
"symlink_target": ""
} |
class strToList :
def __init__(self,options,saveState=False):
#self.interest={"[":newList,"]":"closeList"}
# allows for
self.saveState=saveState
ints=["newList","closeList","setState","skipChar"]
posibs={"newList":self.newList ,"closeList":self.closeList ,"setState":self.setState,"skipChar":self.skipChar}
self.posibs=options
self.options={}
for i in options.keys():
if i in ints:
if isinstance(options[i],list):
for k in options[i]:
self.options[k]=posibs[i]
else:
self.options[options[i]]=posibs[i]
#print self.options.keys()
#self.options={"[":self.newList ,"]":self.closeList ,",":self.setState,"\"":self.skipChar}
#print self.options.keys()
def __call__(self,string,val=None):
return self.parseBuilder(val,string)
def setState(self,outList,string):
if self.saveState==False:
outList.append("")
else:
if not outList[-1]=="":
outList.append("")
return self.parseBuilder(outList,string[1::])
def newList(self,outList,string):
if isinstance(outList,list):
temp=[]
i=0
depth=0
for t in string:
if t in self.posibs["newList"]:
depth+=1
elif t in self.posibs["closeList"]:
depth-=1
if depth==0:
break
i+=1
if depth>0:
exit("Unmatched Bracket")
temp=self.parseBuilder(temp,string[1:i])
if len(outList)>0:
if outList[-1]=="":
outList[-1]=temp
else:
outList.append(temp)
else:
outList.append(temp)
return self.parseBuilder(outList,string[i::])
else:
outList=[]
return self.parseBuilder(outList,string[1::])
def closeList(self,outList,string):
return self.parseBuilder(outList,string[1::])
def pushChar(self,outList,string):
if len (outList)>0:
outList[-1]+=string[0]
else:
outList.append(string[0])
return self.parseBuilder(outList,string[1::])
def skipChar(self,outList,string):
return self.parseBuilder(outList,string[1::])
def parseBuilder(self,outList,string):
if len(string)==0:
return outList
if string[0] in self.options.keys():
func=self.options[string[0]]
else:
func=self.pushChar
return func(outList,string)
| {
"content_hash": "f5ad7770b4594361b168584da4aba512",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 118,
"avg_line_length": 34.2375,
"alnum_prop": 0.5151515151515151,
"repo_name": "alexjgriffith/alpha-score",
"id": "b31baf1675da78fe92a7e21677fa9d2974d654c5",
"size": "3122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/strToListClass.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8130"
},
{
"name": "Python",
"bytes": "43744"
},
{
"name": "R",
"bytes": "20858"
},
{
"name": "Shell",
"bytes": "5670"
}
],
"symlink_target": ""
} |
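A small usage sketch for the strToList parser above. The option mapping is an assumption that mirrors the commented-out defaults in `__init__`; it is not taken verbatim from the repository.

options = {"newList": "[", "closeList": "]", "setState": ",", "skipChar": '"'}
parse = strToList(options)
print(parse('[a,[b,c]]'))  # -> ['a', ['b', 'c']]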
from ddosso.handlers import DdossoHandlerMixin
import firenado.conf
import firenado.tornadoweb
from firenado import service
from tornado.auth import FacebookGraphMixin
from tornado.escape import json_encode, json_decode, url_escape
import tornado.web
class FacebookHandlerMixin:
SESSION_KEY = 'facebook_user'
def get_current_user(self):
user_json = self.session.get(self.SESSION_KEY)
if not user_json:
return None
return json_decode(user_json)
#Check https://developers.facebook.com/docs/graph-api/reference/user/picture/
class FacebookRouterHandler(FacebookHandlerMixin,
firenado.tornadoweb.TornadoHandler,
DdossoHandlerMixin):
@firenado.security.authenticated("facebook")
@service.served_by("ddosso.services.SocialLinkService")
def get(self):
errors = {}
facebook_user = self.current_user
next_url = self.session.get("next_url")
if next_url == self.get_rooted_path("sign_up"):
if self.social_link_service.by_handler("Oauth2:Facebook",
facebook_user['id']):
self.session.delete(self.SESSION_KEY)
errors['request'] = ("Este perfil de facebook já está "
"associado a outra conta. Não é possivel "
"associá-lo a um novo usuário.")
self.session.set("errors", errors)
self.redirect(self.session.get("next_url"))
class FacebookGraphAuthHandler(FacebookHandlerMixin,
firenado.tornadoweb.TornadoHandler,
FacebookGraphMixin, DdossoHandlerMixin):
@tornado.web.asynchronous
def get(self):
self.settings['facebook_api_key'] = self.component.conf[
'social']['facebook']['key']
self.settings['facebook_secret'] = self.component.conf[
'social']['facebook']['secret']
fb_url = firenado.conf.app['login']['urls']['facebook']
my_url = "%s://%s%s?next=%s" % (self.request.protocol,
self.request.host, fb_url,
url_escape(self.get_argument("next",
"/")))
if self.get_argument("code", False):
self.get_authenticated_user(
redirect_uri=my_url,
client_id=self.settings["facebook_api_key"],
client_secret=self.settings["facebook_secret"],
code=self.get_argument("code"),
callback=self._on_auth)
return
self.authorize_redirect(redirect_uri=my_url,
client_id=self.settings["facebook_api_key"],
extra_params={"scope": "user_posts"})
@tornado.web.asynchronous
def _on_auth(self, user):
print(user)
if not user:
raise tornado.web.HTTPError(500, "Facebook auth failed")
self.session.set(self.SESSION_KEY, json_encode(user))
self.redirect(self.get_rooted_path("/facebook/authorize"))
| {
"content_hash": "3ac5d5cacf08bce4a335ce780b46df1d",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 39.76543209876543,
"alnum_prop": 0.5638000620925179,
"repo_name": "piraz/ddosso",
"id": "bac849c0bf5c85efce3622cb518b9e5dc0013af8",
"size": "3828",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ddosso/components/facebook/handlers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2056"
},
{
"name": "HTML",
"bytes": "39933"
},
{
"name": "JavaScript",
"bytes": "14827"
},
{
"name": "Python",
"bytes": "128068"
}
],
"symlink_target": ""
} |
from base import BaseTest
import json
import os
import shutil
import subprocess
class Test(BaseTest):
def test_base(self):
"""
Basic test with exiting Mockbeat normally
"""
self.render_config_template(
)
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("Setup Beat"))
proc.check_kill_and_wait()
def test_no_config(self):
"""
Tests starting without a config
"""
exit_code = self.run_beat()
assert exit_code == 1
assert self.log_contains("error loading config file") is True
def test_invalid_config(self):
"""
Checks stop on invalid config
"""
shutil.copy(self.beat_path + "/tests/files/invalid.yml",
os.path.join(self.working_dir, "invalid.yml"))
exit_code = self.run_beat(config="invalid.yml")
assert exit_code == 1
assert self.log_contains("error loading config file") is True
def test_invalid_config_cli_param(self):
"""
Checks CLI overwrite actually overwrites some config variable by
writing an invalid value.
"""
self.render_config_template(
console={"pretty": "false"}
)
# first run with default config, validating config being
# actually correct.
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("Setup Beat"))
proc.check_kill_and_wait()
# start beat with invalid config setting on command line
exit_code = self.run_beat(
extra_args=["-E", "output.console=invalid"])
assert exit_code == 1
assert self.log_contains("error unpacking config data") is True
def test_config_test(self):
"""
Checks if -configtest works as expected
"""
shutil.copy(self.beat_path + "/_meta/config.yml",
os.path.join(self.working_dir, "libbeat.yml"))
with open(self.working_dir + "/mockbeat.template.json", "w") as f:
f.write('{"template": true}')
with open(self.working_dir + "/mockbeat.template-es2x.json", "w") as f:
f.write('{"template": true}')
exit_code = self.run_beat(
config="libbeat.yml",
extra_args=["-configtest",
"-path.config", self.working_dir])
assert exit_code == 0
assert self.log_contains("Config OK") is True
def test_version_simple(self):
"""
Tests -version prints a version and exits.
"""
self.start_beat(extra_args=["-version"]).check_wait()
assert self.log_contains("beat version") is True
def test_version(self):
"""
Checks if version param works
"""
args = [self.beat_path + "/libbeat.test"]
args.extend(["-version",
"-e",
"-systemTest",
"-v",
"-d", "*",
"-test.coverprofile",
os.path.join(self.working_dir, "coverage.cov")
])
assert self.log_contains("error loading config file") is False
with open(os.path.join(self.working_dir, "mockbeat.log"), "wb") \
as outputfile:
proc = subprocess.Popen(args,
stdout=outputfile,
stderr=subprocess.STDOUT)
exit_code = proc.wait()
assert exit_code == 0
assert self.log_contains("mockbeat") is True
assert self.log_contains("version") is True
assert self.log_contains("9.9.9") is True
def test_console_output_timed_flush(self):
"""
outputs/console - timed flush
"""
self.render_config_template(
console={"pretty": "false"}
)
proc = self.start_beat(logging_args=["-e"])
self.wait_until(lambda: self.log_contains("Mockbeat is alive"),
max_timeout=2)
proc.check_kill_and_wait()
def test_console_output_size_flush(self):
"""
outputs/console - size based flush
"""
self.render_config_template(
console={
"pretty": "false",
"bulk_max_size": 1,
"flush_interval": "1h"
}
)
proc = self.start_beat(logging_args=["-e"])
self.wait_until(lambda: self.log_contains("Mockbeat is alive"),
max_timeout=2)
proc.check_kill_and_wait()
def test_logging_metrics(self):
self.render_config_template(
metrics_period="0.1s"
)
proc = self.start_beat(logging_args=["-e"])
self.wait_until(
lambda: self.log_contains("Non-zero metrics in the last 100ms:"),
max_timeout=2)
proc.check_kill_and_wait()
self.wait_until(
lambda: self.log_contains("Total non-zero values:"),
max_timeout=2)
def test_persistent_uuid(self):
self.render_config_template()
# run starts and kills the beat, reading the meta file while
# the beat is alive
def run():
proc = self.start_beat(extra_args=["-path.home", self.working_dir])
self.wait_until(lambda: self.log_contains("Mockbeat is alive"),
max_timeout=2)
# open meta file before killing the beat, checking the file being
# available right after startup
metaFile = os.path.join(self.working_dir, "data", "meta.json")
with open(metaFile) as f:
meta = json.loads(f.read())
proc.check_kill_and_wait()
return meta
meta0 = run()
assert self.log_contains("Beat UUID: {}".format(meta0["uuid"]))
# remove log, restart beat and check meta file did not change
# and same UUID is used in log output.
os.remove(os.path.join(self.working_dir, "mockbeat.log"))
meta1 = run()
assert self.log_contains("Beat UUID: {}".format(meta1["uuid"]))
# check meta file did not change between restarts
assert meta0 == meta1
| {
"content_hash": "8dbdf5392da781709ecd7c90c41efc12",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 79,
"avg_line_length": 32.26288659793814,
"alnum_prop": 0.5435373062789584,
"repo_name": "taitan-org/inflog",
"id": "5af2181b9bd8c6ed6e76ff82a3432ca9b5acff3c",
"size": "6259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filebeat/libbeat/tests/system/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "216"
},
{
"name": "Go",
"bytes": "1171204"
},
{
"name": "Makefile",
"bytes": "23899"
},
{
"name": "Python",
"bytes": "250534"
},
{
"name": "Shell",
"bytes": "1141"
}
],
"symlink_target": ""
} |
import helpers
def plot():
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
fig = plt.figure()
# pylint: disable=invalid-slice-index
x, y = np.ogrid[-10:10:100j, -10:10:100j]
extent = (x.min(), x.max(), y.min(), y.max())
cmap = matplotlib.cm.get_cmap('gray')
plt.imshow(x*y, extent=extent, cmap=cmap)
plt.colorbar()
return fig
def test():
phash = helpers.Phash(plot())
assert phash.phash == 'fda6837883788378', phash.get_details()
| {
"content_hash": "bc5038476385c67815951fe10ee8ea25",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 65,
"avg_line_length": 23.363636363636363,
"alnum_prop": 0.6303501945525292,
"repo_name": "danielhkl/matplotlib2tikz",
"id": "90deb261a08777c5aaaa8aeb006883eba3c2cc09",
"size": "540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_heat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "654"
},
{
"name": "Python",
"bytes": "150161"
}
],
"symlink_target": ""
} |
import interpreter, messages, udpresponselistener, udpconnector, tcpfileclient, tcpfilesocket
| {
"content_hash": "42393ee343faa919ed25c14de4a04870",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 93,
"avg_line_length": 94,
"alnum_prop": 0.8723404255319149,
"repo_name": "xserty/piDS",
"id": "abe83614d54fc5b7951f5d3f7470cd238e9aaeae",
"size": "94",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Desktop/packages/rmnetwork/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "740"
},
{
"name": "CSS",
"bytes": "3503"
},
{
"name": "HTML",
"bytes": "2444"
},
{
"name": "PHP",
"bytes": "282"
},
{
"name": "Python",
"bytes": "291817"
},
{
"name": "Shell",
"bytes": "17406"
}
],
"symlink_target": ""
} |
"""
Sumy - automatic text summarizer.
Usage:
sumy (luhn | edmundson | lsa | text-rank | lex-rank | sum-basic | kl) [--length=<length>] [--language=<lang>] [--stopwords=<file_path>] [--format=<format>]
sumy (luhn | edmundson | lsa | text-rank | lex-rank | sum-basic | kl) [--length=<length>] [--language=<lang>] [--stopwords=<file_path>] [--format=<format>] --url=<url>
sumy (luhn | edmundson | lsa | text-rank | lex-rank | sum-basic | kl) [--length=<length>] [--language=<lang>] [--stopwords=<file_path>] [--format=<format>] --file=<file_path>
sumy (luhn | edmundson | lsa | text-rank | lex-rank | sum-basic | kl) [--length=<length>] [--language=<lang>] [--stopwords=<file_path>] [--format=<format>] --text=<text>
sumy --version
sumy --help
Options:
--length=<length> Length of summarized text. It may be count of sentences
or percentage of input text. [default: 20%]
--language=<lang> Natural language of summarized text. [default: english]
--stopwords=<file_path> Path to a file containing a list of stopwords. One word per line in UTF-8 encoding.
If it's not provided default list of stop-words is used according to chosen language.
--format=<format> Format of input document. Possible values: html, plaintext
--url=<url> URL address of the web page to summarize.
--file=<file_path> Path to the text file to summarize.
--text=<text> Raw text to summarize
--version Displays current application version.
--help Displays this text.
"""
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import sys
from docopt import docopt
from . import __version__
from .utils import ItemsCount, get_stop_words, read_stop_words, fetch_url
from ._compat import to_string, to_unicode, to_bytes, PY3
from .nlp.tokenizers import Tokenizer
from .parsers.html import HtmlParser
from .parsers.plaintext import PlaintextParser
from .summarizers.luhn import LuhnSummarizer
from .summarizers.edmundson import EdmundsonSummarizer
from .summarizers.lsa import LsaSummarizer
from .summarizers.text_rank import TextRankSummarizer
from .summarizers.lex_rank import LexRankSummarizer
from .summarizers.sum_basic import SumBasicSummarizer
from .summarizers.kl import KLSummarizer
from .nlp.stemmers import Stemmer
PARSERS = {
"html": HtmlParser,
"plaintext": PlaintextParser,
}
AVAILABLE_METHODS = {
"luhn": LuhnSummarizer,
"edmundson": EdmundsonSummarizer,
"lsa": LsaSummarizer,
"text-rank": TextRankSummarizer,
"lex-rank": LexRankSummarizer,
"sum-basic": SumBasicSummarizer,
"kl": KLSummarizer,
}
def main(args=None):
args = docopt(to_string(__doc__), args, version=__version__)
summarizer, parser, items_count = handle_arguments(args)
for sentence in summarizer(parser.document, items_count):
if PY3:
print(to_unicode(sentence))
else:
print(to_bytes(sentence))
return 0
def handle_arguments(args, default_input_stream=sys.stdin):
document_format = args['--format']
if document_format is not None and document_format not in PARSERS:
raise ValueError("Unsupported format of input document. Possible values are: %s. Given: %s." % (
", ".join(PARSERS.keys()),
document_format,
))
if args["--url"] is not None:
parser = PARSERS[document_format or "html"]
document_content = fetch_url(args["--url"])
elif args["--file"] is not None:
parser = PARSERS[document_format or "plaintext"]
with open(args["--file"], "rb") as file:
document_content = file.read()
elif args["--text"] is not None:
parser = PARSERS[document_format or "plaintext"]
document_content = args["--text"]
else:
parser = PARSERS[document_format or "plaintext"]
document_content = default_input_stream.read()
items_count = ItemsCount(args["--length"])
language = args["--language"]
if args["--stopwords"]:
stop_words = read_stop_words(args["--stopwords"])
else:
stop_words = get_stop_words(language)
parser = parser(document_content, Tokenizer(language))
stemmer = Stemmer(language)
summarizer_class = next(cls for name, cls in AVAILABLE_METHODS.items() if args[name])
summarizer = build_summarizer(summarizer_class, stop_words, stemmer, parser)
return summarizer, parser, items_count
def build_summarizer(summarizer_class, stop_words, stemmer, parser):
summarizer = summarizer_class(stemmer)
if summarizer_class is EdmundsonSummarizer:
summarizer.null_words = stop_words
summarizer.bonus_words = parser.significant_words
summarizer.stigma_words = parser.stigma_words
else:
summarizer.stop_words = stop_words
return summarizer
if __name__ == "__main__":
try:
exit_code = main()
exit(exit_code)
except KeyboardInterrupt:
exit(1)
except Exception as e:
print(e)
exit(1)
| {
"content_hash": "02f52d81441dfd6119ecdec8acff6cbb",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 178,
"avg_line_length": 38.237037037037034,
"alnum_prop": 0.6555598605191786,
"repo_name": "miso-belica/sumy",
"id": "e0ad2c390ef344baaa137e9857e8558a06a00e8c",
"size": "5187",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sumy/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "396"
},
{
"name": "Python",
"bytes": "205470"
}
],
"symlink_target": ""
} |
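Example invocations of the command-line interface defined above (the URL and file path are placeholders, not from the source):

    sumy lex-rank --length=10 --url=https://en.wikipedia.org/wiki/Automatic_summarization
    sumy lsa --length=5 --format=plaintext --file=./document.txt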
import os, sys, base64, hashlib
def parentOf(path, n=1):
return '/'.join(path.rstrip('/').split('/')[:-n])
REPO = parentOf(os.path.abspath(__file__), n=2)
sys.path.append('%s/impl' % REPO)
#### Tests
import unittest
from unittest import TestCase
from spriteutils import *
with open('%s/examples/raw/initial.css' % REPO, 'rb') as f:
INITIAL_CSS = f.read()
class Test_spriteImagesFromCss(TestCase):
def runTest(self):
y = spriteImagesFromCss({
'css': INITIAL_CSS,
'root': '%s/examples/djangoapp' % REPO,
})
assert set(y['sprite_images_map'].keys()) == set(['initial'])
assert len(y['sprite_images_map']['initial']) == 3
assert all(
re.search(r'file:///.*images/landing/signup.*\.png$', url)
for url in y['sprite_images_map']['initial'])
class Test_spriteCreate(TestCase):
def runTest(self):
y = spriteImagesFromCss({
'css': INITIAL_CSS,
'root': '%s/examples/djangoapp' % REPO,
})
imageUrls = y['sprite_images_map']['initial']
background = [255, 0, 0, 128]
y = spriteCreate({
'optipng': 2,
'background': background,
'images': imageUrls,
})
png = base64.b64decode(y['png_64'])
assert hashlib.sha1(png).hexdigest() == 'b6d7fa1d97727dfe1dfa7137282b4ad023bbf1e0'
#class Test_spriteHtmlViz(TestCase):
# def runTest(self):
# raise NotImplementedError
class Test_spriteReplace(TestCase):
def runTest(self):
imgsRoot = '%s/examples/djangoapp' % REPO
y = spriteImagesFromCss({
'css': INITIAL_CSS,
'root': imgsRoot,
})
imageUrls = y['sprite_images_map']['initial']
y = spriteCreate({
'background': '#94E4F9',
'images': imageUrls,
})
layout = y['layout']
y = spriteReplaceCss({
'sprite_urls': {
'initial': 'INITIAL_SPRITE_URL',
'registration': 'FINAL_SPRITE_URL',
},
'root': imgsRoot,
'css': INITIAL_CSS,
'layout': layout,
})
css = y['css']
assert css == '\n.signup {\n background: url("INITIAL_SPRITE_URL") 0 0;\nwidth: 182px;\nheight: 36px;\n}\n.signup:hover {\n background: url("INITIAL_SPRITE_URL") 0 -36px;\n}\n.signup:active {\n background: url("INITIAL_SPRITE_URL") 0 -72px;\n}\n'
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "710cad1aa70031931b5fa0cdca29f73b",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 263,
"avg_line_length": 29.91764705882353,
"alnum_prop": 0.5454187966968148,
"repo_name": "andrewschaaf/spriteutils",
"id": "89e859df904ff1be639531c267d6ded000623527",
"size": "2573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9508"
}
],
"symlink_target": ""
} |
## This python script will convert a TASSEL hapmap to a file input
# used by fastPHASE
# Import libraries
import argparse
#####
# Define the arguments
#####
# Description
DESC = """A Python program to convert the output from fastPHASE to a TASSEL-encoded
hapmap file.\n"""
# Argument parser
parser = argparse.ArgumentParser(description=DESC, add_help=True)
# Add arguments
# Input file
parser.add_argument('-i',
'--filein',
metavar = 'FILEIN',
help = 'The fastPHASE output file to be converted.',
required = True)
# The original TASSEL file
parser.add_argument('-t',
'--tinput',
metavar = 'TIN',
help = 'The original TASSEL file converted to the fastPHASE input file.',
required = True)
# Should alleles filtered by the probability threshold be removed?
parser.add_argument('-f',
'--filter',
help = 'Flag: should those genotype calls that did not meet the fastPHASE probability threshold be removed?',
action = 'store_true')
# Output file name
parser.add_argument('-o',
'--fileout',
metavar = 'FILEOUT',
                    help = 'The name of the output file. Default extension is ".hmp.txt".',
required = True)
# Parse the arguments
args = parser.parse_args()
# Define the output filename
outfile = str(args.fileout) + '.hmp.txt'
# # Open a handle for writing
handle = open(outfile, 'w')
# Empty list to store the initial TASSEL data
snp_info = []
# Read in the original fp input file to obtain snp positions
with open(args.tinput, 'r') as tin:
# Iterate over lines
for line in tin:
# Skip the first line
if line.startswith('rs#'):
continue
else:
# We need the SNP name, allele, chromosome, and position
tmp = line.strip().split('\t')
# Append this information
snp_info.append(tmp[0:4])
# List to store column names / sample names
columns = ['rs#', 'alleles', 'chrom', 'pos', 'strand', 'assembly#', 'center', 'protLSID', 'assayLSID', 'panelLSID', 'QCcode']
# Empty list to store matrix components
geno_matrix = []
# Read in the fastPHASE file
with open(args.filein, 'r') as fp:
# Create a switch to activate parsing the genotypic information
line_switch = False
# Iterate over lines
for line in fp:
if not line_switch:
# If the line doesn't start with "BEGIN GENOTYPES", skip it
if line.startswith("BEGIN GENOTYPES"):
# Flip the switch
line_switch = True
continue
else:
continue
# If the line switch is on, begin parsing
else:
# If the line begins with #, it is a sample name
if line.startswith("#"):
# Strip and split on spaces
tmp = line.strip().split(' ')
# The sample name is the second entry
name = tmp[1]
# Append it to the columns list
columns.append(name)
# The last line contains "END GENOTYPES." Skip it
elif line.startswith("END GENOTYPES"):
continue
# Otherwise the line is genotypic information
else:
# Strip and split on spaces
tmp = line.strip().split(' ')
# Append to the genotype matrix list
geno_matrix.append(tmp)
# Print the column names line
handle.write('\t'.join(columns) + '\n')
# The number of entries is one-half of the length of the geno_matrix (for diploid)
n_entries = len(geno_matrix) / 2
# The number of sites
n_sites = len(snp_info)
## Now lets play around with this new geno_matrix
# Iterate over the number of snps
for i in range(len(snp_info)):
# Empty toprint list
toprint = []
# Add the snp_info
toprint.extend(snp_info[i])
# Buffer with NAs
toprint.extend(['NA'] * 7)
# Collect the genotype information for the i-th snp across all samples
# Use slice notation to go every other entry
for j in range(len(geno_matrix))[::2]:
# Collect allele information
allele1 = geno_matrix[j][i]
allele2 = geno_matrix[j+1][i]
# If one of the alleles has a bracket, set to missing
if '[' in allele1 or '[' in allele2:
# If the flag is true, set to NN
if (args.filter):
geno = 'NN'
# Otherwise just remove the brackets
else:
allele1 = allele1.strip('[]')
allele2 = allele2.strip('[]')
geno = str(allele1) + str(allele2)
else:
# Convert to N if necessary
if allele1 == "?":
allele1 = "N"
			if allele2 == "?":
				allele2 = "N"
# Concatenate allele information
geno = str(allele1) + str(allele2)
# Extend the toprint list
toprint.append(geno)
# Write the list to the handle
# print '\t'.join(toprint)
handle.write('\t'.join(toprint) + '\n')
# Close the file
handle.close() | {
"content_hash": "3206e598f95a64b969e14d7097775f90",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 125,
"avg_line_length": 24.577319587628867,
"alnum_prop": 0.6273070469798657,
"repo_name": "neyhartj/bioinformatic-utils",
"id": "63afaee3c23be183e951de2da599c373fddd1fb0",
"size": "4791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fastPHASE2tassel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35926"
}
],
"symlink_target": ""
} |
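An example invocation of the converter above (file names are placeholders):

    python fastPHASE2tassel.py -i phased_hapguess_switch.out -t original_input.hmp.txt -f -o converted_genotypes

This writes "converted_genotypes.hmp.txt"; because -f/--filter is set, genotype calls that fastPHASE bracketed as below its probability threshold are written as missing (NN) instead of having their brackets stripped.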
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showlegend", parent_name="scattercarpet", **kwargs):
super(ShowlegendValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "info"),
**kwargs
)
| {
"content_hash": "dc867a7534fa965b35e78af655ce825c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 88,
"avg_line_length": 38.166666666666664,
"alnum_prop": 0.6222707423580786,
"repo_name": "plotly/python-api",
"id": "3f25c5ff2d431c3bee230e81b97b1f7d577af79a",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattercarpet/_showlegend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.append(os.path.join(os.path.dirname(__file__), '_flask_themes'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-Pushrod'
copyright = u'2012, Nullable'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pushrod'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'index_logo': None,
'github_fork': 'dontcare4free/Flask-Pushrod',
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_flask_themes', '_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Pushroddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Flask-Pushrod.tex', u'Flask-Pushrod Documentation',
u'Nullable', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flask-pushrod', u'Flask-Pushrod Documentation',
[u'Nullable'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Flask-Pushrod', u'Flask-Pushrod Documentation',
u'Nullable', 'Flask-Pushrod', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'flask': ('http://flask.pocoo.org/docs/', None),
'werkzeug': ('http://werkzeug.pocoo.org/docs/', None),
}
| {
"content_hash": "37f058188f9168b983dd476a8fffb28b",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 148,
"avg_line_length": 32.8724279835391,
"alnum_prop": 0.7001752628943415,
"repo_name": "teozkr/Flask-Pushrod",
"id": "003b1b0f0141b49801d4c84b786b816cbd821127",
"size": "8412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "42692"
}
],
"symlink_target": ""
} |
import deepchem as dc
import tempfile
import numpy as np
import os
def test_copy():
"""Test that copy works correctly."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
# legacy_dataset_reshard is a shared dataset in the legacy format kept
# around for testing resharding.
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
# Set cache to 0 size to avoid cache hiding errors
dataset.memory_cache_size = 0
with tempfile.TemporaryDirectory() as tmpdirname:
copy = dataset.copy(tmpdirname)
assert np.all(copy.X == dataset.X)
assert np.all(copy.y == dataset.y)
assert np.all(copy.w == dataset.w)
assert np.all(copy.ids == dataset.ids)
def test_move():
"""Test that move works correctly."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
# legacy_dataset_reshard is a shared dataset in the legacy format kept
# around for testing resharding.
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
# Set cache to 0 size to avoid cache hiding errors
dataset.memory_cache_size = 0
data_dir = dataset.data_dir
with tempfile.TemporaryDirectory() as tmpdirname:
dataset.move(tmpdirname, delete_if_exists=False)
assert np.all(X == dataset.X)
assert np.all(y == dataset.y)
assert np.all(w == dataset.w)
assert np.all(ids == dataset.ids)
assert dataset.data_dir == os.path.join(tmpdirname,
os.path.basename(data_dir))
| {
"content_hash": "27e89ccf16d883305c9f83144bb88586",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 72,
"avg_line_length": 33.78947368421053,
"alnum_prop": 0.6780893042575286,
"repo_name": "lilleswing/deepchem",
"id": "881019446108bc3ee5ea2e503c6cb2f0ad27c572",
"size": "1926",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "deepchem/data/tests/test_copy_and_move.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16453"
},
{
"name": "Dockerfile",
"bytes": "794"
},
{
"name": "HTML",
"bytes": "20618"
},
{
"name": "Jupyter Notebook",
"bytes": "59756"
},
{
"name": "Python",
"bytes": "2597968"
},
{
"name": "Shell",
"bytes": "11491"
}
],
"symlink_target": ""
} |