Search is not available for this dataset
text
stringlengths 75
104k
|
---|
def _parse_persons(self, datafield, subfield, roles=["aut"]):
    """
    Parse persons from given datafield.

    Args:
        datafield (str): code of datafield ("010", "730", etc..)
        subfield (char): code of subfield ("a", "z", "4", etc..)
        roles (list of str): set to ["any"] for any role, ["aut"] for
              authors, etc.. For details see
              http://www.loc.gov/marc/relators/relaterm.html

    Main records for persons are: "100", "600" and "700", subrecords "c".

    Returns:
        list: Person objects.
    """
    # parse authors
    parsed_persons = []
    raw_persons = self.get_subfields(datafield, subfield)
    for person in raw_persons:
        # check if person have at least one of the roles specified in
        # 'roles' parameter of function
        # NOTE(review): records with no "4" subfield are never filtered
        # out, whatever `roles` says -- confirm this is intended.
        other_subfields = person.other_subfields
        if "4" in other_subfields and roles != ["any"]:
            person_roles = other_subfields["4"]  # list of role parameters
            relevant = any(map(lambda role: role in roles, person_roles))
            # skip non-relevant persons
            if not relevant:
                continue
        # result of .strip() is string, so ind1/2 in MARCSubrecord are lost
        ind1 = person.i1
        ind2 = person.i2
        person = person.strip()
        name = ""
        second_name = ""
        surname = ""
        title = ""
        # here it gets nasty - there is lot of options in ind1/ind2
        # parameters
        if ind1 == "1" and ind2 == " ":
            # inverted form: "Surname, Name" (or space-separated fallback)
            if "," in person:
                surname, name = person.split(",", 1)
            elif " " in person:
                surname, name = person.split(" ", 1)
            else:
                surname = person
            if "c" in other_subfields:
                title = ",".join(other_subfields["c"])
        elif ind1 == "0" and ind2 == " ":
            # forename-only entry; "b" and "c" hold the remaining parts
            name = person.strip()
            if "b" in other_subfields:
                second_name = ",".join(other_subfields["b"])
            if "c" in other_subfields:
                surname = ",".join(other_subfields["c"])
        elif ind1 == "1" and ind2 == "0" or ind1 == "0" and ind2 == "0":
            name = person.strip()
            if "c" in other_subfields:
                title = ",".join(other_subfields["c"])
        parsed_persons.append(
            Person(
                name.strip(),
                second_name.strip(),
                surname.strip(),
                title.strip()
            )
        )
    return parsed_persons
def get_subname(self, undefined=""):
    """
    Read the book's subname (field ``245b``).

    Args:
        undefined (optional): Value returned when the `subname` record
            is not found.

    Returns:
        str: Subname of the book, or `undefined` when not found.
    """
    subname = "".join(self.get_subfields("245", "b"))
    return _undefined_pattern(subname, lambda val: not val.strip(), undefined)
def get_price(self, undefined=""):
    """
    Read the book's price (field ``020c``).

    Args:
        undefined (optional): Value returned when the `price` record
            is not found.

    Returns:
        str: Price of the book (with currency), or `undefined` when
            not found.
    """
    price = "".join(self.get_subfields("020", "c"))
    return _undefined_pattern(price, lambda val: not val.strip(), undefined)
def get_part(self, undefined=""):
    """
    Read which part of a series this record is (field ``245p``).

    Args:
        undefined (optional): Value returned when the `part` record
            is not found.

    Returns:
        str: Part of the book series, or `undefined` when not found.
    """
    part = "".join(self.get_subfields("245", "p"))
    return _undefined_pattern(part, lambda val: not val.strip(), undefined)
def get_part_name(self, undefined=""):
    """
    Read the name of this part of the series (field ``245n``).

    Args:
        undefined (optional): Value returned when the `part_name` record
            is not found.

    Returns:
        str: Name of the part of the series, or `undefined` when
            not found.
    """
    part_name = "".join(self.get_subfields("245", "n"))
    return _undefined_pattern(part_name, lambda val: not val.strip(), undefined)
def get_publisher(self, undefined=""):
    """
    Read the publisher's name (fields ``260b`` / ``264b``).

    Args:
        undefined (optional): Value returned when the `publisher` record
            is not found.

    Returns:
        str: Name of the publisher ("``Grada``" for example), or
            `undefined` when not found.
    """
    # deduplicate publishers after stripping decorating characters
    publishers = {
        remove_hairs_fn(publisher)
        for publisher in self["260b "] + self["264b"]
    }
    return _undefined_pattern(
        ", ".join(publishers),
        lambda val: not val.strip(),
        undefined,
    )
def get_pub_date(self, undefined=""):
    """
    Read the publication date (fields ``260c`` / ``264c``).

    Args:
        undefined (optional): Argument, which will be returned if the
            `pub_date` record is not found.

    Returns:
        str: Date of publication (month and year usually) or `undefined`
            if `pub_date` is not found.
    """
    def clean_date(date):
        """
        Clean the `date` string from special characters, but leave
        sequences of numbers followed by ``-``.

        So::

            [2015]- -> 2015
            2015-   -> 2015-
        """
        out = ""
        was_digit = False
        for c in date:
            if c.isdigit() or (c == "-" and was_digit) or c == " ":
                out += c
            was_digit = c.isdigit()
        return out

    # query the 260c/264c fields only once -- the original code computed
    # this list, threw it away, and recomputed it inside the comprehension
    dates = self["260c "] + self["264c"]

    # clean all the date strings, deduplicating the results
    cleaned_dates = set(clean_date(date) for date in dates)

    return _undefined_pattern(
        ", ".join(cleaned_dates),
        lambda x: x.strip() == "",
        undefined
    )
def get_pub_order(self, undefined=""):
    """
    Read the publication-order information (field ``901f``).

    Args:
        undefined (optional): Value returned when the `pub_order` record
            is not found.

    Returns:
        str: Information about the order in which the book was published,
            or `undefined` when not found.
    """
    pub_order = "".join(self.get_subfields("901", "f"))
    return _undefined_pattern(pub_order, lambda val: not val.strip(), undefined)
def get_pub_place(self, undefined=""):
    """
    Read the place of publication (fields ``260a`` / ``264a``).

    Args:
        undefined (optional): Value returned when the `pub_place` record
            is not found.

    Returns:
        str: Name of the city/country where the book was published, or
            `undefined` when not found.
    """
    # deduplicate places after stripping decorating characters
    places = {
        remove_hairs_fn(place)
        for place in self["260a "] + self["264a"]
    }
    return _undefined_pattern(
        ", ".join(places),
        lambda val: not val.strip(),
        undefined,
    )
def get_format(self, undefined=""):
    """
    Read the book's physical dimensions (field ``300c``).

    Args:
        undefined (optional): Value returned when the `format` record
            is not found.

    Returns:
        str: Dimensions of the book ('``23 cm``' for example), or
            `undefined` when not found.
    """
    dimensions = "".join(self.get_subfields("300", "c"))
    return _undefined_pattern(dimensions, lambda val: not val.strip(), undefined)
def get_authors(self):
    """
    Returns:
        list: Authors represented as :class:`.Person` objects.
    """
    # person names may appear in any of these datafields
    authors = []
    for datafield in ("100", "600", "700", "800"):
        authors.extend(self._parse_persons(datafield, "a"))
    return authors
def get_corporations(self, roles=["dst"]):
    """
    Args:
        roles (list, optional): Specify which types of corporations you
              need. Set to ``["any"]`` for any role, ``["dst"]`` for
              distributors, etc..

    Note:
        See http://www.loc.gov/marc/relators/relaterm.html for details.

    Returns:
        list: :class:`.Corporation` objects specified by roles parameter.
    """
    # corporation names may appear in any of these datafields
    corporations = []
    for datafield in ("110", "610", "710", "810"):
        corporations.extend(self._parse_corporations(datafield, "a", roles))
    return corporations
def get_ISBNs(self):
    """
    Get list of VALID ISBN.

    Returns:
        list: List with *valid* ISBN strings.
    """
    invalid_isbns = set(self.get_invalid_ISBNs())

    # normalize each candidate exactly once -- the original called
    # ._clean_isbn() twice per record (once to filter, once to collect)
    cleaned = (self._clean_isbn(isbn) for isbn in self["020a"])
    valid_isbns = [isbn for isbn in cleaned if isbn not in invalid_isbns]

    if valid_isbns:
        return valid_isbns

    # this is used sometimes in czech national library
    return [
        self._clean_isbn(isbn)
        for isbn in self["901i"]
    ]
def get_ISSNs(self):
    """
    Get list of VALID ISSNs (``022a``).

    Returns:
        list: List with *valid* ISSN strings.
    """
    invalid_issns = set(self.get_invalid_ISSNs())

    # normalize each candidate exactly once -- the original called
    # ._clean_isbn() twice per record (once to filter, once to collect)
    cleaned = (self._clean_isbn(issn) for issn in self["022a"])
    return [issn for issn in cleaned if issn not in invalid_issns]
def _filter_binding(self, binding):
    """
    Filter binding from ISBN record. In MARC XML / OAI, the binding
    information is stored in the same subrecord as the ISBN.

    Example:
        ``<subfield code="a">80-251-0225-4 (brož.) :</subfield>`` ->
        ``brož.``.
    """
    # drop the ISBN itself -- everything before the first space
    without_isbn = binding.strip().split(" ", 1)[-1]
    # strip decorating characters, then anything before a trailing colon
    cleaned = remove_hairs_fn(without_isbn)
    return cleaned.split(":")[-1].strip()
def get_urls(self):
    """
    Content of field ``856u42``. Typically URL pointing to producer's
    homepage.

    Returns:
        list: List of URLs defined by producer.
    """
    urls = self.get_subfields("856", "u", i1="4", i2="2")

    # un-escape XML-encoded ampersands; the original replace("&", "&")
    # was a no-op (the "&amp;" pattern had been mangled in transit).
    # A list comprehension also guarantees the documented list return
    # type (`map` is lazy on Python 3).
    return [url.replace("&amp;", "&") for url in urls]
def get_internal_urls(self):
    """
    URL's, which may point to edeposit, aleph, kramerius and so on.

    Fields ``856u40``, ``998a`` and ``URLu``.

    Returns:
        list: List of internal URLs.
    """
    internal_urls = self.get_subfields("856", "u", i1="4", i2="0")
    internal_urls.extend(self.get_subfields("998", "a"))
    internal_urls.extend(self.get_subfields("URL", "u"))

    # un-escape XML-encoded ampersands; the original replace("&", "&")
    # was a no-op (the "&amp;" pattern had been mangled in transit).
    # A list comprehension also guarantees the documented list return
    # type (`map` is lazy on Python 3).
    return [url.replace("&amp;", "&") for url in internal_urls]
def get_pub_type(self):
    """
    Derive the publication type from the record leader and 245 subfields.

    Returns:
        PublicationType: :class:`.PublicationType` enum **value**.
    """
    # positions in the leader string examined below
    INFO_CHAR_INDEX = 6
    SECOND_INFO_CHAR_I = 18
    # leader too short to hold the first info character -> default
    if not len(self.leader) >= INFO_CHAR_INDEX + 1:
        return PublicationType.monographic
    # "FMT" == "SE" marks serials -- presumably an Aleph convention; confirm
    if self.controlfields.get("FMT") == "SE":
        return PublicationType.continuing
    info_char = self.leader[INFO_CHAR_INDEX]
    # presence of 245n / 245p signals a multipart title
    multipart_n = self.get_subfields("245", "n", exception=False)
    multipart_p = self.get_subfields("245", "p", exception=False)
    if info_char in "acd":
        return PublicationType.monographic
    elif info_char in "bis":
        return PublicationType.continuing
    elif info_char == "m" and (multipart_n or multipart_p):
        return PublicationType.multipart_monograph
    elif info_char == "m" and len(self.leader) >= SECOND_INFO_CHAR_I + 1:
        # disambiguate via the second info character, when present
        if self.leader[SECOND_INFO_CHAR_I] == "a":
            return PublicationType.multipart_monograph
        elif self.leader[SECOND_INFO_CHAR_I] == " ":
            return PublicationType.single_unit
    # fall through: anything unrecognized is treated as monographic
    return PublicationType.monographic
def get(self, item, alt=None):
    """
    Standard dict-like .get() method.

    Args:
        item (str): See :meth:`.__getitem__` for details.
        alt (default None): Alternative value, if item is not found.

    Returns:
        obj: `item` or `alt`, if item is not found.
    """
    try:
        value = self[item]
    except ValueError:
        return alt

    if value is None:
        return alt
    return value
def pid(kp=0., ki=0., kd=0., smooth=0.1):
    r'''Create a callable that implements a PID controller.

    A PID controller returns a control signal :math:`u(t)` given a history of
    error measurements :math:`e(0) \dots e(t)`, using proportional (P),
    integral (I), and derivative (D) terms, according to:

    .. math::
        u(t) = kp * e(t) + ki * \int_{s=0}^t e(s) ds + kd * \frac{de(s)}{ds}(t)

    Parameters
    ----------
    kp : float
        Weight of the proportional term (the current error).
    ki : float
        Weight of the integral term (the running sum of errors).
    kd : float
        Weight of the derivative term (the instantaneous error slope).
    smooth : float in [0, 1]
        Derivative values are blended with this exponential average: 1 keeps
        only historic derivative information, 0.5 averages old and new, and 0
        uses only the newest derivative estimate. Defaults to 0.1.

    Returns
    -------
    controller : callable (float, float) -> float
        A function taking an error measurement and the delta-time since the
        previous measurement, returning a control signal.
    '''
    memory = dict(p=0, i=0, d=0)

    def control(error, dt=1):
        # exponentially smoothed finite-difference derivative of the error
        memory['d'] = smooth * memory['d'] + (1 - smooth) * (error - memory['p']) / dt
        # accumulate the integral of the error over time
        memory['i'] += error * dt
        # the proportional term is simply the latest error
        memory['p'] = error
        return kp * memory['p'] + ki * memory['i'] + kd * memory['d']

    return control
def as_flat_array(iterables):
    '''Given a sequence of sequences, return a flat numpy array.

    Parameters
    ----------
    iterables : sequence of sequence of number
        A sequence of tuples or lists containing numbers. Typically these come
        from something that represents each joint in a skeleton, like angle.

    Returns
    -------
    ndarray :
        An array of flattened data from each of the source iterables.
    '''
    # flatten one level of nesting, then hand the result to numpy
    return np.array([value for chunk in iterables for value in chunk])
def load(self, source, **kwargs):
    '''Load a skeleton definition from a file.

    Parameters
    ----------
    source : str or file
        A filename or file-like object that contains text information
        describing a skeleton. See :class:`pagoda.parser.Parser` for more
        information about the format of the text file.
    '''
    # only a string source can be dispatched by extension; file-like
    # objects fall through to the default skel loader
    is_asf_name = hasattr(source, 'endswith') and source.lower().endswith('.asf')
    if is_asf_name:
        self.load_asf(source, **kwargs)
    else:
        self.load_skel(source, **kwargs)
def load_skel(self, source, **kwargs):
    '''Load a skeleton definition from a text file.

    Parameters
    ----------
    source : str or file
        A filename or file-like object that contains text information
        describing a skeleton. See :class:`pagoda.parser.BodyParser` for
        more information about the format of the text file.
    '''
    logging.info('%s: parsing skeleton configuration', source)
    # accept either an open file-like object or a path to open ourselves
    if hasattr(source, 'read'):
        parsed = parser.parse(source, self.world, self.jointgroup, **kwargs)
    else:
        with open(source) as handle:
            parsed = parser.parse(handle, self.world, self.jointgroup, **kwargs)
    self.bodies = parsed.bodies
    self.joints = parsed.joints
    # default PID gains: almost-complete convergence in a single step
    self.set_pid_params(kp=0.999 / self.world.dt)
def load_asf(self, source, **kwargs):
    '''Load a skeleton definition from an ASF text file.

    Parameters
    ----------
    source : str or file
        A filename or file-like object that contains text information
        describing a skeleton, in ASF format.
    '''
    # accept either an open file-like object or a path to open ourselves
    if hasattr(source, 'read'):
        parsed = parser.parse_asf(source, self.world, self.jointgroup, **kwargs)
    else:
        with open(source) as handle:
            parsed = parser.parse_asf(handle, self.world, self.jointgroup, **kwargs)
    self.bodies = parsed.bodies
    self.joints = parsed.joints
    # default PID gains: almost-complete convergence in a single step
    self.set_pid_params(kp=0.999 / self.world.dt)
def set_pid_params(self, *args, **kwargs):
    '''Set PID parameters for all joints in the skeleton.

    Parameters for this method are passed directly to the `pid` constructor.
    '''
    for joint in self.joints:
        dof = joint.ADOF
        # clear any previous targets and give each DOF its own controller
        joint.target_angles = [None for _ in range(dof)]
        joint.controllers = [pid(*args, **kwargs) for _ in range(dof)]
def joint_torques(self):
    '''Get a list of all current joint torques in the skeleton.'''
    # torques come from each joint's angular motor feedback (or from the
    # joint itself when it has no separate amotor)
    torques = (getattr(joint, 'amotor', joint).feedback[-1][:joint.ADOF]
               for joint in self.joints)
    return as_flat_array(torques)
def indices_for_joint(self, name):
    '''Get a list of the indices for a specific joint.

    Parameters
    ----------
    name : str
        The name of the joint to look up.

    Returns
    -------
    list of int :
        A list of the index values for quantities related to the named
        joint. Often useful for getting, say, the angles for a specific
        joint in the skeleton.
    '''
    offset = 0
    for joint in self.joints:
        if joint.name == name:
            return list(range(offset, offset + joint.ADOF))
        offset += joint.ADOF
    # unknown joint name
    return []
def indices_for_body(self, name, step=3):
    '''Get a list of the indices for a specific body.

    Parameters
    ----------
    name : str
        The name of the body to look up.
    step : int, optional
        The number of numbers for each body. Defaults to 3, should be set
        to 4 for body rotation (since quaternions have 4 values).

    Returns
    -------
    list of int :
        A list of the index values for quantities related to the named body.
    '''
    for position, body in enumerate(self.bodies):
        if body.name == name:
            start = position * step
            return list(range(start, start + step))
    # unknown body name
    return []
def joint_distances(self):
    '''Get the current joint separations for the skeleton.

    Returns
    -------
    distances : list of float
        A list expressing the distance between the two joint anchor points,
        for each joint in the skeleton. These quantities describe how
        "exploded" the bodies in the skeleton are; a value of 0 indicates
        that the constraints are perfectly satisfied for that joint.
    '''
    distances = []
    for joint in self.joints:
        # squared separation between the two anchor points
        delta = np.array(joint.anchor) - joint.anchor2
        distances.append((delta ** 2).sum())
    return distances
def enable_motors(self, max_force):
    '''Enable the joint motors in this skeleton.

    This method sets the maximum force that can be applied by each joint to
    attain the desired target velocities. It also enables torque feedback
    for all joint motors.

    Parameters
    ----------
    max_force : float
        The maximum force that each joint is allowed to apply to attain its
        target velocity.
    '''
    want_feedback = max_force > 0
    for joint in self.joints:
        # the motor lives on .amotor when present, else on the joint itself
        motor = getattr(joint, 'amotor', joint)
        motor.max_forces = max_force
        if want_feedback:
            motor.enable_feedback()
        else:
            motor.disable_feedback()
def set_target_angles(self, angles):
    r'''Move each joint toward a target angle.

    This method uses a PID controller to set a target angular velocity for
    each degree of freedom in the skeleton, based on the difference between
    the current and the target angle for the respective DOF.

    PID parameters are by default set to achieve a tiny bit less than
    complete convergence in one time step, using only the P term (i.e., the
    P coefficient is set to :math:`1 - \delta`, while I and D coefficients
    are set to 0). PID parameters can be updated by calling the
    `set_pid_params` method.

    Parameters
    ----------
    angles : list of float
        A list of the target angles for every joint in the skeleton.
    '''
    offset = 0
    for joint in self.joints:
        dof = joint.ADOF
        targets = angles[offset:offset + dof]
        # one velocity per DOF, produced by that DOF's own controller
        joint.velocities = [
            controller(target - current, self.world.dt)
            for current, target, controller in
            zip(joint.angles, targets, joint.controllers)]
        offset += dof
def add_torques(self, torques):
    '''Add torques for each degree of freedom in the skeleton.

    Parameters
    ----------
    torques : list of float
        A list of the torques to add to each degree of freedom in the
        skeleton.
    '''
    offset = 0
    for joint in self.joints:
        dof = joint.ADOF
        # pad each joint's slice with zeros up to three axes
        padding = [0] * (3 - dof)
        joint.add_torques(list(torques[offset:offset + dof]) + padding)
        offset += dof
def labels(self):
    '''Return the names of our marker labels in canonical order.'''
    # order markers by their assigned channel index
    return sorted(self.channels, key=self.channels.get)
def load_csv(self, filename, start_frame=10, max_frames=int(1e300)):
    '''Load marker data from a CSV file.

    The file will be imported using Pandas, which must be installed to use
    this method. (``pip install pandas``)

    The first line of the CSV file will be used for header information. The
    "time" column will be used as the index for the data frame. There must
    be columns named 'markerAB-foo-x','markerAB-foo-y','markerAB-foo-z', and
    'markerAB-foo-c' for marker 'foo' to be included in the model.

    Parameters
    ----------
    filename : str
        Name of the CSV file to load.
    start_frame : int, optional
        Discard the first N frames of data. Defaults to 10.
    max_frames : int, optional
        NOTE(review): accepted for signature parity with :func:`load_c3d`
        but never used -- all frames after `start_frame` are loaded.
    '''
    import pandas as pd
    compression = None
    if filename.endswith('.gz'):
        compression = 'gzip'
    # missing samples become -1, which downstream code treats as a dropout
    df = pd.read_csv(filename, compression=compression).set_index('time').fillna(-1)
    # make sure the data frame's time index matches our world.
    assert self.world.dt == pd.Series(df.index).diff().mean()
    # marker names are taken from the confidence ("-c") column headers
    markers = []
    for c in df.columns:
        m = re.match(r'^marker\d\d-(.*)-c$', c)
        if m:
            markers.append(m.group(1))
    self.channels = self._map_labels_to_channels(markers)
    # keep one (x, y, z, c) quadruple per marker per frame
    cols = [c for c in df.columns if re.match(r'^marker\d\d-.*-[xyzc]$', c)]
    self.data = df[cols].values.reshape((len(df), len(markers), 4))[start_frame:]
    # swap the y and z columns -- presumably converting between coordinate
    # conventions; TODO confirm against the C3D loader.
    self.data[:, :, [1, 2]] = self.data[:, :, [2, 1]]
    logging.info('%s: loaded marker data %s', filename, self.data.shape)
    self.process_data()
    self.create_bodies()
def load_c3d(self, filename, start_frame=0, max_frames=int(1e300)):
    '''Load marker data from a C3D file.

    The file will be imported using the c3d module, which must be installed
    to use this method. (``pip install c3d``)

    Parameters
    ----------
    filename : str
        Name of the C3D file to load.
    start_frame : int, optional
        Discard the first N frames. Defaults to 0.
    max_frames : int, optional
        Maximum number of frames to load. Defaults to loading all frames.
        NOTE(review): the break happens after appending, so up to
        ``max_frames + 1`` frames may actually be kept.
    '''
    import c3d
    with open(filename, 'rb') as handle:
        reader = c3d.Reader(handle)
        logging.info('world frame rate %s, marker frame rate %s',
                     1 / self.world.dt, reader.point_rate)
        # set up a map from marker label to index in the data stream.
        self.channels = self._map_labels_to_channels([
            s.strip() for s in reader.point_labels])
        # read the actual c3d data into a numpy array.
        # columns 0-2 are x/y/z, column 4 holds the confidence value.
        data = []
        for i, (_, frame, _) in enumerate(reader.read_frames()):
            if i >= start_frame:
                data.append(frame[:, [0, 1, 2, 4]])
                if len(data) > max_frames:
                    break
        self.data = np.array(data)
        # scale the data to meters -- mm is a very common C3D unit.
        if reader.get('POINT:UNITS').string_value.strip().lower() == 'mm':
            logging.info('scaling point data from mm to m')
            self.data[:, :, :3] /= 1000.
    logging.info('%s: loaded marker data %s', filename, self.data.shape)
    self.process_data()
    self.create_bodies()
def process_data(self):
    '''Process data to produce velocity and dropout information.'''
    # column 3 of each marker sample is its visibility/confidence value
    self.visibility = self.data[:, :, 3]
    self.positions = self.data[:, :, :3]
    # velocities start at a large sentinel value (1000) meaning "unknown";
    # they are only overwritten where both neighbor frames are usable
    self.velocities = np.zeros_like(self.positions) + 1000
    for frame_no in range(1, len(self.data) - 1):
        prev = self.data[frame_no - 1]
        next = self.data[frame_no + 1]
        for c in range(self.num_markers):
            # NOTE(review): -1 marks a dropout; values >= 100 also appear
            # to be treated as unusable -- confirm the 100 threshold.
            if -1 < prev[c, 3] < 100 and -1 < next[c, 3] < 100:
                # central difference over the two neighboring frames
                self.velocities[frame_no, c] = (
                    next[c, :3] - prev[c, :3]) / (2 * self.world.dt)
    # per-frame, per-marker constraint-force-mixing values, all defaulted
    self.cfms = np.zeros_like(self.visibility) + self.DEFAULT_CFM
def create_bodies(self):
    '''Create physics bodies corresponding to each marker in our data.'''
    self.bodies = {}
    for label in self.channels:
        # small kinematic sphere rendered as a translucent red dot
        marker_body = self.world.create_body(
            'sphere', name='marker:{}'.format(label), radius=0.02)
        marker_body.is_kinematic = True
        marker_body.color = 0.9, 0.1, 0.1, 0.5
        self.bodies[label] = marker_body
def load_attachments(self, source, skeleton):
    '''Load attachment configuration from the given text source.

    The attachment configuration file has a simple format. After discarding
    Unix-style comments (any part of a line that starts with the pound (#)
    character), each line in the file is then expected to have the following
    format::

        marker-name body-name X Y Z

    The marker name must correspond to an existing "channel" in our marker
    data. The body name must correspond to a rigid body in the skeleton. The
    X, Y, and Z coordinates specify the body-relative offsets where the
    marker should be attached: 0 corresponds to the center of the body along
    the given axis, while -1 and 1 correspond to the minimal (maximal,
    respectively) extent of the body's bounding box along the corresponding
    dimension.

    Parameters
    ----------
    source : str or file-like
        A filename or file-like object that we can use to obtain text
        configuration that describes how markers are attached to skeleton
        bodies.
    skeleton : :class:`pagoda.skeleton.Skeleton`
        The skeleton to attach our marker data to.
    '''
    self.targets = {}
    self.offsets = {}

    filename = source
    opened = None
    if isinstance(source, str):
        # we opened this handle ourselves, so we must also close it
        # (the original leaked it)
        source = opened = open(source)
    else:
        filename = '(file-{})'.format(id(source))

    try:
        for i, line in enumerate(source):
            # strip comments and skip blank lines
            tokens = line.split('#')[0].strip().split()
            if not tokens:
                continue
            label = tokens.pop(0)
            if label not in self.channels:
                logging.info('%s:%d: unknown marker %s', filename, i, label)
                continue
            if not tokens:
                continue
            name = tokens.pop(0)
            bodies = [b for b in skeleton.bodies if b.name == name]
            if len(bodies) != 1:
                logging.info('%s:%d: %d skeleton bodies match %s',
                             filename, i, len(bodies), name)
                continue
            # scale the unit offsets by the body's half-extents
            b = self.targets[label] = bodies[0]
            o = self.offsets[label] = \
                np.array(list(map(float, tokens))) * b.dimensions / 2
            logging.info('%s <--> %s, offset %s', label, b.name, o)
    finally:
        if opened is not None:
            opened.close()
def attach(self, frame_no):
    '''Attach marker bodies to the corresponding skeleton bodies.

    Attachments are only made for markers that are not in a dropout state in
    the given frame.

    Parameters
    ----------
    frame_no : int
        The frame of data we will use for attaching marker bodies.
    '''
    # the caller must have detached all existing marker joints first
    assert not self.joints
    for label, j in self.channels.items():
        target = self.targets.get(label)
        if target is None:
            # this marker has no configured skeleton attachment
            continue
        if self.visibility[frame_no, j] < 0:
            # dropout: marker is not visible in this frame
            continue
        if np.linalg.norm(self.velocities[frame_no, j]) > 10:
            # implausibly fast marker -- presumably noise; skip it
            continue
        joint = ode.BallJoint(self.world.ode_world, self.jointgroup)
        joint.attach(self.bodies[label].ode_body, target.ode_body)
        # anchor at the marker body's center and at the configured
        # offset on the skeleton body
        joint.setAnchor1Rel([0, 0, 0])
        joint.setAnchor2Rel(self.offsets[label])
        # soft constraint: per-frame CFM from the data, ERP from our setting
        joint.setParam(ode.ParamCFM, self.cfms[frame_no, j])
        joint.setParam(ode.ParamERP, self.erp)
        joint.name = label
        self.joints[label] = joint
    # remember which frame these attachments correspond to
    self._frame_no = frame_no
def reposition(self, frame_no):
    '''Reposition markers to a specific frame of data.

    Parameters
    ----------
    frame_no : int
        The frame of data where we should reposition marker bodies. Markers
        will be positioned in the appropriate places in world coordinates.
        In addition, linear velocities of the markers will be set according
        to the data as long as there are no dropouts in neighboring frames.
    '''
    for label, channel in self.channels.items():
        marker = self.bodies[label]
        marker.position = self.positions[frame_no, channel]
        marker.linear_velocity = self.velocities[frame_no, channel]
def distances(self):
    '''Get a list of the distances between markers and their attachments.

    Returns
    -------
    distances : ndarray of shape (num-markers, 3)
        Array of distances for each marker joint in our attachment setup. If
        a marker does not currently have an associated joint (e.g. because
        it is not currently visible) this will contain NaN for that row.
    '''
    distances = []
    # NOTE(review): `self.labels` is iterated without being called, so it
    # is presumably a property elsewhere in this class -- confirm.
    for label in self.labels:
        joint = self.joints.get(label)
        # per-axis anchor separation, or a NaN row when unattached
        distances.append([np.nan, np.nan, np.nan] if joint is None else
                         np.array(joint.getAnchor()) - joint.getAnchor2())
    return np.array(distances)
def forces(self, dx_tm1=None):
    '''Return an array of the forces exerted by marker springs.

    Notes
    -----
    The forces exerted by the marker springs can be approximated by::

        F = kp * dx

    where ``dx`` is the current array of marker distances. An even more
    accurate value is computed by approximating the velocity of the spring
    displacement::

        F = kp * dx + kd * (dx - dx_tm1) / dt

    where ``dx_tm1`` is an array of distances from the previous time step.

    Parameters
    ----------
    dx_tm1 : ndarray
        An array of distances from markers to their attachment targets,
        measured at the previous time step.

    Returns
    -------
    F : ndarray
        An array of forces that the markers are exerting on the skeleton.
    '''
    # per-marker CFM for the frame we last attached at, as a column vector
    cfm = self.cfms[self._frame_no][:, None]
    # spring/damper gains implied by the ERP/CFM soft-constraint settings
    kp = self.erp / (cfm * self.world.dt)
    kd = (1 - self.erp) / cfm
    dx = self.distances()
    # proportional part of the force
    F = kp * dx
    if dx_tm1 is not None:
        # add the damping term only where both measurements are valid
        bad = np.isnan(dx) | np.isnan(dx_tm1)
        F[~bad] += (kd * (dx - dx_tm1) / self.world.dt)[~bad]
    return F
def load_skeleton(self, filename, pid_params=None):
    '''Create and configure a skeleton in our model.

    Parameters
    ----------
    filename : str
        The name of a file containing skeleton configuration data.
    pid_params : dict, optional
        If given, use this dictionary to set the PID controller
        parameters on each joint in the skeleton. See
        :func:`pagoda.skeleton.pid` for more information.
    '''
    self.skeleton = skeleton.Skeleton(self)
    skel = self.skeleton
    skel.load(filename, color=(0.3, 0.5, 0.9, 0.8))
    if pid_params:
        skel.set_pid_params(**pid_params)
    # soft-constraint defaults for the skeleton's joints
    skel.erp = 0.1
    skel.cfm = 0
def load_markers(self, filename, attachments, max_frames=1e100):
    '''Load marker data and attachment preferences into the model.

    Parameters
    ----------
    filename : str
        The name of a file containing marker data. This currently needs to
        be either a .C3D or a .CSV file. CSV files must adhere to a fairly
        strict column naming convention; see :func:`Markers.load_csv` for
        more information.
    attachments : str
        The name of a text file specifying how markers are attached to
        skeleton bodies.
    max_frames : number, optional
        Only read in this many frames of marker data. By default, the entire
        data file is read into memory.

    Returns
    -------
    markers : :class:`Markers`
        Returns a markers object containing loaded marker data as well as
        skeleton attachment configuration.
    '''
    self.markers = Markers(self)
    # dispatch on the (case-insensitive) file extension
    lowered = filename.lower()
    if lowered.endswith('.c3d'):
        self.markers.load_c3d(filename, max_frames=max_frames)
    elif lowered.endswith(('.csv', '.csv.gz')):
        self.markers.load_csv(filename, max_frames=max_frames)
    else:
        logging.fatal('%s: not sure how to load markers!', filename)
    self.markers.load_attachments(attachments, self.skeleton)
def step(self, substeps=2):
    '''Advance the physics world by one step.

    Typically this is called as part of a :class:`pagoda.viewer.Viewer`, but
    it can also be called manually (or some other stepping mechanism
    entirely can be used).

    Parameters
    ----------
    substeps : int, optional
        Unused here; kept for interface compatibility with callers that
        pass it. Defaults to 2.
    '''
    # by default we step by following our loaded marker data.
    self.frame_no += 1
    try:
        next(self.follower)
    except (AttributeError, StopIteration):
        # no follower generator yet, or marker data exhausted: start over.
        # (the original bound the exception to an unused `err` variable)
        self.reset()
def settle_to_markers(self, frame_no=0, max_distance=0.05, max_iters=300,
                      states=None):
    '''Settle the skeleton to our marker data at a specific frame.

    Parameters
    ----------
    frame_no : int, optional
        Settle the skeleton to marker data at this frame. Defaults to 0.
    max_distance : float, optional
        The settling process will stop when the mean marker distance falls
        below this threshold. Defaults to 0.05m (5cm). Setting this too
        small prevents the settling process from finishing (it will loop
        indefinitely), and setting it too large prevents the skeleton from
        settling to a stable state near the markers.
    max_iters : int, optional
        Attempt to settle markers for at most this many iterations. Defaults
        to 300.
    states : list of body states, optional
        If given, set the bodies in our skeleton to these kinematic states
        before starting the settling process.

    Returns
    -------
    list of body states or the `states` argument:
        The skeleton's body states once the distance threshold is reached,
        or the unchanged `states` argument if `max_iters` ran out first.
    '''
    if states is not None:
        self.skeleton.set_body_states(states)
    dist = None
    for _ in range(max_iters):
        # step the simulation one marker frame (the generator must be
        # exhausted for the step to take effect)
        for _ in self._step_to_marker_frame(frame_no):
            pass
        # mean distance over attached markers only (NaNs are unattached)
        dist = np.nanmean(abs(self.markers.distances()))
        logging.info('settling to frame %d: marker distance %.3f', frame_no, dist)
        if dist < max_distance:
            return self.skeleton.get_body_states()
        # zero out velocities so the next iteration starts at rest
        for b in self.skeleton.bodies:
            b.linear_velocity = 0, 0, 0
            b.angular_velocity = 0, 0, 0
    return states
def follow_markers(self, start=0, end=1e100, states=None):
    '''Iterate over a set of marker data, dragging its skeleton along.

    Parameters
    ----------
    start : int, optional
        Start following marker data after this frame. Defaults to 0.
    end : int, optional
        Stop following marker data after this frame. Defaults to the end of
        the marker data.
    states : list of body states, optional
        If given, set the states of the skeleton bodies to these values
        before starting to follow the marker data.
    '''
    if states is not None:
        self.skeleton.set_body_states(states)
    for frame_no, _frame in enumerate(self.markers):
        # skip frames before the requested window, stop after it
        if frame_no < start:
            continue
        if frame_no >= end:
            break
        for body_states in self._step_to_marker_frame(frame_no):
            yield body_states
def _step_to_marker_frame(self, frame_no, dt=None):
    '''Update the simulator to a specific frame of marker data.

    This method returns a generator of body states for the skeleton! This
    generator must be exhausted (e.g., by consuming this call in a for loop)
    for the simulator to work properly.

    This process involves the following steps:

    - Move the markers to their new location:

      - Detach from the skeleton
      - Update marker locations
      - Reattach to the skeleton

    - Detect ODE collisions
    - Yield the states of the bodies in the skeleton
    - Advance the ODE world one step

    Parameters
    ----------
    frame_no : int
        Step to this frame of marker data.
    dt : float, optional
        Step with this time duration. Defaults to ``self.dt``.

    Returns
    -------
    states : sequence of state tuples
        A generator of a sequence of one body state for the skeleton. This
        generator must be exhausted for the simulation to work properly.
    '''
    # update the positions and velocities of the markers.
    self.markers.detach()
    self.markers.reposition(frame_no)
    self.markers.attach(frame_no)
    # detect collisions.
    self.ode_space.collide(None, self.on_collision)
    # record the state of each skeleton body.
    states = self.skeleton.get_body_states()
    self.skeleton.set_body_states(states)
    # yield the current simulation state to our caller; the caller may
    # mutate the world before we resume and advance it below.
    yield states
    # update the ode world.
    self.ode_world.step(dt or self.dt)
    # clear out contact joints to prepare for the next frame.
    self.ode_contactgroup.empty()
def inverse_kinematics(self, start=0, end=1e100, states=None, max_force=20):
    '''Follow a set of marker data, yielding kinematic joint angles.

    Parameters
    ----------
    start : int, optional
        Start following marker data after this frame. Defaults to 0.
    end : int, optional
        Stop following marker data after this frame. Defaults to the end of
        the marker data.
    states : list of body states, optional
        If given, set the states of the skeleton bodies to these values
        before starting to follow the marker data.
    max_force : float, optional
        Allow each degree of freedom in the skeleton to exert at most this
        force when attempting to maintain its equilibrium position. This
        defaults to 20N. Set this value higher to simulate a stiff skeleton
        while following marker data.

    Returns
    -------
    angles : sequence of angle frames
        Returns a generator of joint angle data for the skeleton. One set of
        joint angles will be generated for each frame of marker data between
        `start` and `end`.
    '''
    # with motors enabled, drive every DOF toward a zero target angle
    target = None
    if max_force > 0:
        self.skeleton.enable_motors(max_force)
        target = np.zeros(self.skeleton.num_dofs)
    for _ in self.follow_markers(start, end, states):
        if target is not None:
            self.skeleton.set_target_angles(target)
        yield self.skeleton.joint_angles
def inverse_dynamics(self, angles, start=0, end=1e100, states=None, max_force=100):
    '''Follow a set of angle data, yielding dynamic joint torques.

    Parameters
    ----------
    angles : ndarray (num-frames x num-dofs)
        Follow angle data provided by this array of angle values.
    start : int, optional
        Start following angle data after this frame. Defaults to the start
        of the angle data.
    end : int, optional
        Stop following angle data after this frame. Defaults to the end of
        the angle data.
    states : list of body states, optional
        If given, set the states of the skeleton bodies to these values
        before starting to follow the marker data.
    max_force : float, optional
        Allow each degree of freedom in the skeleton to exert at most this
        force when attempting to follow the given joint angles. Defaults to
        100N. Setting this value to be large results in more accurate
        following but can cause oscillations in the PID controllers,
        resulting in noisy torques.

    Returns
    -------
    torques : sequence of torque frames
        Returns a generator of joint torque data for the skeleton. One set
        of joint torques will be generated for each frame of angle data
        between `start` and `end`.
    '''
    if states is not None:
        self.skeleton.set_body_states(states)
    for frame_no, frame in enumerate(angles):
        if frame_no < start:
            continue
        if frame_no >= end:
            break
        # handle collisions before stepping, as in the other loops.
        self.ode_space.collide(None, self.on_collision)
        # snapshot body states so the "probe" step below can be undone.
        states = self.skeleton.get_body_states()
        self.skeleton.set_body_states(states)
        # joseph's stability fix: step to compute torques, then reset the
        # skeleton to the start of the step, and then step using computed
        # torques. thus any numerical errors between the body states after
        # stepping using angle constraints will be removed, because we
        # will be stepping the model using the computed torques.
        self.skeleton.enable_motors(max_force)
        self.skeleton.set_target_angles(angles[frame_no])
        self.ode_world.step(self.dt)
        torques = self.skeleton.joint_torques
        self.skeleton.disable_motors()
        self.skeleton.set_body_states(states)
        self.skeleton.add_torques(torques)
        # give the caller the torques before the "real" step is taken.
        yield torques
        self.ode_world.step(self.dt)
        self.ode_contactgroup.empty()
def forward_dynamics(self, torques, start=0, states=None, end=1e100):
    '''Move the body according to a set of torque data.

    Parameters
    ----------
    torques : sequence of torque frames
        One frame of joint torques to apply per simulation step.
    start : int, optional
        Skip torque frames before this index. Defaults to 0.
    states : list of body states, optional
        If given, set the skeleton bodies to these states before stepping.
    end : int, optional
        Stop after this frame. Defaults to 1e100 (i.e. all frames).
        NOTE: the previous version compared against an *undefined* ``end``
        name, raising NameError on the first processed frame; it is now an
        explicit keyword argument (appended last to keep positional
        arguments backward compatible).
    '''
    if states is not None:
        self.skeleton.set_body_states(states)
    for frame_no, torque in enumerate(torques):
        if frame_no < start:
            continue
        if frame_no >= end:
            break
        self.ode_space.collide(None, self.on_collision)
        self.skeleton.add_torques(torque)
        self.ode_world.step(self.dt)
        # yield control so the caller can observe/record each step.
        yield
        self.ode_contactgroup.empty()
def resorted(values):
    """
    Sort `values` alphabetically, but place entries that start with a digit
    after the alphabetically sorted words.

    This function is here to make outputs diff-compatible with Aleph.

    Example::

        >>> sorted(["b", "1", "a"])
        ['1', 'a', 'b']
        >>> resorted(["b", "1", "a"])
        ['a', 'b', '1']

    Args:
        values (iterable): any iterable object/list/tuple/whatever.

    Returns:
        list of sorted values, but with numbers after words
    """
    if not values:
        return values

    def _is_word(item):
        # same test as the original: non-empty and not digit-leading
        return bool(item) and not item[0].isdigit()

    ordered = sorted(values)
    split_at = None
    for idx, item in enumerate(ordered):
        if _is_word(item):
            split_at = idx
            break
    # no word found: everything is "numeric", keep plain sorted order
    if split_at is None:
        return ordered
    # rotate: words first, digit-leading entries afterwards
    return ordered[split_at:] + ordered[:split_at]
def render(self, dt):
    '''Draw all bodies in the world.'''
    # frozen snapshots first, then the live bodies.
    for frame in self._frozen:
        for snapshot in frame:
            self.draw_body(snapshot)
    for body in self.world.bodies:
        self.draw_body(body)
    if not hasattr(self.world, 'markers'):
        return
    # draw a line between anchor1 and anchor2 for each marker joint.
    window.glColor4f(0.9, 0.1, 0.1, 0.9)
    window.glLineWidth(3)
    for joint in self.world.markers.joints.values():
        window.glBegin(window.GL_LINES)
        window.glVertex3f(*joint.getAnchor())
        window.glVertex3f(*joint.getAnchor2())
        window.glEnd()
def get_stream(self, error_callback=None, live=True):
    """ Join the room and return a stream over its messages.

    Kwargs:
        error_callback (func): Callback to call when an error occurred (parameters: exception)
        live (bool): If True, issue a live stream, otherwise an offline stream

    Returns:
        :class:`Stream`. Stream
    """
    # joining is required before the server will stream room events.
    self.join()
    stream = Stream(self, error_callback=error_callback, live=live)
    return stream
def get_users(self, sort=True):
    """ Return the list of users currently in the room.

    Kwargs:
        sort (bool): If True, sort users by name

    Returns:
        array. List of users
    """
    # refresh room data before reading the user list.
    self._load()
    users = self.users
    if sort:
        users.sort(key=operator.itemgetter("name"))
    return users
def recent(self, message_id=None, limit=None):
    """ Fetch recent messages from the room.

    Kwargs:
        message_id (int): If specified, return messages since the specified message ID
        limit (int): If specified, limit the number of messages

    Returns:
        array. Messages
    """
    params = {}
    if message_id:
        params["since_message_id"] = message_id
    if limit:
        params["limit"] = limit
    raw = self._connection.get("room/%s/recent" % self.id, key="messages", parameters=params)
    if not raw:
        # pass through whatever falsy value the connection returned
        return raw
    return [Message(self._campfire, entry) for entry in raw]
def set_name(self, name):
    """ Rename the room (only admins may do this).

    Args:
        name (str): Name

    Returns:
        bool. Success
    """
    user = self._campfire.get_user()
    if not user.admin:
        return False
    result = self._connection.put("room/%s" % self.id, {"room": {"name": name}})
    success = result["success"]
    if success:
        # refresh cached room data after the rename.
        self._load()
    return success
def set_topic(self, topic):
    """ Change the room topic.

    Args:
        topic (str): Topic (a falsy value clears the topic)

    Returns:
        bool. Success
    """
    payload = {"room": {"topic": topic or ''}}
    result = self._connection.put("room/%s" % self.id, payload)
    success = result["success"]
    if success:
        self._load()
    return success
def speak(self, message):
    """ Post a message to the room.

    Args:
        message (:class:`Message` or string): Message

    Returns:
        :class:`Message` on success, otherwise bool.
    """
    campfire = self.get_campfire()
    msg = message if isinstance(message, Message) else Message(campfire, message)
    result = self._connection.post(
        "room/%s/speak" % self.id,
        {"message": msg.get_data()},
        parse_data=True,
        key="message"
    )
    if not result["success"]:
        return result["success"]
    # wrap the server's echo of the message in a Message object.
    return Message(campfire, result["data"])
def transcript(self, for_date=None):
    """ Fetch the room transcript.

    Kwargs:
        for_date (date): If specified, get the transcript for this specific date

    Returns:
        array. Messages
    """
    url = "room/%s/transcript" % self.id
    if for_date:
        url += "/%d/%d/%d" % (for_date.year, for_date.month, for_date.day)
    raw = self._connection.get(url, key="messages")
    if raw:
        raw = [Message(self._campfire, entry) for entry in raw]
    return raw
def upload(self, path, progress_callback=None, finished_callback=None, error_callback=None):
    """ Create a new thread to upload a file (the returned thread must be
    started with start() to actually perform the upload).

    Args:
        path (str): Path to file

    Kwargs:
        progress_callback (func): Callback to call as file is uploaded (parameters: current, total)
        finished_callback (func): Callback to call when upload is finished
        error_callback (func): Callback to call when an error occurred (parameters: exception)

    Returns:
        :class:`Upload`. Upload thread
    """
    callbacks = {
        "progress_callback": progress_callback,
        "finished_callback": finished_callback,
        "error_callback": error_callback,
    }
    return Upload(self, {"upload": path}, **callbacks)
def get_new_call(group_name, app_name, search_path, filename, require_load,
                 version, secure):
    # type: (str, str, Optional[str], str, bool, Optional[str], bool) -> str
    '''
    Build a call to use the new ``get_config`` function from args passed to
    ``Config.__init__``.
    '''
    call_kwargs = {
        'group_name': group_name,
        'filename': filename,
    }  # type: Dict[str, Any]
    # collect the optional lookup settings; 'secure' is always recorded.
    lookup_options = {'secure': secure}  # type: Dict[str, Any]
    if search_path:
        lookup_options['search_path'] = search_path
    if require_load:
        lookup_options['require_load'] = require_load
    if version:
        lookup_options['version'] = version
    if lookup_options:
        call_kwargs['lookup_options'] = lookup_options
    return build_call_str('get_config', (app_name,), call_kwargs)
def build_call_str(prefix, args, kwargs):
    # type: (str, Any, Any) -> str
    '''
    Render a callable Python expression for a function call, combined
    like this template::

        <prefix>(<args>, <kwargs>)

    Example::

        >>> build_call_str('foo', (1, 2), {'a': '10'})
        "foo(1, 2, a='10')"
    '''
    # positional arguments first, keyword arguments afterwards
    parts = [repr(value) for value in args]
    parts.extend('%s=%r' % (key, value) for key, value in kwargs.items())
    return '%s(%s)' % (prefix, ', '.join(parts))
def get_xdg_dirs(self):
    # type: () -> List[str]
    """
    Returns a list of paths specified by the XDG_CONFIG_DIRS environment
    variable or the appropriate default.

    The list is sorted by precedence, with the most important item coming
    *last* (required by the existing config_resolver logic).
    """
    config_dirs = getenv('XDG_CONFIG_DIRS', '')
    if not config_dirs:
        # variable unset/empty: fall back to the XDG default location.
        return ['/etc/xdg/%s/%s' % (self.group_name, self.app_name)]
    self._log.debug('XDG_CONFIG_DIRS is set to %r', config_dirs)
    # reverse so the most important directory ends up last.
    return [join(path, self.group_name, self.app_name)
            for path in reversed(config_dirs.split(':'))]
def get_xdg_home(self):
    # type: () -> str
    """
    Returns the value specified in the XDG_CONFIG_HOME environment variable
    or the appropriate default (``~/.config/<group>/<app>``).
    """
    config_home = getenv('XDG_CONFIG_HOME', '')
    if not config_home:
        return expanduser('~/.config/%s/%s' % (self.group_name, self.app_name))
    self._log.debug('XDG_CONFIG_HOME is set to %r', config_home)
    return expanduser(join(config_home, self.group_name, self.app_name))
def _effective_filename(self):
# type: () -> str
"""
Returns the filename which is effectively used by the application. If
overridden by an environment variable, it will return that filename.
"""
# same logic for the configuration filename. First, check if we were
# initialized with a filename...
config_filename = ''
if self.filename:
config_filename = self.filename
# ... next, take the value from the environment
env_filename = getenv(self.env_filename_name)
if env_filename:
self._log.info('Configuration filename was overridden with %r '
'by the environment variable %s.',
env_filename,
self.env_filename_name)
config_filename = env_filename
return config_filename |
def _effective_path(self):
    # type: () -> List[str]
    """
    Returns a list of paths to search for config files in reverse order of
    precedence. In other words: the last path element will override the
    settings from the first one.
    """
    # default search path: system-wide, XDG dirs, legacy per-user,
    # XDG home, then the current working directory.
    path = (['/etc/%s/%s' % (self.group_name, self.app_name)] +
            self.get_xdg_dirs() +
            [expanduser('~/.%s/%s' % (self.group_name, self.app_name)),
             self.get_xdg_home(),
             join(getcwd(), '.{}'.format(self.group_name), self.app_name)])
    # a search path passed directly to the instance replaces the default.
    if self.search_path:
        path = self.search_path.split(pathsep)
    # the environment variable either extends ('+' prefix) or replaces it.
    env_path = getenv(self.env_path_name)
    if env_path:
        if env_path.startswith('+'):
            additional_paths = env_path[1:].split(pathsep)
            self._log.info('Search path extended with %r by the environment '
                           'variable %s.',
                           additional_paths,
                           self.env_path_name)
            path.extend(additional_paths)
        else:
            # absolute override: takes precedence over everything.
            self._log.info("Configuration search path was overridden with "
                           "%r by the environment variable %r.",
                           env_path,
                           self.env_path_name)
            path = env_path.split(pathsep)
    return path
def check_file(self, filename):
    # type: (str) -> bool
    """
    Check if ``filename`` can be read. Will return boolean which is True if
    the file can be read, False otherwise.

    Beyond existence this also enforces version compatibility: a missing
    in-file version (when one is expected) raises ``NoVersionError``, a
    mismatching major version rejects the file, and a mismatching minor
    version is accepted with a warning.
    """
    if not exists(filename):
        return False
    # Check if the file is version-compatible with this instance.
    new_config = ConfigResolverBase()
    new_config.read(filename)
    if self.version and not new_config.has_option('meta', 'version'):
        # self.version is set, so we MUST have a version in the file!
        raise NoVersionError(
            "The config option 'meta.version' is missing in {}. The "
            "application expects version {}!".format(filename,
                                                     self.version))
    elif not self.version and new_config.has_option('meta', 'version'):
        # Automatically "lock-in" a version number if one is found.
        # This prevents loading a chain of config files with incompatible
        # version numbers!
        self.version = StrictVersion(new_config.get('meta', 'version'))
        self._log.info('%r contains a version number, but the config '
                       'instance was not created with a version '
                       'restriction. Will set version number to "%s" to '
                       'prevent accidents!',
                       filename, self.version)
    elif self.version:
        # This instance expected a certain version. We need to check the
        # version in the file and compare.
        file_version = new_config.get('meta', 'version')
        major, minor, _ = StrictVersion(file_version).version
        expected_major, expected_minor, _ = self.version.version
        # a differing major version is fatal for this file ...
        if expected_major != major:
            self._log.error(
                'Invalid major version number in %r. Expected %r, got %r!',
                abspath(filename),
                str(self.version),
                file_version)
            return False
        # ... a differing minor version only warrants a warning.
        if expected_minor != minor:
            self._log.warning(
                'Mismatching minor version number in %r. '
                'Expected %r, got %r!',
                abspath(filename),
                str(self.version),
                file_version)
            return True
    return True
def get(self, section, option, **kwargs):  # type: ignore
    # type: (str, str, Any) -> Any
    """
    Overrides :py:meth:`configparser.ConfigParser.get`.

    In addition to ``section`` and ``option``, this call takes an optional
    ``default`` value. This behaviour works in *addition* to the
    :py:class:`configparser.ConfigParser` default mechanism. Note that
    a default value from ``ConfigParser`` takes precedence.

    The reason this additional functionality is added, is because the
    defaults of :py:class:`configparser.ConfigParser` are not dependent
    on sections. Using the default on the ``get`` call gives you
    fine-grained, per-(section, option) control over the fallback value.

    Also note, that if a default value was used, it will be logged with
    level ``logging.DEBUG``.

    :param section: The config file section.
    :param option: The option name.
    :param kwargs: These keyword args are passed through to
                   :py:meth:`configparser.ConfigParser.get`.
    """
    have_default = "default" in kwargs
    if have_default:
        default = kwargs.pop("default")
        new_kwargs = {'fallback': default}
        new_kwargs.update(kwargs)
        new_call = build_call_str('.get', (section, option), new_kwargs)
        warn('Using the "default" argument to Config.get() will no '
             'longer work in config_resolver 5.0! Version 5 will return '
             'standard Python ConfigParser instances which use "fallback" '
             'instead of "default". Replace your code with "%s"' % new_call,
             DeprecationWarning,
             stacklevel=2)
    try:
        return super(Config, self).get(section, option, **kwargs)
    except (NoSectionError, NoOptionError) as exc:
        if not have_default:
            raise
        self._log.debug("%s: Returning default value %r", exc, default)
        return default
def load(self, reload=False, require_load=False):
    # type: (bool, bool) -> None
    """
    Searches for an appropriate config file. If found, loads the file into
    the current instance. This method can also be used to reload a
    configuration. Note that you may want to set ``reload`` to ``True`` to
    clear the configuration before loading in that case. Without doing
    that, values will remain available even if they have been removed from
    the config files.

    :param reload: if set to ``True``, the existing values are cleared
                   before reloading.
    :param require_load: If set to ``True`` this will raise a
                         :py:exc:`IOError` if no config file has been found
                         to load.
    """
    if reload:  # pragma: no cover
        self.config = None
    # only load the config if necessary (or explicitly requested)
    if self.config:  # pragma: no cover
        self._log.debug('Returning cached config instance. Use '
                        '``reload=True`` to avoid caching!')
        return
    path = self._effective_path()
    config_filename = self._effective_filename()
    # Next, use the resolved path to find the filenames. Keep track of
    # which files we loaded in order to inform the user.
    self._active_path = [join(_, config_filename) for _ in path]
    for dirname in path:
        conf_name = join(dirname, config_filename)
        readable = self.check_file(conf_name)
        if readable:
            action = 'Updating' if self._loaded_files else 'Loading initial'
            self._log.info('%s config from %s', action, conf_name)
            self.read(conf_name)
            # warn when the legacy (pre-XDG) per-user location was used.
            if conf_name == expanduser("~/.%s/%s/%s" % (
                    self.group_name, self.app_name, self.filename)):
                self._log.warning(
                    "DEPRECATION WARNING: The file "
                    "'%s/.%s/%s/app.ini' was loaded. The XDG "
                    "Basedir standard requires this file to be in "
                    "'%s/.config/%s/%s/app.ini'! This location "
                    "will no longer be parsed in a future version of "
                    "config_resolver! You can already (and should) move "
                    "the file!", expanduser("~"), self.group_name,
                    self.app_name, expanduser("~"), self.group_name,
                    self.app_name)
            self._loaded_files.append(conf_name)
    if not self._loaded_files and not require_load:
        self._log.warning(
            "No config file named %s found! Search path was %r",
            config_filename,
            path)
    elif not self._loaded_files and require_load:
        raise IOError("No config file named %s found! Search path "
                      "was %r" % (config_filename, path))
def check_file(self, filename):
    # type: (str) -> bool
    """
    Overrides :py:meth:`.Config.check_file`

    In addition to the base checks, reject any file that is readable by
    group or others (i.e. anything looser than mode 600).
    """
    if not super(SecuredConfig, self).check_file(filename):
        return False
    mode = get_stat(filename).st_mode
    if mode & (stat.S_IRGRP | stat.S_IROTH):
        msg = "File %r is not secure enough. Change it's mode to 600"
        self._log.warning(msg, filename)
        return False
    return True
def setup_environ(self):
    """Seed the base CGI environment shared by all requests.

    See https://www.python.org/dev/peps/pep-0333/#environ-variables
    """
    env = self.base_environ = {}
    env.update({
        'SERVER_NAME': self.server_name,
        'GATEWAY_INTERFACE': 'CGI/1.1',
        'SERVER_PORT': str(self.server_port),
        'REMOTE_HOST': '',
        'CONTENT_LENGTH': '',
        'SCRIPT_NAME': '',
    })
def get_environ(self):
    """Build the per-request WSGI environ dict.

    See https://www.python.org/dev/peps/pep-0333/#environ-variables
    """
    env = self.base_environ.copy()
    # split the raw request path into path and query string.
    if '?' in self.path:
        path, query = self.path.split('?', 1)
    else:
        path, query = self.path, ''
    env.update({
        'REQUEST_METHOD': self.request_method,
        'PATH_INFO': urllib.parse.unquote(path),
        'QUERY_STRING': query,
        'CONTENT_TYPE': self.headers.get('Content-Type', ''),
        'CONTENT_LENGTH': self.headers.get('Content-Length', '0'),
        'SERVER_PROTOCOL': self.request_version,
        'REMOTE_ADDR': self.client_address[0],
        'REMOTE_PORT': self.client_address[1],
        'wsgi.version': (1, 0),
        'wsgi.url_scheme': 'http',
        'wsgi.input': io.BytesIO(self.raw_request),
        'wsgi.errors': sys.stderr,
        'wsgi.multithread': False,
        'wsgi.multiprocess': True,
        'wsgi.run_once': False,
    })
    # expose remaining request headers as HTTP_* variables; keys already
    # present in the environ (e.g. CONTENT_TYPE) are skipped.
    for name, value in self.headers.items():
        key = name.replace('-', '_').upper()
        if key not in env:
            env['HTTP_' + key] = value
    return env
def get(self, q=None, page=None):
    """Get styles."""
    # Check the cache first: a matching ETag short-circuits with 304.
    etag = generate_etag(current_ext.content_version.encode('utf8'))
    self.check_etag(etag, weak=True)
    # Otherwise serialize the styles and tag the response.
    response = jsonify(current_ext.styles)
    response.set_etag(etag)
    return response
def create_from_settings(settings):
    """ Create a connection with given settings.

    Args:
        settings (dict): A dictionary of settings

    Returns:
        :class:`Connection`. The connection
    """
    url = settings["url"]
    base_url = settings["base_url"]
    user = settings["user"]
    password = settings["password"]
    return Connection(
        url,
        base_url,
        user,
        password,
        authorizations=settings["authorizations"],
        debug=settings["debug"],
    )
def delete(self, url=None, post_data={}, parse_data=False, key=None, parameters=None):
    """ Issue a DELETE request.

    Kwargs:
        url (str): Destination URL
        post_data (dict): Dictionary of parameter and values
        parse_data (bool): If true, parse response data
        key (string): If parse_data==True, look for this key when parsing data
        parameters (dict): Additional GET parameters to append to the URL

    Returns:
        dict. Response (a dict with keys: success, data, info, body)

    Raises:
        AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception
    """
    # thin wrapper over _fetch; full_return=True yields the rich dict above.
    return self._fetch("DELETE", url, post_data=post_data, parse_data=parse_data, key=key, parameters=parameters, full_return=True)
def post(self, url=None, post_data={}, parse_data=False, key=None, parameters=None, listener=None):
    """ Issue a POST request.

    Kwargs:
        url (str): Destination URL
        post_data (dict): Dictionary of parameter and values
        parse_data (bool): If true, parse response data
        key (string): If parse_data==True, look for this key when parsing data
        parameters (dict): Additional GET parameters to append to the URL
        listener (func): callback called when uploading a file

    Returns:
        dict. Response (a dict with keys: success, data, info, body)

    Raises:
        AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception
    """
    # NOTE(review): the mutable default ``post_data={}`` is kept on purpose:
    # _fetch treats {} ("send an empty JSON body") differently from None
    # ("no body"), and the dict is presumably never mutated — confirm.
    return self._fetch(
        "POST",
        url,
        post_data=post_data,
        parse_data=parse_data,
        key=key,
        parameters=parameters,
        listener=listener,
        full_return=True,
    )
def get(self, url=None, parse_data=True, key=None, parameters=None):
    """ Issue a GET request.

    Kwargs:
        url (str): Destination URL
        parse_data (bool): If true, parse response data
        key (string): If parse_data==True, look for this key when parsing data
        parameters (dict): Additional GET parameters to append to the URL

    Returns:
        dict. Response (a dict with keys: success, data, info, body)

    Raises:
        AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError, Exception
    """
    return self._fetch(
        "GET",
        url,
        post_data=None,
        parse_data=parse_data,
        key=key,
        parameters=parameters,
    )
def get_headers(self):
    """ Build the default request headers.

    Returns:
        dict: Headers (User-Agent, plus a cached Authorization if known)
    """
    headers = {"User-Agent": "kFlame 1.0"}
    password_url = self._get_password_url()
    authorizations = self._settings["authorizations"]
    # reuse a previously cached Authorization header when available.
    if password_url and password_url in authorizations:
        headers["Authorization"] = authorizations[password_url]
    return headers
def _get_password_url(self):
""" Get URL used for authentication
Returns:
string: URL
"""
password_url = None
if self._settings["user"] or self._settings["authorization"]:
if self._settings["url"]:
password_url = self._settings["url"]
elif self._settings["base_url"]:
password_url = self._settings["base_url"]
return password_url |
def parse(self, text, key=None):
    """ Parses a JSON response.

    Args:
        text (str): Text to parse

    Kwargs:
        key (str): Key to look for, if any

    Returns:
        Parsed value

    Raises:
        ValueError
    """
    try:
        data = json.loads(text)
    except ValueError as e:
        # re-raise with the offending payload for easier debugging.
        raise ValueError("%s: Value: [%s]" % (e, text))
    if not (data and key):
        return data
    if key not in data:
        raise ValueError("Invalid response (key %s not found): %s" % (key, data))
    return data[key]
def build_twisted_request(self, method, url, extra_headers={}, body_producer=None, full_url=False):
    """ Build a request for twisted.

    Args:
        method (str): Request method (GET/POST/PUT/DELETE/etc.)
        url (str): Destination URL (full, or relative)

    Kwargs:
        extra_headers (dict): Headers (override default connection headers, if any)
        body_producer (:class:`twisted.web.iweb.IBodyProducer`): Object producing request body
        full_url (bool): If False, URL is relative

    Returns:
        tuple. Tuple with two elements: reactor, and request
    """
    uri = url if full_url else self._url(url)
    # merge connection defaults with per-request overrides.
    merged_headers = self.get_headers()
    if extra_headers:
        merged_headers.update(extra_headers)
    headers = http_headers.Headers()
    for name, value in merged_headers.items():
        headers.addRawHeader(name, value)
    agent = client.Agent(reactor)
    request = agent.request(method, uri, headers, body_producer)
    return (reactor, request)
def _fetch(self, method, url=None, post_data=None, parse_data=True, key=None, parameters=None, listener=None, full_return=False):
    """ Issue a request.

    Args:
        method (str): Request method (GET/POST/PUT/DELETE/etc.) If not specified, it will be POST if post_data is not None

    Kwargs:
        url (str): Destination URL
        post_data (str): A string of what to POST
        parse_data (bool): If true, parse response data
        key (string): If parse_data==True, look for this key when parsing data
        parameters (dict): Additional GET parameters to append to the URL
        listener (func): callback called when uploading a file
        full_return (bool): If set to True, get a full response (with success, data, info, body)

    Returns:
        dict. Response. If full_return==True, a dict with keys: success, data, info, body, otherwise the parsed data

    Raises:
        AuthenticationError, ConnectionError, urllib2.HTTPError, ValueError
    """
    headers = self.get_headers()
    headers["Content-Type"] = "application/json"
    handlers = []
    debuglevel = int(self._settings["debug"])
    handlers.append(urllib2.HTTPHandler(debuglevel=debuglevel))
    # only install an HTTPS handler when the interpreter has SSL support.
    if hasattr(httplib, "HTTPS"):
        handlers.append(urllib2.HTTPSHandler(debuglevel=debuglevel))
    handlers.append(urllib2.HTTPCookieProcessor(cookielib.CookieJar()))
    password_url = self._get_password_url()
    if password_url and "Authorization" not in headers:
        # no cached Authorization header: fall back to HTTP basic auth.
        pwd_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
        pwd_manager.add_password(None, password_url, self._settings["user"], self._settings["password"])
        handlers.append(HTTPBasicAuthHandler(pwd_manager))
    opener = urllib2.build_opener(*handlers)
    if post_data is not None:
        # bodies are always JSON-encoded (see Content-Type above).
        post_data = json.dumps(post_data)
    uri = self._url(url, parameters)
    request = RESTRequest(uri, method=method, headers=headers)
    if post_data is not None:
        request.add_data(post_data)
    response = None
    try:
        response = opener.open(request)
        body = response.read()
        # cache the Authorization header the opener used so later requests
        # can skip the basic-auth round trip (see get_headers()).
        if password_url and password_url not in self._settings["authorizations"] and request.has_header("Authorization"):
            self._settings["authorizations"][password_url] = request.get_header("Authorization")
    except urllib2.HTTPError as e:
        # translate common HTTP failures into domain-specific exceptions.
        if e.code == 401:
            raise AuthenticationError("Access denied while trying to access %s" % uri)
        elif e.code == 404:
            raise ConnectionError("URL not found: %s" % uri)
        else:
            raise
    except urllib2.URLError as e:
        raise ConnectionError("Error while fetching from %s: %s" % (uri, e))
    finally:
        if response:
            response.close()
        opener.close()
    data = None
    if parse_data:
        if not key:
            # default key: the first path segment of the request URL.
            # NOTE(review): this assumes ``url`` is a non-empty string when
            # parse_data is requested without a key — confirm callers.
            key = string.split(url, "/")[0]
        data = self.parse(body, key)
    if full_return:
        info = response.info() if response else None
        # NOTE(review): if the "status" header is missing, ``status`` is
        # None and the comparison below relies on Python 2 None ordering.
        status = int(string.split(info["status"])[0]) if (info and "status" in info) else None
        return {
            "success": (status >= 200 and status < 300),
            "data": data,
            "info": info,
            "body": body
        }
    return data
def _url(self, url=None, parameters=None):
""" Build destination URL.
Kwargs:
url (str): Destination URL
parameters (dict): Additional GET parameters to append to the URL
Returns:
str. URL
"""
uri = url or self._settings["url"]
if url and self._settings["base_url"]:
uri = "%s/%s" % (self._settings["base_url"], url)
uri += ".json"
if parameters:
uri += "?%s" % urllib.urlencode(parameters)
return uri |
def is_text(self):
    """ Tells if this message is a text message.

    Returns:
        bool. True for paste, plain-text and tweet messages.
    """
    text_types = (
        self._TYPE_PASTE,
        self._TYPE_TEXT,
        self._TYPE_TWEET,
    )
    return self.type in text_types
def get_rooms(self, sort=True):
    """ Get rooms list.

    Kwargs:
        sort (bool): If True, sort rooms by name

    Returns:
        array. List of rooms (each room is a dict)
    """
    rooms = self._connection.get("rooms")
    if sort:
        rooms = sorted(rooms, key=operator.itemgetter("name"))
    return rooms
def get_room_by_name(self, name):
    """ Get a room by name.

    Returns:
        :class:`Room`. Room

    Raises:
        RoomNotFoundException
    """
    for room in self.get_rooms() or []:
        if room["name"] == name:
            return self.get_room(room["id"])
    # exhausted the list without a match
    raise RoomNotFoundException("Room %s not found" % name)
def get_room(self, id):
    """ Get room (cached per room id).

    Returns:
        :class:`Room`. Room
    """
    try:
        return self._rooms[id]
    except KeyError:
        # build lazily so Room is only constructed on a cache miss.
        room = self._rooms[id] = Room(self, id)
        return room
def get_user(self, id=None):
    """ Get user (cached per user id; defaults to the current user).

    Returns:
        :class:`User`. User
    """
    uid = id or self._user.id
    if uid not in self._users:
        # reuse the already-built current-user object when possible.
        self._users[uid] = self._user if uid == self._user.id else User(self, uid)
    return self._users[uid]
def search(self, terms):
    """ Search transcripts.

    Args:
        terms (str): Terms for search

    Returns:
        array. Messages
    """
    results = self._connection.get("search/%s" % urllib.quote_plus(terms), key="messages")
    if results:
        results = [Message(self, entry) for entry in results]
    return results
def cookie_dump(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=False, httponly=False):
    """
    Build a ``SimpleCookie`` holding a single morsel for *key*.

    Every optional cookie attribute with a truthy value is copied onto the
    morsel under its dash-separated name (e.g. ``max-age``).

    :rtype: ``Cookie.SimpleCookie``
    """
    cookie = SimpleCookie()
    cookie[key] = value
    # Map each morsel attribute to its value explicitly. The previous
    # implementation looked the values up via ``locals()[attr]``, which
    # silently depends on the parameter names and breaks as soon as one
    # is renamed (and is opaque to static analysis).
    attributes = {
        'max-age': max_age,
        'expires': expires,
        'path': path,
        'domain': domain,
        'secure': secure,
        'httponly': httponly,
    }
    for attr_key, attr_value in attributes.items():
        if attr_value:
            cookie[key][attr_key] = attr_value
    return cookie
def response_status_string(code):
    """Render an HTTP status line fragment, e.g. ``200 OK``."""
    reason = HTTP_STATUS_CODES.get(code, 'unknown')
    return '{} {}'.format(code, reason.upper())
def cookies(self):
    """Cookies sent with the request, parsed from ``HTTP_COOKIE``.

    :rtype: dict
    """
    raw = self.environ.get('HTTP_COOKIE', '')
    jar = SimpleCookie(raw)
    # flatten morsels into a plain name -> value mapping
    return {name: morsel.value for name, morsel in jar.items()}
def attach(self, observer):
    """ Attach an observer.

    Args:
        observer (func): A function to be called when new messages arrive

    Returns:
        :class:`Stream`. Current instance to allow chaining
    """
    # keep the observer list duplicate-free
    if observer not in self._observers:
        self._observers.append(observer)
    return self
def incoming(self, messages):
    """ Called when incoming messages arrive.

    Args:
        messages (tuple): Messages (each message is a dict)
    """
    if not self._observers:
        return
    campfire = self._room.get_campfire()
    for message in messages:
        # each observer gets its own Message instance (as before), so
        # one observer mutating it cannot affect the others.
        for observer in self._observers:
            observer(Message(campfire, message))
def run(self):
    """ Called by the thread, it runs the process.

    NEVER call this method directly. Instead call start() to start the thread.
    To stop, call stop(), and then join()
    """
    # live streams always run in a separate process.
    if self._live:
        self._use_process = True
    self._abort = False
    campfire = self._room.get_campfire()
    if self._live:
        process = LiveStreamProcess(campfire.get_connection().get_settings(), self._room.id)
    else:
        process = StreamProcess(campfire.get_connection().get_settings(), self._room.id, pause=self._pause)
    if not self._use_process:
        # in-thread mode: messages are delivered via direct callback.
        process.set_callback(self.incoming)
    if self._use_process:
        # process mode: messages are delivered through a queue.
        queue = Queue()
        process.set_queue(queue)
        process.start()
        if not process.is_alive():
            return
    self._streaming = True
    while not self._abort:
        if self._use_process:
            if not process.is_alive():
                # worker died unexpectedly; bail out of the loop.
                self._abort = True
                break
            try:
                incoming = queue.get_nowait()
                if isinstance(incoming, list):
                    # a batch of raw messages from the worker.
                    self.incoming(incoming)
                elif isinstance(incoming, Exception):
                    # the worker forwarded a failure; stop and report it.
                    self._abort = True
                    if self._error_callback:
                        self._error_callback(incoming, self._room)
            except Empty:
                # nothing queued yet; back off before polling again.
                time.sleep(self._pause)
                pass
        else:
            # in-thread mode: poll synchronously.
            process.fetch()
            time.sleep(self._pause)
    self._streaming = False
    # report if the worker was killed rather than stopped cleanly.
    if self._use_process and self._abort and not process.is_alive() and self._error_callback:
        self._error_callback(Exception("Streaming process was killed"), self._room)
    if self._use_process:
        # tear down the queue and make sure the worker is gone.
        queue.close()
        if process.is_alive():
            process.stop()
        process.terminate()
        process.join()
def run(self):
    """ Poll loop entry point for the worker process.

    NEVER call this method directly. Instead call start() to start the
    separate process. If you don't want to use a second process, then call
    fetch() directly on this instance. To stop, call terminate().
    """
    if not self._queue:
        raise Exception("No queue available to send messages")
    # poll forever; terminate() is the only way out of this loop.
    while True:
        self.fetch()
        time.sleep(self._pause)
def fetch(self):
    """Fetch new messages since the last seen ID and hand them to
    :meth:`received`.

    On the first call (no last message ID yet), the newest message in the
    room is looked up first, so only messages arriving *after* that point
    are reported.
    """
    try:
        if not self._last_message_id:
            # seed the cursor with the newest message currently in the room.
            messages = self._connection.get(
                "room/%s/recent" % self._room_id,
                key="messages",
                parameters={"limit": 1})
            self._last_message_id = messages[-1]["id"]
        messages = self._connection.get(
            "room/%s/recent" % self._room_id,
            key="messages",
            parameters={"since_message_id": self._last_message_id})
    except Exception:
        # Polling is best-effort: treat any connection/parse failure as
        # "no new messages". The previous bare ``except:`` also swallowed
        # SystemExit/KeyboardInterrupt, which could make the polling
        # process impossible to interrupt from within this loop.
        messages = []
    if messages:
        self._last_message_id = messages[-1]["id"]
        self.received(messages)
def received(self, messages):
    """ Called when new messages arrive.

    Args:
        messages (tuple): Messages
    """
    if not messages:
        return
    # forward to the inter-process queue first, then the direct callback.
    if self._queue:
        self._queue.put_nowait(messages)
    if self._callback:
        self._callback(messages)
def run(self):
    """ Entry point for the live-stream worker process.

    NEVER call this method directly. Instead call start() to start the
    separate process. If you don't want to use a second process, then call
    fetch() directly on this instance. To stop, call terminate().
    """
    if not self._queue:
        raise Exception("No queue available to send messages")
    # open the streaming SSL connection and hand control to the reactor.
    self._reactor.connectSSL(
        "streaming.campfirenow.com",
        443,
        LiveStreamFactory(self),
        ssl.ClientContextFactory(),
    )
    self._reactor.run()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.