"""This module contains the MessageSet class, which is a special kind of
protocol message which can contain other protocol messages without knowing
their types. See the class's doc string for more information."""
from google.net.proto import ProtocolBuffer
import logging
try:
from google3.net.proto import _net_proto___parse__python
except ImportError:
_net_proto___parse__python = None
TAG_BEGIN_ITEM_GROUP = 11
TAG_END_ITEM_GROUP = 12
TAG_TYPE_ID = 16
TAG_MESSAGE = 26
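# Wire layout of each item, as produced by MessageSet.OutputUnchecked and
# Item.OutputUnchecked below: TAG_BEGIN_ITEM_GROUP, then TAG_TYPE_ID followed
# by the varint type id, then TAG_MESSAGE followed by the length-prefixed
# message bytes, then TAG_END_ITEM_GROUP.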
class Item:
def __init__(self, message, message_class=None):
self.message = message
self.message_class = message_class
def SetToDefaultInstance(self, message_class):
self.message = message_class()
self.message_class = message_class
def Parse(self, message_class):
if self.message_class is not None:
return 1
try:
message_obj = message_class()
message_obj.MergePartialFromString(self.message)
self.message = message_obj
self.message_class = message_class
return 1
except ProtocolBuffer.ProtocolBufferDecodeError:
logging.warn("Parse error in message inside MessageSet. Tried "
"to parse as: " + message_class.__name__)
return 0
def MergeFrom(self, other):
if self.message_class is not None:
if other.Parse(self.message_class):
self.message.MergeFrom(other.message)
elif other.message_class is not None:
if not self.Parse(other.message_class):
self.message = other.message_class()
self.message_class = other.message_class
self.message.MergeFrom(other.message)
else:
self.message += other.message
def Copy(self):
if self.message_class is None:
return Item(self.message)
else:
new_message = self.message_class()
new_message.CopyFrom(self.message)
return Item(new_message, self.message_class)
def Equals(self, other):
if self.message_class is not None:
if not other.Parse(self.message_class): return 0
return self.message.Equals(other.message)
elif other.message_class is not None:
if not self.Parse(other.message_class): return 0
return self.message.Equals(other.message)
else:
return self.message == other.message
def IsInitialized(self, debug_strs=None):
if self.message_class is None:
return 1
else:
return self.message.IsInitialized(debug_strs)
def ByteSize(self, pb, type_id):
message_length = 0
if self.message_class is None:
message_length = len(self.message)
else:
message_length = self.message.ByteSize()
return pb.lengthString(message_length) + pb.lengthVarInt64(type_id) + 2
def ByteSizePartial(self, pb, type_id):
message_length = 0
if self.message_class is None:
message_length = len(self.message)
else:
message_length = self.message.ByteSizePartial()
return pb.lengthString(message_length) + pb.lengthVarInt64(type_id) + 2
def OutputUnchecked(self, out, type_id):
out.putVarInt32(TAG_TYPE_ID)
out.putVarUint64(type_id)
out.putVarInt32(TAG_MESSAGE)
if self.message_class is None:
out.putPrefixedString(self.message)
else:
out.putVarInt32(self.message.ByteSize())
self.message.OutputUnchecked(out)
def OutputPartial(self, out, type_id):
out.putVarInt32(TAG_TYPE_ID)
out.putVarUint64(type_id)
out.putVarInt32(TAG_MESSAGE)
if self.message_class is None:
out.putPrefixedString(self.message)
else:
out.putVarInt32(self.message.ByteSizePartial())
self.message.OutputPartial(out)
def Decode(decoder):
type_id = 0
message = None
while 1:
tag = decoder.getVarInt32()
if tag == TAG_END_ITEM_GROUP:
break
if tag == TAG_TYPE_ID:
type_id = decoder.getVarUint64()
continue
if tag == TAG_MESSAGE:
message = decoder.getPrefixedString()
continue
if tag == 0: raise ProtocolBuffer.ProtocolBufferDecodeError
decoder.skipData(tag)
if type_id == 0 or message is None:
raise ProtocolBuffer.ProtocolBufferDecodeError
return (type_id, message)
Decode = staticmethod(Decode)
class MessageSet(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.items = dict()
if contents is not None: self.MergeFromString(contents)
def get(self, message_class):
if message_class.MESSAGE_TYPE_ID not in self.items:
return message_class()
item = self.items[message_class.MESSAGE_TYPE_ID]
if item.Parse(message_class):
return item.message
else:
return message_class()
def mutable(self, message_class):
if message_class.MESSAGE_TYPE_ID not in self.items:
message = message_class()
self.items[message_class.MESSAGE_TYPE_ID] = Item(message, message_class)
return message
item = self.items[message_class.MESSAGE_TYPE_ID]
if not item.Parse(message_class):
item.SetToDefaultInstance(message_class)
return item.message
def has(self, message_class):
if message_class.MESSAGE_TYPE_ID not in self.items:
return 0
item = self.items[message_class.MESSAGE_TYPE_ID]
return item.Parse(message_class)
def has_unparsed(self, message_class):
return message_class.MESSAGE_TYPE_ID in self.items
def GetTypeIds(self):
return self.items.keys()
def NumMessages(self):
return len(self.items)
def remove(self, message_class):
if message_class.MESSAGE_TYPE_ID in self.items:
del self.items[message_class.MESSAGE_TYPE_ID]
def __getitem__(self, message_class):
if message_class.MESSAGE_TYPE_ID not in self.items:
raise KeyError(message_class)
item = self.items[message_class.MESSAGE_TYPE_ID]
if item.Parse(message_class):
return item.message
else:
raise KeyError(message_class)
def __setitem__(self, message_class, message):
self.items[message_class.MESSAGE_TYPE_ID] = Item(message, message_class)
def __contains__(self, message_class):
return self.has(message_class)
def __delitem__(self, message_class):
self.remove(message_class)
def __len__(self):
return len(self.items)
def MergeFrom(self, other):
assert other is not self
for (type_id, item) in other.items.items():
if type_id in self.items:
self.items[type_id].MergeFrom(item)
else:
self.items[type_id] = item.Copy()
def Equals(self, other):
if other is self: return 1
if len(self.items) != len(other.items): return 0
for (type_id, item) in other.items.items():
if type_id not in self.items: return 0
if not self.items[type_id].Equals(item): return 0
return 1
def __eq__(self, other):
return ((other is not None)
and (other.__class__ == self.__class__)
and self.Equals(other))
def __ne__(self, other):
return not (self == other)
def IsInitialized(self, debug_strs=None):
initialized = 1
for item in self.items.values():
if not item.IsInitialized(debug_strs):
initialized = 0
return initialized
def ByteSize(self):
n = 2 * len(self.items)
for (type_id, item) in self.items.items():
n += item.ByteSize(self, type_id)
return n
def ByteSizePartial(self):
n = 2 * len(self.items)
for (type_id, item) in self.items.items():
n += item.ByteSizePartial(self, type_id)
return n
def Clear(self):
self.items = dict()
def OutputUnchecked(self, out):
for (type_id, item) in self.items.items():
out.putVarInt32(TAG_BEGIN_ITEM_GROUP)
item.OutputUnchecked(out, type_id)
out.putVarInt32(TAG_END_ITEM_GROUP)
def OutputPartial(self, out):
for (type_id, item) in self.items.items():
out.putVarInt32(TAG_BEGIN_ITEM_GROUP)
item.OutputPartial(out, type_id)
out.putVarInt32(TAG_END_ITEM_GROUP)
def TryMerge(self, decoder):
while decoder.avail() > 0:
tag = decoder.getVarInt32()
if tag == TAG_BEGIN_ITEM_GROUP:
(type_id, message) = Item.Decode(decoder)
if type_id in self.items:
self.items[type_id].MergeFrom(Item(message))
else:
self.items[type_id] = Item(message)
continue
if (tag == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
decoder.skipData(tag)
def _CToASCII(self, output_format):
if _net_proto___parse__python is None:
return ProtocolBuffer.ProtocolMessage._CToASCII(self, output_format)
else:
return _net_proto___parse__python.ToASCII(
self, "MessageSetInternal", output_format)
def ParseASCII(self, s):
if _net_proto___parse__python is None:
ProtocolBuffer.ProtocolMessage.ParseASCII(self, s)
else:
_net_proto___parse__python.ParseASCII(self, "MessageSetInternal", s)
def ParseASCIIIgnoreUnknown(self, s):
if _net_proto___parse__python is None:
ProtocolBuffer.ProtocolMessage.ParseASCIIIgnoreUnknown(self, s)
else:
_net_proto___parse__python.ParseASCIIIgnoreUnknown(
self, "MessageSetInternal", s)
def __str__(self, prefix="", printElemNumber=0):
text = ""
for (type_id, item) in self.items.items():
if item.message_class is None:
text += "%s[%d] <\n" % (prefix, type_id)
text += "%s (%d bytes)\n" % (prefix, len(item.message))
text += "%s>\n" % prefix
else:
text += "%s[%s] <\n" % (prefix, item.message_class.__name__)
text += item.message.__str__(prefix + " ", printElemNumber)
text += "%s>\n" % prefix
return text
_PROTO_DESCRIPTOR_NAME = 'MessageSet'
__all__ = ['MessageSet']
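# A minimal usage sketch (kept as a comment so nothing runs on import).
# `FooMessage` is a hypothetical ProtocolBuffer.ProtocolMessage subclass that
# defines a MESSAGE_TYPE_ID attribute, which is what get()/mutable()/has() key
# on above:
#
#   ms = MessageSet()
#   foo = ms.mutable(FooMessage)      # creates and registers a FooMessage
#   data = ms.Encode()                # items framed by the group tags above
#   ms2 = MessageSet(data)            # payloads stay as raw strings...
#   if ms2.has(FooMessage):           # ...until has()/get() lazily Parse()
#       foo2 = ms2.get(FooMessage)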
| {
"content_hash": "a719c97725704ffcf6225b29294aca58",
"timestamp": "",
"source": "github",
"line_count": 495,
"max_line_length": 78,
"avg_line_length": 19.77979797979798,
"alnum_prop": 0.6479419875395772,
"repo_name": "gauribhoite/personfinder",
"id": "9a9bfd56a47efde2a00a5db7e81bbe108a34fc23",
"size": "10397",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "env/google_appengine/google/net/proto/message_set.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "423"
},
{
"name": "Batchfile",
"bytes": "5005"
},
{
"name": "C",
"bytes": "413819"
},
{
"name": "CSS",
"bytes": "330448"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "HTML",
"bytes": "720955"
},
{
"name": "JavaScript",
"bytes": "1072023"
},
{
"name": "Makefile",
"bytes": "16086"
},
{
"name": "PHP",
"bytes": "2582470"
},
{
"name": "Python",
"bytes": "60243792"
},
{
"name": "Shell",
"bytes": "7491"
},
{
"name": "TeX",
"bytes": "60219"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
from llvm.core import *
from llvm.passes import *
from llvm.ee import *
# A helper class.
class strstream(object):
def __init__(self, s):
self.s = s
def read(self):
return self.s
# Create a module.
asm = """
define i32 @test() nounwind {
ret i32 42
}
define i32 @test1() nounwind {
entry:
%tmp = alloca i32
store i32 42, i32* %tmp, align 4
%tmp1 = load i32* %tmp, align 4
%tmp2 = call i32 @test()
%tmp3 = load i32* %tmp, align 4
%tmp4 = load i32* %tmp, align 4
ret i32 %tmp1
}
define i32 @test2() nounwind {
entry:
%tmp = call i32 @test()
ret i32 %tmp
}
"""
m = Module.from_assembly(strstream(asm))
print "-"*72
print m
# Let's run a module-level inlining pass. First, create a pass manager.
pm = PassManager.new()
# Add the target data as the first "pass". This is mandatory.
pm.add( TargetData.new('') )
# Add the inlining pass.
pm.add( PASS_FUNCTION_INLINING )
# Run it!
pm.run(m)
# Done with the pass manager.
del pm
# Print the result. Note the change in @test2.
print "-"*72
print m
# Let's run a DCE pass on the function 'test1' now. First create a
# function pass manager.
fpm = FunctionPassManager.new(m)
# Add the target data as first "pass". This is mandatory.
fpm.add( TargetData.new('') )
# Add a DCE pass
fpm.add( PASS_AGGRESSIVE_DCE )
# Run the pass on the function 'test1'
fpm.run( m.get_function_named('test1') )
# Print the result. Note the change in @test1.
print "-"*72
print m
| {
"content_hash": "8850b7557d63b9133a1ca5dc4ac24a15",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 71,
"avg_line_length": 19.52,
"alnum_prop": 0.6598360655737705,
"repo_name": "mmcminn/llvm-py",
"id": "43f54948f7e0196cd9fe4abbb77526b77fc8bfe1",
"size": "1487",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/passes.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "119305"
},
{
"name": "C++",
"bytes": "15076"
},
{
"name": "JavaScript",
"bytes": "4102"
},
{
"name": "Python",
"bytes": "175438"
}
],
"symlink_target": ""
} |
from cities_light.models import City, Country, Region
from django import shortcuts
from django.contrib.auth.models import Group, User
from django.db.models import Q
def navigation_autocomplete(request,
template_name='navigation_autocomplete/autocomplete.html'):
q = request.GET.get('q', '')
context = {'q': q}
queries = {}
queries['users'] = User.objects.filter(
Q(username__icontains=q) |
Q(first_name__icontains=q) |
Q(last_name__icontains=q) |
Q(email__icontains=q)
).distinct()[:3]
queries['groups'] = Group.objects.filter(name__icontains=q)[:3]
queries['cities'] = City.objects.filter(search_names__icontains=q)[:3]
queries['regions'] = Region.objects.filter(name_ascii__icontains=q)[:3]
queries['countries'] = Country.objects.filter(name_ascii__icontains=q)[:3]
context.update(queries)
return shortcuts.render(request, template_name, context)
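# Wiring sketch (assumption: this would live in the project's urls.py rather
# than in this views module; the pattern is illustrative only):
#
#   from django.conf.urls import url
#   from navigation_autocomplete import views
#
#   urlpatterns = [
#       url(r'^navigation_autocomplete/$', views.navigation_autocomplete),
#   ]
#
# The view reads the ?q= GET parameter and renders the template with up to
# three matches per model.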
| {
"content_hash": "b23d488bfe9545f1d977f66d67418b1f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 34.629629629629626,
"alnum_prop": 0.679144385026738,
"repo_name": "dsanders11/django-autocomplete-light",
"id": "993fa8d0416de312685a7cbe2a5524bde949251f",
"size": "935",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "autocomplete_light/example_apps/navigation_autocomplete/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14611"
},
{
"name": "JavaScript",
"bytes": "2017"
},
{
"name": "Python",
"bytes": "252641"
},
{
"name": "Shell",
"bytes": "2878"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ldap3_sync', '0002_auto_20170214_1935'),
]
operations = [
migrations.RenameField(
model_name='ldapconnection',
old_name='sasl_credential',
new_name='sasl_credentials',
),
migrations.RemoveField(
model_name='ldapreferralhost',
name='server',
),
migrations.AddField(
model_name='ldapconnection',
name='auto_range',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='ldapserver',
name='allowed_referral_hosts',
field=models.ManyToManyField(to='ldap3_sync.LDAPReferralHost'),
),
migrations.AddField(
model_name='ldapserver',
name='tls',
field=models.CharField(blank=True, help_text=b'Path to a python object which contains TLS certificate information. LEAVE THIS BLANK.', max_length=255, null=True),
),
migrations.AlterField(
model_name='ldapconnection',
name='auto_bind',
field=models.CharField(choices=[(b'NONE', b'ldap3.AUTO_BIND_NONE'), (b'NO_TLS', b'ldap3.AUTO_BIND_NO_TLS'), (b'TLS_AFTER_BIND', b'ldap3.AUTO_BIND_TLS_AFTER_BIND'), (b'TLS_BEFORE_BIND', b'ldap3.AUTO_BIND_TLS_BEFORE_BIND')], default=b'NONE', max_length=128),
),
migrations.AlterField(
model_name='ldapconnection',
name='client_strategy',
field=models.CharField(choices=[(b'SYNC', b'ldap3.SYNC'), (b'RESTARTABLE', b'ldap3.RESTARTABLE')], default=b'SYNC', max_length=128),
),
migrations.AlterField(
model_name='ldapconnection',
name='sasl_mechanism',
field=models.CharField(blank=True, choices=[(b'EXTERNAL', b'ldap3.EXTERNAL'), (b'DIGEST-MD5', b'ldap3.DIGEST_MD5'), (b'GSSAPI', b'ldap3.KERBEROS'), (b'GSSAPI', b'ldap3.GSSAPI')], max_length=128, null=True),
),
migrations.AlterField(
model_name='ldappool',
name='pool_strategy',
field=models.CharField(choices=[(b'FIRST', b'ldap3.FIRST'), (b'ROUND_ROBIN', b'ldap3.ROUND_ROBIN'), (b'RANDOM', b'ldap3.RANDOM')], default=b'ROUND_ROBIN', max_length=128),
),
migrations.AlterField(
model_name='ldapserver',
name='get_info',
field=models.CharField(choices=[(b'NO_INFO', b'ldap3.NONE'), (b'DSA', b'ldap3.DSA'), (b'SCHEMA', b'ldap3.SCHEMA'), (b'ALL', b'ldap3.ALL'), (b'EDIR_8_8_8', b'ldap3.OFFLINE_EDIR_8_8_8'), (b'AD_2012_R2', b'ldap3.OFFLINE_AD_2012_R2'), (b'SLAPD_2_4', b'ldap3.OFFLINE_SLAPD_2_4'), (b'DS389_1_3_3', b'ldap3.OFFLINE_DS389_1_3_3')], default=b'SCHEMA', max_length=128),
),
migrations.AlterField(
model_name='ldapserver',
name='mode',
field=models.CharField(choices=[(b'IP_SYSTEM_DEFAULT', b'ldap3.IP_SYSTEM_DEFAULT'), (b'IP_V4_ONLY', b'ldap3.IP_V4_ONLY'), (b'IP_V6_ONLY', b'ldap3.IP_V6_ONLY'), (b'IP_V4_PREFERRED', b'ldap3.IP_V4_PREFERRED'), (b'IP_V6_PREFERRED', b'ldap3.IP_V6_PREFERRED')], default=b'IP_SYSTEM_DEFAULT', max_length=128),
),
migrations.AlterField(
model_name='ldapsyncjob',
name='name',
field=models.CharField(default=b'freely.hardy.mammal', max_length=255, unique=True),
),
]
| {
"content_hash": "3b12b755a1663ac54fab2ef7e4e6263a",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 371,
"avg_line_length": 48.93055555555556,
"alnum_prop": 0.5926766959977292,
"repo_name": "PGower/django-ldap3-sync",
"id": "54cd71b20c2687addd0ea191366f8a8c5840ca36",
"size": "3596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ldap3_sync/migrations/0003_auto_20170214_2026.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "409"
},
{
"name": "Python",
"bytes": "98420"
}
],
"symlink_target": ""
} |
import itertools
import re
import json
import sys
import yaml
from thirdparty import networkx
from google.appengine.ext import ndb
ARTIFACT_STORE_KEY = ndb.Key("ArtifactStore", "default")
INTERPOLATED_REGEX = re.compile(r"%%([^%]+?)%%")
class Artifact(ndb.Model):
filename = ndb.StringProperty()
content = ndb.StringProperty(indexed=False)
date = ndb.DateTimeProperty(auto_now_add=True)
class ArtifactGraph(networkx.DiGraph):
# Hardcode these for now to hopefully make the node colors stay consistent
OS_GROUP_MAP = {"Darwin": 1, "Windows": 2, "Linux": 3,
"Darwin,Windows": 4, "Darwin,Linux": 5,
"Darwin,Linux,Windows": 6, "Linux,Windows": 7}
def __init__(self, *args, **kwargs):
self.top_level = kwargs.pop("top_level", None)
super(ArtifactGraph, self).__init__(*args, **kwargs)
self.provides_map = {}
def UpdateProvidesMap(self, artifact_dicts):
for artifact_dict in artifact_dicts:
if "provides" in artifact_dict:
for attr in artifact_dict["provides"]:
self.provides_map.setdefault(attr, []).append(artifact_dict["name"])
def LoadGraphFromDataStore(self):
results = Artifact.query(ancestor=ARTIFACT_STORE_KEY).fetch()
self.InitializeFromYAMLBuffers([x.content for x in results])
def InitializeFromYAMLBuffers(self, yaml_buffers):
"""Create the tree from a list of yaml buffers.
All buffers must be passed in at once to allow a valid dependency tree to be
created.
"""
raw_list = []
for yaml_buffer in yaml_buffers:
raw_list.extend(list(yaml.safe_load_all(yaml_buffer)))
self.UpdateProvidesMap(raw_list)
# Use this lookup dict to check os conditions so we don't create
# dependencies across operating system boundaries
artifact_lookup_dict = {}
for artifact_dict in raw_list:
artifact_lookup_dict[artifact_dict["name"]] = artifact_dict
for artifact_dict in raw_list:
self.add_node(artifact_dict["name"], data_dict=artifact_dict)
for dependency in self.GetArtifactPathDependencies(
artifact_dict["collectors"]):
for dep in self.provides_map[dependency]:
dep_os = set(artifact_lookup_dict[dep]["supported_os"])
if set(artifact_dict["supported_os"]).intersection(dep_os):
self.add_edge(artifact_dict["name"], dep)
# If top_level is set, take all nodes who have no predecessors and create a
# root node with that name so we have an overall parent
if self.top_level:
for nodename, in_degree in self.in_degree().iteritems():
if in_degree == 0:
self.add_edge(self.top_level, nodename)
self.node[self.top_level]["data_dict"] = {"supported_os": [
"Darwin", "Linux", "Windows"]}
def GetJSONTree(self, root, attrs={'children': 'children', 'id': 'label'}):
"""Based on networkx.readwrite.json_graph.tree_data()
Unlike the original we allow non-tree graphs because our leaves can have
multiple predecessors, e.g. many nodes require SystemRoot.
"""
id_ = attrs['id']
children = attrs['children']
if id_ == children:
raise networkx.NetworkXError('Attribute names are not unique.')
def add_children(n, self):
nbrs = self[n]
if len(nbrs) == 0:
return []
children_ = []
for child in nbrs:
d = dict(itertools.chain(self.node[child].items(), [(id_, child)]))
c = add_children(child, self)
if c:
d[children] = c
children_.append(d)
return children_
data = dict(itertools.chain(self.node[root].items(), [(id_, root)]))
data[children] = add_children(root, self)
return json.dumps([data])
def GetD3JSON(self, group=None):
"""Converts a NetworkX Graph to D3.js JSON formatted dictionary"""
ints_graph = networkx.convert_node_labels_to_integers(
self, label_attribute="name")
nodes_list = []
os_group_map = {}
next_int = 1
for nodenum in ints_graph.nodes():
artifact_name = ints_graph.node[nodenum]["name"]
# Use supported_os as the node color
supported_os_list = self.node[artifact_name]["data_dict"]["supported_os"]
supported_os_list.sort()
group = self.OS_GROUP_MAP[",".join(supported_os_list)]
nodes_list.append(dict(name=artifact_name,
group=group))
graph_edges = ints_graph.edges(data=True)
# Build up edge dictionary in JSON format
json_edges = list()
for j, k, w in graph_edges:
json_edges.append({'source' : j, 'target' : k, 'value': 1})
graph_json = {"links": json_edges, "nodes": nodes_list}
return json.dumps(graph_json)
def GetArtifactPathDependencies(self, collectors):
"""Return a set of knowledgebase path dependencies.
Returns:
A set of strings for the required kb objects e.g.
["users.appdata", "systemroot"]
"""
deps = set()
for collector in collectors:
for arg, value in collector["args"].items():
paths = []
if arg in ["path", "query"]:
paths.append(value)
if arg in ["paths", "path_list", "content_regex_list"]:
paths.extend(value)
for path in paths:
for match in INTERPOLATED_REGEX.finditer(path):
deps.add(match.group()[2:-2]) # Strip off %%.
return deps
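# A small illustration of GetArtifactPathDependencies (hypothetical collector
# data, kept as a comment so nothing runs on import):
#
#   collectors = [{"args": {"paths": [r"%%users.appdata%%\Mozilla\Firefox"]}}]
#   ArtifactGraph().GetArtifactPathDependencies(collectors)
#   # -> set(["users.appdata"])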
| {
"content_hash": "a4e00316f9056db96f0d1f8c879fb77b",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 80,
"avg_line_length": 34.467948717948715,
"alnum_prop": 0.6360424028268551,
"repo_name": "destijl/forensicartifacts",
"id": "58cee9b6c39ef67aedb657df7b220328873f4bfc",
"size": "5377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frontend/artifact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "467"
},
{
"name": "Python",
"bytes": "11655"
}
],
"symlink_target": ""
} |
"""
Single site config.
Set site id, domain name, default emails and allowed hosts.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from configurations import values
class SingleSite(object):
#: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = values.IntegerValue(1)
#: Default domain name (for email settings, allowed hosts list and session cookie domain)
DOMAIN_NAME = values.SecretValue()
#: Default site name (for email name settings)
SITE_NAME = values.SecretValue()
###########################################################
# Site emails
def get_default_from_email(self):
return "{} <info@{}>".format(self.SITE_NAME, self.DOMAIN_NAME)
def get_server_email(self):
return "server@{}".format(self.DOMAIN_NAME)
def get_email_subject_prefix(self):
return "[{}] ".format(self.SITE_NAME)
#: Default: ``info@<domain name>``
DEFAULT_FROM_EMAIL = values.Value(get_default_from_email)
#: Default: ``server@<domain name>``
SERVER_EMAIL = values.Value(get_server_email)
#: Default: ``[site name]``
EMAIL_SUBJECT_PREFIX = values.Value(get_email_subject_prefix)
###########################################################
# Allowed hosts and cookie domain
def get_allowed_hosts(self):
return [
self.DOMAIN_NAME,
"www.{}".format(self.DOMAIN_NAME),
"api.{}".format(self.DOMAIN_NAME)
]
def get_session_cookie_domain(self):
return "{}".format(self.DOMAIN_NAME)
#: Default: ``<domain name>``, ``www.<domain name>``, ``api.<domain name>``
ALLOWED_HOSTS = values.ListValue(get_allowed_hosts)
#: Default: ``<domain name>``
SESSION_COOKIE_DOMAIN = values.Value(get_session_cookie_domain)
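# Composition sketch (assumption: a settings module using django-configurations
# would mix this class into a Configuration subclass roughly as below; the
# class name is illustrative):
#
#   from configurations import Configuration
#   from common_configs.django.site import SingleSite
#
#   class Production(SingleSite, Configuration):
#       pass
#
# DOMAIN_NAME and SITE_NAME are SecretValues, so they are expected to come from
# the environment (DJANGO_DOMAIN_NAME / DJANGO_SITE_NAME by default); the email
# and host settings above are then derived from them.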
| {
"content_hash": "22b5604a3d4c624993493403d510316a",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 93,
"avg_line_length": 30.098360655737704,
"alnum_prop": 0.6034858387799564,
"repo_name": "nigma/django-common-configs",
"id": "17e7daf410d27a278715d925728bb68fd7f4e0a0",
"size": "1860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common_configs/django/site.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "67578"
},
{
"name": "Shell",
"bytes": "6488"
}
],
"symlink_target": ""
} |
import cherrypy
import contextlib
import pytest
import unittest.mock
from girder import config
from girder.api import access
from pytest_girder.assertions import assertStatus
@contextlib.contextmanager
def serverMode(mode):
old, config.getConfig()['server']['mode'] = config.getConfig()['server']['mode'], mode
try:
yield mode
finally:
config.getConfig()['server']['mode'] = old
@pytest.fixture
def exceptionServer(server):
@access.public
def _raiseException(*args, **kwargs):
raise Exception('Specific message ' + cherrypy.request.girderRequestUid)
server.root.api.v1.item.route('GET', ('exception',), _raiseException)
yield server
server.root.api.v1.item.removeRoute('GET', ('exception',))
@pytest.fixture
def uuidMock():
val = '1234'
with unittest.mock.patch('uuid.uuid4', return_value=val):
yield val
@pytest.mark.parametrize('mode,msg,hasTrace', [
('production', 'An unexpected error occurred on the server.', False),
('development', "Exception: Exception('Specific message 1234',)", True)
])
def testExceptionHandlingBasedOnServerMode(exceptionServer, uuidMock, mode, msg, hasTrace):
with serverMode(mode):
resp = exceptionServer.request('/item/exception', exception=True)
assertStatus(resp, 500)
assert resp.json['message'] == msg
assert resp.json['type'] == 'internal'
assert resp.json['uid'] == uuidMock
assert ('trace' in resp.json) is hasTrace
assert resp.headers['Girder-Request-Uid'] == uuidMock
@pytest.mark.parametrize('method', ['GET', 'PUT'], ids=['qs', 'body'])
def testDuplicateParameters(server, method):
# In addition to a dict, the urllib.parse.urlencode used by server.request can accept a list
# of tuples
params = [('foo', 'bar'), ('foo', 'baz')]
# Use /system/setting because it has both GET and PUT endpoints
path = '/system/version'
resp = server.request(path=path, method=method, params=params)
assertStatus(resp, 400)
assert resp.json['message'] == 'Parameter "foo" must not be specified multiple times.'
| {
"content_hash": "afd70b20fdd8e1a89dd391227973de2b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 96,
"avg_line_length": 33.26984126984127,
"alnum_prop": 0.691793893129771,
"repo_name": "manthey/girder",
"id": "86753ebe45b0a2120db4d1d34b7b5ea742d2484a",
"size": "2096",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_rest_exception_handling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "26244"
},
{
"name": "CSS",
"bytes": "6537"
},
{
"name": "Dockerfile",
"bytes": "1528"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "JavaScript",
"bytes": "1175790"
},
{
"name": "Jinja",
"bytes": "322"
},
{
"name": "Mako",
"bytes": "7571"
},
{
"name": "Pug",
"bytes": "137980"
},
{
"name": "Python",
"bytes": "2016093"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Shell",
"bytes": "3354"
},
{
"name": "Stylus",
"bytes": "48706"
}
],
"symlink_target": ""
} |
from octopus.core.framework import WSAppFramework
from octopus.worker import settings
from octopus.worker.worker import Worker
from octopus.worker.workerwebservice import WorkerWebService
def make_worker():
'''Returns an instance of the worker application.'''
return WSAppFramework(Worker, WorkerWebService, settings.PORT)
| {
"content_hash": "b53fcc478fb3ebcd55315e43f32b6554",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 66,
"avg_line_length": 37,
"alnum_prop": 0.8138138138138138,
"repo_name": "mikrosimage/OpenRenderManagement",
"id": "124799096f784aaa3bf9fc93b22cb697072995e9",
"size": "333",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/octopus/worker/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "878623"
},
{
"name": "Shell",
"bytes": "5347"
}
],
"symlink_target": ""
} |
import pytest
from kriegspiel.models import User
@pytest.fixture
def user(db):
user = User.objects.create(
email='[email protected]',
username='test',
# password hash for 'test'
password='pbkdf2_sha256$30000$hrV5EM0C6B5O$KAfbHEJbiiuaYyZUZmWQSf3t5KA5/rg9B48cickmrxk=',
)
return user
| {
"content_hash": "65f0aff278400ddb26d3c85a5ecd563a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 97,
"avg_line_length": 24.923076923076923,
"alnum_prop": 0.6820987654320988,
"repo_name": "Kriegspiel/ks-python-api",
"id": "b4f3e26585f45bda547d8d37d4ae595ef60eb28c",
"size": "349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "219"
},
{
"name": "Python",
"bytes": "26443"
}
],
"symlink_target": ""
} |
import os
# import ycm_core
# return the filename in the path without extension
def findFileName(path, ext):
name = ''
for projFile in os.listdir(path):
# cocoapods will generate _Pods.xcodeproj as well
if projFile.endswith(ext) and not projFile.startswith('_Pods'):
name= projFile[:-len(ext):]
return name
# WARNING!! No trailing '/' at the end
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def findProjectName(working_directory):
projectName = findFileName(working_directory, '.xcodeproj')
if len(projectName) <= 0:
# cocoapod projects
projectName = findFileName(working_directory, '.podspec')
return projectName
flags = [
# TODO: find the correct cache path automatically
'-D__IPHONE_OS_VERSION_MIN_REQUIRED=80000',
'-miphoneos-version-min=9.3',
'-arch', 'arm64',
'-fblocks',
'-fmodules',
'-fobjc-arc',
'-fobjc-exceptions',
'-fexceptions',
'-isystem',
'/Library/Developer/CommandLineTools/usr/include/c++/v1', # for c++ headers <string>, <iostream> definition
'-x',
'objective-c',
'-Wno-#pragma-messages',
'-Wno-#warnings',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/Frameworks',
# '-I/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/Frameworks/Foundation.framework/Headers',
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
# '-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1'
# '-I/Library/Developer/CommandLineTools/usr/include',
#custom definition, include subfolders
'-ProductFrameworkInclude', # include the framework in the products(in derivedData) folder
'-I./Example/'+findProjectName(DirectoryOfThisScript()), # new cocoapods directory
'-ISUB./Pod/Classes', # old cocoapods directory
'-ISUB./'+findProjectName(DirectoryOfThisScript()), # new cocoapods directory
# use headers in framework instead
#'-ISUB./Example/Pods', # new cocoapods directory
# '-F/Users/Lono/Library/Developer/Xcode/DerivedData/Scrapio-dliwlpgcvwijijcdxarawwtrfuuh/Build/Products/Debug-iphonesimulator/Kiwi/',
# '-include',
# './Example/Tests/Tests-Prefix.pch', # test project prefix header
'-isysroot', '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk'
# '-fencode-extended-block-signature', #libclang may report error on this
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/7.0.2/include', # let IncludeClangInXCToolChain handle it
# include-pch will make YouCompleteMe show 'no errors found'
# '-include-pch',
# './Example/Tests/Tests-Prefix.pch', # test project prefix header
# modules failed trials
# '-fmodule-implementation-of',
# '-fimplicit-module-maps',
# '-F/Users/Lono/Library/Developer/Xcode/DerivedData/Scrapio-dliwlpgcvwijijcdxarawwtrfuuh/Build/Products/Debug-iphonesimulator/CocoaLumberjack',
# '-Wnon-modular-include-in-framework-module',
]
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# if os.path.exists( compilation_database_folder ):
# database = ycm_core.CompilationDatabase( compilation_database_folder )
# else:
# we don't use compilation database
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def Subdirectories(directory):
res = []
for path, subdirs, files in os.walk(directory):
for name in subdirs:
item = os.path.join(path, name)
res.append(item)
return res
def sorted_ls(path):
mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime
return list(sorted(os.listdir(path), key=mtime))
def IncludeClangInXCToolChain(flags, working_directory):
if not working_directory:
return list( flags )
new_flags = list(flags)
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/7.0.2/include',
path = '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/'
clangPath = sorted_ls(path)[::-1] # newest file first
includePath = ''
if (len(clangPath) > 0):
includePath = os.path.join('', *[path, clangPath[0], 'include'])
new_flags.append('-I'+includePath)
return new_flags
def FindDerivedDataPath( derivedDataPath, projectName ):
simulatorPaths = ['Build/Intermediates/CodeCoverage/Products/Debug-iphonesimulator/', # if you enable CodeCoverage, the framework of test target will be put in coverage folder, strange
'Build/Products/Debug-iphonesimulator/']
# search ~/Library/Developer/Xcode/DerivedData/ to find <project_name>-dliwlpgcvwijijcdxarawwtrfuuh
derivedPath = sorted_ls(derivedDataPath)[::-1] # newest file first
for productPath in derivedPath:
if productPath.lower().startswith( projectName.lower() ):
for simulatorPath in simulatorPaths:
projectPath = os.path.join('', *[derivedDataPath, productPath, simulatorPath])
if (len(projectPath) > 0) and os.path.exists(projectPath):
return projectPath # the latest product is what we want (really?)
return ''
def IncludeFlagsOfFrameworkHeaders( flags, working_directory ):
if not working_directory:
return flags
new_flags = []
path_flag = '-ProductFrameworkInclude'
derivedDataPath = os.path.expanduser('~/Library/Developer/Xcode/DerivedData/')
# find the project name
projectName = findProjectName(working_directory)
if len(projectName) <= 0:
return flags
# add all frameworks in the /Build/Products/Debug-iphonesimulator/xxx/xxx.framework
for flag in flags:
if not flag.startswith( path_flag ):
new_flags.append(flag)
continue
projectPath = FindDerivedDataPath( derivedDataPath, projectName )
if (len(projectPath) <= 0) or not os.path.exists(projectPath):
continue
# iterate through all frameworks folders /Debug-iphonesimulator/xxx/xxx.framework
for frameworkFolder in os.listdir(projectPath):
frameworkPath = os.path.join('', projectPath, frameworkFolder)
if not os.path.isdir(frameworkPath):
continue
# framwork folder '-F/Debug-iphonesimulator/<framework-name>'
# solve <Kiwi/KiwiConfigurations.h> not found problem
new_flags.append('-F'+frameworkPath)
# the framework name might be different than folder name
# we need to iterate all frameworks
for frameworkFile in os.listdir(frameworkPath):
if frameworkFile.endswith('framework'):
# include headers '-I/Debug-iphonesimulator/xxx/yyy.framework/Headers'
# allow you to use #import "Kiwi.h". NOT REQUIRED, but I am too lazy to change existing codes
new_flags.append('-I' + os.path.join('', frameworkPath, frameworkFile,'Headers'))
return new_flags
def IncludeFlagsOfSubdirectory( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_include_subdir = False
path_flags = [ '-ISUB']
for flag in flags:
# include the directory of flag as well
new_flag = [flag.replace('-ISUB', '-I')]
if make_next_include_subdir:
make_next_include_subdir = False
for subdir in Subdirectories(os.path.join(working_directory, flag)):
new_flag.append('-I')
new_flag.append(subdir)
for path_flag in path_flags:
if flag == path_flag:
make_next_include_subdir = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
for subdir in Subdirectories(os.path.join(working_directory, path)):
new_flag.append('-I' + subdir)
break
new_flags =new_flags + new_flag
return new_flags
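# Example of the custom '-ISUB' expansion above (paths are hypothetical):
#
#   IncludeFlagsOfSubdirectory(['-ISUB./Pod/Classes'], '/path/to/project')
#   # -> ['-I./Pod/Classes',
#   #     '-I/path/to/project/Pod/Classes/SubA',
#   #     '-I/path/to/project/Pod/Classes/SubA/SubB', ...]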
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
#add include subfolders as well
flags = IncludeFlagsOfSubdirectory( flags, working_directory )
#include framework header in derivedData/.../Products
flags = IncludeFlagsOfFrameworkHeaders( flags, working_directory )
#include libclang header in xctoolchain
flags = IncludeClangInXCToolChain( flags, working_directory )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
import time
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
# try:
# final_flags.remove( '-stdlib=libc++' )
# except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
# update .clang for chromatica every 5min TODO: very dirty
chromatica_file = DirectoryOfThisScript() + '/.clang'
if (not os.path.exists(chromatica_file)) or (time.time() - os.stat(chromatica_file).st_mtime > 5*60):
parsed_flags = IncludeFlagsOfSubdirectory( final_flags, DirectoryOfThisScript() )
escaped = [flag for flag in parsed_flags if " " not in flag] # chromatica doesn't handle space in flag
f = open(chromatica_file, 'w') # truncate the current file
f.write('flags='+' '.join(escaped))
f.close()
return {
'flags': final_flags,
'do_cache': True
}
# if __name__ == "__main__":
# print (FlagsForFile(""))
# flags = [
# '-D__IPHONE_OS_VERSION_MIN_REQUIRED=70000',
# '-x',
# 'objective-c',
# '-ProductFrameworkInclude',
# '-ProductFrameworkInclude',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
# '-ISUB./Pods/Headers/Public',
# '-MMD',
# ]
# print IncludeClangInXCToolChain(flags, DirectoryOfThisScript())
# print IncludeFlagsOfFrameworkHeaders( flags, DirectoryOfThisScript() )
# # res = subdirectory( DirectoryOfThisScript())
# flags = [
# '-D__IPHONE_OS_VERSION_MIN_REQUIRED=70000',
# '-x',
# 'objective-c',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
# '-ISUB./Pods/Headers/Public',
# '-MMD',
# ]
# print (IncludeFlagsOfSubdirectory( flags, DirectoryOfThisScript() ))
# res = IncludeFlagsOfSubdirectory( flags, DirectoryOfThisScript() )
# escaped = []
# for flag in res:
# if " " not in flag:
# escaped.append(flag)
# print ' '.join(escaped)
| {
"content_hash": "677c9b2f4bcca1a6e9e914af35baa87b",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 186,
"avg_line_length": 38.117302052785924,
"alnum_prop": 0.7078012001846438,
"repo_name": "haifengkao/RACProgressSubject",
"id": "27a4ac760fdef69ae2c08ea2e588b17c22fbc015",
"size": "13022",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".ycm_extra_conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "30865"
},
{
"name": "DTrace",
"bytes": "412"
},
{
"name": "Objective-C",
"bytes": "842054"
},
{
"name": "Python",
"bytes": "13022"
},
{
"name": "Ruby",
"bytes": "2375"
},
{
"name": "Shell",
"bytes": "20908"
}
],
"symlink_target": ""
} |
"""
mothership.serverinfo
Collects and displays information about servers
"""
# import some modules
import sys
import mothership
import mothership.kv
from mothership.mothership_models import *
from sqlalchemy import or_, desc, MetaData
class ServerInfoError(Exception):
pass
def get_host(cfg, hw_tag=None, ip=None, mac=None):
"""
[description]
search for a host based on info supplied
[parameter info]
required:
cfg: the config object. useful everywhere
optional:
hw_tag: the hardware tag to search for
ip: the ip to search for
mac: the mac address to search for
[return value]
returns a hostname if successful, raises an exception if unsuccessful
"""
if sum(x != None for x in (hw_tag, ip, mac)) != 1:
raise ServerInfoError(
"get_host() takes precisely ONE value to search on.\n"
"hw_tag=%s ip=%s mac=%s" % (hw_tag, ip, mac))
elif hw_tag != None:
try:
s = cfg.dbsess.query(Server).\
filter(Server.hw_tag==hw_tag).\
filter(Server.virtual==False).first()
if s.hostname:
return s.hostname
except TypeError:
raise ServerInfoError("no host found with hw_tag: %s" % hw_tag)
elif ip != None:
# try the private ip
try:
s, n = cfg.dbsess.query(Server, Network).\
filter(Server.id==Network.server_id).\
filter(Network.ip==ip).first()
if s.hostname:
return s.hostname
except TypeError:
pass
# try the public ip
try:
s, n = cfg.dbsess.query(Server, Network).\
filter(Server.id==Network.server_id).\
filter(Network.public_ip==ip).first()
if s.hostname:
return s.hostname
except TypeError:
pass
raise ServerInfoError(
"no host found with public or private ip: %s" % ip)
elif mac != None:
try:
h, s = cfg.dbsess.query(Network, Server).\
filter(Network.hw_tag==Server.hw_tag).\
filter(Network.mac==mac).\
filter(Server.virtual==False).first()
if s.hostname:
return s.hostname
except TypeError:
pass
raise ServerInfoError("no host found with MAC address: %s" % mac)
else:
raise ServerInfoError("You did something weird, please don't."
"hw_tag=%s ip=%s mac=%s" % hw_tag, ip, mac)
def all(cfg, host, realm, site_id):
"""
[description]
gather and display all information for a server
[parameter info]
required:
cfg: the config object. useful everywhere
host: the hostname of the server we're displaying
realm: the realm of the server we're displaying
site_id: the site_id of the server we're displaying
[return value]
no explicit return
"""
# gather server info from mothership's db
fqdn = '.'.join([host,realm,site_id])
try:
kvs = mothership.kv.collect(cfg, fqdn, key='tag')
except:
kvs = []
try:
h, s = cfg.dbsess.query(Hardware, Server).\
filter(Server.hostname==host).\
filter(Server.realm==realm).\
filter(Server.site_id==site_id).\
filter(Hardware.hw_tag==Server.hw_tag).first()
except TypeError:
raise ServerInfoError("host \"%s\" not found" % fqdn)
except:
raise ServerInfoError("something horrible happened")
# fire EVERYTHING!
print ""
print "hostname:\t\t%s.%s.%s" % (s.hostname, s.realm, s.site_id)
print "provisioned:\t\t%s" % (s.provision_date)
print "purchased:\t\t%s" % (h.purchase_date)
print "primary tag, index:\t%s, %s" % (s.tag, s.tag_index)
print "ancillary tags:\t%s" % (', '.join([kv.value for kv in kvs]))
print "security level:\t\t%s" % s.security_level
print "cobbler_profile:\t%s" % (s.cobbler_profile)
print "manufacturer, model:\t%s, %s" % (h.manufacturer, h.model)
print "hardware tag:\t\t%s" % (h.hw_tag)
if s.virtual is False:
print "cores:\t\t\t%s" % (h.cores)
print "ram (GB):\t\t%s" % (h.ram)
print "disk:\t\t\t%s" % (h.disk)
else:
print "cores:\t\t\t%s" % (s.cores)
print "ram (GB):\t\t%s" % (s.ram)
print "disk:\t\t\t%s" % (s.disk)
print "cpu speed:\t\t%s" % (h.cpu_speed)
print ""
for n in cfg.dbsess.query(Network).\
filter(Server.id==Network.server_id).\
filter(Server.id==s.id).\
order_by(Network.interface).all():
print "%s| mac: %-17s | ip: %-15s | public_ip: %-15s" % (n.interface, n.mac, n.ip, n.public_ip)
print "%s| vlan: %-3s | switch: %-15s | switch_port: %-10s" % (n.interface, n.vlan, n.switch, n.switch_port)
def ip_only(cfg, host, realm, site_id):
"""
[description]
print ip information for a server
[parameter info]
required:
cfg: the config object. useful everywhere
host: the hostname of the server we're displaying
realm: the realm of the server we're displaying
site_id: the site_id of the server we're displaying
[return value]
no explicit return
"""
# gather host info from mothership
h, s = cfg.dbsess.query(Hardware, Server).filter(Server.hostname==host).\
filter(Server.realm==realm).\
filter(Server.site_id==site_id).\
filter(Hardware.hw_tag==Server.hw_tag).first()
print "host|\t%s.%s.%s" % (host, realm, site_id)
# display the interface and associated ips
for n in cfg.dbsess.query(Network).\
filter(Server.id==Network.server_id).\
filter(Server.hostname==s.hostname).\
order_by(Network.interface).all():
print "%s|\tprivate ip: %s\t | public ip: %s" % (n.interface, n.ip, n.public_ip)
def mac_only(cfg, host, realm, site_id):
"""
[description]
print mac address information for a server
[parameter info]
required:
cfg: the config object. useful everywhere
host: the hostname of the server we're displaying
realm: the realm of the server we're displaying
site_id: the site_id of the server we're displaying
[return value]
no explicit return
"""
# gather host info from mothership
h, s = cfg.dbsess.query(Hardware, Server).filter(Server.hostname==host).\
filter(Server.realm==realm).\
filter(Server.site_id==site_id).\
filter(Hardware.hw_tag==Server.hw_tag).first()
print "host|\t%s.%s.%s" % (host, realm, site_id)
# display the interface and associated mac address
for n in cfg.dbsess.query(Network).\
filter(Server.id==Network.server_id).\
filter(Server.hostname==s.hostname).\
order_by(Network.interface).all():
print "%s|\tip: %s" % (n.interface, n.mac)
| {
"content_hash": "548d8803941dffa39c6625abf6ce8d4d",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 116,
"avg_line_length": 33.388625592417064,
"alnum_prop": 0.5866572036905607,
"repo_name": "robinbowes/mothership",
"id": "ca13ddea210e7a46e6e72f56d4d6789405ff37b8",
"size": "7621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mothership/serverinfo/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import argparse
import atexit
import configparser
import getpass
import hashlib
import logging
import logging.handlers
import math
import multiprocessing
import os
import requests
import re
import sys
from collections import defaultdict
try:
from json.decoder import JSONDecodeError
except ImportError:
class JSONDecodeError(ValueError):
pass
from queue import Empty
from urllib.parse import urlencode
NAME = 'wasapi_client' if __name__ == '__main__' else __name__
LOGGER = logging.getLogger(NAME)
READ_LIMIT = 1024 * 512
PROFILE_PATH = os.path.join(os.path.expanduser('~'), '.wasapi-client')
PRE_SIGNED_REGEX = [re.compile(r'https://.*\.s3.amazonaws\.com/.*[?].*Signature=.+')]
def start_listener_logging(log_q, path=''):
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
if path:
handler = logging.FileHandler(filename=path)
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
# Get records from the queue and send them to the handler.
listener = logging.handlers.QueueListener(log_q, handler)
listener.start()
return listener
def configure_main_logging(log_q, log_level=logging.ERROR):
"""Put a handler on the root logger.
This allows handling log records from imported modules.
"""
root = logging.getLogger()
root.addHandler(logging.handlers.QueueHandler(log_q))
root.setLevel(log_level)
def configure_worker_logging(log_q, log_level=logging.ERROR):
"""Configure logging for worker processes."""
# Remove any existing handlers.
LOGGER.handlers = []
# Prevent root logger duplicating messages.
LOGGER.propagate = False
LOGGER.addHandler(logging.handlers.QueueHandler(log_q))
LOGGER.setLevel(log_level)
class WASAPIDownloadError(Exception):
pass
class WASAPIManifestError(Exception):
pass
def make_session(auth=None, headers={}):
"""Make a session that will store our auth.
`auth` is a tuple of the form (user, password)
"""
session = requests.Session()
session.auth = auth
session.headers.update(headers)
return session
def get_webdata(webdata_uri, session):
"""Make a request to the WASAPI."""
try:
response = session.get(webdata_uri)
except requests.exceptions.ConnectionError as err:
sys.exit('Could not connect at {}:\n{}'.format(webdata_uri, err))
LOGGER.info('requesting {}'.format(webdata_uri))
if response.status_code == 403:
sys.exit('Verify user/password for {}:\n{} {}'.format(webdata_uri,
response.status_code,
response.reason))
try:
return response.json()
except (JSONDecodeError, ValueError) as err:
sys.exit('Non-JSON response from {}:\n{}'.format(webdata_uri, err))
def get_files_count(webdata_uri, auth=None, headers={}):
"""Return total number of downloadable files."""
session = make_session(auth, headers)
webdata = get_webdata(webdata_uri, session)
session.close()
return webdata.get('count', None)
def get_files_size(page_uri, auth=None, headers={}):
"""Return total size (bytes) of downloadable files."""
session = make_session(auth, headers)
total = 0
count = 0
webdata = None
while page_uri:
webdata = get_webdata(page_uri, session)
for f in webdata['files']:
total += int(f['size'])
page_uri = webdata.get('next', None)
if webdata:
count = webdata.get('count', None)
session.close()
return count, total
def convert_bytes(size):
"""Make a human readable size."""
label = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
try:
i = int(math.floor(math.log(size, 1024)))
except ValueError:
i = 0
p = math.pow(1024, i)
readable_size = round(size/p, 2)
return '{}{}'.format(readable_size, label[i])
class Downloads:
"""Handles cycling through all of our query results.
If download is True, we create a queue of the files that need to be
downloaded. If manifest is True, store the checksums/filenames for
each available hash algorithm.
"""
def __init__(self, page_uri, auth=None, download=True, destination='',
headers={}):
self.page_uri = page_uri
self.auth = auth
self.download = download
if self.download:
self.get_q = multiprocessing.JoinableQueue()
self.checksums = defaultdict(list)
self.urls = []
self.destination = '' if destination == '.' else destination
self.headers = headers
self.populate_downloads()
def populate_downloads(self):
"""Repeat webdata requests to gather downloadable file info."""
session = make_session(self.auth, self.headers)
current_uri = self.page_uri
while current_uri:
webdata = get_webdata(current_uri, session)
for f in webdata['files']:
# Store the first locations URL per file only.
self.urls.append(f['locations'][0])
path = os.path.join(self.destination, f['filename'])
for algorithm, value in f['checksums'].items():
self.checksums[algorithm].append((value, path))
if self.download:
df = DataFile(f['locations'], f['filename'], f['checksums'], f['size'])
self.get_q.put(df)
current_uri = webdata.get('next', None)
session.close()
def generate_manifests(self):
"""Produce manifest files for all hash algorithms."""
for algorithm in self.checksums:
self.write_manifest_file(algorithm)
def write_manifest_file(self, algorithm):
"""Write a manifest file for the provided algorithm."""
if algorithm not in self.checksums:
raise WASAPIManifestError('No values for {}'.format(algorithm))
manifest_path = os.path.join(self.destination,
'manifest-{}.txt'.format(algorithm))
with open(manifest_path, 'w') as manifest_f:
for checksum, path in self.checksums[algorithm]:
manifest_f.write('{} {}\n'.format(checksum, path))
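# Usage sketch (the query string and credentials are placeholders; the base URI
# is the same default used by the argument parser below):
#
#   downloads = Downloads(
#       'https://partner.archive-it.org/wasapi/v1/webdata?collection=1234',
#       auth=('user', 'pass'), download=False)
#   downloads.generate_manifests()  # writes manifest-<algorithm>.txt files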
class DataFile:
"""Representation of a file to be downloaded.
`locations` is a list of URLs
`filename` is the name of the data file
`size` is the size of the file in bytes
`checksums` is a dictionary of hash algorithm/value pairs
`verified` is a Boolean value indicating a successful checksum verification
"""
def __init__(self, locations, filename, checksums, size):
self.locations = locations
self.filename = filename
self.checksums = checksums
self.size = size
self.verified = False
def download_file(data_file, session, output_path):
"""Download webdata file to disk."""
if check_exists(output_path, data_file.size, data_file.checksums):
# Don't download the file if it already exists.
LOGGER.info('{} exists with expected size/checksum'.format(data_file.filename))
data_file.verified = True
return data_file
for location in data_file.locations:
# if location matches a 'pre-signed' url regex pattern,
# skip auth for this location
for rx in PRE_SIGNED_REGEX:
if rx.match(location):
sesh = requests
else:
sesh = session
try:
response = sesh.get(location, stream=True)
except requests.exceptions.RequestException as err:
# This could be a remote disconnect, read timeout, connection timeout,
# temporary name resolution issue...
LOGGER.error('Error downloading {}:\n{}'.format(location, err))
continue
msg = '{}: {} {}'.format(location,
response.status_code,
response.reason)
if response.status_code == 200:
try:
write_file(response, output_path)
except OSError as err:
LOGGER.error('{}: {}'.format(location, str(err)))
break
# Successful download; don't try alternate locations.
LOGGER.info(msg)
return data_file
else:
LOGGER.error(msg)
# We didn't download successfully; raise error.
msg = 'FAILED to download {} from {}'.format(data_file.filename,
data_file.locations)
raise WASAPIDownloadError(msg)
def check_exists(path, size, checksums):
"""Check if file with matching size and checksum exists."""
if not os.path.isfile(path):
return False
if not os.path.getsize(path) == size:
return False
return verify_file(checksums, path)
def write_file(response, output_path=''):
"""Write file to disk."""
with open(output_path, 'wb') as wtf:
for chunk in response.iter_content(1024*4):
wtf.write(chunk)
def verify_file(checksums, file_path):
"""Verify the file checksum is correct.
Takes a dictionary of hash algorithms and the corresponding
expected value for the file_path provided. The first success
or failure determines if the file is valid.
"""
for algorithm, value in checksums.items():
read_limit = READ_LIMIT
hash_function = getattr(hashlib, algorithm, None)
if not hash_function and algorithm == 's3etag':
# if etag does not contain a '-', then its just a regular md5
if '-' not in value:
hash_function = hashlib.md5
# otherwise, its likely a 'double-md5'
# see: https://zihao.me/post/calculating-etag-for-aws-s3-objects/
else:
hash_function = S3DoubleMD5
# expected chunk size for S3 md5 computation
read_limit = 1024 * 1024 * 8
if not hash_function:
# The hash algorithm provided is not supported by hashlib.
LOGGER.debug('{} is unsupported'.format(algorithm))
continue
digest = calculate_sum(hash_function, file_path, read_limit)
if digest == value:
LOGGER.info('Checksum success at: {}'.format(file_path))
return True
else:
LOGGER.error('Checksum {} mismatch for {}: expected {}, got {}'.format(algorithm,
file_path,
value,
digest))
return False
# We didn't find a compatible algorithm.
return False
class S3DoubleMD5:
"""Implements double-md5 computation as suggested by:
https://zihao.me/post/calculating-etag-for-aws-s3-objects/
"""
def __init__(self):
self.md5s = []
def update(self, buff):
self.md5s.append(hashlib.md5(buff))
def hexdigest(self):
if len(self.md5s) == 1:
return self.md5s[0].hexdigest()
digests = b''.join(m.digest() for m in self.md5s)
digests_md5 = hashlib.md5(digests)
return '{}-{}'.format(digests_md5.hexdigest(), len(self.md5s))
def calculate_sum(hash_function, file_path, read_limit=READ_LIMIT):
"""Return the checksum of the given file."""
hasher = hash_function()
with open(file_path, 'rb') as rff:
r = rff.read(read_limit)
while r:
hasher.update(r)
r = rff.read(read_limit)
return hasher.hexdigest()
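# Illustrative sketch (editor's addition): for an 's3etag' checksum whose value
# contains a '-', verify_file() feeds the S3DoubleMD5 class above through
# calculate_sum() with 8 MiB chunks, mirroring how S3 builds multipart ETags:
#     digest = calculate_sum(S3DoubleMD5, '/data/example.warc.gz', 1024 * 1024 * 8)
#     # -> '<md5 of the concatenated per-chunk digests>-<number of chunks>'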
def convert_queue(tuple_q):
"""Convert a queue containing 2-element tuples into a dictionary.
The first element becomes a key. The key's value becomes a list
to which the second tuple element is appended.
"""
ddict = defaultdict(list)
while True:
try:
key, value = tuple_q.get(block=False)
except Empty:
break
ddict[key].append(value)
return ddict
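# Illustrative sketch (editor's addition; filenames are hypothetical):
#     q = multiprocessing.Manager().Queue()
#     q.put(('success', 'a.warc.gz'))
#     q.put(('failure', 'b.warc.gz'))
#     convert_queue(q)  # -> {'success': ['a.warc.gz'], 'failure': ['b.warc.gz']}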
def generate_report(result_q):
"""Create a summary of success/failure downloads."""
results = convert_queue(result_q)
success = len(results.get('success', []))
failure = len(results.get('failure', []))
total = success + failure
summary = ('Total downloads attempted: {}\n'
'Successful downloads: {}\n'
'Failed downloads: {}\n').format(total, success, failure)
if total != failure and failure > 0:
summary += 'Failed files (see log for details):\n'
for filename in results['failure']:
summary += ' {}\n'.format(filename)
return summary
class Downloader(multiprocessing.Process):
"""Worker for downloading web files with a persistent session."""
def __init__(self, get_q, result_q, log_q, log_level=logging.ERROR,
auth=None, destination='.', headers={}, *args, **kwargs):
super(Downloader, self).__init__(*args, **kwargs)
self.get_q = get_q
self.result_q = result_q
self.session = make_session(auth, headers)
self.destination = destination
configure_worker_logging(log_q, log_level)
def run(self):
"""Download files from the queue until there are no more.
Gets a file's data off the queue, attempts to download the
file, and puts the result onto another queue.
"""
while True:
try:
data_file = self.get_q.get(block=False)
except Empty:
break
result = 'failure'
output_path = os.path.join(self.destination, data_file.filename)
try:
data_file = download_file(data_file, self.session, output_path)
except WASAPIDownloadError as err:
LOGGER.error(str(err))
else:
# If we download the file without error, verify the checksum.
if data_file.verified or verify_file(data_file.checksums, output_path):
result = 'success'
self.result_q.put((result, data_file.filename))
self.get_q.task_done()
class SetQueryParametersAction(argparse.Action):
"""Store all of the query parameter argument values in a dict."""
def __call__(self, parser, namespace, values, option_string):
if not hasattr(namespace, 'query_params'):
setattr(namespace, 'query_params', {})
option = option_string.lstrip('-')
namespace.query_params[option] = values
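# Illustrative sketch (editor's addition): with this action, a command line such
# as `--collection 12345 --crawl-time-after 2017-01-01` yields
# namespace.query_params == {'collection': ['12345'], 'crawl-time-after': '2017-01-01'}
# (--collection uses nargs='+', so its value is a list); the dict is later
# urlencoded with doseq=True to build the webdata query string.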
def _parse_args(args=sys.argv[1:]):
"""Parse the commandline arguments."""
description = """
Download WARC files from a WASAPI access point.
Acceptable date/time formats are:
2017-01-01
2017-01-01T12:34:56
2017-01-01 12:34:56
2017-01-01T12:34:56Z
2017-01-01 12:34:56-0700
2017
2017-01"""
try:
# According to multiprocessing docs, this could fail on some platforms.
default_processes = multiprocessing.cpu_count()
except NotImplementedError:
default_processes = 1
parser = argparse.ArgumentParser(description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-b',
'--base-uri',
dest='base_uri',
default='https://partner.archive-it.org/wasapi/v1/webdata',
help='base URI for WASAPI access; default: '
'https://partner.archive-it.org/wasapi/v1/webdata')
parser.add_argument('-d',
'--destination',
default='.',
help='location for storing downloaded files')
parser.add_argument('-l',
'--log',
help='file to which logging should be written')
parser.add_argument('-n',
'--no-manifest',
action='store_true',
dest='skip_manifest',
help='do not generate checksum files (ignored'
' when used in combination with --manifest)')
parser.add_argument('-v',
'--verbose',
action='count',
default=0,
help='log verbosely; -v is INFO, -vv is DEBUG')
auth_group = parser.add_mutually_exclusive_group()
auth_group.add_argument('--profile',
dest='profile',
help='profile to use for API authentication')
auth_group.add_argument('-u',
'--user',
dest='user',
help='username for API authentication')
auth_group.add_argument('-t',
'--token',
dest='token',
help='token for API authentication')
out_group = parser.add_mutually_exclusive_group()
out_group.add_argument('-c',
'--count',
action='store_true',
help='print number of files for download and exit')
out_group.add_argument('-m',
'--manifest',
action='store_true',
help='generate checksum files only and exit')
out_group.add_argument('-p',
'--processes',
type=int,
default=default_processes,
help='number of WARC downloading processes')
out_group.add_argument('-s',
'--size',
action='store_true',
help='print count and total size of files and exit')
out_group.add_argument('-r',
'--urls',
action='store_true',
help='list URLs for downloadable files only and exit')
# Arguments to become part of query parameter string
param_group = parser.add_argument_group('query parameters',
'parameters for webdata request')
param_group.add_argument('--collection',
action=SetQueryParametersAction,
nargs='+',
help='collection identifier')
param_group.add_argument('--filename',
action=SetQueryParametersAction,
help='exact webdata filename to download')
param_group.add_argument('--crawl',
action=SetQueryParametersAction,
help='crawl job identifier')
param_group.add_argument('--crawl-time-after',
action=SetQueryParametersAction,
help='request files created on or after this '
'date/time')
param_group.add_argument('--crawl-time-before',
action=SetQueryParametersAction,
help='request files created before this date/time')
param_group.add_argument('--crawl-start-after',
action=SetQueryParametersAction,
help='request files from crawl jobs starting on '
'or after this date/time')
param_group.add_argument('--crawl-start-before',
action=SetQueryParametersAction,
help='request files from crawl jobs starting '
'before this date/time')
return parser.parse_args(args)
def get_credentials_env():
"""Get API credentials from environment variables."""
env = os.environ.get
auth = (env('WASAPI_USER'), env('WASAPI_PASS'))
if None in auth:
auth = None
else:
LOGGER.debug('Using API credentials from environment variables')
return auth
def get_credentials_config(profile, path=PROFILE_PATH):
"""Get API credentials from a config file."""
config = configparser.ConfigParser()
try:
config.read_file(open(path))
auth = (config.get(profile, 'username'),
config.get(profile, 'password'))
except (OSError,
configparser.NoSectionError,
configparser.NoOptionError) as err:
sys.exit('{}: please create config file to supply API credentials with format:\n\n'
'[{}]\n'
'username = someuser\n'
'password = secretpasswd\n'.format(err, profile))
LOGGER.debug('Using API credentials from {}'.format(path))
return auth
def get_credentials(user=None, profile=None):
"""Determine a username/password combination if one is supplied.
Order of precedence is command line, environment, config file."""
auth = None
if user:
# If there is a username, prompt for a password.
auth = (user, getpass.getpass())
else:
# Check for credentials in environment variables.
auth = get_credentials_env()
if profile and auth is None:
# Check for credentials in a config file.
auth = get_credentials_config(profile)
return auth
def main():
args = _parse_args()
if (not os.access(args.destination, os.W_OK)
and not args.size
and not args.count):
msg = 'Cannot write to destination: {}'.format(args.destination)
sys.exit(msg)
# Start log writing process.
manager = multiprocessing.Manager()
log_q = manager.Queue()
try:
listener = start_listener_logging(log_q, args.log)
except OSError as err:
print('Could not open file for logging:', err)
sys.exit(1)
@atexit.register
def stop_listener_logging():
"""Stop listener when exiting program normally."""
listener.stop()
# Configure a logger for the main process.
try:
log_level = [logging.ERROR, logging.INFO, logging.DEBUG][args.verbose]
except IndexError:
log_level = logging.DEBUG
configure_main_logging(log_q, log_level)
# Generate query string for the webdata request.
try:
query = '?{}'.format(urlencode(args.query_params, safe=':', doseq=True))
except AttributeError:
# Use empty query if user didn't enter any query parameters.
query = ''
webdata_uri = '{}{}'.format(args.base_uri, query)
# Set up authentication.
auth = None
headers = {}
if args.token:
# Set the HTTP Authentication header.
headers['Authorization'] = 'Token {}'.format(args.token)
else:
# Generate authentication tuple for the API calls.
auth = get_credentials(args.user, args.profile)
# If user wants the size, don't download files.
if args.size:
count, size = get_files_size(webdata_uri, auth, headers)
print('Number of Files: ', count)
print('Size of Files: ', convert_bytes(size))
sys.exit()
# If user wants a count, don't download files.
if args.count:
print('Number of Files: ', get_files_count(webdata_uri, auth, headers))
sys.exit()
# Process webdata requests to generate checksum files.
if args.manifest:
downloads = Downloads(webdata_uri, auth, download=False,
destination=args.destination, headers=headers)
downloads.generate_manifests()
sys.exit()
# Print the URLs for files that can be downloaded; don't download them.
if args.urls:
downloads = Downloads(webdata_uri, auth, download=False,
destination=args.destination, headers=headers)
for url in downloads.urls:
print(url)
sys.exit()
# Process webdata requests to fill webdata file queue.
downloads = Downloads(webdata_uri, auth, download=True,
destination=args.destination, headers=headers)
# Write manifest file(s).
if not args.skip_manifest:
downloads.generate_manifests()
# Download with multiple processes.
get_q = downloads.get_q
result_q = manager.Queue()
download_processes = []
try:
num_processes = min(args.processes, get_q.qsize())
except NotImplementedError:
num_processes = args.processes
for _ in range(num_processes):
dp = Downloader(get_q, result_q, log_q, log_level, auth,
args.destination, headers=headers)
dp.start()
download_processes.append(dp)
for dp in download_processes:
dp.join()
get_q.join()
print(generate_report(result_q))
if __name__ == '__main__':
main()
| {
"content_hash": "9a20302b172422121326dab75f7165a0",
"timestamp": "",
"source": "github",
"line_count": 690,
"max_line_length": 93,
"avg_line_length": 36.32753623188406,
"alnum_prop": 0.5755206255485519,
"repo_name": "unt-libraries/py-wasapi-client",
"id": "238e78cc93a96188e3761096bf15868efb9f0135",
"size": "25089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wasapi_client.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "59353"
}
],
"symlink_target": ""
} |
"""
@brief test log(time=1000s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, ExtTestCase
from code_beatrix.art.video import video_save, video_modification, video_replace_audio, video_extract_audio
from code_beatrix.art.video import video_concatenate, audio_concatenate, clean_video
class TestVideoAudioBug(ExtTestCase):
def test_video_audio_bug(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_video_audio_bug")
vid = os.path.join(temp, '..', 'data', 'videxa.mp4')
audio = video_extract_audio(vid)
vid2 = video_modification(vid, speed=2., mirrory=True, mirrorx=True)
audio1 = video_extract_audio(vid2)
audio3 = audio_concatenate([audio1, audio, audio1, audio1])
vid3 = video_concatenate([vid, vid2])
vid4 = video_replace_audio(vid3, audio3)
exp = os.path.join(temp, "courte2x.mp4")
video_save(vid4, exp)
self.assertExists(exp)
clean_video([vid4, vid2, vid3, audio, audio1, audio3])
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "0c89805dcc27e3ef5003bcb77d5f5482",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 107,
"avg_line_length": 36.1764705882353,
"alnum_prop": 0.6447154471544716,
"repo_name": "sdpython/code_beatrix",
"id": "4eb0f36561a3e5aee52d90a259612c188d45e78d",
"size": "1254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_unittests/ut_art/test_video_noenv5_audio_bug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "382"
},
{
"name": "CSS",
"bytes": "13138"
},
{
"name": "CoffeeScript",
"bytes": "2022"
},
{
"name": "HTML",
"bytes": "12537"
},
{
"name": "JavaScript",
"bytes": "3718909"
},
{
"name": "Jupyter Notebook",
"bytes": "11928350"
},
{
"name": "Python",
"bytes": "163509"
},
{
"name": "Shell",
"bytes": "715"
},
{
"name": "Stylus",
"bytes": "16039"
}
],
"symlink_target": ""
} |
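# The strings below are Russian; in English: "The donkey is willing to bear all
# burdens and vexations, yet it is called stubborn by everyone who himself lacks
# endurance and patience." -- attributed to Leonardo Da Vinci -- followed by a
# "Press Enter to exit." prompt.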
print('"Осел готов сносить все тяготы и огорчения. А упрямым его величает всяк, кому самому недостает выдержки и терпения."')
print("Леонардо Да Винчи")
input("Нажмите Enter для выхода.") | {
"content_hash": "4700e70d313b70f5645989a3c9681ca1",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 126,
"avg_line_length": 47.25,
"alnum_prop": 0.7671957671957672,
"repo_name": "Mariaanisimova/pythonintask",
"id": "96e4dcd0ff5796273619b892e836f5cc953302ab",
"size": "316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PMIa/2015/MOLOKANOV_I_A/task_2_21.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GCC Machine Description",
"bytes": "122"
},
{
"name": "Python",
"bytes": "1550014"
}
],
"symlink_target": ""
} |
import argparse
import os
import shutil
import subprocess
import sys
from lib.config import enable_verbose_mode
from lib.util import electron_gyp, execute_stdout, rm_rf
if sys.platform == 'linux2':
# On Linux we use python-dbusmock to create a fake system bus and test
# powerMonitor interaction with org.freedesktop.login1 service. The
# dbus_mock module takes care of setting up the fake server with mock,
# while also setting DBUS_SYSTEM_BUS_ADDRESS environment variable, which
# will be picked up by electron.
try:
import lib.dbus_mock
except ImportError:
# If not available, the powerMonitor tests will be skipped since
# DBUS_SYSTEM_BUS_ADDRESS will not be set
pass
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
PROJECT_NAME = electron_gyp()['project_name%']
PRODUCT_NAME = electron_gyp()['product_name%']
def main():
os.chdir(SOURCE_ROOT)
args = parse_args()
config = args.configuration
if args.verbose:
enable_verbose_mode()
spec_modules = os.path.join(SOURCE_ROOT, 'spec', 'node_modules')
if args.rebuild_native_modules or not os.path.isdir(spec_modules):
rebuild_native_modules(args.verbose, config)
if sys.platform == 'darwin':
electron = os.path.join(SOURCE_ROOT, 'out', config,
'{0}.app'.format(PRODUCT_NAME), 'Contents',
'MacOS', PRODUCT_NAME)
resources_path = os.path.join(SOURCE_ROOT, 'out', config,
'{0}.app'.format(PRODUCT_NAME), 'Contents',
'Resources')
elif sys.platform == 'win32':
electron = os.path.join(SOURCE_ROOT, 'out', config,
'{0}.exe'.format(PROJECT_NAME))
resources_path = os.path.join(SOURCE_ROOT, 'out', config)
else:
electron = os.path.join(SOURCE_ROOT, 'out', config, PROJECT_NAME)
resources_path = os.path.join(SOURCE_ROOT, 'out', config)
returncode = 0
try:
if args.use_instrumented_asar:
install_instrumented_asar_file(resources_path)
subprocess.check_call([electron, 'spec'] + sys.argv[1:])
except subprocess.CalledProcessError as e:
returncode = e.returncode
except KeyboardInterrupt:
returncode = 0
if args.use_instrumented_asar:
restore_uninstrumented_asar_file(resources_path)
if os.environ.has_key('OUTPUT_TO_FILE'):
output_to_file = os.environ['OUTPUT_TO_FILE']
with open(output_to_file, 'r') as f:
print f.read()
rm_rf(output_to_file)
return returncode
def parse_args():
parser = argparse.ArgumentParser(description='Run Electron tests')
parser.add_argument('--use_instrumented_asar',
                      help='Run tests with coverage instrumented asar file',
action='store_true',
required=False)
parser.add_argument('--rebuild_native_modules',
help='Rebuild native modules used by specs',
action='store_true',
required=False)
parser.add_argument('--ci',
help='Run tests in CI mode',
action='store_true',
required=False)
parser.add_argument('-g', '--grep',
help='Only run tests matching <pattern>',
metavar='pattern',
required=False)
parser.add_argument('-i', '--invert',
help='Inverts --grep matches',
action='store_true',
required=False)
parser.add_argument('-v', '--verbose',
action='store_true',
help='Prints the output of the subprocesses')
parser.add_argument('-c', '--configuration',
help='Build configuration to run tests against',
default='D',
required=False)
return parser.parse_args()
def install_instrumented_asar_file(resources_path):
asar_path = os.path.join(resources_path, '{0}.asar'.format(PROJECT_NAME))
uninstrumented_path = os.path.join(resources_path,
'{0}-original.asar'.format(PROJECT_NAME))
instrumented_path = os.path.join(SOURCE_ROOT, 'out', 'coverage',
'{0}.asar'.format(PROJECT_NAME))
shutil.move(asar_path, uninstrumented_path)
shutil.move(instrumented_path, asar_path)
def restore_uninstrumented_asar_file(resources_path):
asar_path = os.path.join(resources_path, '{0}.asar'.format(PROJECT_NAME))
uninstrumented_path = os.path.join(resources_path,
'{0}-original.asar'.format(PROJECT_NAME))
os.remove(asar_path)
shutil.move(uninstrumented_path, asar_path)
def rebuild_native_modules(verbose, configuration):
script_path = os.path.join(SOURCE_ROOT, 'script', 'rebuild-test-modules.py')
args = ['--configuration', configuration]
if verbose:
args += ['--verbose']
execute_stdout([sys.executable, script_path] + args)
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "366f7ec0ab8f4675545268b406304caf",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 79,
"avg_line_length": 36.41428571428571,
"alnum_prop": 0.607885445272656,
"repo_name": "Floato/electron",
"id": "67292997348b8e066e39f154cbbe053315c97e27",
"size": "5121",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "script/test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4499"
},
{
"name": "C++",
"bytes": "2811734"
},
{
"name": "HTML",
"bytes": "17537"
},
{
"name": "JavaScript",
"bytes": "866476"
},
{
"name": "Objective-C",
"bytes": "53527"
},
{
"name": "Objective-C++",
"bytes": "312033"
},
{
"name": "PowerShell",
"bytes": "99"
},
{
"name": "Python",
"bytes": "219510"
},
{
"name": "Shell",
"bytes": "3880"
}
],
"symlink_target": ""
} |
import traceback
from flask import url_for
from dashboards import Dashboard
import itertools
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Output, Input, State
from dash.exceptions import PreventUpdate
from dashboards.dashboard import StyledDash, get_plot_theme
from dashboards.nmr_metabolomics.processing.layouts import get_layout
from dashboards.nmr_metabolomics.processing.model import CollectionProcessingModel
class CollectionProcessingDashboard(Dashboard):
name = 'Fix Spectra'
prefix = '/dashboards/fix_spectra/'
description = 'Collection processing.'
id = 'proc'
def __init__(self):
model = CollectionProcessingModel()
@staticmethod
def _on_label_key_select(label_keys, op='=='):
if not label_keys or None in label_keys:
raise PreventUpdate('Callback triggered without action!')
label_keys = sorted(label_keys)
editor_data = CollectionProcessingModel(True)
unique_values = [editor_data.unique_vals[val] for val in label_keys]
option_pairs = list(itertools.product(*unique_values))
queries = [' & '.join([f'{key}{op}"{value}"' for key, value in zip(label_keys, option_pair)])
for option_pair in option_pairs]
query_labels = [','.join([f'{key}={value}' for key, value in zip(label_keys, option_pair)])
for option_pair in option_pairs]
return [[{'label': query_label, 'value': query} for query_label, query in zip(query_labels, queries)]]
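    # Illustrative sketch (editor's addition; label keys and values are
    # hypothetical): with label_keys=['genotype', 'day'] and unique values
    # {'day': ['1'], 'genotype': ['WT', 'KO']}, this yields one options list
    # whose entries pair labels like 'day=1,genotype=WT' with query values
    # like 'day=="1" & genotype=="WT"'.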
@staticmethod
def _register_callbacks(app):
@app.callback(
[Output('loaded-collections', 'children'),
Output('collections-label', 'children'),
Output('spectrum-index', 'value'),
Output('spectrum-index', 'max'),
Output('region-min', 'min'),
Output('region-min', 'max'),
Output('region-max', 'min'),
Output('region-max', 'max'),
Output('pqn-ref-label', 'options'),
Output('hist-ref-label', 'options')],
[Input('get-collection', 'n_clicks')],
[State('collection-id', 'value'),
State('normalization-apply-button', 'n_clicks'),
State('baseline-apply-button', 'n_clicks'),
State('region-apply-button', 'n_clicks'),
State('finalize-button', 'n_clicks')],
)
def get_collections(n_clicks, value,
normalize_n_clicks,
baseline_n_clicks,
region_n_clicks,
finalize_n_clicks):
CollectionProcessingDashboard.check_clicks(n_clicks)
if not value:
raise PreventUpdate('Nothing to load.')
model = CollectionProcessingModel()
model.get_collections(value)
x_min, x_max = model.x_range
model.normalize_n_clicks = normalize_n_clicks
model.finalize_n_clicks = finalize_n_clicks
model.region_n_clicks = region_n_clicks
model.baseline_n_clicks = baseline_n_clicks
model.processing_log = None
label_data = model.get_label_data()
return (
model.get_collection_badges(),
model.get_collection_load_info(),
0,
model.spectrum_count - 1,
x_min,
x_max,
x_min,
x_max,
label_data,
label_data
)
@app.callback(
[Output('sum-normalization-form', 'style'),
Output('region-normalization-form', 'style'),
Output('hist-normalization-form', 'style'),
Output('pqn-normalization-form', 'style'),
Output('label-normalization-form', 'style')],
[Input('normalization-method', 'value')]
)
def normalize_method_switched(value):
if value == 'sum':
                return {'display': 'block'}, {'display': 'none'}, {'display': 'none'}, {'display': 'none'}, {'display': 'none'}
elif value == 'region':
return {'display': 'none'}, {'display': 'block'}, {'display': 'none'}, {'display': 'none'}, {'display': 'none'}
elif value == 'label':
return {'display': 'none'}, {'display': 'none'}, {'display': 'none'}, {'display': 'none'}, {'display': 'block'}
elif value == 'histogram':
return {'display': 'none'}, {'display': 'none'}, {'display': 'block'}, {'display': 'none'}, {'display': 'none'}
elif value == 'probability_quotient':
return {'display': 'none'}, {'display': 'none'}, {'display': 'none'}, {'display': 'block'}, {'display': 'none'}
else:
return {'display': 'none'}, {'display': 'none'}, {'display': 'none'}, {'display': 'none'}, {'display': 'none'}
@app.callback(
[Output('preview-graph', 'figure'),
Output('preview-graph', 'config'),
Output('processing-log', 'children')],
[Input('spectrum-index', 'value'),
Input('region-min', 'value'),
Input('region-max', 'value'),
Input('range-checklist', 'value')]
)
def update_plot(spectrum_index, region_min, region_max, show_box):
show_box = 'show_range_box' in show_box
spectrum_index = spectrum_index if spectrum_index is not None else 0
model = CollectionProcessingModel(True)
theme = get_plot_theme()
return [model.get_plot(spectrum_index, show_box, region_min, region_max, theme),
{'editable': show_box},
[html.Strong('Processing log: '), f'{model.processing_log}']]
@app.callback(
[Output('preview-graph-wrapper', 'children'),
Output('processing-log-wrapper', 'children')],
[Input('finalize-button', 'n_clicks'),
Input('normalization-apply-button', 'n_clicks'),
Input('baseline-apply-button', 'n_clicks'),
Input('region-apply-button', 'n_clicks')],
[State('normalization-method', 'value'),
State('norm-sum', 'value'),
State('peak-intensity', 'value'),
State('norm-label', 'value'),
State('hist-ref-type', 'value'),
State('hist-ref-query', 'value'),
State('pqn-ref-type', 'value'),
State('pqn-ref-query', 'value'),
State('baseline-method', 'value'),
State('rolling-ball-min-max', 'value'),
State('rolling-ball-smoothing', 'value'),
State('region-method', 'value'),
State('spectrum-index', 'value'),
State('region-min', 'value'),
State('region-max', 'value'),
State('range-checklist', 'value')]
)
def action_button(finalize_n_clicks, normalize_n_clicks, baseline_n_clicks, region_n_clicks,
normalization_method, norm_sum, region_peak_intensity, norm_label,
hist_ref_type, hist_ref_query, pqn_ref_type, pqn_ref_query,
baseline_method, rolling_ball_min_max, rolling_ball_smoothing,
region_method,
spectrum_index, region_min, region_max, show_box):
try:
model = CollectionProcessingModel(True)
if not any([finalize_n_clicks, normalize_n_clicks, baseline_n_clicks, region_n_clicks]):
raise PreventUpdate('Callback triggered without action!')
if normalize_n_clicks and (normalize_n_clicks != model.normalize_n_clicks):
print(f'normalize: ({normalize_n_clicks}, {model.normalize_n_clicks})')
pqn_ref_query = ' | '.join(pqn_ref_query) if pqn_ref_query and len(pqn_ref_query) else None
hist_ref_query = ' | '.join(hist_ref_query) if hist_ref_query and len(hist_ref_query) else None
model.normalize(normalization_method,
norm_sum=norm_sum,
region_peak_intensity=region_peak_intensity,
norm_label=norm_label,
region_min=region_min,
region_max=region_max,
hist_ref_type=hist_ref_type,
hist_ref_query=hist_ref_query,
pqn_ref_type=pqn_ref_type,
pqn_ref_query=pqn_ref_query)
model.normalize_n_clicks = normalize_n_clicks
if baseline_n_clicks and (baseline_n_clicks != model.baseline_n_clicks):
print(f'baseline: ({baseline_n_clicks}, {model.baseline_n_clicks})')
model.correct_baseline(baseline_method,
rolling_ball_min_max=rolling_ball_min_max,
rolling_ball_smoothing=rolling_ball_smoothing)
model.baseline_n_clicks = baseline_n_clicks
if region_n_clicks and (region_n_clicks != model.region_n_clicks):
print(f'region: ({region_n_clicks}, {model.region_n_clicks})')
model.process_region(region_method, region_min, region_max)
model.region_n_clicks = region_n_clicks
if finalize_n_clicks and (finalize_n_clicks != model.finalize_n_clicks):
print(f'finalize: ({finalize_n_clicks}, {model.finalize_n_clicks})')
model.finalize()
model.finalize_n_clicks = finalize_n_clicks
show_box = 'show_range_box' in show_box
theme = get_plot_theme()
figure = model.get_plot(spectrum_index, show_box, region_min, region_max, theme)
except Exception as e:
print(e)
traceback.print_exc()
figure = None
return [dcc.Graph(id='preview-graph', config={'editable': True}, figure=figure),
html.P([html.Strong('Processing log: '), f'{model.processing_log}'], id='processing-log')]
@app.callback(
[Output('region-min', 'value'),
Output('region-max', 'value')],
[Input('preview-graph', 'relayoutData')],
[State('region-min', 'value'),
State('region-max', 'value')]
)
def graph_relayout(relayout_data, region_min, region_max):
print(relayout_data)
if relayout_data is None:
relayout_data = {}
try:
model = CollectionProcessingModel(True)
                if 'xaxis.range[0]' in relayout_data and 'xaxis.range[1]' in relayout_data:
model.x_axis_range = (relayout_data['xaxis.range[0]'], relayout_data['xaxis.range[1]'])
                if 'yaxis.range[0]' in relayout_data and 'yaxis.range[1]' in relayout_data:
model.y_axis_range = (relayout_data['yaxis.range[0]'], relayout_data['yaxis.range[1]'])
                if 'shapes[0].x0' in relayout_data and 'shapes[0].x1' in relayout_data:
x0, x1 = relayout_data['shapes[0].x0'], relayout_data['shapes[0].x1']
return model.nearest_x(x0, x1)
else:
return sorted([region_min, region_max])
except Exception as e:
print(e)
traceback.print_tb(e.__traceback__)
return [0, 0.1]
@app.callback([Output('post-message', 'children')],
[Input('post-button', 'n_clicks')],
[State('name-input', 'value'),
State('analysis-select', 'value')])
def post_collection(n_clicks, name, analysis_ids):
CollectionProcessingDashboard.check_clicks(n_clicks)
try:
editor_data = CollectionProcessingModel(True)
new_collection = editor_data.post_collection(name, analysis_ids)
return [
dbc.Alert(
[
'Posted results as ',
html.A(f'Collection {new_collection.id}.',
href=url_for('collections.render_collection',
collection_id=new_collection.id))
],
dismissable=True, color='success')
]
except Exception as e:
return [dbc.Alert([html.P([html.Strong('Error: '), f'{e}']),
html.Strong('Traceback:'),
html.P(html.Pre(traceback.format_exc(), className='text-white'))],
color='danger', dismissable=True)]
@app.callback([Output('pqn-ref-query', 'options')],
[Input('pqn-ref-label', 'value')])
def get_query_options(value):
return CollectionProcessingDashboard._on_label_key_select(value)
@app.callback([Output('hist-ref-query', 'options')],
[Input('hist-ref-label', 'value')])
        def get_hist_query_options(value):
return CollectionProcessingDashboard._on_label_key_select(value)
@staticmethod
def _register_layout(app):
app.layout = get_layout
@staticmethod
def _register_dash_app(app):
app.title = CollectionProcessingDashboard.name
CollectionProcessingDashboard._register_layout(app)
CollectionProcessingDashboard._register_callbacks(app)
@staticmethod
def create_dash_app(server):
# no users are logged in when this is created so we can't set light/dark theme preference
app = StyledDash(__name__,
server=server,
routes_pathname_prefix=CollectionProcessingDashboard.prefix,
requests_pathname_prefix='/omics' + CollectionProcessingDashboard.prefix)
# noinspection PyTypeChecker
CollectionProcessingDashboard._register_dash_app(app)
return app
| {
"content_hash": "549d8ebab35bd4b8b7c006afa9682058",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 127,
"avg_line_length": 49.727586206896554,
"alnum_prop": 0.5307537618750433,
"repo_name": "BiRG/Omics-Dashboard",
"id": "128a3419e02d1102c35cde092510a22f796bf575",
"size": "14421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "omics/omics_dashboard/dashboards/nmr_metabolomics/processing/dashboard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2625"
},
{
"name": "Common Workflow Language",
"bytes": "11017"
},
{
"name": "Dockerfile",
"bytes": "2078"
},
{
"name": "HTML",
"bytes": "119856"
},
{
"name": "JavaScript",
"bytes": "1716"
},
{
"name": "Python",
"bytes": "686576"
},
{
"name": "Shell",
"bytes": "586"
},
{
"name": "TypeScript",
"bytes": "27794"
}
],
"symlink_target": ""
} |
import subprocess
from ..installers import PackageManagerInstaller
from .source import SOURCE_INSTALLER
ARCH_OS_NAME = 'arch'
PACMAN_INSTALLER = 'pacman'
def register_installers(context):
context.set_installer(PACMAN_INSTALLER, PacmanInstaller())
def register_platforms(context):
context.add_os_installer_key(ARCH_OS_NAME, SOURCE_INSTALLER)
context.add_os_installer_key(ARCH_OS_NAME, PACMAN_INSTALLER)
context.set_default_os_installer_key(ARCH_OS_NAME, lambda self: PACMAN_INSTALLER)
def pacman_detect_single(p):
return not subprocess.call(['pacman', '-T', p], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def pacman_detect(packages):
return [p for p in packages if pacman_detect_single(p)]
class PacmanInstaller(PackageManagerInstaller):
def __init__(self):
super(PacmanInstaller, self).__init__(pacman_detect)
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
if not packages:
return []
command = ['pacman', '-S']
if not interactive:
command.append('--noconfirm')
if not reinstall:
command.append('--needed')
if quiet:
command.append('-q')
return [self.elevate_priv(command + packages)]
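# Illustrative sketch (editor's addition; package names are hypothetical): for
# resolved=['tinyxml', 'eigen3'] on a non-interactive run this returns roughly
#     [['sudo', 'pacman', '-S', '--noconfirm', '--needed', 'tinyxml', 'eigen3']]
# where the exact privilege-escalation prefix comes from elevate_priv().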
| {
"content_hash": "850526d715a04db785fffbd7aab8f335",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 99,
"avg_line_length": 32.357142857142854,
"alnum_prop": 0.6880058866813834,
"repo_name": "spaghetti-/rosdep",
"id": "8c1f6763ba3cf9b01cd6dd2d8d0eff1344b9358e",
"size": "3009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rosdep2/platforms/arch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "516"
},
{
"name": "Python",
"bytes": "415531"
},
{
"name": "Shell",
"bytes": "296"
}
],
"symlink_target": ""
} |
"""A middleware that turns exceptions into parsable string.
Inspired by Cinder's faultwrapper.
"""
import sys
import traceback
from oslo_config import cfg
from oslo_utils import reflection
import six
import webob
from heat.common import exception
from heat.common import serializers
from heat.common import wsgi
class Fault(object):
def __init__(self, error):
self.error = error
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if req.content_type == 'application/xml':
serializer = serializers.XMLResponseSerializer()
else:
serializer = serializers.JSONResponseSerializer()
resp = webob.Response(request=req)
default_webob_exc = webob.exc.HTTPInternalServerError()
resp.status_code = self.error.get('code', default_webob_exc.code)
serializer.default(resp, self.error)
return resp
class FaultWrapper(wsgi.Middleware):
"""Replace error body with something the client can parse."""
error_map = {
'AttributeError': webob.exc.HTTPBadRequest,
'ActionInProgress': webob.exc.HTTPConflict,
'ValueError': webob.exc.HTTPBadRequest,
'EntityNotFound': webob.exc.HTTPNotFound,
'NotFound': webob.exc.HTTPNotFound,
'ResourceActionNotSupported': webob.exc.HTTPBadRequest,
'InvalidGlobalResource': webob.exc.HTTPInternalServerError,
'ResourceNotAvailable': webob.exc.HTTPNotFound,
'PhysicalResourceNameAmbiguity': webob.exc.HTTPBadRequest,
'InvalidTenant': webob.exc.HTTPForbidden,
'Forbidden': webob.exc.HTTPForbidden,
'StackExists': webob.exc.HTTPConflict,
'StackValidationFailed': webob.exc.HTTPBadRequest,
'InvalidSchemaError': webob.exc.HTTPBadRequest,
'InvalidTemplateReference': webob.exc.HTTPBadRequest,
'InvalidTemplateVersion': webob.exc.HTTPBadRequest,
'InvalidTemplateSection': webob.exc.HTTPBadRequest,
'UnknownUserParameter': webob.exc.HTTPBadRequest,
'RevertFailed': webob.exc.HTTPInternalServerError,
'StopActionFailed': webob.exc.HTTPInternalServerError,
'EventSendFailed': webob.exc.HTTPInternalServerError,
'ServerBuildFailed': webob.exc.HTTPInternalServerError,
'NotSupported': webob.exc.HTTPBadRequest,
'MissingCredentialError': webob.exc.HTTPBadRequest,
'UserParameterMissing': webob.exc.HTTPBadRequest,
'RequestLimitExceeded': webob.exc.HTTPBadRequest,
'Invalid': webob.exc.HTTPBadRequest,
'ResourcePropertyConflict': webob.exc.HTTPBadRequest,
'PropertyUnspecifiedError': webob.exc.HTTPBadRequest,
'ObjectFieldInvalid': webob.exc.HTTPBadRequest,
'ReadOnlyFieldError': webob.exc.HTTPBadRequest,
'ObjectActionError': webob.exc.HTTPBadRequest,
'IncompatibleObjectVersion': webob.exc.HTTPBadRequest,
'OrphanedObjectError': webob.exc.HTTPBadRequest,
'UnsupportedObjectError': webob.exc.HTTPBadRequest,
'ResourceTypeUnavailable': webob.exc.HTTPBadRequest,
'InvalidBreakPointHook': webob.exc.HTTPBadRequest,
'ImmutableParameterModified': webob.exc.HTTPBadRequest
}
def _map_exception_to_error(self, class_exception):
if class_exception == Exception:
return webob.exc.HTTPInternalServerError
if class_exception.__name__ not in self.error_map:
return self._map_exception_to_error(class_exception.__base__)
return self.error_map[class_exception.__name__]
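    # Illustrative note (editor's addition): the lookup above walks up the class
    # hierarchy, so an exception class that is not listed in error_map still maps
    # to the entry of its nearest listed ancestor (e.g. a subclass of
    # EntityNotFound maps to webob.exc.HTTPNotFound), and anything else falls
    # back to HTTPInternalServerError once the walk reaches Exception.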
def _error(self, ex):
trace = None
traceback_marker = 'Traceback (most recent call last)'
webob_exc = None
if isinstance(ex, exception.HTTPExceptionDisguise):
# An HTTP exception was disguised so it could make it here
# let's remove the disguise and set the original HTTP exception
if cfg.CONF.debug:
trace = ''.join(traceback.format_tb(ex.tb))
ex = ex.exc
webob_exc = ex
ex_type = reflection.get_class_name(ex, fully_qualified=False)
is_remote = ex_type.endswith('_Remote')
if is_remote:
ex_type = ex_type[:-len('_Remote')]
full_message = six.text_type(ex)
if '\n' in full_message and is_remote:
message, msg_trace = full_message.split('\n', 1)
elif traceback_marker in full_message:
message, msg_trace = full_message.split(traceback_marker, 1)
message = message.rstrip('\n')
msg_trace = traceback_marker + msg_trace
else:
msg_trace = 'None\n'
if sys.exc_info() != (None, None, None):
msg_trace = traceback.format_exc()
message = full_message
if isinstance(ex, exception.HeatException):
message = ex.message
if cfg.CONF.debug and not trace:
trace = msg_trace
if not webob_exc:
webob_exc = self._map_exception_to_error(ex.__class__)
error = {
'code': webob_exc.code,
'title': webob_exc.title,
'explanation': webob_exc.explanation,
'error': {
'message': message,
'type': ex_type,
'traceback': trace,
}
}
return error
def process_request(self, req):
try:
return req.get_response(self.application)
except Exception as exc:
return req.get_response(Fault(self._error(exc)))
| {
"content_hash": "30dfb15fcb7bc473d03985a837b5a81a",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 75,
"avg_line_length": 37.5,
"alnum_prop": 0.6445045045045045,
"repo_name": "cwolferh/heat-scratch",
"id": "8fa839f0d793cc88de7b440553557a34dea8ab47",
"size": "6161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/api/middleware/fault.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8338769"
},
{
"name": "Shell",
"bytes": "56516"
}
],
"symlink_target": ""
} |
import os
import cPickle
# Change this to the directory where the code is stored
os.chdir('D://Stephen/Documents/Code/wormtracker-matlab/python')
import wormtracker as wt
import wormtracker.parallel as wtp
import multiprocessing
"""
This script runs a performance test for the parallel analysis of multiple
regions of a video segment.
Performance results on MUSSORGSKY:
Intel Core i7-2600K @ 3.4 GHz (8 threads)
8 GB RAM
Windows 7
64-bit Anaconda Python 2.7 environment
Processing in parallel took 4.08436319613 min.
Average time per frame was 0.0255060149633 s.
Projected time for 1 hour of video for a single region: 17.0040099756 min.
Projected time for 16 regions: 4.53440266015 h.
"""
# configure libav
# wt.libavPath = C:\\libav\\bin
if __name__ == '__main__':
multiprocessing.freeze_support()
# video settings
# change this to the video folder
videoFile = 'D:\\n2_day7_short_test.avi'
storeFile = 'D:\\test.h5'
pickleFile = 'D:\\N2_a_b_day_7.dat'
# load object
with open(pickleFile, 'rb') as f:
wv = cPickle.load(f)
# only analyze 1 region / core
wv.regions = wv.regions[:8]
# update references
wv.updateVideoFile(videoFile)
wv.storeFile = storeFile
for region in wv.regions:
region.resultsStoreFile = storeFile
# run analysis
import time
tStart = time.clock()
wtp.parallelProcessRegions(wv) # analyzes each region in parallel
tStop = time.clock()
tDuration = tStop - tStart
print 'Processing in parallel took {0} min.'.format(str(tDuration/60))
tPerFrame = tDuration / len(wv.regions) / wv.nFrames
print 'Average time per frame was {0} s.'.format(str(tPerFrame))
tPerRegion = tPerFrame*40000
print ('Projected time for 1 hour of video for a single region: ' +
'{0} min.'.format(str(tPerRegion/60)))
tVideo = tPerRegion*16
print ('Projected time for 16 regions: {0} h.'.format(str(tVideo/60**2)))
| {
"content_hash": "39f35db264f8ad3d949fb625afc79a84",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 74,
"avg_line_length": 29.015873015873016,
"alnum_prop": 0.7407002188183808,
"repo_name": "stephenhelms/WormTracker",
"id": "6116e3d68c90facc4fb91eb059cd0dc3a99bf45b",
"size": "1828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test_script_parallel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Matlab",
"bytes": "61258"
},
{
"name": "Python",
"bytes": "205362"
}
],
"symlink_target": ""
} |
import json
def flat_to_tree_dict_helper(dictionary, split_symbol='.'):
"""
Gets a flat dictionary in the following format:
{"name.first_name": 'Martin', "name.last_name": "Rusev"}
and converts it to:
{"name": {"first_name": "Martin", "last_name": "Rusev" }}
    It joins deeper dictionary keys with underscores, so:
    {"name.first.name": 'Martin', "name.last.name": "Rusev"}
    becomes:
    {"name": {"first_name": "Martin", "last_name": "Rusev"}}
    """
filtered_dict = {}
for key, value in dictionary.items():
key_parts = key.split(split_symbol)
total_elements = len(key_parts)
if total_elements > 0:
first_element = key_parts[0]
subdocument_key = None
if len(key_parts[1:]) > 0:
subdocument_key = "_".join(key_parts[1:]) # Strip dots
key_exists = filtered_dict.get(first_element)
if key_exists == None:
filtered_dict[first_element] = {}
if type(value) not in [int, float]:
value = value.replace(',','.')
try:
value = float(value)
except ValueError:
try:
value = int(value)
except:
value = value
if subdocument_key == None:
filtered_dict[first_element] = value
else:
try:
filtered_dict[first_element][subdocument_key] = value
except:
pass
return filtered_dict
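# Illustrative sketch (editor's addition):
#     flat_to_tree_dict_helper({'cpu.user': '1,5', 'cpu.system': '2'})
#     # -> {'cpu': {'user': 1.5, 'system': 2.0}}
# String values have ',' replaced with '.' and are cast to numbers when possible.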
def remove_dots(dictionary):
dict_without_underscore = {}
for key, value in dictionary.items():
new_key = key.replace("_",".")
dict_without_underscore[new_key] = value
return dict_without_underscore
def replace_underscore_with_dot_helper(dictionary):
new_dict = json.loads(json.dumps(dictionary), object_hook=remove_dots)
return new_dict
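# Illustrative sketch (editor's addition):
#     replace_underscore_with_dot_helper({'cpu_user': 1})  # -> {'cpu.user': 1}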
| {
"content_hash": "c9374194ce79ff00453cdb2de40544b5",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 74,
"avg_line_length": 26.213333333333335,
"alnum_prop": 0.5213631739572736,
"repo_name": "martinrusev/amonone",
"id": "1cfe5e59b84d2c98d74f5e2a0e11325578e3b135",
"size": "1966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amon/apps/plugins/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77950"
},
{
"name": "JavaScript",
"bytes": "28811"
},
{
"name": "Python",
"bytes": "180983"
},
{
"name": "Ruby",
"bytes": "131"
},
{
"name": "Shell",
"bytes": "5652"
}
],
"symlink_target": ""
} |
import os
import Tkinter
def _load_tkdnd(master):
tkdndlib = os.environ.get('TKDND_LIBRARY')
if tkdndlib:
master.tk.eval('global auto_path; lappend auto_path {%s}' % tkdndlib)
master.tk.eval('package require tkdnd')
master._tkdnd_loaded = True
class TkDND(object):
def __init__(self, master):
if not getattr(master, '_tkdnd_loaded', False):
_load_tkdnd(master)
self.master = master
self.tk = master.tk
# Available pre-defined values for the 'dndtype' parameter:
# text/plain
# text/plain;charset=UTF-8
# text/uri-list
def bindtarget(self, window, callback, dndtype, event='<Drop>', priority=50):
cmd = self._prepare_tkdnd_func(callback)
return self.tk.call('dnd', 'bindtarget', window, dndtype, event,
cmd, priority)
def bindtarget_query(self, window, dndtype=None, event='<Drop>'):
return self.tk.call('dnd', 'bindtarget', window, dndtype, event)
def cleartarget(self, window):
self.tk.call('dnd', 'cleartarget', window)
def bindsource(self, window, callback, dndtype, priority=50):
cmd = self._prepare_tkdnd_func(callback)
self.tk.call('dnd', 'bindsource', window, dndtype, cmd, priority)
def bindsource_query(self, window, dndtype=None):
return self.tk.call('dnd', 'bindsource', window, dndtype)
def clearsource(self, window):
self.tk.call('dnd', 'clearsource', window)
def drag(self, window, actions=None, descriptions=None,
cursorwin=None, callback=None):
cmd = None
if cursorwin is not None:
if callback is not None:
cmd = self._prepare_tkdnd_func(callback)
self.tk.call('dnd', 'drag', window, actions, descriptions,
cursorwin, cmd)
_subst_format = ('%A', '%a', '%b', '%D', '%d', '%m', '%T',
'%W', '%X', '%Y', '%x', '%y')
_subst_format_str = " ".join(_subst_format)
def _prepare_tkdnd_func(self, callback):
funcid = self.master.register(callback, self._dndsubstitute)
cmd = ('%s %s' % (funcid, self._subst_format_str))
return cmd
def _dndsubstitute(self, *args):
if len(args) != len(self._subst_format):
return args
def try_int(x):
x = str(x)
try:
return int(x)
except ValueError:
return x
A, a, b, D, d, m, T, W, X, Y, x, y = args
event = Tkinter.Event()
event.action = A # Current action of the drag and drop operation.
event.action_list = a # Action list supported by the drag source.
event.mouse_button = b # Mouse button pressed during the drag and drop.
event.data = D # The data that has been dropped.
event.descr = d # The list of descriptions.
event.modifier = m # The list of modifier keyboard keys pressed.
event.dndtype = T
event.widget = self.master.nametowidget(W)
event.x_root = X # Mouse pointer x coord, relative to the root win.
event.y_root = Y
event.x = x # Mouse pointer x coord, relative to the widget.
event.y = y
event.action_list = str(event.action_list).split()
for name in ('mouse_button', 'x', 'y', 'x_root', 'y_root'):
setattr(event, name, try_int(getattr(event, name)))
return (event, )
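# Illustrative usage sketch for the class above (editor's addition; the widget
# layout is hypothetical and requires the tkdnd Tcl package to be installed):
#     root = Tkinter.Tk()
#     dnd = TkDND(root)
#     entry = Tkinter.Entry(root)
#     entry.pack()
#     def drop(event):
#         entry.insert('end', event.data)
#     dnd.bindtarget(entry, drop, 'text/uri-list')
#     root.mainloop()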
## The second tkdnd implementation.
class DnD:
def __init__(self, tkroot):
self._tkroot = tkroot
tkroot.tk.eval('package require tkdnd')
# make self an attribute of the parent window for easy access in child classes
tkroot.dnd = self
def bindsource(self, widget, type=None, command=None, arguments=None, priority=None):
'''Register widget as drag source; for details on type, command and arguments, see bindtarget().
priority can be a value between 1 and 100, where 100 is the highest available priority (default: 50).
If command is omitted, return the current binding for type; if both type and command are omitted,
return a list of registered types for widget.'''
command = self._generate_callback(command, arguments)
tkcmd = self._generate_tkcommand('bindsource', widget, type, command, priority)
res = self._tkroot.tk.eval(tkcmd)
if type == None:
res = res.split()
return res
def bindtarget(self, widget, type=None, sequence=None, command=None, arguments=None, priority=None):
'''Register widget as drop target; type may be one of text/plain, text/uri-list, text/plain;charset=UTF-8
(see the man page tkDND for details on other (platform specific) types);
sequence may be one of '<Drag>', '<DragEnter>', '<DragLeave>', '<Drop>' or '<Ask>' ;
command is the callback associated with the specified event, argument is an optional tuple of arguments
that will be passed to the callback; possible arguments include: %A %a %b %C %c %D %d %L %m %T %t %W %X %x %Y %y
(see the tkDND man page for details); priority may be a value in the range 1 to 100 ; if there are
bindings for different types, the one with the priority value will be proceeded first (default: 50).
If command is omitted, return the current binding for type, where sequence defaults to '<Drop>'.
If both type and command are omitted, return a list of registered types for widget.'''
command = self._generate_callback(command, arguments)
tkcmd = self._generate_tkcommand('bindtarget', widget, type, sequence, command, priority)
res = self._tkroot.tk.eval(tkcmd)
if type == None:
res = res.split()
return res
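    # Illustrative sketch (editor's addition; widget and callback names are
    # hypothetical):
    #     root.dnd.bindtarget(listbox, 'text/uri-list', '<Drop>',
    #                         on_drop, ('%D', '%T'))
    # registers listbox as a drop target; on_drop is then called with the
    # dropped data (%D) and its type (%T) as positional arguments.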
def clearsource(self, widget):
'''Unregister widget as drag source.'''
self._tkroot.tk.call('dnd', 'clearsource', widget)
def cleartarget(self, widget):
'''Unregister widget as drop target.'''
self._tkroot.tk.call('dnd', 'cleartarget', widget)
def drag(self, widget, actions=None, descriptions=None, cursorwindow=None, command=None, arguments=None):
'''Initiate a drag operation with source widget.'''
command = self._generate_callback(command, arguments)
if actions:
if actions[1:]:
actions = '-actions {%s}' % ' '.join(actions)
else:
actions = '-actions %s' % actions[0]
if descriptions:
descriptions = ['{%s}'%i for i in descriptions]
descriptions = '{%s}' % ' '.join(descriptions)
if cursorwindow:
cursorwindow = '-cursorwindow %s' % cursorwindow
tkcmd = self._generate_tkcommand('drag', widget, actions, descriptions, cursorwindow, command)
self._tkroot.tk.eval(tkcmd)
def _generate_callback(self, command, arguments):
'''Register command as tk callback with an optional list of arguments.'''
cmd = None
if command:
cmd = self._tkroot._register(command)
if arguments:
cmd = '{%s %s}' % (cmd, ' '.join(arguments))
return cmd
def _generate_tkcommand(self, base, widget, *opts):
'''Create the command string that will be passed to tk.'''
tkcmd = 'dnd %s %s' % (base, widget)
for i in opts:
if i is not None:
tkcmd += ' %s' % i
return tkcmd
| {
"content_hash": "4dd741ad51ff39fae9901dff90708cc2",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 120,
"avg_line_length": 42.69832402234637,
"alnum_prop": 0.5889048802826116,
"repo_name": "iiduane/veffectsys",
"id": "29e26689ac18aaab5c2e154507e7174c45c2ec27",
"size": "7712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vtkdnd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8201"
},
{
"name": "Python",
"bytes": "32426"
},
{
"name": "Tcl",
"bytes": "86440"
}
],
"symlink_target": ""
} |
import itertools
import os
import os.path
import sys
import argparse
import logging
def cartesian_product(dicts):
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
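# Illustrative sketch (editor's addition): cartesian_product enumerates every
# hyperparameter combination, e.g.
#     list(cartesian_product({'margin': [1, 2], 'embedding_size': [20]}))
#     # -> [{'margin': 1, 'embedding_size': 20}, {'margin': 2, 'embedding_size': 20}]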
def summary(configuration):
kvs = sorted([(k, v) for k, v in configuration.items()], key=lambda e: e[0])
return '_'.join([('%s=%s' % (k, v)) for (k, v) in kvs])
def to_cmd(c, _path=None):
if _path is None:
_path = '/home/ucl/eisuc296/workspace/inferbeddings/'
command = 'python3 {}/bin/kbp-cli.py' \
' --train {}/data/guo-emnlp16/wn18/wn18.triples.train' \
' --valid {}/data/guo-emnlp16/wn18/wn18.triples.valid' \
' --test {}/data/guo-emnlp16/wn18/wn18.triples.test' \
' --clauses {}/data/guo-emnlp16/wn18/clauses/wn18-clauses.pl' \
' --nb-epochs {}' \
' --lr 0.1' \
' --nb-batches 10' \
' --model {}' \
' --similarity {}' \
' --margin {}' \
' --embedding-size {}' \
' --head-subsample-size {}' \
' --loss {}' \
' --adv-lr {} --adv-init-ground --adversary-epochs {}' \
' --discriminator-epochs {} --adv-weight {} --adv-batch-size {} --adv-pooling {}' \
''.format(_path, _path, _path, _path, _path,
c['epochs'],
c['model'], c['similarity'],
c['margin'], c['embedding_size'],
c['subsample_size'],
c['loss'],
c['adv_lr'], c['adv_epochs'],
c['disc_epochs'], c['adv_weight'], c['adv_batch_size'], c['adv_pooling'])
return command
def to_logfile(c, path):
outfile = "%s/ucl_guo-wn18_adv_xshot-head_v1.%s.log" % (path, summary(c))
return outfile
def main(argv):
def formatter(prog):
return argparse.HelpFormatter(prog, max_help_position=100, width=200)
argparser = argparse.ArgumentParser('Generating experiments for the EMERALD cluster', formatter_class=formatter)
argparser.add_argument('--debug', '-D', action='store_true', help='Debug flag')
argparser.add_argument('--path', '-p', action='store', type=str, default=None, help='Path')
args = argparser.parse_args(argv)
hyperparameters_space_distmult_complex = dict(
epochs=[100],
model=['DistMult', 'ComplEx'],
similarity=['dot'],
margin=[1],
embedding_size=[20, 50, 100, 150, 200],
loss=['hinge'],
subsample_size=[1],
adv_lr=[.1],
adv_epochs=[0, 10],
disc_epochs=[10],
adv_weight=[0, 1, 100, 10000, 1000000],
adv_batch_size=[1, 10, 100],
# adv_pooling=['mean', 'max']
adv_pooling=['sum', 'mean', 'max', 'logsumexp']
)
configurations_distmult_complex = cartesian_product(hyperparameters_space_distmult_complex)
path = '/home/ucl/eisuc296/workspace/inferbeddings/logs/ucl_guo-wn18_adv_xshot-head_v1/'
# Check that we are on the UCLCS cluster first
if os.path.exists('/home/ucl/eisuc296/'):
# If the folder that will contain logs does not exist, create it
if not os.path.exists(path):
os.makedirs(path)
configurations = list(configurations_distmult_complex)
command_lines = set()
for cfg in configurations:
logfile = to_logfile(cfg, path)
completed = False
if os.path.isfile(logfile):
with open(logfile, 'r', encoding='utf-8', errors='ignore') as f:
content = f.read()
completed = '### MICRO (test filtered)' in content
if not completed:
command_line = '{} >> {} 2>&1'.format(to_cmd(cfg, _path=args.path), logfile)
command_lines |= {command_line}
# Sort command lines and remove duplicates
sorted_command_lines = sorted(command_lines)
nb_jobs = len(sorted_command_lines)
header = """#BSUB -o /dev/null
#BSUB -e /dev/null
#BSUB -J "myarray[1-""" + str(nb_jobs) + """]"
#BSUB -W 4:00
alias python3="LD_LIBRARY_PATH='${HOME}/utils/libc6_2.17/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}' '${HOME}/utils/libc6_2.17/lib/x86_64-linux-gnu/ld-2.17.so' $(command -v python3)"
export CUDA_VISIBLE_DEVICES=`~/bin/lugpu.sh`
"""
print(header)
for job_id, command_line in enumerate(sorted_command_lines, 1):
print('test $LSB_JOBINDEX -eq {} && {}'.format(job_id, command_line))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main(sys.argv[1:])
| {
"content_hash": "7aae9698785ea224dbdd7bc11ad4d024",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 179,
"avg_line_length": 35.246153846153845,
"alnum_prop": 0.5608904408555216,
"repo_name": "uclmr/inferbeddings",
"id": "848e7d5891ee22b69d0e91bc2c38460036358ed6",
"size": "4630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/guo-wn18/EMERALD_ARRAY_GUO-WN18_adv_xshot-head_v1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "102860"
},
{
"name": "Prolog",
"bytes": "569340"
},
{
"name": "Python",
"bytes": "1319760"
},
{
"name": "R",
"bytes": "769"
},
{
"name": "Shell",
"bytes": "22689"
}
],
"symlink_target": ""
} |
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
class BTPStatementBreak272AllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"space_after_break": (btp_space10.BTPSpace10,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"space_after_break": "spaceAfterBreak", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btp_statement_break272_all_of.BTPStatementBreak272AllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
space_after_break (btp_space10.BTPSpace10): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
| {
"content_hash": "60b0fbed82daeee67dafc669fefc74b6",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 95,
"avg_line_length": 33.241610738255034,
"alnum_prop": 0.5838885523924894,
"repo_name": "onshape-public/onshape-clients",
"id": "182240d153ee6df84fddb93e5e4963bb98f8ca73",
"size": "4970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/onshape_client/oas/models/btp_statement_break272_all_of.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Go",
"bytes": "59674"
},
{
"name": "HTML",
"bytes": "3851790"
},
{
"name": "JavaScript",
"bytes": "2217"
},
{
"name": "Makefile",
"bytes": "559"
},
{
"name": "Python",
"bytes": "7560009"
},
{
"name": "Shell",
"bytes": "3475"
},
{
"name": "TypeScript",
"bytes": "1412661"
}
],
"symlink_target": ""
} |
"""
Data sources classes and their associated functions for mlab.
"""
# Author: Gael Varoquaux <[email protected]>
# Prabhu Ramachandran
# Copyright (c) 2007-2010, Enthought, Inc.
# License: BSD Style.
import numpy as np
from traits.api import Bool, HasTraits, Instance, on_trait_change
from tvtk.api import tvtk
from tvtk.common import camel2enthought
from mayavi.sources.array_source import ArraySource
from mayavi.core.registry import registry
from mayavi.core.trait_defs import ArrayNumberOrNone, ArrayOrNone
from . import tools
from .engine_manager import get_null_engine, engine_manager
__all__ = ['vector_scatter', 'vector_field', 'scalar_scatter',
'scalar_field', 'line_source', 'array2d_source', 'grid_source',
'open', 'triangular_mesh_source', 'vertical_vectors_source',
]
###############################################################################
# `MlabSource` class.
###############################################################################
class MlabSource(HasTraits):
"""
This class represents the base class for all mlab sources. These
classes allow a user to easily update the data without having to
recreate the whole pipeline.
"""
# The TVTK dataset we manage.
dataset = Instance(tvtk.DataSet)
# The Mayavi data source we manage.
m_data = Instance(HasTraits)
########################################
# Private traits.
# Disable the update when data is changed.
_disable_update = Bool(False)
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Function to create the data from input arrays etc.
This is to be used when the size of the arrays change or the
first time when the data is created. This regenerates the data
structures and will be slower in general.
"""
raise NotImplementedError()
def update(self):
"""Update the visualization.
This is to be called after the data of the visualization has
changed.
"""
if not self._disable_update:
self.dataset.modified()
md = self.m_data
if md is not None:
if hasattr(md, '_assign_attribute'):
md._assign_attribute.update()
md.data_changed = True
def set(self, trait_change_notify=True, **traits):
"""Shortcut for setting object trait attributes.
This is an overridden method that will make changing multiple
traits easier. This method is to be called when the arrays have
        changed content but not in shape/size. If the shape or size has
        changed, one must call the `reset` method instead.
Parameters
----------
trait_change_notify : Boolean
If **True** (the default), then each value assigned may generate a
trait change notification. If **False**, then no trait change
notifications will be generated. (see also: trait_setq)
traits : list of key/value pairs
Trait attributes and their values to be set
Returns
-------
self
The method returns this object, after setting attributes.
"""
try:
self._disable_update = True
super(MlabSource, self).set(trait_change_notify, **traits)
finally:
self._disable_update = False
if trait_change_notify:
self.update()
return self
######################################################################
# Non-public interface.
######################################################################
def _m_data_changed(self, ds):
if not hasattr(ds, 'mlab_source'):
ds.add_trait('mlab_source', Instance(MlabSource))
ds.mlab_source = self
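# Illustrative sketch, not part of the original module: how a user typically
# reaches the MlabSource attached to an mlab object and exercises the
# ``set``/``reset`` contract described above.  ``mlab.points3d`` and the
# ``mlab_source`` attribute are the standard mlab entry points; array sizes
# and values below are arbitrary.
def _example_mlab_source_usage():
    from mayavi import mlab
    x, y, z = np.random.random((3, 20))
    glyphs = mlab.points3d(x, y, z)
    # Same-shape update: only the array contents change, so set() is enough.
    glyphs.mlab_source.set(x=x + 0.1, y=y - 0.1)
    # Size change: the underlying dataset must be rebuilt, so use reset().
    x2, y2, z2 = np.random.random((3, 40))
    glyphs.mlab_source.reset(x=x2, y=y2, z=z2)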
###############################################################################
# `MGlyphSource` class.
###############################################################################
class MGlyphSource(MlabSource):
"""
This class represents a glyph data source for Mlab objects and
allows the user to set the x, y, z, scalar/vector attributes.
"""
# The x, y, z and points of the glyphs.
x = ArrayNumberOrNone
y = ArrayNumberOrNone
z = ArrayNumberOrNone
points = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayNumberOrNone
# The u, v, w components of the vector and the vectors.
u = ArrayNumberOrNone
v = ArrayNumberOrNone
w = ArrayNumberOrNone
vectors = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First convert numbers to arrays.
for name in ('x', 'y', 'z', 'u', 'v', 'w', 'scalars'):
if name in traits and traits[name] is not None:
traits[name] = np.atleast_1d(traits[name])
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
vectors = self.vectors
scalars = self.scalars
points = self.points
x, y, z = self.x, self.y, self.z
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
if 'points' in traits:
x = points[:, 0].ravel()
y = points[:, 1].ravel()
z = points[:, 2].ravel()
self.set(x=x, y=y, z=z, trait_change_notify=False)
else:
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
            points.shape = (points.size // 3, 3)
self.set(points=points, trait_change_notify=False)
u, v, w = self.u, self.v, self.w
if u is not None:
u = np.atleast_1d(u)
v = np.atleast_1d(v)
w = np.atleast_1d(w)
if len(u) > 0:
vectors = np.c_[u.ravel(), v.ravel(),
w.ravel()].ravel()
                vectors.shape = (vectors.size // 3, 3)
self.set(vectors=vectors, trait_change_notify=False)
if 'vectors' in traits:
u = vectors[:, 0].ravel()
v = vectors[:, 1].ravel()
w = vectors[:, 2].ravel()
self.set(u=u, v=v, w=w, trait_change_notify=False)
else:
if u is not None and len(u) > 0:
vectors = np.c_[u.ravel(), v.ravel(),
w.ravel()].ravel()
                vectors.shape = (vectors.size // 3, 3)
self.set(vectors=vectors, trait_change_notify=False)
if vectors is not None and len(vectors) > 0:
assert len(points) == len(vectors)
if scalars is not None:
scalars = np.atleast_1d(scalars)
if len(scalars) > 0:
assert len(points) == len(scalars.ravel())
# Create the dataset.
polys = np.arange(0, len(points), 1, 'l')
polys = np.reshape(polys, (len(points), 1))
if self.dataset is None:
# Create new dataset if none exists
pd = tvtk.PolyData()
else:
# Modify existing one.
pd = self.dataset
pd.set(points=points, polys=polys)
if self.vectors is not None:
pd.point_data.vectors = self.vectors
pd.point_data.vectors.name = 'vectors'
if self.scalars is not None:
pd.point_data.scalars = self.scalars.ravel()
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
x = np.atleast_1d(x)
self.points[:, 0] = x.ravel()
self.update()
def _y_changed(self, y):
y = np.atleast_1d(y)
self.points[:, 1] = y.ravel()
self.update()
def _z_changed(self, z):
z = np.atleast_1d(z)
self.points[:, 2] = z.ravel()
self.update()
def _u_changed(self, u):
u = np.atleast_1d(u)
self.vectors[:, 0] = u.ravel()
self.update()
def _v_changed(self, v):
v = np.atleast_1d(v)
self.vectors[:, 1] = v.ravel()
self.update()
def _w_changed(self, w):
w = np.atleast_1d(w)
self.vectors[:, 2] = w.ravel()
self.update()
def _points_changed(self, p):
p = np.atleast_2d(p)
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
if s is None:
self.dataset.point_data.scalars = None
self.dataset.point_data.remove_array('scalars')
else:
s = np.atleast_1d(s)
self.dataset.point_data.scalars = s.ravel()
self.dataset.point_data.scalars.name = 'scalars'
self.update()
def _vectors_changed(self, v):
self.dataset.point_data.vectors = v
self.dataset.point_data.vectors.name = 'vectors'
self.update()
###############################################################################
# `MVerticalGlyphSource` class.
###############################################################################
class MVerticalGlyphSource(MGlyphSource):
"""
This class represents a vertical glyph data source for Mlab objects
and allows the user to set the x, y, z, scalar attributes. The
vectors are created from the scalars to represent them in the
vertical direction.
"""
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
if 'scalars' in traits:
s = traits['scalars']
if s is not None:
                traits['u'] = traits['v'] = np.ones_like(s)
traits['w'] = s
super(MVerticalGlyphSource, self).reset(**traits)
def _scalars_changed(self, s):
self.dataset.point_data.scalars = s
self.dataset.point_data.scalars.name = 'scalars'
self.set(vectors=np.c_[np.ones_like(s),
np.ones_like(s),
s])
self.update()
###############################################################################
# `MArraySource` class.
###############################################################################
class MArraySource(MlabSource):
"""
This class represents an array data source for Mlab objects and
allows the user to set the x, y, z, scalar/vector attributes.
"""
# The x, y, z arrays for the volume.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
# The u, v, w components of the vector and the vectors.
u = ArrayOrNone
v = ArrayOrNone
w = ArrayOrNone
vectors = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
vectors = self.vectors
scalars = self.scalars
x, y, z = [np.atleast_3d(a) for a in (self.x, self.y, self.z)]
u, v, w = self.u, self.v, self.w
if 'vectors' in traits:
u = vectors[:, 0].ravel()
v = vectors[:, 1].ravel()
w = vectors[:, 2].ravel()
self.set(u=u, v=v, w=w, trait_change_notify=False)
else:
if u is not None and len(u) > 0:
#vectors = np.concatenate([u[..., np.newaxis],
# v[..., np.newaxis],
# w[..., np.newaxis] ],
# axis=3)
vectors = np.c_[u.ravel(), v.ravel(),
w.ravel()].ravel()
vectors.shape = (u.shape[0], u.shape[1], w.shape[2], 3)
self.set(vectors=vectors, trait_change_notify=False)
if vectors is not None and len(vectors) > 0 and scalars is not None:
assert len(scalars) == len(vectors)
if x.shape[0] <= 1:
dx = 1
else:
dx = x[1, 0, 0] - x[0, 0, 0]
if y.shape[1] <= 1:
dy = 1
else:
dy = y[0, 1, 0] - y[0, 0, 0]
if z.shape[2] <= 1:
dz = 1
else:
dz = z[0, 0, 1] - z[0, 0, 0]
if self.m_data is None:
ds = ArraySource(transpose_input_array=True)
else:
ds = self.m_data
old_scalar = ds.scalar_data
ds.set(vector_data=vectors,
origin=[x.min(), y.min(), z.min()],
spacing=[dx, dy, dz],
scalar_data=scalars)
if scalars is old_scalar:
ds._scalar_data_changed(scalars)
self.dataset = ds.image_data
self.m_data = ds
######################################################################
# Non-public interface.
######################################################################
@on_trait_change('[x, y, z]')
def _xyz_changed(self):
x, y, z = self.x, self.y, self.z
dx = x[1, 0, 0] - x[0, 0, 0]
dy = y[0, 1, 0] - y[0, 0, 0]
dz = z[0, 0, 1] - z[0, 0, 0]
ds = self.dataset
ds.origin = [x.min(), y.min(), z.min()]
ds.spacing = [dx, dy, dz]
if self.m_data is not None:
self.m_data.set(origin=ds.origin, spacing=ds.spacing)
self.update()
def _u_changed(self, u):
self.vectors[..., 0] = u
self.m_data._vector_data_changed(self.vectors)
def _v_changed(self, v):
self.vectors[..., 1] = v
self.m_data._vector_data_changed(self.vectors)
def _w_changed(self, w):
self.vectors[..., 2] = w
self.m_data._vector_data_changed(self.vectors)
def _scalars_changed(self, s):
old = self.m_data.scalar_data
self.m_data.scalar_data = s
if old is s:
self.m_data._scalar_data_changed(s)
def _vectors_changed(self, v):
self.m_data.vector_data = v
###############################################################################
# `MLineSource` class.
###############################################################################
class MLineSource(MlabSource):
"""
This class represents a line data source for Mlab objects and
allows the user to set the x, y, z, scalar attributes.
"""
# The x, y, z and points of the glyphs.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
points = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
points = self.points
scalars = self.scalars
x, y, z = self.x, self.y, self.z
if 'points' in traits:
x = points[:, 0].ravel()
y = points[:, 1].ravel()
z = points[:, 2].ravel()
self.set(x=x, y=y, z=z, trait_change_notify=False)
else:
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
points.shape = (len(x), 3)
self.set(points=points, trait_change_notify=False)
# Create the dataset.
n_pts = len(points) - 1
lines = np.zeros((n_pts, 2), 'l')
lines[:, 0] = np.arange(0, n_pts - 0.5, 1, 'l')
lines[:, 1] = np.arange(1, n_pts + 0.5, 1, 'l')
if self.dataset is None:
pd = tvtk.PolyData()
else:
pd = self.dataset
        # Avoid lines referring to non-existing points: first set the
        # lines to None, then set the points, then set the lines
        # referring to the new points.
pd.set(lines=None)
pd.set(points=points)
pd.set(lines=lines)
if scalars is not None and len(scalars) > 0:
assert len(x) == len(scalars)
pd.point_data.scalars = np.ravel(scalars)
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
self.points[:, 0] = x
self.update()
def _y_changed(self, y):
self.points[:, 1] = y
self.update()
def _z_changed(self, z):
self.points[:, 2] = z
self.update()
def _points_changed(self, p):
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
self.dataset.point_data.scalars = s.ravel()
self.dataset.point_data.scalars.name = 'scalars'
self.update()
###############################################################################
# `MArray2DSource` class.
###############################################################################
class MArray2DSource(MlabSource):
"""
This class represents a 2D array data source for Mlab objects and
allows the user to set the x, y and scalar attributes.
"""
# The x, y values.
    # Values of None for x and y are accepted; in that case x and y are
    # built automatically from the shape of scalars.
x = ArrayOrNone
y = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
# The masking array.
mask = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
x, y, mask = self.x, self.y, self.mask
scalars = self.scalars
# We may have used this without specifying x and y at all in
# which case we set them from the shape of scalars.
nx, ny = scalars.shape
        # Build x and y from the shape of scalars if they are None.
if x is None and y is None:
x, y = np.mgrid[-nx / 2.:nx / 2, -ny / 2.:ny / 2]
if mask is not None and len(mask) > 0:
scalars[mask.astype('bool')] = np.nan
# The NaN trick only works with floats.
scalars = scalars.astype('float')
self.set(scalars=scalars, trait_change_notify=False)
z = np.array([0])
self.set(x=x, y=y, z=z, trait_change_notify=False)
# Do some magic to extract the first row/column, independently of
# the shape of x and y
x = np.atleast_2d(x.squeeze().T)[0, :].squeeze()
y = np.atleast_2d(y.squeeze())[0, :].squeeze()
if x.ndim == 0:
dx = 1
else:
dx = x[1] - x[0]
if y.ndim == 0:
dy = 1
else:
dy = y[1] - y[0]
if self.m_data is None:
ds = ArraySource(transpose_input_array=True)
else:
ds = self.m_data
old_scalar = ds.scalar_data
ds.set(origin=[x.min(), y.min(), 0],
spacing=[dx, dy, 1],
scalar_data=scalars)
if old_scalar is scalars:
ds._scalar_data_changed(scalars)
self.dataset = ds.outputs[0]
self.m_data = ds
#####################################################################
# Non-public interface.
#####################################################################
@on_trait_change('[x, y]')
def _xy_changed(self):
x, y, scalars = self.x, self.y, self.scalars
nx, ny = scalars.shape
if x is None or y is None:
x, y = np.mgrid[-nx / 2.:nx / 2, -ny / 2.:ny / 2]
self.trait_setq(x=x, y=y)
x = np.atleast_2d(x.squeeze().T)[0, :].squeeze()
y = np.atleast_2d(y.squeeze())[0, :].squeeze()
dx = x[1] - x[0]
dy = y[1] - y[0]
ds = self.dataset
ds.origin = [x.min(), y.min(), 0]
ds.spacing = [dx, dy, 1]
if self.m_data is not None:
self.m_data.set(origin=ds.origin, spacing=ds.spacing)
self.update()
def _scalars_changed(self, s):
mask = self.mask
if mask is not None and len(mask) > 0:
s[mask.astype('bool')] = np.nan
            # The NaN trick only works with floats.
s = s.astype('float')
self.set(scalars=s, trait_change_notify=False)
old = self.m_data.scalar_data
self.m_data.scalar_data = s
if s is old:
self.m_data._scalar_data_changed(s)
##############################################################################
# `MGridSource` class.
##############################################################################
class MGridSource(MlabSource):
"""
This class represents a grid source for Mlab objects and
allows the user to set the x, y, scalar attributes.
"""
# The x, y, z and points of the grid.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
points = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
# The masking array.
mask = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
points = self.points
scalars = self.scalars
x, y, z, mask = self.x, self.y, self.z, self.mask
if mask is not None and len(mask) > 0:
scalars[mask.astype('bool')] = np.nan
# The NaN trick only works with floats.
scalars = scalars.astype('float')
self.set(scalars=scalars, trait_change_notify=False)
assert len(x.shape) == 2, "Array x must be 2 dimensional."
assert len(y.shape) == 2, "Array y must be 2 dimensional."
assert len(z.shape) == 2, "Array z must be 2 dimensional."
assert x.shape == y.shape, "Arrays x and y must have same shape."
assert y.shape == z.shape, "Arrays y and z must have same shape."
        # Points in the grid source will always be created using x, y, z.
        # Changing the points directly is not allowed because it cannot be
        # used to modify the values of x, y, z.
nx, ny = x.shape
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
points.shape = (nx * ny, 3)
self.set(points=points, trait_change_notify=False)
i, j = np.mgrid[0:nx - 1, 0:ny - 1]
i, j = np.ravel(i), np.ravel(j)
t1 = i * ny + j, (i + 1) * ny + j, (i + 1) * ny + (j + 1)
t2 = (i + 1) * ny + (j + 1), i * ny + (j + 1), i * ny + j
nt = len(t1[0])
triangles = np.zeros((nt * 2, 3), 'l')
triangles[0:nt, 0], triangles[0:nt, 1], triangles[0:nt, 2] = t1
triangles[nt:, 0], triangles[nt:, 1], triangles[nt:, 2] = t2
if self.dataset is None:
pd = tvtk.PolyData()
else:
pd = self.dataset
pd.set(points=points, polys=triangles)
if scalars is not None and len(scalars) > 0:
if not scalars.flags.contiguous:
scalars = scalars.copy()
self.set(scalars=scalars, trait_change_notify=False)
assert x.shape == scalars.shape
pd.point_data.scalars = scalars.ravel()
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
self.trait_setq(x=x)
self.points[:, 0] = x.ravel()
self.update()
def _y_changed(self, y):
self.trait_setq(y=y)
self.points[:, 1] = y.ravel()
self.update()
def _z_changed(self, z):
self.trait_setq(z=z)
self.points[:, 2] = z.ravel()
self.update()
def _points_changed(self, p):
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
mask = self.mask
if mask is not None and len(mask) > 0:
s[mask.astype('bool')] = np.nan
            # The NaN trick only works with floats.
s = s.astype('float')
self.set(scalars=s, trait_change_notify=False)
self.dataset.point_data.scalars = s.ravel()
self.dataset.point_data.scalars.name = 'scalars'
self.update()
###############################################################################
# `MTriangularMeshSource` class.
###############################################################################
class MTriangularMeshSource(MlabSource):
"""
This class represents a triangular mesh source for Mlab objects and
allows the user to set the x, y, scalar attributes.
"""
# The x, y, z and points of the grid.
x = ArrayOrNone
y = ArrayOrNone
z = ArrayOrNone
points = ArrayOrNone
triangles = ArrayOrNone
# The scalars shown on the glyphs.
scalars = ArrayOrNone
######################################################################
# `MlabSource` interface.
######################################################################
def reset(self, **traits):
"""Creates the dataset afresh or resets existing data source."""
# First set the attributes without really doing anything since
# the notification handlers are not called.
self.set(trait_change_notify=False, **traits)
points = self.points
scalars = self.scalars
x, y, z = self.x, self.y, self.z
points = np.c_[x.ravel(), y.ravel(), z.ravel()].ravel()
        points.shape = (points.size // 3, 3)
self.set(points=points, trait_change_notify=False)
triangles = self.triangles
assert triangles.shape[1] == 3, \
"The shape of the triangles array must be (X, 3)"
assert triangles.max() < len(points), \
"The triangles indices must be smaller that the number of points"
assert triangles.min() >= 0, \
"The triangles indices must be positive or null"
if self.dataset is None:
pd = tvtk.PolyData()
else:
pd = self.dataset
        # Set the points first, and the triangles after, so that the
        # polygons can refer to the right points in the polydata.
pd.set(points=points)
pd.set(polys=triangles)
if (not 'scalars' in traits
and scalars is not None
and scalars.shape != x.shape):
            # The scalars were probably set automatically to z by the
            # factory. We need to reset them, as the size has changed.
scalars = z
if scalars is not None and len(scalars) > 0:
if not scalars.flags.contiguous:
scalars = scalars.copy()
self.set(scalars=scalars, trait_change_notify=False)
assert x.shape == scalars.shape
pd.point_data.scalars = scalars.ravel()
pd.point_data.scalars.name = 'scalars'
self.dataset = pd
######################################################################
# Non-public interface.
######################################################################
def _x_changed(self, x):
self.trait_setq(x=x)
self.points[:, 0] = x.ravel()
self.update()
def _y_changed(self, y):
self.trait_setq(y=y)
self.points[:, 1] = y.ravel()
self.update()
def _z_changed(self, z):
self.trait_setq(z=z)
self.points[:, 2] = z.ravel()
self.update()
def _points_changed(self, p):
self.dataset.points = p
self.update()
def _scalars_changed(self, s):
self.dataset.point_data.scalars = s.ravel()
self.dataset.point_data.scalars.name = 'scalars'
self.update()
def _triangles_changed(self, triangles):
if triangles.min() < 0:
raise ValueError('The triangles array has negative values')
if triangles.max() > self.x.size:
            raise ValueError('The triangles array has values larger than '
                             'the number of points')
self.dataset.polys = triangles
self.update()
############################################################################
# Argument processing
############################################################################
def convert_to_arrays(args):
""" Converts a list of iterables to a list of arrays or callables,
if needed.
"""
args = list(args)
for index, arg in enumerate(args):
if not callable(arg):
if not hasattr(arg, 'shape'):
arg = np.atleast_1d(np.array(arg))
if np.any(np.isinf(arg)):
raise ValueError("""Input array contains infinite values
You can remove them using: a[np.isinf(a)] = np.nan
""")
args[index] = arg
return args
def process_regular_vectors(*args):
""" Converts different signatures to (x, y, z, u, v, w). """
args = convert_to_arrays(args)
if len(args) == 3:
u, v, w = [np.atleast_3d(a) for a in args]
assert len(u.shape) == 3, "3D array required"
x, y, z = np.indices(u.shape)
elif len(args) == 6:
x, y, z, u, v, w = args
elif len(args) == 4:
x, y, z, f = args
if not callable(f):
raise ValueError("When 4 arguments are provided, the fourth must "
"be a callable")
u, v, w = f(x, y, z)
else:
raise ValueError("wrong number of arguments")
assert (x.shape == y.shape and
y.shape == z.shape and
u.shape == z.shape and
v.shape == u.shape and
            w.shape == v.shape), "argument shapes are not equal"
return x, y, z, u, v, w
def process_regular_scalars(*args):
""" Converts different signatures to (x, y, z, s). """
args = convert_to_arrays(args)
if len(args) == 1:
s = np.atleast_3d(args[0])
assert len(s.shape) == 3, "3D array required"
x, y, z = np.indices(s.shape)
elif len(args) == 3:
x, y, z = args
s = None
elif len(args) == 4:
x, y, z, s = args
if callable(s):
s = s(x, y, z)
else:
raise ValueError("wrong number of arguments")
assert (x.shape == y.shape and
y.shape == z.shape and
(s is None
             or s.shape == z.shape)), "argument shapes are not equal"
return x, y, z, s
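# Illustrative sketch, not part of the original module: how the signature
# helpers above normalise their inputs.  The shapes asserted below follow
# directly from the code of process_regular_scalars; nothing else is assumed.
def _example_process_regular_scalars():
    s = np.random.random((4, 4, 4))
    # Single-array form: x, y, z are built from the indices of s.
    x, y, z, s_out = process_regular_scalars(s)
    assert x.shape == y.shape == z.shape == s_out.shape == (4, 4, 4)
    # Four-argument form with a callable: s is computed from x, y, z.
    x, y, z, s_out = process_regular_scalars(x, y, z, lambda a, b, c: a + b + c)
    assert s_out.shape == (4, 4, 4)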
def process_regular_2d_scalars(*args, **kwargs):
""" Converts different signatures to (x, y, s). """
args = convert_to_arrays(args)
for index, arg in enumerate(args):
if not callable(arg):
args[index] = np.atleast_2d(arg)
if len(args) == 1:
s = args[0]
assert len(s.shape) == 2, "2D array required"
x, y = np.indices(s.shape)
elif len(args) == 3:
x, y, s = args
if callable(s):
s = s(x, y)
else:
raise ValueError("wrong number of arguments")
assert len(s.shape) == 2, "2D array required"
if 'mask' in kwargs:
mask = kwargs['mask']
s[mask.astype('bool')] = np.nan
        # The NaN trick only works with floats.
s = s.astype('float')
return x, y, s
############################################################################
# Sources
############################################################################
def vector_scatter(*args, **kwargs):
""" Creates scattered vector data.
**Function signatures**::
vector_scatter(u, v, w, ...)
vector_scatter(x, y, z, u, v, w, ...)
vector_scatter(x, y, z, f, ...)
If only 3 arrays u, v, w are passed the x, y and z arrays are assumed to be
made from the indices of vectors.
If 4 positional arguments are passed the last one must be a callable, f,
that returns vectors.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
x, y, z, u, v, w = process_regular_vectors(*args)
scalars = kwargs.pop('scalars', None)
if scalars is not None:
scalars = np.ravel(scalars)
name = kwargs.pop('name', 'VectorScatter')
data_source = MGlyphSource()
data_source.reset(x=x, y=y, z=z, u=u, v=v, w=w, scalars=scalars)
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
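# Hypothetical usage sketch, not exercised by this module: scattered random
# vectors are wrapped by vector_scatter and rendered as arrow glyphs through
# the standard mlab pipeline (mlab.pipeline.vectors).
def _example_vector_scatter():
    from mayavi import mlab
    x, y, z, u, v, w = np.random.random((6, 50))
    src = vector_scatter(x, y, z, u, v, w, name='RandomVectors')
    mlab.pipeline.vectors(src)
    mlab.show()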
def vector_field(*args, **kwargs):
""" Creates vector field data.
**Function signatures**::
vector_field(u, v, w, ...)
vector_field(x, y, z, u, v, w, ...)
vector_field(x, y, z, f, ...)
If only 3 arrays u, v, w are passed the x, y and z arrays are assumed to be
made from the indices of vectors.
If the x, y and z arrays are passed, they should have been generated
    by `numpy.mgrid` or `numpy.ogrid`. The function builds a vector field
    assuming the points are regularly spaced on an orthogonal grid.
If 4 positional arguments are passed the last one must be a callable, f,
that returns vectors.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
if len(args) == 3:
x = y = z = np.atleast_3d(1)
u, v, w = [np.atleast_3d(a) for a in args]
else:
x, y, z, u, v, w = [np.atleast_3d(a)
for a in process_regular_vectors(*args)]
scalars = kwargs.pop('scalars', None)
if scalars is not None:
scalars = np.atleast_3d(scalars)
data_source = MArraySource()
data_source.reset(x=x, y=y, z=z, u=u, v=v, w=w, scalars=scalars)
name = kwargs.pop('name', 'VectorField')
return tools.add_dataset(data_source.m_data, name, **kwargs)
def scalar_scatter(*args, **kwargs):
"""
Creates scattered scalar data.
**Function signatures**::
scalar_scatter(s, ...)
        scalar_scatter(x, y, z, s, ...)
scalar_scatter(x, y, z, f, ...)
If only 1 array s is passed the x, y and z arrays are assumed to be
made from the indices of vectors.
If 4 positional arguments are passed the last one must be an array s, or
a callable, f, that returns an array.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
x, y, z, s = process_regular_scalars(*args)
if s is not None:
s = np.ravel(s)
data_source = MGlyphSource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'ScalarScatter')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def scalar_field(*args, **kwargs):
"""
Creates a scalar field data.
**Function signatures**::
scalar_field(s, ...)
scalar_field(x, y, z, s, ...)
scalar_field(x, y, z, f, ...)
If only 1 array s is passed the x, y and z arrays are assumed to be
made from the indices of arrays.
If the x, y and z arrays are passed they are supposed to have been
generated by `numpy.mgrid`. The function builds a scalar field assuming
    the points are regularly spaced.
If 4 positional arguments are passed the last one must be an array s, or
a callable, f, that returns an array.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
if len(args) == 1:
# Be lazy, don't create three big arrays for 1 input array. The
        # MArraySource is clever enough to handle flat arrays.
x = y = z = np.atleast_1d(1)
s = args[0]
else:
x, y, z, s = process_regular_scalars(*args)
data_source = MArraySource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'ScalarField')
return tools.add_dataset(data_source.m_data, name, **kwargs)
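# Hypothetical usage sketch, not exercised by this module: a regular grid built
# with numpy.mgrid is wrapped by scalar_field and rendered with an iso-surface
# from the standard mlab pipeline (mlab.pipeline.iso_surface).
def _example_scalar_field():
    from mayavi import mlab
    x, y, z = np.mgrid[-2:2:40j, -2:2:40j, -2:2:40j]
    src = scalar_field(x, y, z, np.sin(x * y + z))
    mlab.pipeline.iso_surface(src, contours=[0.0, 0.5])
    mlab.show()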
def line_source(*args, **kwargs):
"""
Creates line data.
**Function signatures**::
line_source(x, y, z, ...)
line_source(x, y, z, s, ...)
line_source(x, y, z, f, ...)
If 4 positional arguments are passed the last one must be an array s,
or a callable, f, that returns an array.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization."""
if len(args) == 1:
raise ValueError("wrong number of arguments")
x, y, z, s = process_regular_scalars(*args)
data_source = MLineSource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'LineSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def array2d_source(*args, **kwargs):
"""
Creates structured 2D data from a 2D array.
**Function signatures**::
array2d_source(s, ...)
array2d_source(x, y, s, ...)
array2d_source(x, y, f, ...)
If 3 positional arguments are passed the last one must be an array s,
or a callable, f, that returns an array. x and y give the
    coordinates of positions corresponding to the s values.
x and y can be 1D or 2D arrays (such as returned by numpy.ogrid or
numpy.mgrid), but the points should be located on an orthogonal grid
    (possibly non-uniform). In other words, all the points sharing the same
index in the s array need to have the same x or y value.
If only 1 array s is passed the x and y arrays are assumed to be
    made from the indices of the array, and a uniformly-spaced data set is
created.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
:mask: Mask points specified in a boolean masking array.
"""
data_source = MArray2DSource()
mask = kwargs.pop('mask', None)
if len(args) == 1:
args = convert_to_arrays(args)
s = np.atleast_2d(args[0])
data_source.reset(scalars=s, mask=mask)
else:
x, y, s = process_regular_2d_scalars(*args, **kwargs)
data_source.reset(x=x, y=y, scalars=s, mask=mask)
name = kwargs.pop('name', 'Array2DSource')
return tools.add_dataset(data_source.m_data, name, **kwargs)
def grid_source(x, y, z, **kwargs):
"""
Creates 2D grid data.
x, y, z are 2D arrays giving the positions of the vertices of the surface.
The connectivity between these points is implied by the connectivity on
the arrays.
    For simple structures (such as orthogonal grids) prefer the array2d_source
function, as it will create more efficient data structures.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
:mask: Mask points specified in a boolean masking array.
"""
scalars = kwargs.pop('scalars', None)
if scalars is None:
scalars = z
mask = kwargs.pop('mask', None)
x, y, z, scalars = convert_to_arrays((x, y, z, scalars))
data_source = MGridSource()
data_source.reset(x=x, y=y, z=z, scalars=scalars, mask=mask)
name = kwargs.pop('name', 'GridSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def vertical_vectors_source(*args, **kwargs):
"""
    Creates a set of vectors pointing upward, useful, e.g., for bar graphs.
**Function signatures**::
vertical_vectors_source(s, ...)
vertical_vectors_source(x, y, s, ...)
vertical_vectors_source(x, y, f, ...)
vertical_vectors_source(x, y, z, s, ...)
vertical_vectors_source(x, y, z, f, ...)
If only one positional argument is passed, it can be a 1D, 2D, or 3D
array giving the length of the vectors. The positions of the data
    points are deduced from the indices of the array, and a
    uniformly-spaced data set is created.
If 3 positional arguments (x, y, s) are passed the last one must be
an array s, or a callable, f, that returns an array. x and y give the
2D coordinates of positions corresponding to the s values. The
vertical position is assumed to be 0.
If 4 positional arguments (x, y, z, s) are passed, the 3 first are
arrays giving the 3D coordinates of the data points, and the last one
is an array s, or a callable, f, that returns an array giving the
data value.
**Keyword arguments**:
:name: the name of the vtk object created.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
"""
if len(args) == 3:
x, y, data = args
if np.isscalar(x):
z = 0
else:
z = np.zeros_like(x)
args = (x, y, z, data)
x, y, z, s = process_regular_scalars(*args)
if s is not None:
s = np.ravel(s)
data_source = MVerticalGlyphSource()
data_source.reset(x=x, y=y, z=z, scalars=s)
name = kwargs.pop('name', 'VerticalVectorsSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
def triangular_mesh_source(x, y, z, triangles, **kwargs):
"""
Creates 2D mesh by specifying points and triangle connectivity.
x, y, z are 2D arrays giving the positions of the vertices of the surface.
    The connectivity between these points is given by listing triplets of
    interconnected vertices. These vertices are designated by their
    position index.
**Keyword arguments**:
:name: the name of the vtk object created.
:scalars: optional scalar data.
:figure: optionally, the figure on which to add the data source.
If None, the source is not added to any figure, and will
be added automatically by the modules or
filters. If False, no figure will be created by modules
or filters applied to the source: the source can only
be used for testing, or numerical algorithms, not
visualization.
"""
x, y, z, triangles = convert_to_arrays((x, y, z, triangles))
if triangles.min() < 0:
raise ValueError('The triangles array has negative values')
if triangles.max() > x.size:
        raise ValueError('The triangles array has values larger than '
                         'the number of points')
scalars = kwargs.pop('scalars', None)
if scalars is None:
scalars = z
data_source = MTriangularMeshSource()
data_source.reset(x=x, y=y, z=z, triangles=triangles, scalars=scalars)
name = kwargs.pop('name', 'TriangularMeshSource')
ds = tools.add_dataset(data_source.dataset, name, **kwargs)
data_source.m_data = ds
return ds
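# Hypothetical usage sketch, not exercised by this module: two triangles
# sharing an edge, passed to triangular_mesh_source and rendered as a surface
# through the standard mlab pipeline (mlab.pipeline.surface).
def _example_triangular_mesh_source():
    from mayavi import mlab
    x = np.array([0., 1., 1., 0.])
    y = np.array([0., 0., 1., 1.])
    z = np.array([0., 0.5, 0., 0.5])
    triangles = np.array([[0, 1, 2], [0, 2, 3]])
    src = triangular_mesh_source(x, y, z, triangles)
    mlab.pipeline.surface(src)
    mlab.show()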
def open(filename, figure=None):
"""Open a supported data file given a filename. Returns the source
object if a suitable reader was found for the file.
If 'figure' is False, no view is opened, and the code does not need
    a GUI or an OpenGL context.
"""
if figure is None:
engine = tools.get_engine()
elif figure is False:
# Get a null engine that we can use.
engine = get_null_engine()
else:
engine = engine_manager.find_figure_engine(figure)
engine.current_scene = figure
src = engine.open(filename)
return src
############################################################################
# Automatically generated sources from registry.
############################################################################
def _create_data_source(metadata):
"""Creates a data source and adds it to the mayavi engine given
metadata of the source. Returns the created source.
"""
factory = metadata.get_callable()
src = factory()
engine = tools.get_engine()
engine.add_source(src)
return src
def _make_functions(namespace):
"""Make the automatic functions and add them to the namespace."""
for src in registry.sources:
if len(src.extensions) == 0:
func_name = camel2enthought(src.id)
if func_name.endswith('_source'):
func_name = func_name[:-7]
func = lambda metadata=src: _create_data_source(metadata)
func.__doc__ = src.help
func.__name__ = func_name
# Inject function into the namespace and __all__.
namespace[func_name] = func
__all__.append(func_name)
_make_functions(locals())
| {
"content_hash": "3f8e263921023c004581534cb0eb0dfc",
"timestamp": "",
"source": "github",
"line_count": 1427,
"max_line_length": 79,
"avg_line_length": 34.665031534688154,
"alnum_prop": 0.5304344310348313,
"repo_name": "dmsurti/mayavi",
"id": "9e89009d95c97f03149ece7fdd2788975b438b0a",
"size": "49467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mayavi/tools/sources.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1054"
},
{
"name": "GAP",
"bytes": "34817"
},
{
"name": "Python",
"bytes": "2494055"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
} |
"""A tff.aggregator for collecting summed histograms of client model weights."""
import tensorflow as tf
import tensorflow_federated as tff
class HistogramWeightsFactory(tff.aggregators.UnweightedAggregationFactory):
"""Aggregator reporting a histogram of client weights as a metric.
The created tff.templates.AggregationProcess sums values placed at CLIENTS,
and outputs the sum placed at SERVER.
The process has empty state and returns summed histograms of client values in
measurements. For computing both the resulting summed value and summed
  histograms, the implementation delegates to the tff.federated_sum operator.
The value returned in measurements is one histogram if the client value_type
is a single tensor of weights, or a list of histograms - one for each layer -
if the client value_type is a struct of weight tensors.
"""
def __init__(self, mn=-1.0, mx=1.0, nbins=50):
"""Initializer for HistogramWeightsFactory.
Defines the tf.histogram_fixed_width bins and bounds.
Args:
mn: A float that specifies the lower bound of the histogram.
mx: A float that specifies the upper bound of the histogram.
nbins: An integer that specifies the number of bins in the histogram.
"""
self._min = mn
self._max = mx
self._nbins = nbins
def create(self, value_type):
if not (tff.types.is_structure_of_floats(value_type) or
(value_type.is_tensor() and value_type.dtype == tf.float32)):
raise ValueError("Expect value_type to be float tensor or structure of "
f"float tensors, found {value_type}.")
@tff.federated_computation()
def init_fn():
return tff.federated_value((), tff.SERVER)
@tff.tf_computation(value_type)
def compute_client_histogram(value):
bounds = [self._min, self._max]
histogram_fn = lambda x: tf.histogram_fixed_width(x, bounds, self._nbins)
return tf.nest.map_structure(histogram_fn, value)
@tff.federated_computation(init_fn.type_signature.result,
tff.type_at_clients(value_type))
def next_fn(state, value):
summed_value = tff.federated_sum(value)
client_histograms = tff.federated_map(compute_client_histogram, value)
server_histograms = tff.federated_sum(client_histograms)
return tff.templates.MeasuredProcessOutput(
state=state, result=summed_value, measurements=server_histograms)
return tff.templates.AggregationProcess(init_fn, next_fn)
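# Hypothetical usage sketch, not part of the original module: build the
# aggregation process for a fixed-size float vector and run one round with two
# clients.  tff.TensorType, process.initialize and process.next are the public
# TFF aggregation-process API; the client values below are arbitrary.
def _example_histogram_weights_round():
  factory = HistogramWeightsFactory(mn=-1.0, mx=1.0, nbins=4)
  process = factory.create(tff.TensorType(tf.float32, [3]))
  state = process.initialize()
  output = process.next(state, [[0.1, -0.5, 0.9], [0.2, 0.3, -0.8]])
  # output.result is the element-wise sum of the client vectors;
  # output.measurements holds the summed per-bin histogram counts.
  print(output.result, output.measurements)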
| {
"content_hash": "63b08c24a8eaf11c01ba498efda2639f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 80,
"avg_line_length": 39.171875,
"alnum_prop": 0.7080175508575988,
"repo_name": "google-research/federated",
"id": "4259ad21b2adda261d82630f947e87dcfcc37278",
"size": "3084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compressed_communication/aggregators/histogram_weights.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "76424"
},
{
"name": "Python",
"bytes": "4122952"
},
{
"name": "Shell",
"bytes": "7089"
},
{
"name": "Starlark",
"bytes": "97189"
}
],
"symlink_target": ""
} |
import sys
from io import BytesIO as IO
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from webob import Response
import cgi
import json
import base64
import zipfile
import uuid
import urllib
import ast
import shutil
import datetime
import tempfile
import pprint
import platform
import socket
import psutil
import os
import multiprocessing
import inspect
import pudb
# pfioh local dependencies
try:
from ._colors import Colors
from .debug import debug
except:
from _colors import Colors
from debug import debug
# Global var
Gd_internalvar = {
'name': "pfioh",
'version': "",
'storeBase': "/tmp",
'key2address': {},
'httpResponse': False,
'createDirsAsNeeded': False,
'b_swiftStorage': False
}
class StoreHandler(BaseHTTPRequestHandler):
b_quiet = False
def __init__(self, *args, **kwargs):
"""
"""
global Gd_internalvar
self.__name__ = 'StoreHandler'
self.d_ctlVar = Gd_internalvar
b_test = False
self.pp = pprint.PrettyPrinter(indent=4)
for k,v in kwargs.items():
if k == 'test': b_test = True
if not b_test:
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def qprint(self, msg, **kwargs):
str_comms = ""
for k,v in kwargs.items():
if k == 'comms': str_comms = v
str_caller = inspect.stack()[1][3]
if not StoreHandler.b_quiet:
if str_comms == 'status': print(Colors.PURPLE, end="")
if str_comms == 'error': print(Colors.RED, end="")
if str_comms == "tx": print(Colors.YELLOW + "<----")
if str_comms == "rx": print(Colors.GREEN + "---->")
print('%s' % datetime.datetime.now() + " | " + os.path.basename(__file__) + ':' + self.__name__ + "." + str_caller + '() | ', end="")
print(msg)
if str_comms == "tx": print(Colors.YELLOW + "<----")
if str_comms == "rx": print(Colors.GREEN + "---->")
print(Colors.NO_COLOUR, end="", flush=True)
def remoteLocation_resolve(self, d_remote):
"""
Resolve the remote path location
:param d_remote: the "remote" specification
:return: a string representation of the remote path
"""
b_status = False
str_remotePath = ""
if Gd_internalvar['b_swiftStorage']:
b_status= True
str_remotePath= d_remote['key']
elif 'path' in d_remote.keys():
str_remotePath = d_remote['path']
b_status = True
elif 'key' in d_remote.keys():
d_ret = self.storage_resolveBasedOnKey(key = d_remote['key'])
if d_ret['status']:
b_status = True
str_remotePath = d_ret['path']
return {
'status': b_status,
'path': str_remotePath
}
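    # Illustrative examples of the "remote" specifications resolved above
    # (not exercised by the server; values are hypothetical).  They assume
    # b_swiftStorage is False and the default storeBase of "/tmp":
    #   {'path': '/incoming/data'} -> {'status': True, 'path': '/incoming/data'}
    #   {'key': 'abc'}             -> {'status': True, 'path': '/tmp/key-abc/'}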
def do_GET_remoteStatus(self, d_msg, **kwargs):
"""
This method is used to get information about the remote
server -- for example, is a remote directory/file valid?
"""
global Gd_internalvar
d_meta = d_msg['meta']
d_remote = d_meta['remote']
# pudb.set_trace()
str_serverPath = self.remoteLocation_resolve(d_remote)['path']
self.qprint('server path resolves to %s' % str_serverPath, comms = 'status')
b_isFile = os.path.isfile(str_serverPath)
b_isDir = os.path.isdir(str_serverPath)
b_exists = os.path.exists(str_serverPath)
b_createdNewDir = False
        b_swiftStore = Gd_internalvar['b_swiftStorage']
        if b_swiftStore:
            b_exists = True
if not b_exists and Gd_internalvar['createDirsAsNeeded']:
os.makedirs(str_serverPath)
b_createdNewDir = True
d_ret = {
'status': b_exists or b_createdNewDir,
'isfile': b_isFile,
'isswiftstore': b_swiftStore,
'isdir': b_isDir,
'createdNewDir': b_createdNewDir
}
self.send_response(200)
self.end_headers()
self.ret_client(d_ret)
self.qprint(d_ret, comms = 'tx')
return {'status': b_exists or b_createdNewDir}
def do_GET_withCompression(self, d_msg):
"""
Process a "GET" using zip/base64 encoding
:return:
"""
# d_msg = ast.literal_eval(d_server)
d_meta = d_msg['meta']
# d_local = d_meta['local']
d_remote = d_meta['remote']
d_transport = d_meta['transport']
d_compress = d_transport['compress']
d_ret = {}
str_serverPath = self.remoteLocation_resolve(d_remote)['path']
d_ret['preop'] = self.do_GET_preop( meta = d_meta,
path = str_serverPath)
if d_ret['preop']['status']:
str_serverPath = d_ret['preop']['outgoingPath']
str_fileToProcess = str_serverPath
b_cleanup = False
# b_zip = True
str_encoding = 'base64'
if 'encoding' in d_compress: str_encoding = d_compress['encoding']
if 'cleanup' in d_compress: b_cleanup = d_compress['cleanup']
str_archive = d_compress['archive']
if str_archive == 'zip': b_zip = True
else: b_zip = False
if os.path.isdir(str_serverPath):
b_zip = True
# str_archive = 'zip'
d_ret = self.getData(path=str_fileToProcess, is_zip= b_zip,
encoding= str_encoding, cleanup= b_cleanup, d_ret=d_ret)
d_ret['postop'] = self.do_GET_postop( meta = d_meta)
self.ret_client(d_ret)
self.qprint(self.pp.pformat(d_ret).strip(), comms = 'tx')
return d_ret
def getData(self, **kwargs):
raise NotImplementedError('Abstract Method: Please implement this method in child class')
def do_GET_withCopy(self, d_msg):
"""
Process a "GET" using copy operations
:return:
"""
d_meta = d_msg['meta']
d_local = d_meta['local']
d_remote = d_meta['remote']
d_transport = d_meta['transport']
d_copy = d_transport['copy']
str_serverPath = self.remoteLocation_resolve(d_remote)['path']
str_clientPath = d_local['path']
# str_fileToProcess = str_serverPath
b_copyTree = False
b_copyFile = False
b_symlink = False
d_ret = {'status': True}
if not d_copy['symlink']:
if os.path.isdir(str_serverPath):
b_copyTree = True
str_serverNode = str_serverPath.split('/')[-1]
try:
shutil.copytree(str_serverPath, os.path.join(str_clientPath, str_serverNode))
except BaseException as e:
d_ret['status'] = False
d_ret['msg'] = str(e)
if os.path.isfile(str_serverPath):
b_copyFile = True
shutil.copy2(str_serverPath, str_clientPath)
if d_copy['symlink']:
str_serverNode = str_serverPath.split('/')[-1]
try:
os.symlink(str_serverPath, os.path.join(str_clientPath, str_serverNode))
b_symlink = True
except BaseException as e:
d_ret['status'] = False
d_ret['msg'] = str(e)
b_symlink = False
d_ret['source'] = str_serverPath
d_ret['destination'] = str_clientPath
d_ret['copytree'] = b_copyTree
d_ret['copyfile'] = b_copyFile
d_ret['symlink'] = b_symlink
d_ret['timestamp'] = '%s' % datetime.datetime.now()
self.ret_client(d_ret)
return d_ret
def log_message(self, format, *args):
"""
This silences the server from spewing to stdout!
"""
return
def do_GET(self):
d_server = dict(urllib.parse.parse_qsl(urllib.parse.urlsplit(self.path).query))
d_meta = ast.literal_eval(d_server['meta'])
d_msg = {
'action': d_server['action'],
'meta': d_meta
}
d_transport = d_meta['transport']
self.qprint(self.path, comms = 'rx')
# pudb.set_trace()
if 'checkRemote' in d_transport and d_transport['checkRemote']:
self.qprint('Getting status on server filesystem...', comms = 'status')
d_ret = self.do_GET_remoteStatus(d_msg)
return d_ret
if 'compress' in d_transport:
d_ret = self.do_GET_withCompression(d_msg)
return d_ret
        if 'copy' in d_transport:
            if Gd_internalvar['b_swiftStorage']:
d_ret = self.do_GET_withCompression(d_msg)
return d_ret
else:
d_ret = self.do_GET_withCopy(d_msg)
return d_ret
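    # Example 'meta' payloads dispatched by do_GET above (illustrative only;
    # paths and values are hypothetical):
    #   check a remote path:
    #     {"remote": {"path": "/tmp/out"}, "transport": {"checkRemote": True}}
    #   pull with zip/base64 compression:
    #     {"local": {"path": "/home/user/data"},
    #      "remote": {"path": "/tmp/out"},
    #      "transport": {"compress": {"archive": "zip",
    #                                 "encoding": "base64",
    #                                 "cleanup": True}}}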
def form_get(self, str_verb, data):
"""
Returns a form from cgi.FieldStorage
"""
return cgi.FieldStorage(
IO(data),
headers = self.headers,
environ =
{
'REQUEST_METHOD': str_verb,
'CONTENT_TYPE': self.headers['Content-Type'],
}
)
def storage_resolveBasedOnKey(self, *args, **kwargs):
"""
Associate a 'key' text string to an actual storage location in the filesystem space
on which this service has been launched.
:param args:
:param kwargs:
:return:
"""
global Gd_internalvar
str_key = ""
b_status = False
for k,v in kwargs.items():
if k == 'key': str_key = v
if len(str_key):
str_internalLocation = os.path.join('%s/key-%s' %(Gd_internalvar['storeBase'], str_key),'')
Gd_internalvar['key2address'][str_key] = str_internalLocation
b_status = True
return {
'status': b_status,
'path': str_internalLocation
}
def internalctl_varprocess(self, *args, **kwargs):
"""
get/set a specific variable as parsed from the meta JSON.
:param args:
:param kwargs:
:return:
"""
global Gd_internalvar
d_meta = {}
d_ret = {}
str_var = ''
b_status = False
for k,v in kwargs.items():
if k == 'd_meta': d_meta = v
str_var = d_meta['var']
if d_meta:
if 'get' in d_meta.keys():
d_ret[str_var] = Gd_internalvar[str_var]
b_status = True
if 'set' in d_meta.keys():
Gd_internalvar[str_var] = d_meta['set']
d_ret[str_var] = d_meta['set']
b_status = True
if 'compute' in d_meta.keys() and str_var == 'key2address':
d_path = self.storage_resolveBasedOnKey(key = d_meta['compute'])
d_ret[str_var] = d_path['path']
b_status = d_path['status']
return {'d_ret': d_ret,
'status': b_status}
def internalctl_process(self, *args, **kwargs):
"""
Process the 'internalctl' action.
{ "action": "internalctl",
"meta": {
"var": "<internalVar>",
"set": "/some/new/path"
}
}
{ "action": "internalctl",
"meta": {
"var": "<internalVar>",
"get": "currentPath"
}
}
{ "action": "internalctl",
"meta": {
"var": "key2address",
"compute": "<keyToken>"
}
}
<internalVar>: <meta actions>
* storeBase: get/set
* key: get/set
* storeAddress: get/compute
:param args:
:param kwargs:
:return:
"""
d_request = {}
b_status = False
d_ret = {
'status': b_status
}
for k,v in kwargs.items():
if k == 'request': d_request = v
if d_request:
d_meta = d_request['meta']
d_ret = self.internalctl_varprocess(d_meta = d_meta)
return d_ret
def hello_process(self, *args, **kwargs):
"""
The 'hello' action is merely to 'speak' with the server. The server
can return current date/time, echo back a string, query the startup
command line args, etc.
This method is a simple means of checking if the server is "up" and
running.
:param args:
:param kwargs:
:return:
"""
global Gd_internalvar
self.qprint("hello_process()", comms = 'status')
b_status = False
d_ret = {}
d_request = {}
for k, v in kwargs.items():
if k == 'request': d_request = v
d_meta = d_request['meta']
if 'askAbout' in d_meta.keys():
str_askAbout = d_meta['askAbout']
d_ret['name'] = Gd_internalvar['name']
d_ret['version'] = Gd_internalvar['version']
if str_askAbout == 'timestamp':
str_timeStamp = datetime.datetime.today().strftime('%Y%m%d%H%M%S.%f')
d_ret['timestamp'] = {}
d_ret['timestamp']['now'] = str_timeStamp
b_status = True
if str_askAbout == 'sysinfo':
d_ret['sysinfo'] = {}
d_ret['sysinfo']['system'] = platform.system()
d_ret['sysinfo']['machine'] = platform.machine()
d_ret['sysinfo']['platform'] = platform.platform()
d_ret['sysinfo']['uname'] = platform.uname()
d_ret['sysinfo']['version'] = platform.version()
d_ret['sysinfo']['memory'] = psutil.virtual_memory()
d_ret['sysinfo']['cpucount'] = multiprocessing.cpu_count()
d_ret['sysinfo']['loadavg'] = os.getloadavg()
d_ret['sysinfo']['cpu_percent'] = psutil.cpu_percent()
d_ret['sysinfo']['hostname'] = socket.gethostname()
d_ret['sysinfo']['inet'] = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
b_status = True
if str_askAbout == 'echoBack':
d_ret['echoBack'] = {}
d_ret['echoBack']['msg'] = d_meta['echoBack']
b_status = True
return { 'stdout': {
'd_ret': d_ret,
'status': b_status
}
}
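    # Example exchange for the 'hello' action (illustrative only; values are
    # hypothetical):
    #   request:  {"action": "hello",
    #              "meta": {"askAbout": "echoBack", "echoBack": "ping"}}
    #   response: {"stdout": {"d_ret": {"name": "pfioh",
    #                                   "version": "...",
    #                                   "echoBack": {"msg": "ping"}},
    #                         "status": True}}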
def rmtree_process(self, **kwargs):
"""
Remove (if possible) a directory location either specified directly
with a "path" or indirectly with a "key".
"""
str_path = ''
str_msg = ''
b_status = False
d_msg = {}
d_ret = {}
for k,v in kwargs.items():
if k == 'request': d_msg = v
d_meta = d_msg['meta']
str_path = self.remoteLocation_resolve(d_meta)['path']
if len(str_path):
# str_path = d_meta['path']
            self.qprint('Will rmtree <%s>; UID %s, eUID %s...' % (str_path, os.getuid(), os.geteuid()))
try:
shutil.rmtree(str_path)
b_status = True
str_msg = 'Successfully removed tree %s' % str_path
self.qprint(str_msg, comms = 'status')
except:
b_status = False
str_msg = 'Could not remove tree %s. Possible permission error or invalid path. Try using action `ls`' % str_path
self.qprint(str_msg, comms = 'error')
d_ret = {
'status': b_status,
'msg': str_msg
}
return d_ret
def ls_process(self, **kwargs):
"""
Return a list of dictionary entries of directory entries of
path or key store.
"""
str_path = ''
str_msg = ''
b_status = False
d_msg = {}
d_ret = {}
d_e = {}
d_ls = {}
for k,v in kwargs.items():
if k == 'request': d_msg = v
d_meta = d_msg['meta']
str_path = self.remoteLocation_resolve(d_meta)['path']
if 'subdir' in d_meta.keys():
str_path = os.path.join(str_path, d_meta['subdir'])
if len(str_path):
self.qprint('scandir on %s...' % str_path)
try:
for e in os.scandir(str_path):
str_type = ''
if e.is_file(): str_type = 'file'
if e.is_dir(): str_type = 'dir'
if e.is_symlink(): str_type = 'symlink'
d_e = {
'type': str_type,
'path': os.path.join(str_path, e.name),
'uid': e.stat().st_uid,
'gid': e.stat().st_gid,
'size': e.stat().st_size,
'mtime': e.stat().st_mtime,
'ctime': e.stat().st_ctime,
'atime': e.stat().st_atime
}
d_ls[e.name] = d_e
b_status = True
str_msg = 'Successful scandir on %s' % str_path
self.qprint(str_msg, comms = 'status')
            except Exception:
b_status = False
str_msg = 'Could not scandir >%s<. Possible permission error.' % str_path
self.qprint(str_msg, comms = 'error')
d_ret = {
'status': b_status,
'msg': str_msg,
'd_ls': d_ls
}
return d_ret
def getHeaders(self):
"""
        Return the 'content-length' header of the request.
        """
        self.qprint('headers = %s' % self.headers)
return self.headers['content-length']
def rfileRead(self, length):
"""
Return the contents of the file transmitted
"""
return self.rfile.read(int(length))
def unpackForm(self, form, d_form):
"""
Load the json request
"""
self.qprint("Unpacking multi-part form message...", comms = 'status')
for key in form:
self.qprint("\tUnpacking field '%s..." % key, comms = 'status')
d_form[key] = form.getvalue(key)
d_msg = json.loads((d_form['d_msg']))
return d_msg
def do_POST(self, **kwargs):
b_skipInit = False
d_msg = {}
for k,v in kwargs.items():
self.qprint('in for ' + str(k))
if k == 'd_msg':
d_msg = v
b_skipInit = True
if not b_skipInit:
# Parse the form data posted
self.qprint(str(self.headers), comms = 'rx')
length = self.getHeaders()
data = self.rfileRead(length)
form = self.form_get('POST', data)
d_form = {}
d_ret = {
'msg' : 'In do_POST',
'status' : True,
'formsize' : sys.getsizeof(form)
}
self.qprint('data length = %d' % len(data), comms = 'status')
self.qprint('form length = %d' % len(form), comms = 'status')
if len(form):
d_msg = self.unpackForm(form, d_form)
else:
self.qprint("Parsing JSON data...", comms = 'status')
d_data = json.loads(data.decode())
try:
d_msg = d_data['payload']
            except Exception:
d_msg = d_data
self.qprint('d_msg = %s' % self.pp.pformat(d_msg).strip(), comms = 'status')
d_meta = d_msg['meta']
if 'action' in d_msg:
self.qprint("verb: %s detected." % d_msg['action'], comms = 'status')
if 'Path' not in d_msg['action']:
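                # Dynamic dispatch: an incoming action such as 'ls' or 'rmtree' is
                # mapped to the correspondingly named '<action>_process' method.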
str_method = '%s_process' % d_msg['action']
self.qprint("method to call: %s(request = d_msg) " % str_method, comms = 'status')
d_done = {'status': False}
try:
method = getattr(self, str_method)
d_done = method(request = d_msg)
                except Exception:
str_msg = "Class '{}' does not implement method '{}'".format(self.__class__.__name__,
str_method)
d_done = {
'status': False,
'msg': str_msg
}
self.qprint(str_msg, comms = 'error')
self.qprint(self.pp.pformat(d_done).strip(), comms = 'tx')
d_ret = d_done
if 'ctl' in d_meta:
self.do_POST_serverctl(d_meta)
if 'transport' in d_meta:
d_transport = d_meta['transport']
if 'compress' in d_transport:
d_ret = self.do_POST_withCompression(
data = data,
length = length,
form = form,
d_form = d_form
)
if 'copy' in d_transport :
if b_swiftStorage:
d_ret = self.do_POST_withCompression(
data = data,
length = length,
form = form,
d_form = d_form
)
else:
d_ret = self.do_POST_withCopy(d_meta)
if not b_skipInit: self.ret_client(d_ret)
return d_ret
def do_POST_serverctl(self, d_meta):
"""
"""
d_ctl = d_meta['ctl']
self.qprint('Processing server ctl...', comms = 'status')
self.qprint(d_meta, comms = 'rx')
if 'serverCmd' in d_ctl:
if d_ctl['serverCmd'] == 'quit':
self.qprint('Shutting down server', comms = 'status')
d_ret = {
'msg': 'Server shut down',
'status': True
}
self.qprint(d_ret, comms = 'tx')
self.ret_client(d_ret)
os._exit(0)
def do_POST_withCopy(self, d_meta):
"""
Process a "POST" using copy operations
:return:
"""
d_local = d_meta['local']
d_remote = d_meta['remote']
d_transport = d_meta['transport']
d_copy = d_transport['copy']
str_serverPath = self.remoteLocation_resolve(d_remote)['path']
str_clientPath = d_local['path']
b_copyTree = False
b_copyFile = False
d_ret = {'status': True}
if not d_copy['symlink']:
if os.path.isdir(str_clientPath):
b_copyTree = True
str_clientNode = str_clientPath.split('/')[-1]
try:
shutil.copytree(str_clientPath, os.path.join(str_serverPath, str_clientNode))
except BaseException as e:
d_ret['status'] = False
d_ret['msg'] = str(e)
if os.path.isfile(str_clientPath):
b_copyFile = True
shutil.copy2(str_clientPath, str_serverPath)
d_ret['copytree'] = b_copyTree
d_ret['copyfile'] = b_copyFile
if d_copy['symlink']:
str_clientNode = str_clientPath.split('/')[-1]
try:
os.symlink(str_clientPath, os.path.join(str_serverPath, str_clientNode))
except BaseException as e:
d_ret['status'] = False
d_ret['msg'] = str(e)
d_ret['symlink'] = 'ln -s %s %s' % (str_clientPath, str_serverPath)
# d_ret['d_meta'] = d_meta
d_ret['source'] = str_clientPath
d_ret['destination'] = str_serverPath
d_ret['copytree'] = b_copyTree
d_ret['copyfile'] = b_copyFile
d_ret['timestamp'] = '%s' % datetime.datetime.now()
# self.ret_client(d_ret)
return d_ret
def do_GET_preop(self, **kwargs):
"""
Perform any pre-operations relating to a "PULL" or "GET" request.
        Essentially, for the 'plugin' case, this means appending the string
        'outgoing' to the remote storage location path and creating that dir.
"""
d_meta = {}
d_postop = {}
d_ret = {}
b_status = False
str_path = ''
for k,v in kwargs.items():
if k == 'meta': d_meta = v
if k == 'path': str_path = v
if 'specialHandling' in d_meta:
d_preop = d_meta['specialHandling']
if 'cmd' in d_preop.keys():
                str_cmd         = d_preop['cmd']
str_keyPath = ''
if 'remote' in d_meta.keys():
str_keyPath = self.remoteLocation_resolve(d_meta['remote'])['path']
str_cmd = str_cmd.replace('%key', str_keyPath)
b_status = True
d_ret['cmd'] = str_cmd
if 'op' in d_preop.keys():
# pudb.set_trace()
if d_preop['op'] == 'plugin':
str_outgoingPath = '%s/outgoing' % str_path
d_ret['op'] = 'plugin'
d_ret['outgoingPath'] = str_outgoingPath
b_status = True
d_ret['status'] = b_status
d_ret['timestamp'] = '%s' % datetime.datetime.now()
return d_ret
def ls_do(self, **kwargs):
"""
Perform an ls based on the passed args.
"""
for k,v in kwargs.items():
if k == 'meta': d_meta = v
if 'remote' in d_meta.keys():
d_remote = d_meta['remote']
for subdir in ['incoming', 'outgoing']:
d_remote['subdir'] = subdir
dmsg_lstree = {
                    'action': 'ls',
'meta' : d_remote
}
d_ls = self.ls_process( request = dmsg_lstree)
self.qprint("target ls = \n%s" % self.pp.pformat(d_ls).strip())
def do_GET_postop(self, **kwargs):
"""
Perform any post-operations relating to a "GET" / "PULL" request.
:param kwargs:
:return:
"""
str_cmd = ''
d_meta = {}
d_postop = {}
d_ret = {}
b_status = False
str_path = ''
for k,v in kwargs.items():
if k == 'meta': d_meta = v
if 'specialHandling' in d_meta:
d_postop = d_meta['specialHandling']
if 'cleanup' in d_postop.keys():
if d_postop['cleanup']:
                    #
                    # In this case we remove the 'remote' path (or the path that
                    # a key lookup resolves to), i.e. in shell terms:
                    #
                    #       rm -fr $str_path
                    #
if 'remote' in d_meta.keys():
d_remote = d_meta['remote']
self.ls_do(meta = d_meta)
dmsg_rmtree = {
'action': 'rmtree',
'meta' : d_remote
}
self.qprint("Performing GET postop cleanup", comms = 'status')
self.qprint("dmsg_rmtree: %s" % dmsg_rmtree, comms = 'status')
d_ret['rmtree'] = self.rmtree_process(request = dmsg_rmtree)
d_ret['op'] = 'plugin'
b_status = d_ret['rmtree']['status']
d_ret['status'] = b_status
d_ret['timestamp'] = '%s' % datetime.datetime.now()
return d_ret
def do_POST_postop(self, **kwargs):
"""
Perform any post-operations relating to a "POST" request.
:param kwargs:
:return:
"""
str_cmd = ''
d_meta = {}
d_postop = {}
d_ret = {}
b_status = False
str_path = ''
for k,v in kwargs.items():
if k == 'meta': d_meta = v
if k == 'path': str_path = v
if 'specialHandling' in d_meta:
d_postop = d_meta['specialHandling']
if 'cmd' in d_postop.keys():
str_cmd = d_postop['cmd']
str_keyPath = ''
if 'remote' in d_meta.keys():
str_keyPath = self.remoteLocation_resolve(d_meta['remote'])['path']
str_cmd = str_cmd.replace('%key', str_keyPath)
b_status = True
d_ret['cmd'] = str_cmd
if 'op' in d_postop.keys():
if d_postop['op'] == 'plugin':
#
# In this case the contents of the keyStore need to be moved to a
# directory called 'incoming' within that store. In shell, this
# would amount to:
#
# mv $str_path /tmp/$str_path
# mkdir $str_path
# mv /tmp/$str_path $str_path/incoming
#
str_uuid = '%s' % uuid.uuid4()
str_tmp = os.path.join('/tmp', str_uuid)
str_incomingPath = os.path.join(str_path, 'incoming')
str_outgoingPath = os.path.join(str_path, 'outgoing')
self.qprint("Moving %s to %s..." % (str_path, str_tmp))
shutil.move(str_path, str_tmp)
self.qprint("Recreating clean path %s..." % str_path)
os.makedirs(str_path)
self.qprint("Moving %s to %s" % (str_tmp, str_incomingPath))
shutil.move(str_tmp, str_incomingPath)
d_ret['op'] = 'plugin'
d_ret['shareDir'] = str_path
d_ret['tmpPath'] = str_tmp
d_ret['incomingPath'] = str_incomingPath
d_ret['outgoingPath'] = str_outgoingPath
os.makedirs(str_outgoingPath)
b_status = True
d_ret['status'] = b_status
d_ret['timestamp'] = '%s' % datetime.datetime.now()
return d_ret
def do_POST_withCompression(self, **kwargs):
# Parse the form data posted
self.qprint(str(self.headers), comms = 'rx')
self.qprint('do_POST_withCompression()', comms = 'status')
# data = None
# length = 0
# form = None
d_form = {}
d_ret = {}
for k,v in kwargs.items():
# if k == 'data': data = v
# if k == 'length': length = v
# if k == 'form': form = v
if k == 'd_form': d_form = v
d_msg = json.loads((d_form['d_msg']))
d_meta = d_msg['meta']
#
# d_meta = json.loads(d_form['d_meta'])
fileContent = d_form['local']
str_fileName = d_meta['local']['path']
str_encoding = d_form['encoding']
d_remote = d_meta['remote']
b_unpack = False
# b_serverPath = False
# str_unpackBase = self.server.str_fileBase
str_unpackPath = self.remoteLocation_resolve(d_remote)['path']
str_unpackBase = os.path.join(str_unpackPath,'')
d_transport = d_meta['transport']
d_compress = d_transport['compress']
if 'unpack' in d_compress:
b_unpack = d_compress['unpack']
str_fileOnly = os.path.split(str_fileName)[-1]
str_fileSuffix = ""
if d_compress['archive'] == "zip":
str_fileSuffix = ".zip"
str_localFile = "%s%s%s" % (str_unpackBase, str_fileOnly, str_fileSuffix)
        # Decoding
if str_encoding == "base64":
d_ret['decode'] = {}
d_ret['write'] = {}
try:
data = base64.b64decode(fileContent)
d_ret['decode']['status'] = True
d_ret['decode']['msg'] = 'base64 decode successful!'
except Exception as err:
d_ret['decode']['status'] = False
d_ret['decode']['msg'] = 'base64 decode unsuccessful!'
self.ret_client(d_ret)
self.qprint(d_ret, comms = 'tx')
self.qprint(err)
return d_ret
d_ret['decode']['timestamp'] = '%s' % datetime.datetime.now()
else:
d_ret['write'] = {}
try:
data = fileContent.decode()
d_ret['write']['decode'] = True
except Exception as err:
d_ret['write']['decode'] = False
data = fileContent
b_zip = b_unpack and (d_compress['archive'] == 'zip')
        d_ret = self.storeData(file_name = str_localFile, client_path = str_fileName,
                               file_content = data, Path = str_unpackPath,
                               is_zip = b_zip, d_ret = d_ret)
# pudb.set_trace()
d_ret['postop'] = self.do_POST_postop(meta = d_meta,
path = str_unpackPath)
self.send_response(200)
self.end_headers()
d_ret['User-agent'] = self.headers['user-agent']
# self.ret_client(d_ret)
self.qprint(self.pp.pformat(d_ret).strip(), comms = 'tx')
return d_ret
def storeData(self, **kwargs):
raise NotImplementedError('Abstract Method: Please implement this method in child class')
def ret_client(self, d_ret):
"""
Simply "writes" the d_ret using json and the client wfile.
:param d_ret:
:return:
"""
global Gd_internalvar
if not Gd_internalvar['httpResponse']:
self.wfile.write(json.dumps(d_ret).encode())
else:
self.wfile.write(str(Response(json.dumps(d_ret))).encode())
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""
Handle requests in a separate thread.
"""
def col2_print(self, str_left, str_right):
print(Colors.WHITE +
('%*s' % (self.LC, str_left)), end='')
print(Colors.LIGHT_BLUE +
('%*s' % (self.RC, str_right)) + Colors.NO_COLOUR)
def __init__(self, *args, **kwargs):
"""
Holder for constructor of class -- allows for explicit setting
of member 'self' variables.
:return:
"""
HTTPServer.__init__(self, *args, **kwargs)
self.LC = 40
self.RC = 40
self.args = None
self.str_desc = 'pfioh'
self.str_name = self.str_desc
self.str_version = ""
self.str_fileBase = "received-"
self.str_storeBase = ""
self.b_createDirsAsNeeded = False
self.b_swiftStorage = False
self.str_unpackDir = "/tmp/unpack"
self.b_removeZip = False
self.dp = debug(verbosity=0, level=-1)
def setup(self, **kwargs):
global Gd_internalvar
for k,v in kwargs.items():
if k == 'args': self.args = v
if k == 'desc': self.str_desc = v
if k == 'ver': self.str_version = v
self.str_fileBase = "received-"
self.str_storeBase = self.args['storeBase']
self.b_createDirsAsNeeded = self.args['b_createDirsAsNeeded']
self.str_unpackDir = self.args['storeBase']
self.b_removeZip = False
self.b_swiftStorage = self.args['b_swiftStorage']
# print(self.args)
Gd_internalvar['httpResponse'] = self.args['b_httpResponse']
Gd_internalvar['name'] = self.str_name
Gd_internalvar['version'] = self.str_version
Gd_internalvar['createDirsAsNeeded'] = self.args['b_createDirsAsNeeded']
Gd_internalvar['storeBase'] = self.args['storeBase']
Gd_internalvar['b_swiftStorage'] = self.args['b_swiftStorage']
print(self.str_desc)
self.col2_print("Listening on address:", self.args['ip'])
self.col2_print("Listening on port:", self.args['port'])
self.col2_print("Server listen forever:", self.args['b_forever'])
self.col2_print("Return HTTP responses:", self.args['b_httpResponse'])
print(Colors.LIGHT_GREEN + "\n\n\tWaiting for incoming data..." + Colors.NO_COLOUR, flush=True)
def zipdir(path, ziph, **kwargs):
"""
Zip up a directory.
:param path:
:param ziph:
:param kwargs:
:return:
"""
str_arcroot = ""
for k, v in kwargs.items():
if k == 'arcroot': str_arcroot = v
for root, dirs, files in os.walk(path):
for file in files:
str_arcfile = os.path.join(root, file)
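            # If an archive root is given, store entries relative to that root
            # (keeping the root's basename); otherwise keep the absolute path.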
if len(str_arcroot):
str_arcname = str_arcroot.split('/')[-1] + str_arcfile.split(str_arcroot)[1]
else:
str_arcname = str_arcfile
try:
ziph.write(str_arcfile, arcname = str_arcname)
            except Exception:
print("Skipping %s" % str_arcfile)
def zip_process(**kwargs):
"""
Process zip operations.
:param kwargs:
:return:
"""
str_localPath = ""
str_zipFileName = ""
str_action = "zip"
str_arcroot = ""
for k,v in kwargs.items():
if k == 'path': str_localPath = v
if k == 'action': str_action = v
if k == 'payloadFile': str_zipFileName = v
if k == 'arcroot': str_arcroot = v
if str_action == 'zip':
str_mode = 'w'
str_arcFileName = '%s/%s' % (tempfile.gettempdir(), uuid.uuid4())
str_zipFileName = str_arcFileName + '.zip'
else:
str_mode = 'r'
ziphandler = zipfile.ZipFile(str_zipFileName, str_mode, zipfile.ZIP_DEFLATED)
if str_mode == 'w':
if os.path.isdir(str_localPath):
zipdir(str_localPath, ziphandler, arcroot = str_arcroot)
# str_zipFileName = shutil.make_archive(str_arcFileName, 'zip', str_localPath)
else:
if len(str_arcroot):
str_arcname = str_arcroot.split('/')[-1] + str_localPath.split(str_arcroot)[1]
else:
str_arcname = str_localPath
try:
ziphandler.write(str_localPath, arcname = str_arcname)
        except Exception:
ziphandler.close()
os.remove(str_zipFileName)
return {
'msg': json.dumps({"msg": "No file or directory found for '%s'" % str_localPath}),
'status': False
}
if str_mode == 'r':
ziphandler.extractall(str_localPath)
ziphandler.close()
return {
'msg': '%s operation successful' % str_action,
'fileProcessed': str_zipFileName,
'status': True,
'path': str_localPath,
'zipmode': str_mode,
'filesize': "{:,}".format(os.stat(str_zipFileName).st_size),
'timestamp': '%s' % datetime.datetime.now()
}
def base64_process(**kwargs):
"""
Process base64 file io
"""
str_fileToSave = ""
str_fileToRead = ""
str_action = "encode"
data = None
for k,v in kwargs.items():
if k == 'action': str_action = v
if k == 'payloadBytes': data = v
if k == 'payloadFile': str_fileToRead = v
if k == 'saveToFile': str_fileToSave = v
# if k == 'sourcePath': str_sourcePath = v
if str_action == "encode":
        # Encode the contents of the file to read (payloadFile) as base64 ASCII for transmission
if len(str_fileToRead):
with open(str_fileToRead, 'rb') as f:
data = f.read()
f.close()
data_b64 = base64.b64encode(data)
with open(str_fileToSave, 'wb') as f:
f.write(data_b64)
f.close()
return {
'msg': 'Encode successful',
'fileProcessed': str_fileToSave,
'status': True
# 'encodedBytes': data_b64
}
if str_action == "decode":
if len(data) % 4:
# not a multiple of 4, add padding:
data += '=' * (4 - len(data) % 4)
bytes_decoded = base64.b64decode(data)
with open(str_fileToSave, 'wb') as f:
f.write(bytes_decoded)
f.close()
return {
'msg': 'Decode successful',
'fileProcessed': str_fileToSave,
'status': True
# 'decodedBytes': bytes_decoded
}
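# Hypothetical usage of the module-level helpers above (paths are examples only):
#
#   d_zip = zip_process(action='zip', path='/some/dir', arcroot='/some/dir')
#   base64_process(action='encode',
#                  payloadFile=d_zip['fileProcessed'],
#                  saveToFile='/tmp/payload.b64')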
| {
"content_hash": "807c6bd032e58b7dcf1ec46de03fdef4",
"timestamp": "",
"source": "github",
"line_count": 1244,
"max_line_length": 303,
"avg_line_length": 35.78456591639871,
"alnum_prop": 0.446446221583251,
"repo_name": "awalkaradi95moc/pfioh",
"id": "ea0fa43b9bac9513298c2ba551485157d1c525a8",
"size": "44540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pfioh/pfioh.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187250"
},
{
"name": "Shell",
"bytes": "1572"
}
],
"symlink_target": ""
} |
def extractLamiadaughterBlogspotCom(item):
'''
Parser for 'lamiadaughter.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| {
"content_hash": "7896897077dba39a12c6b8d261d0011c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26.80952380952381,
"alnum_prop": 0.6376554174067496,
"repo_name": "fake-name/ReadableWebProxy",
"id": "456e881686e7ef12ac14b5ebb3147940ec1eb615",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractLamiadaughterBlogspotCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from apps.session.models import UserProfile, Message,\
GroupMessage, Block, Group
class UserProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'nickname', 'points')
class MessageAdmin(admin.ModelAdmin):
list_display = ('content', 'sender', 'receiver')
class GroupMessageAdmin(admin.ModelAdmin):
list_display = ('content', 'sender', 'receivers', 'created_time')
class BlockAdmin(admin.ModelAdmin):
list_display = ('sender', 'receiver')
class GroupAdmin(admin.ModelAdmin):
list_display = ('members_list', 'name')
def members_list(self, obj):
return ", ".join([str(user) for user in obj.members.all()])
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(Message, MessageAdmin)
admin.site.register(GroupMessage, GroupMessageAdmin)
admin.site.register(Block, BlockAdmin)
admin.site.register(Group, GroupAdmin)
| {
"content_hash": "21bc93b6d4bf6cc60ef192952e747b51",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 69,
"avg_line_length": 28.46875,
"alnum_prop": 0.7310647639956093,
"repo_name": "sparcs-kaist/araplus",
"id": "1b76be72cd482703fc654dc811f93dfcf81bc1a1",
"size": "911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/session/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "967"
},
{
"name": "HTML",
"bytes": "49759"
},
{
"name": "JavaScript",
"bytes": "7989"
},
{
"name": "Python",
"bytes": "64716"
}
],
"symlink_target": ""
} |
from south.db import db
from south.v2 import SchemaMigration
from zinnia.migrations import user_name
from zinnia.migrations import user_table
from zinnia.migrations import user_orm_label
from zinnia.migrations import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Entry.featured'
db.add_column('zinnia_entry', 'featured', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'Entry.featured'
db.delete_column('zinnia_entry', 'featured')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_name, 'db_table': "'%s'" % user_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'zinnia.category': {
'Meta': {'ordering': "['title']", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['zinnia.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'zinnia.entry': {
'Meta': {'ordering': "['-creation_date']", 'object_name': 'Entry'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['%s']" % user_orm_label, 'symmetrical': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['zinnia.Category']", 'null': 'True', 'blank': 'True'}),
'comment_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'end_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2042, 3, 15, 0, 0)'}),
'excerpt': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'pingback_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_rel_+'", 'null': 'True', 'to': "orm['zinnia.Entry']"}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'start_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('tagging.fields.TagField', [], {}),
'template': ('django.db.models.fields.CharField', [], {'default': "'zinnia/entry_detail.html'", 'max_length': '250'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['zinnia']
| {
"content_hash": "82b2313d44bec58e82af75444aeeda1d",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 182,
"avg_line_length": 74.84615384615384,
"alnum_prop": 0.5635919835560124,
"repo_name": "1844144/django-blog-zinnia",
"id": "f8a977512aa07f0897b1705fe75c163e790b406b",
"size": "7784",
"binary": false,
"copies": "4",
"ref": "refs/heads/deep",
"path": "zinnia/migrations/0007_entry_featured.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "77370"
},
{
"name": "HTML",
"bytes": "75068"
},
{
"name": "JavaScript",
"bytes": "235617"
},
{
"name": "Makefile",
"bytes": "1789"
},
{
"name": "Python",
"bytes": "506854"
}
],
"symlink_target": ""
} |
"""
homeassistant.components.switch.wemo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for WeMo switches.
"""
import logging
from homeassistant.components.switch import SwitchDevice
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Find and return WeMo switches. """
try:
# pylint: disable=no-name-in-module, import-error
import homeassistant.external.pywemo.pywemo as pywemo
import homeassistant.external.pywemo.pywemo.discovery as discovery
except ImportError:
logging.getLogger(__name__).exception((
"Failed to import pywemo. "
"Did you maybe not run `git submodule init` "
"and `git submodule update`?"))
return
if discovery_info is not None:
device = discovery.device_from_description(discovery_info)
if device:
add_devices_callback([WemoSwitch(device)])
return
logging.getLogger(__name__).info("Scanning for WeMo devices")
switches = pywemo.discover_devices()
# Filter out the switches and wrap in WemoSwitch object
add_devices_callback(
[WemoSwitch(switch) for switch in switches
if isinstance(switch, pywemo.Switch)])
class WemoSwitch(SwitchDevice):
""" Represents a WeMo switch within Home Assistant. """
def __init__(self, wemo):
self.wemo = wemo
self.insight_params = None
@property
def unique_id(self):
""" Returns the id of this WeMo switch """
return "{}.{}".format(self.__class__, self.wemo.serialnumber)
@property
def name(self):
""" Returns the name of the switch if any. """
return self.wemo.name
@property
def current_power_mwh(self):
""" Current power usage in mwh. """
if self.insight_params:
return self.insight_params['currentpower']
@property
def today_power_mw(self):
""" Today total power usage in mw. """
if self.insight_params:
return self.insight_params['todaymw']
@property
def is_on(self):
""" True if switch is on. """
return self.wemo.get_state()
def turn_on(self, **kwargs):
""" Turns the switch on. """
self.wemo.on()
def turn_off(self):
""" Turns the switch off. """
self.wemo.off()
def update(self):
""" Update WeMo state. """
self.wemo.get_state(True)
if self.wemo.model.startswith('Belkin Insight'):
self.insight_params = self.wemo.insight_params
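# Typically enabled from Home Assistant configuration (illustrative, old-style
# YAML shown; the exact schema depends on the Home Assistant version):
#
#   switch:
#     platform: wemo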
| {
"content_hash": "6a3eaaebd0f9aba2bb008e31a3ac4ad2",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 76,
"avg_line_length": 28.97752808988764,
"alnum_prop": 0.6114773167894533,
"repo_name": "teodoc/home-assistant",
"id": "eb55e0662b7888a35b2796683ae2a3ce062bd74a",
"size": "2579",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "homeassistant/components/switch/wemo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "787935"
},
{
"name": "Python",
"bytes": "682083"
},
{
"name": "Shell",
"bytes": "5097"
}
],
"symlink_target": ""
} |
import argparse
import sys
import tak
from tak import mcts
from xformer import loading
from tak.model import wrapper, grpc
import torch
import time
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
"--simulations",
dest="simulations",
type=int,
default=100,
metavar="POSITIONS",
)
parser.add_argument(
"--size",
dest="size",
type=int,
default=3,
metavar="SIZE",
)
parser.add_argument(
"--graph",
action="store_true",
default=False,
help="Use CUDA graphs to run the network",
)
parser.add_argument(
"--fp16",
action="store_true",
default=False,
help="Run model in float16",
)
parser.add_argument(
"--device",
type=str,
default="cpu",
)
parser.add_argument(
"--model",
type=str,
)
parser.add_argument(
"--host",
type=str,
)
parser.add_argument(
"--port",
type=int,
default=5001,
)
args = parser.parse_args(argv)
if (args.model and args.host) or not (args.model or args.host):
        raise ValueError("Must specify exactly one of --model or --host")
if args.model:
model = loading.load_model(args.model, args.device)
if args.fp16:
model = model.to(torch.float16)
if args.graph:
network = wrapper.GraphedWrapper(model)
else:
network = wrapper.ModelWrapper(model, device=args.device)
else:
network = grpc.GRPCNetwork(host=args.host, port=args.port)
p = tak.Position.from_config(tak.Config(size=args.size))
engine = mcts.MCTS(
mcts.Config(
network=network,
simulation_limit=args.simulations,
time_limit=0,
)
)
start = time.time()
tree = engine.analyze(p)
end = time.time()
print(f"done simulations={tree.simulations} duration={end-start:.2f}")
if __name__ == "__main__":
main(sys.argv[1:])
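# Example invocations (paths/hosts are placeholders, script path is illustrative):
#   python bench/mcts.py --model /path/to/model.pt --device cuda --size 5 --simulations 200
# or, against a running gRPC analysis server:
#   python bench/mcts.py --host localhost --port 5001 --size 5 --simulations 200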
| {
"content_hash": "6e73bc81d6a3179c35eef3898720409c",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 75,
"avg_line_length": 22.23404255319149,
"alnum_prop": 0.5583732057416267,
"repo_name": "nelhage/taktician",
"id": "b110037d6361e28f20e83bc8820a9085579b02c6",
"size": "2090",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/bench/mcts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1798"
},
{
"name": "Dockerfile",
"bytes": "212"
},
{
"name": "Go",
"bytes": "296293"
},
{
"name": "Makefile",
"bytes": "995"
},
{
"name": "Python",
"bytes": "165477"
},
{
"name": "Shell",
"bytes": "1724"
}
],
"symlink_target": ""
} |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
import traceback
from typing import Dict, Optional, List, Any, Callable, Collection
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
# Integration information
INTEGRATION_NAME = 'illuminate'
INTEGRATION_CONTEXT_BRAND = 'Illuminate'
MALICIOUS_DATA: Dict[str, str] = {
'Vendor': 'illuminate',
'Description': 'illuminate has determined that this indicator is malicious via internal analysis.'
}
''' HELPER FUNCTIONS '''
class IdNamePair(object):
def __init__(self, unique_id: int, name: str):
self.id = unique_id
self.name = name
def __str__(self):
return f'id = {self.id}, name = {self.name}'
class EnrichmentOutput(object):
def __init__(self, illuminate_context_data: dict, raw_data: dict, indicator_type: str) -> None:
self.illuminate_context_data = illuminate_context_data
self.raw_data = raw_data
self.indicator_type = indicator_type
self.reputation_context: dict = {}
def get_human_readable_output(self) -> str:
human_readable_data = self.illuminate_context_data.copy()
human_readable_data['Actors'] = [IdNamePair(d['id'], d['name']) for d in human_readable_data['Actors']]
human_readable_data['Malwares'] = [IdNamePair(d['id'], d['name']) for d in human_readable_data['Malwares']]
return tableToMarkdown(
t=human_readable_data,
name=f'{INTEGRATION_NAME} {self.indicator_type.capitalize()} Information',
removeNull=True
)
def build_illuminate_context(self) -> dict:
return {
f'{INTEGRATION_CONTEXT_BRAND}.{self.indicator_type.capitalize()}(val.ID && val.ID === obj.ID)':
self.illuminate_context_data
}
def generate_reputation_context(
self,
primary_key: str,
indicator_value: str,
indicator_type: str,
reputation_key: str,
extra_context: Optional[dict] = None
):
if self.has_context_data():
reputation_context: Dict[str, Any] = {primary_key: indicator_value}
if extra_context is not None:
reputation_context.update(extra_context)
malicious = Client.is_indicator_malicious(self.raw_data)
if malicious:
reputation_context['Malicious'] = MALICIOUS_DATA
self.add_reputation_context(
f'{reputation_key}(val.{primary_key} && val.{primary_key} === obj.{primary_key})',
reputation_context
)
self.add_reputation_context('DBotScore', {
'Indicator': indicator_value,
'Score': 3 if malicious else 1,
'Type': indicator_type,
'Vendor': INTEGRATION_NAME
})
def build_all_context(self) -> dict:
all_context = {}
all_context.update(self.build_illuminate_context())
if len(self.reputation_context) > 0:
all_context.update(self.reputation_context)
return all_context
def return_outputs(self):
# We need to use the underlying demisto.results function call rather than using return_outputs because
# we need to add the IgnoreAutoExtract key to ensure that our illuminate links are not marked as indicators
entry = {
"Type": entryTypes["note"],
"HumanReadable": self.get_human_readable_output(),
"ContentsFormat": formats["json"],
"Contents": self.raw_data,
"EntryContext": self.build_all_context(),
"IgnoreAutoExtract": True
}
demisto.results(entry)
def add_illuminate_context(self, key: str, data: Any):
self.illuminate_context_data[key] = data
def add_reputation_context(self, key: str, context: dict):
self.reputation_context[key] = context
def has_context_data(self):
return len(self.illuminate_context_data) > 0
class Client(BaseClient):
def __init__(self, server: str, username: str, password: str, insecure: bool, proxy: bool):
# NB: 404 is a valid response since that just means no entries, and we want the UI to respect that and show "No Entries"
super().__init__(
base_url=f'https://{server}/api/1_0/',
verify=not insecure,
proxy=proxy,
auth=(username, password),
ok_codes=(200, 404)
)
def indicator_search(self, indicator_type: str, indicator: str) -> dict:
params = {'type': indicator_type, 'value': indicator}
return self._http_request(method='GET', url_suffix='indicator/match', params=params)
def perform_test_request(self):
data: dict = self._http_request(method='GET', url_suffix='')
if data['links'] is None:
raise DemistoException('Invalid URL or Credentials. JSON structure not recognized')
def enrich_indicator(self, indicator: str, indicator_type: str) -> EnrichmentOutput:
raw_data: dict = self.indicator_search(indicator_type, indicator)
if raw_data is None:
return EnrichmentOutput({}, {}, indicator_type)
context_data = self.get_context_from_response(raw_data)
return EnrichmentOutput(context_data, raw_data, indicator_type)
@staticmethod
def get_data_key(data: dict, key: str) -> Optional[Any]:
return None if key not in data else data[key]
@staticmethod
def get_nested_data_key(data: dict, key: str, nested_key: str) -> Optional[Any]:
top_level = Client.get_data_key(data, key)
return None if top_level is None or nested_key not in top_level else top_level[nested_key]
@staticmethod
def get_data_key_as_date(data: dict, key: str, fmt: str) -> Optional[str]:
value = Client.get_data_key(data, key)
return None if value is None else datetime.fromtimestamp(value / 1000.0).strftime(fmt)
@staticmethod
def get_data_key_as_list(data: dict, key: str) -> List[Any]:
data_list = Client.get_data_key(data, key)
return [] if data_list is None or not isinstance(data[key], (list,)) else data_list
@staticmethod
def get_data_key_as_list_of_values(data: dict, key: str, value_key: str) -> List[Any]:
data_list = Client.get_data_key_as_list(data, key)
return [value_data[value_key] for value_data in data_list]
@staticmethod
def get_data_key_as_list_of_dicts(data: dict, key: str, dict_creator: Callable) -> Collection[Any]:
data_list = Client.get_data_key_as_list(data, key)
return {} if len(data_list) == 0 else [dict_creator(value_data) for value_data in data_list]
@staticmethod
def is_indicator_malicious(data: dict) -> bool:
benign = Client.get_nested_data_key(data, 'benign', 'value')
return False if benign is None or benign is True else True
@staticmethod
def get_context_from_response(data: dict) -> dict:
result_dict = {
'ID': Client.get_data_key(data, 'id'),
'Indicator': Client.get_nested_data_key(data, 'value', 'name'),
'EvidenceCount': Client.get_data_key(data, 'reportCount'),
'Active': Client.get_data_key(data, 'active'),
'HitCount': Client.get_data_key(data, 'hitCount'),
'ConfidenceLevel': Client.get_nested_data_key(data, 'confidenceLevel', 'value'),
'FirstHit': Client.get_data_key_as_date(data, 'firstHit', '%Y-%m-%d'),
'LastHit': Client.get_data_key_as_date(data, 'lastHit', '%Y-%m-%d'),
'ReportedDates': Client.get_data_key_as_list_of_values(data, 'reportedDates', 'date'),
'ActivityDates': Client.get_data_key_as_list_of_values(data, 'activityDates', 'date'),
'Malwares': Client.get_data_key_as_list_of_dicts(data, 'malwares', lambda d: {'id': d['id'], 'name': d['name']}),
'Actors': Client.get_data_key_as_list_of_dicts(data, 'actors', lambda d: {'id': d['id'], 'name': d['name']}),
'Benign': Client.get_nested_data_key(data, 'benign', 'value'),
'IlluminateLink': None
}
links_list = Client.get_data_key_as_list(data, 'links')
result_dict['IlluminateLink'] = next((
link['href'].replace("api/1_0/indicator/", "indicators/")
for link in links_list
if 'rel' in link and link['rel'] == 'self' and 'href' in link
), None)
return result_dict
def build_client(demisto_params: dict) -> Client:
server: str = str(demisto_params.get('server'))
proxy: bool = demisto_params.get('proxy', False)
insecure: bool = demisto_params.get('insecure', False)
credentials: dict = demisto_params.get('credentials', {})
username: str = str(credentials.get('identifier'))
password: str = str(credentials.get('password'))
return Client(server, username, password, insecure, proxy)
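# A minimal sketch of the params dict build_client() expects (values are placeholders):
#   {'server': 'illuminate.example.com', 'proxy': False, 'insecure': False,
#    'credentials': {'identifier': 'user', 'password': 'pass'}}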
''' COMMAND EXECUTION '''
def perform_test_module(client: Client):
client.perform_test_request()
def domain_command(client: Client, args: dict) -> List[EnrichmentOutput]:
domains: List[str] = argToList(args.get('domain'))
enrichment_data_list: List[EnrichmentOutput] = []
for domain in domains:
enrichment_data: EnrichmentOutput = client.enrich_indicator(domain, 'domain')
if enrichment_data.has_context_data():
extra_context = {}
ip_resolution = Client.get_nested_data_key(enrichment_data.raw_data, 'ipResolution', 'name')
if ip_resolution is not None:
enrichment_data.add_illuminate_context('IpResolution', ip_resolution)
extra_context['DNS'] = ip_resolution
enrichment_data.generate_reputation_context('Name', domain, 'domain', 'Domain', extra_context)
enrichment_data_list.append(enrichment_data)
return enrichment_data_list
def email_command(client: Client, args: dict) -> List[EnrichmentOutput]:
emails: List[str] = argToList(args.get('email'))
enrichment_data_list: List[EnrichmentOutput] = []
for email in emails:
enrichment_data: EnrichmentOutput = client.enrich_indicator(email, 'email')
if enrichment_data.has_context_data():
enrichment_data.generate_reputation_context('From', email, 'email', 'Email')
enrichment_data_list.append(enrichment_data)
return enrichment_data_list
def ip_command(client: Client, args: dict) -> List[EnrichmentOutput]:
ips: List[str] = argToList(args.get('ip'))
enrichment_data_list: List[EnrichmentOutput] = []
for ip in ips:
enrichment_data: EnrichmentOutput = client.enrich_indicator(ip, 'ip')
if enrichment_data.has_context_data():
enrichment_data.generate_reputation_context('Address', ip, 'ip', 'IP')
enrichment_data_list.append(enrichment_data)
return enrichment_data_list
def file_command(client: Client, args: dict) -> List[EnrichmentOutput]:
files: List[str] = argToList(args.get('file'))
enrichment_data_list: List[EnrichmentOutput] = []
for file in files:
enrichment_data: EnrichmentOutput = client.enrich_indicator(file, 'file')
if enrichment_data.has_context_data():
hash_type = get_hash_type(file)
if hash_type != 'Unknown':
enrichment_data.generate_reputation_context(hash_type.upper(), file, 'file', 'File')
enrichment_data_list.append(enrichment_data)
return enrichment_data_list
def illuminate_enrich_string_command(client: Client, args: dict) -> List[EnrichmentOutput]:
strings: List[str] = argToList(args.get('string'))
enrichment_data_list: List[EnrichmentOutput] = []
for string in strings:
enrichment_data_list.append(client.enrich_indicator(string, 'string'))
return enrichment_data_list
def illuminate_enrich_ipv6_command(client: Client, args: dict) -> List[EnrichmentOutput]:
ips: List[str] = argToList(args.get('ip'))
enrichment_data_list: List[EnrichmentOutput] = []
for ip in ips:
enrichment_data_list.append(client.enrich_indicator(ip, 'ipv6'))
return enrichment_data_list
def illuminate_enrich_mutex_command(client: Client, args: dict) -> List[EnrichmentOutput]:
mutexes: List[str] = argToList(args.get('mutex'))
enrichment_data_list: List[EnrichmentOutput] = []
for mutex in mutexes:
enrichment_data_list.append(client.enrich_indicator(mutex, 'mutex'))
return enrichment_data_list
def illuminate_enrich_http_request_command(client: Client, args: dict) -> List[EnrichmentOutput]:
http_requests: List[str] = argToList(args.get('http-request'))
enrichment_data_list: List[EnrichmentOutput] = []
for http_request in http_requests:
enrichment_data_list.append(client.enrich_indicator(http_request, 'httpRequest'))
return enrichment_data_list
def url_command(client: Client, args: dict) -> List[EnrichmentOutput]:
urls: List[str] = argToList(args.get('url'))
enrichment_data_list: List[EnrichmentOutput] = []
for url in urls:
enrichment_data: EnrichmentOutput = client.enrich_indicator(url, 'url')
if enrichment_data.has_context_data():
enrichment_data.generate_reputation_context('Data', url, 'url', 'URL')
enrichment_data_list.append(enrichment_data)
return enrichment_data_list
''' EXECUTION '''
def main():
commands = {
'domain': domain_command,
'email': email_command,
'file': file_command,
'ip': ip_command,
'url': url_command,
'illuminate-enrich-string': illuminate_enrich_string_command,
'illuminate-enrich-ipv6': illuminate_enrich_ipv6_command,
'illuminate-enrich-mutex': illuminate_enrich_mutex_command,
'illuminate-enrich-http-request': illuminate_enrich_http_request_command
}
command: str = demisto.command()
LOG(f'command is {command}')
try:
client = build_client(demisto.params())
if command == 'test-module':
perform_test_module(client)
demisto.results('ok')
elif command in commands:
enrichment_outputs: List[EnrichmentOutput] = commands[command](client, demisto.args())
[e.return_outputs() for e in enrichment_outputs]
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]\nTrace:\n{traceback.format_exc()}'
return_error(err_msg, error=e)
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
| {
"content_hash": "d3b3ed0a7ed8365b2adc4fed683e6126",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 128,
"avg_line_length": 37.98186528497409,
"alnum_prop": 0.6383602755610122,
"repo_name": "demisto/content",
"id": "309e58746333fda0c2433503e512b035c1ea03d8",
"size": "14661",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/illuminate/Integrations/illuminate/illuminate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
from __future__ import division
from entity import Entity
class MashupEntity(Entity):
type_id = 5
def __init__(self, mashup=None):
Entity.__init__(self)
self.id = None
self.update(mashup)
@staticmethod
def create(*args):
entity = MashupEntity()
entity.load(*args)
return entity
def update(self, mashup):
self.mashup = mashup
if self.mashup is not None:
self.name = mashup.name \
if mashup.name else "Version #" + str(mashup.id)
self.user = 'testing'
self.mod_time = self.now()
self.create_time = self.now()
self.size = len(self.mashup.alias_list)
self.description = ""
self.url = 'test'
self.was_updated = True
# returns boolean, True if search input is satisfied else False
def match(self, search):
raise RuntimeError("Not implemented") | {
"content_hash": "6a4d85f3147fa34f94ec6d1add0ec7c4",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 67,
"avg_line_length": 27.970588235294116,
"alnum_prop": 0.5699263932702419,
"repo_name": "VisTrails/VisTrails",
"id": "0d06b1d3c2abfdc3a5bac1ab1bc2f96da2241a9d",
"size": "2865",
"binary": false,
"copies": "2",
"ref": "refs/heads/v2.2",
"path": "vistrails/core/collection/mashup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1129"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19779006"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "SQLPL",
"bytes": "2323"
},
{
"name": "Shell",
"bytes": "26542"
},
{
"name": "TeX",
"bytes": "147247"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
import uuid
from webob import exc
from murano.db.catalog import api
from murano.openstack.common.db import exception as db_exception
from murano.tests.unit import base
from murano.tests.unit import utils
class CatalogDBTestCase(base.MuranoWithDBTestCase):
def setUp(self):
super(CatalogDBTestCase, self).setUp()
self.tenant_id = str(uuid.uuid4())
self.context = utils.dummy_context(tenant_id=self.tenant_id)
def _create_categories(self):
api.category_add('cat1')
api.category_add('cat2')
def _stub_package(self):
return {
'archive': "archive blob here",
'fully_qualified_name': 'com.example.package',
'type': 'class',
'author': 'OpenStack',
'name': 'package',
'enabled': True,
'description': 'some text',
'is_public': False,
'tags': ['tag1', 'tag2'],
'logo': "logo blob here",
'ui_definition': '{}',
}
def test_list_empty_categories(self):
res = api.category_get_names()
self.assertEqual(0, len(res))
def test_add_list_categories(self):
self._create_categories()
res = api.categories_list()
self.assertEqual(2, len(res))
for cat in res:
self.assertTrue(cat.id is not None)
self.assertTrue(cat.name.startswith('cat'))
def test_package_upload(self):
self._create_categories()
values = self._stub_package()
package = api.package_upload(values, self.tenant_id)
self.assertIsNotNone(package.id)
for k in values.keys():
self.assertEqual(values[k], package[k])
def test_package_fqn_is_unique(self):
self._create_categories()
values = self._stub_package()
api.package_upload(values, self.tenant_id)
self.assertRaises(db_exception.DBDuplicateEntry,
api.package_upload, values, self.tenant_id)
def test_package_delete(self):
values = self._stub_package()
package = api.package_upload(values, self.tenant_id)
api.package_delete(package.id, self.context)
self.assertRaises(exc.HTTPNotFound,
api.package_get, package.id, self.context)
| {
"content_hash": "bac196aba60ef116446e820bac21d7ed",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 69,
"avg_line_length": 30.276315789473685,
"alnum_prop": 0.596697088222512,
"repo_name": "ativelkov/murano-api",
"id": "d95169d42f0ce4ad228335e512c93d1ea8063d05",
"size": "2847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "murano/tests/unit/db/test_catalog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "8634"
},
{
"name": "Python",
"bytes": "935905"
},
{
"name": "Shell",
"bytes": "21119"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import socket
import multiprocessing
import logging
logger = logging.getLogger(__name__)
def start_proxy():
from .handler.client import handle_client
from .conf import settings
PROXY_BIND = settings.get("PROXY_BIND")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(PROXY_BIND)
sock.listen(1)
logger.info("Ready. Listening on %s:%d", *PROXY_BIND)
while True:
conn, address = sock.accept()
logger.info("Got connection")
process = multiprocessing.Process(target=handle_client, args=(conn, address))
process.daemon = True
process.start()
logger.info("Started process %r", process) | {
"content_hash": "dd51e25b6ca6e7db37ab2f427058a205",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 85,
"avg_line_length": 28.035714285714285,
"alnum_prop": 0.6802547770700637,
"repo_name": "mrcrgl/gge-storage",
"id": "f988fec721045145f6c86a090ba00059f042fccd",
"size": "785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/proxy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10602"
},
{
"name": "HTML",
"bytes": "113866"
},
{
"name": "JavaScript",
"bytes": "73567"
},
{
"name": "Python",
"bytes": "331496"
}
],
"symlink_target": ""
} |
import sensor, image, pyb
RED_LED_PIN = 1
BLUE_LED_PIN = 3
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
pyb.LED(RED_LED_PIN).on()
sensor.skip_frames(time = 2000) # Give the user time to get ready.
pyb.LED(RED_LED_PIN).off()
pyb.LED(BLUE_LED_PIN).on()
print("You're on camera!")
img = sensor.snapshot()
img.morph(1, [+2, +1, +0,\
+1, +1, -1,\
+0, -1, -2]) # Emboss the image.
img.save("example.jpg") # or "example.bmp" (or others)
pyb.LED(BLUE_LED_PIN).off()
print("Done! Reset the camera to see the saved image.")
| {
"content_hash": "0461a6979573464435b9e2ada53ce628",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 66,
"avg_line_length": 27.444444444444443,
"alnum_prop": 0.6612685560053981,
"repo_name": "iabdalkader/openmv",
"id": "46e5c3b104b1c77ce51edfd80608af10ef01d4f8",
"size": "886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/examples/Arduino/Portenta-H7/05-Snapshot/emboss_snapshot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "569030"
},
{
"name": "C",
"bytes": "100413378"
},
{
"name": "C++",
"bytes": "97780"
},
{
"name": "CMake",
"bytes": "10173"
},
{
"name": "Dockerfile",
"bytes": "874"
},
{
"name": "Makefile",
"bytes": "72669"
},
{
"name": "Python",
"bytes": "1197447"
},
{
"name": "Shell",
"bytes": "3220"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from test_haystack.core.models import AFifthMockModel, AnotherMockModel, CharPKMockModel, MockModel
from test_haystack.utils import unittest
from haystack import connection_router, connections, indexes, reset_search_queries
from haystack.backends import BaseSearchQuery, SQ
from haystack.exceptions import FacetingError
from haystack.models import SearchResult
from haystack.query import EmptySearchQuerySet, SearchQuerySet, ValuesListSearchQuerySet, ValuesSearchQuerySet
from haystack.utils.loading import UnifiedIndex
from .mocks import (CharPKMockSearchBackend, MixedMockSearchBackend, MOCK_SEARCH_RESULTS, MockSearchBackend,
MockSearchQuery, ReadQuerySetMockSearchBackend)
from .test_indexes import (GhettoAFifthMockModelSearchIndex, ReadQuerySetTestSearchIndex,
TextReadQuerySetTestSearchIndex)
from .test_views import BasicAnotherMockModelSearchIndex, BasicMockModelSearchIndex
test_pickling = True
try:
import pickle
except ImportError:
test_pickling = False
class SQTestCase(TestCase):
def test_split_expression(self):
sq = SQ(foo='bar')
self.assertEqual(sq.split_expression('foo'), ('foo', 'contains'))
self.assertEqual(sq.split_expression('foo__exact'), ('foo', 'exact'))
self.assertEqual(sq.split_expression('foo__contains'), ('foo', 'contains'))
self.assertEqual(sq.split_expression('foo__lt'), ('foo', 'lt'))
self.assertEqual(sq.split_expression('foo__lte'), ('foo', 'lte'))
self.assertEqual(sq.split_expression('foo__gt'), ('foo', 'gt'))
self.assertEqual(sq.split_expression('foo__gte'), ('foo', 'gte'))
self.assertEqual(sq.split_expression('foo__in'), ('foo', 'in'))
self.assertEqual(sq.split_expression('foo__startswith'), ('foo', 'startswith'))
self.assertEqual(sq.split_expression('foo__range'), ('foo', 'range'))
        # Unrecognized filter. Fall back to the default ('contains').
self.assertEqual(sq.split_expression('foo__moof'), ('foo', 'contains'))
def test_repr(self):
self.assertEqual(repr(SQ(foo='bar')), '<SQ: AND foo__contains=bar>')
self.assertEqual(repr(SQ(foo=1)), '<SQ: AND foo__contains=1>')
self.assertEqual(repr(SQ(foo=datetime.datetime(2009, 5, 12, 23, 17))), '<SQ: AND foo__contains=2009-05-12 23:17:00>')
def test_simple_nesting(self):
sq1 = SQ(foo='bar')
sq2 = SQ(foo='bar')
bigger_sq = SQ(sq1 & sq2)
self.assertEqual(repr(bigger_sq), '<SQ: AND (foo__contains=bar AND foo__contains=bar)>')
another_bigger_sq = SQ(sq1 | sq2)
self.assertEqual(repr(another_bigger_sq), '<SQ: AND (foo__contains=bar OR foo__contains=bar)>')
one_more_bigger_sq = SQ(sq1 & ~sq2)
self.assertEqual(repr(one_more_bigger_sq), '<SQ: AND (foo__contains=bar AND NOT (foo__contains=bar))>')
mega_sq = SQ(bigger_sq & SQ(another_bigger_sq | ~one_more_bigger_sq))
self.assertEqual(repr(mega_sq), '<SQ: AND ((foo__contains=bar AND foo__contains=bar) AND ((foo__contains=bar OR foo__contains=bar) OR NOT ((foo__contains=bar AND NOT (foo__contains=bar)))))>')
class BaseSearchQueryTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(BaseSearchQueryTestCase, self).setUp()
self.bsq = BaseSearchQuery()
def test_get_count(self):
self.bsq.add_filter(SQ(foo='bar'))
self.assertRaises(NotImplementedError, self.bsq.get_count)
def test_build_query(self):
self.bsq.add_filter(SQ(foo='bar'))
self.assertRaises(NotImplementedError, self.bsq.build_query)
def test_add_filter(self):
self.assertEqual(len(self.bsq.query_filter), 0)
self.bsq.add_filter(SQ(foo='bar'))
self.assertEqual(len(self.bsq.query_filter), 1)
self.bsq.add_filter(SQ(foo__lt='10'))
self.bsq.add_filter(~SQ(claris='moof'))
self.bsq.add_filter(SQ(claris='moof'), use_or=True)
self.assertEqual(repr(self.bsq.query_filter), '<SQ: OR ((foo__contains=bar AND foo__lt=10 AND NOT (claris__contains=moof)) OR claris__contains=moof)>')
self.bsq.add_filter(SQ(claris='moof'))
self.assertEqual(repr(self.bsq.query_filter), '<SQ: AND (((foo__contains=bar AND foo__lt=10 AND NOT (claris__contains=moof)) OR claris__contains=moof) AND claris__contains=moof)>')
self.bsq.add_filter(SQ(claris='wtf mate'))
self.assertEqual(repr(self.bsq.query_filter), '<SQ: AND (((foo__contains=bar AND foo__lt=10 AND NOT (claris__contains=moof)) OR claris__contains=moof) AND claris__contains=moof AND claris__contains=wtf mate)>')
def test_add_order_by(self):
self.assertEqual(len(self.bsq.order_by), 0)
self.bsq.add_order_by('foo')
self.assertEqual(len(self.bsq.order_by), 1)
def test_clear_order_by(self):
self.bsq.add_order_by('foo')
self.assertEqual(len(self.bsq.order_by), 1)
self.bsq.clear_order_by()
self.assertEqual(len(self.bsq.order_by), 0)
def test_add_model(self):
self.assertEqual(len(self.bsq.models), 0)
self.assertRaises(AttributeError, self.bsq.add_model, object)
self.assertEqual(len(self.bsq.models), 0)
self.bsq.add_model(MockModel)
self.assertEqual(len(self.bsq.models), 1)
self.bsq.add_model(AnotherMockModel)
self.assertEqual(len(self.bsq.models), 2)
def test_set_limits(self):
self.assertEqual(self.bsq.start_offset, 0)
self.assertEqual(self.bsq.end_offset, None)
self.bsq.set_limits(10, 50)
self.assertEqual(self.bsq.start_offset, 10)
self.assertEqual(self.bsq.end_offset, 50)
def test_clear_limits(self):
self.bsq.set_limits(10, 50)
self.assertEqual(self.bsq.start_offset, 10)
self.assertEqual(self.bsq.end_offset, 50)
self.bsq.clear_limits()
self.assertEqual(self.bsq.start_offset, 0)
self.assertEqual(self.bsq.end_offset, None)
def test_add_boost(self):
self.assertEqual(self.bsq.boost, {})
self.bsq.add_boost('foo', 10)
self.assertEqual(self.bsq.boost, {'foo': 10})
def test_add_highlight(self):
self.assertEqual(self.bsq.highlight, False)
self.bsq.add_highlight()
self.assertEqual(self.bsq.highlight, True)
def test_more_like_this(self):
mock = MockModel()
mock.id = 1
msq = MockSearchQuery()
msq.backend = MockSearchBackend('mlt')
ui = connections['default'].get_unified_index()
bmmsi = BasicMockModelSearchIndex()
ui.build(indexes=[bmmsi])
bmmsi.update()
msq.more_like_this(mock)
self.assertEqual(msq.get_count(), 23)
self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)
def test_add_field_facet(self):
self.bsq.add_field_facet('foo')
self.assertEqual(self.bsq.facets, {'foo': {}})
self.bsq.add_field_facet('bar')
self.assertEqual(self.bsq.facets, {'foo': {}, 'bar': {}})
def test_add_date_facet(self):
self.bsq.add_date_facet('foo', start_date=datetime.date(2009, 2, 25), end_date=datetime.date(2009, 3, 25), gap_by='day')
self.assertEqual(self.bsq.date_facets, {'foo': {'gap_by': 'day', 'start_date': datetime.date(2009, 2, 25), 'end_date': datetime.date(2009, 3, 25), 'gap_amount': 1}})
self.bsq.add_date_facet('bar', start_date=datetime.date(2008, 1, 1), end_date=datetime.date(2009, 12, 1), gap_by='month')
self.assertEqual(self.bsq.date_facets, {'foo': {'gap_by': 'day', 'start_date': datetime.date(2009, 2, 25), 'end_date': datetime.date(2009, 3, 25), 'gap_amount': 1}, 'bar': {'gap_by': 'month', 'start_date': datetime.date(2008, 1, 1), 'end_date': datetime.date(2009, 12, 1), 'gap_amount': 1}})
def test_add_query_facet(self):
self.bsq.add_query_facet('foo', 'bar')
self.assertEqual(self.bsq.query_facets, [('foo', 'bar')])
self.bsq.add_query_facet('moof', 'baz')
self.assertEqual(self.bsq.query_facets, [('foo', 'bar'), ('moof', 'baz')])
self.bsq.add_query_facet('foo', 'baz')
self.assertEqual(self.bsq.query_facets, [('foo', 'bar'), ('moof', 'baz'), ('foo', 'baz')])
def test_add_stats(self):
self.bsq.add_stats_query('foo',['bar'])
self.assertEqual(self.bsq.stats,{'foo':['bar']})
self.bsq.add_stats_query('moof',['bar','baz'])
self.assertEqual(self.bsq.stats,{'foo':['bar'],'moof':['bar','baz']})
def test_add_narrow_query(self):
self.bsq.add_narrow_query('foo:bar')
self.assertEqual(self.bsq.narrow_queries, set(['foo:bar']))
self.bsq.add_narrow_query('moof:baz')
self.assertEqual(self.bsq.narrow_queries, set(['foo:bar', 'moof:baz']))
def test_set_result_class(self):
# Assert that we're defaulting to ``SearchResult``.
self.assertTrue(issubclass(self.bsq.result_class, SearchResult))
# Custom class.
class IttyBittyResult(object):
pass
self.bsq.set_result_class(IttyBittyResult)
self.assertTrue(issubclass(self.bsq.result_class, IttyBittyResult))
# Reset to default.
self.bsq.set_result_class(None)
self.assertTrue(issubclass(self.bsq.result_class, SearchResult))
def test_run(self):
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
msq = connections['default'].get_query()
self.assertEqual(len(msq.get_results()), 23)
self.assertEqual(int(msq.get_results()[0].pk), MOCK_SEARCH_RESULTS[0].pk)
# Restore.
connections['default']._index = self.old_unified_index
def test_clone(self):
self.bsq.add_filter(SQ(foo='bar'))
self.bsq.add_filter(SQ(foo__lt='10'))
self.bsq.add_filter(~SQ(claris='moof'))
self.bsq.add_filter(SQ(claris='moof'), use_or=True)
self.bsq.add_order_by('foo')
self.bsq.add_model(MockModel)
self.bsq.add_boost('foo', 2)
self.bsq.add_highlight()
self.bsq.add_field_facet('foo')
self.bsq.add_date_facet('foo', start_date=datetime.date(2009, 1, 1), end_date=datetime.date(2009, 1, 31), gap_by='day')
self.bsq.add_query_facet('foo', 'bar')
self.bsq.add_stats_query('foo', 'bar')
self.bsq.add_narrow_query('foo:bar')
clone = self.bsq._clone()
self.assertTrue(isinstance(clone, BaseSearchQuery))
self.assertEqual(len(clone.query_filter), 2)
self.assertEqual(len(clone.order_by), 1)
self.assertEqual(len(clone.models), 1)
self.assertEqual(len(clone.boost), 1)
self.assertEqual(clone.highlight, True)
self.assertEqual(len(clone.facets), 1)
self.assertEqual(len(clone.date_facets), 1)
self.assertEqual(len(clone.query_facets), 1)
self.assertEqual(len(clone.narrow_queries), 1)
self.assertEqual(clone.start_offset, self.bsq.start_offset)
self.assertEqual(clone.end_offset, self.bsq.end_offset)
self.assertEqual(clone.backend.__class__, self.bsq.backend.__class__)
def test_log_query(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
self.bmmsi.update()
with self.settings(DEBUG=False):
msq = connections['default'].get_query()
self.assertEqual(len(msq.get_results()), 23)
self.assertEqual(len(connections['default'].queries), 0)
with self.settings(DEBUG=True):
# Redefine it to clear out the cached results.
msq2 = connections['default'].get_query()
self.assertEqual(len(msq2.get_results()), 23)
self.assertEqual(len(connections['default'].queries), 1)
self.assertEqual(connections['default'].queries[0]['query_string'], '')
msq3 = connections['default'].get_query()
msq3.add_filter(SQ(foo='bar'))
len(msq3.get_results())
self.assertEqual(len(connections['default'].queries), 2)
self.assertEqual(connections['default'].queries[0]['query_string'], '')
self.assertEqual(connections['default'].queries[1]['query_string'], '')
# Restore.
connections['default']._index = self.old_unified_index
class CharPKMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='key')
def get_model(self):
return CharPKMockModel
@override_settings(DEBUG=True)
class SearchQuerySetTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(SearchQuerySetTestCase, self).setUp()
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.cpkmmsi = CharPKMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
self.msqs = SearchQuerySet()
# Stow.
reset_search_queries()
def tearDown(self):
# Restore.
connections['default']._index = self.old_unified_index
super(SearchQuerySetTestCase, self).tearDown()
def test_len(self):
self.assertEqual(len(self.msqs), 23)
def test_repr(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
self.assertEqual(repr(self.msqs).replace("u'", "'"), "[<SearchResult: core.mockmodel (pk='1')>, <SearchResult: core.mockmodel (pk='2')>, <SearchResult: core.mockmodel (pk='3')>, <SearchResult: core.mockmodel (pk='4')>, <SearchResult: core.mockmodel (pk='5')>, <SearchResult: core.mockmodel (pk='6')>, <SearchResult: core.mockmodel (pk='7')>, <SearchResult: core.mockmodel (pk='8')>, <SearchResult: core.mockmodel (pk='9')>, <SearchResult: core.mockmodel (pk='10')>, <SearchResult: core.mockmodel (pk='11')>, <SearchResult: core.mockmodel (pk='12')>, <SearchResult: core.mockmodel (pk='13')>, <SearchResult: core.mockmodel (pk='14')>, <SearchResult: core.mockmodel (pk='15')>, <SearchResult: core.mockmodel (pk='16')>, <SearchResult: core.mockmodel (pk='17')>, <SearchResult: core.mockmodel (pk='18')>, <SearchResult: core.mockmodel (pk='19')>, '...(remaining elements truncated)...']")
self.assertEqual(len(connections['default'].queries), 1)
def test_iter(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
msqs = self.msqs.all()
results = [int(res.pk) for res in msqs]
self.assertEqual(results, [res.pk for res in MOCK_SEARCH_RESULTS[:23]])
self.assertEqual(len(connections['default'].queries), 3)
def test_slice(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.msqs.all()
self.assertEqual([int(res.pk) for res in results[1:11]], [res.pk for res in MOCK_SEARCH_RESULTS[1:11]])
self.assertEqual(len(connections['default'].queries), 1)
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.msqs.all()
self.assertEqual(int(results[22].pk), MOCK_SEARCH_RESULTS[22].pk)
self.assertEqual(len(connections['default'].queries), 1)
def test_manual_iter(self):
results = self.msqs.all()
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
check = [result.pk for result in results._manual_iter()]
self.assertEqual(check, [u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8', u'9', u'10', u'11', u'12', u'13', u'14', u'15', u'16', u'17', u'18', u'19', u'20', u'21', u'22', u'23'])
self.assertEqual(len(connections['default'].queries), 3)
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
# Test to ensure we properly fill the cache, even if we get fewer
# results back (not a handled model) than the hit count indicates.
# This will hang indefinitely if broken.
old_ui = self.ui
self.ui.build(indexes=[self.cpkmmsi])
connections['default']._index = self.ui
self.cpkmmsi.update()
results = self.msqs.all()
loaded = [result.pk for result in results._manual_iter()]
self.assertEqual(loaded, [u'sometext', u'1234'])
self.assertEqual(len(connections['default'].queries), 1)
connections['default']._index = old_ui
def test_fill_cache(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
results = self.msqs.all()
self.assertEqual(len(results._result_cache), 0)
self.assertEqual(len(connections['default'].queries), 0)
results._fill_cache(0, 10)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 10)
self.assertEqual(len(connections['default'].queries), 1)
results._fill_cache(10, 20)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
self.assertEqual(len(connections['default'].queries), 2)
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
# Test to ensure we properly fill the cache, even if we get fewer
# results back (not a handled model) than the hit count indicates.
sqs = SearchQuerySet().all()
sqs.query.backend = MixedMockSearchBackend('default')
results = sqs
self.assertEqual(len([result for result in results._result_cache if result is not None]), 0)
self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [])
self.assertEqual(len(connections['default'].queries), 0)
results._fill_cache(0, 10)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 9)
self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10])
self.assertEqual(len(connections['default'].queries), 2)
results._fill_cache(10, 20)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 17)
self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20])
self.assertEqual(len(connections['default'].queries), 4)
results._fill_cache(20, 30)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 20)
self.assertEqual([int(result.pk) for result in results._result_cache if result is not None], [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(len(connections['default'].queries), 6)
def test_cache_is_full(self):
reset_search_queries()
self.assertEqual(len(connections['default'].queries), 0)
self.assertEqual(self.msqs._cache_is_full(), False)
results = self.msqs.all()
fire_the_iterator_and_fill_cache = [result for result in results]
self.assertEqual(results._cache_is_full(), True)
self.assertEqual(len(connections['default'].queries), 3)
def test_all(self):
sqs = self.msqs.all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
def test_filter(self):
sqs = self.msqs.filter(content='foo')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_exclude(self):
sqs = self.msqs.exclude(content='foo')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_order_by(self):
sqs = self.msqs.order_by('foo')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertTrue('foo' in sqs.query.order_by)
def test_models(self):
# Stow.
old_unified_index = connections['default']._index
ui = UnifiedIndex()
bmmsi = BasicMockModelSearchIndex()
bammsi = BasicAnotherMockModelSearchIndex()
ui.build(indexes=[bmmsi, bammsi])
connections['default']._index = ui
msqs = SearchQuerySet()
sqs = msqs.all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 0)
sqs = msqs.models(MockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 1)
sqs = msqs.models(MockModel, AnotherMockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 2)
# This will produce a warning.
ui.build(indexes=[bmmsi])
sqs = msqs.models(AnotherMockModel)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.models), 1)
def test_result_class(self):
sqs = self.msqs.all()
self.assertTrue(issubclass(sqs.query.result_class, SearchResult))
# Custom class.
class IttyBittyResult(object):
pass
sqs = self.msqs.result_class(IttyBittyResult)
self.assertTrue(issubclass(sqs.query.result_class, IttyBittyResult))
# Reset to default.
sqs = self.msqs.result_class(None)
self.assertTrue(issubclass(sqs.query.result_class, SearchResult))
def test_boost(self):
sqs = self.msqs.boost('foo', 10)
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.boost.keys()), 1)
def test_highlight(self):
sqs = self.msqs.highlight()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.query.highlight, True)
def test_spelling(self):
# Test the case where spelling support is disabled.
sqs = self.msqs.filter(content='Indx')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(sqs.spelling_suggestion(), None)
self.assertEqual(sqs.spelling_suggestion('indexy'), None)
def test_raw_search(self):
self.assertEqual(len(self.msqs.raw_search('foo')), 23)
self.assertEqual(len(self.msqs.raw_search('(content__exact:hello AND content__exact:world)')), 23)
def test_load_all(self):
# Models with character primary keys.
sqs = SearchQuerySet()
sqs.query.backend = CharPKMockSearchBackend('charpk')
results = sqs.load_all().all()
self.assertEqual(len(results._result_cache), 0)
results._fill_cache(0, 2)
self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)
# If nothing is handled, you get nothing.
old_ui = connections['default']._index
ui = UnifiedIndex()
ui.build(indexes=[])
connections['default']._index = ui
sqs = self.msqs.load_all()
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs), 0)
connections['default']._index = old_ui
# For full tests, see the solr_backend.
def test_load_all_read_queryset(self):
# Stow.
old_ui = connections['default']._index
ui = UnifiedIndex()
gafmmsi = GhettoAFifthMockModelSearchIndex()
ui.build(indexes=[gafmmsi])
connections['default']._index = ui
gafmmsi.update()
sqs = SearchQuerySet()
results = sqs.load_all().all()
results.query.backend = ReadQuerySetMockSearchBackend('default')
results._fill_cache(0, 2)
# The deleted result isn't returned
self.assertEqual(len([result for result in results._result_cache if result is not None]), 1)
# Register a SearchIndex with a read_queryset that returns deleted items
rqstsi = TextReadQuerySetTestSearchIndex()
ui.build(indexes=[rqstsi])
rqstsi.update()
sqs = SearchQuerySet()
results = sqs.load_all().all()
results.query.backend = ReadQuerySetMockSearchBackend('default')
results._fill_cache(0, 2)
# Both the deleted and not deleted items are returned
self.assertEqual(len([result for result in results._result_cache if result is not None]), 2)
# Restore.
connections['default']._index = old_ui
def test_auto_query(self):
sqs = self.msqs.auto_query('test search -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test search -stuff>')
sqs = self.msqs.auto_query('test "my thing" search -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search -stuff>')
sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' -stuff>')
sqs = self.msqs.auto_query('test "my thing" search \'moar quotes\' "foo -stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains=test "my thing" search \'moar quotes\' "foo -stuff>')
sqs = self.msqs.auto_query('test - stuff')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND content__contains=test - stuff>")
# Ensure bits in exact matches get escaped properly as well.
sqs = self.msqs.auto_query('"pants:rule"')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND content__contains="pants:rule">')
# Now with a different fieldname
sqs = self.msqs.auto_query('test search -stuff', fieldname='title')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), "<SQ: AND title__contains=test search -stuff>")
sqs = self.msqs.auto_query('test "my thing" search -stuff', fieldname='title')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(repr(sqs.query.query_filter), '<SQ: AND title__contains=test "my thing" search -stuff>')
def test_count(self):
self.assertEqual(self.msqs.count(), 23)
def test_facet_counts(self):
self.assertEqual(self.msqs.facet_counts(), {})
def test_best_match(self):
self.assertTrue(isinstance(self.msqs.best_match(), SearchResult))
def test_latest(self):
self.assertTrue(isinstance(self.msqs.latest('pub_date'), SearchResult))
def test_more_like_this(self):
mock = MockModel()
mock.id = 1
self.assertEqual(len(self.msqs.more_like_this(mock)), 23)
def test_facets(self):
sqs = self.msqs.facet('foo')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.facets), 1)
sqs2 = self.msqs.facet('foo').facet('bar')
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.facets), 2)
def test_date_facets(self):
try:
sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='smarblaph')
self.fail()
except FacetingError as e:
self.assertEqual(str(e), "The gap_by ('smarblaph') must be one of the following: year, month, day, hour, minute, second.")
sqs = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.date_facets), 1)
sqs2 = self.msqs.date_facet('foo', start_date=datetime.date(2008, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='month').date_facet('bar', start_date=datetime.date(2007, 2, 25), end_date=datetime.date(2009, 2, 25), gap_by='year')
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.date_facets), 2)
def test_query_facets(self):
sqs = self.msqs.query_facet('foo', '[bar TO *]')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_facets), 1)
sqs2 = self.msqs.query_facet('foo', '[bar TO *]').query_facet('bar', '[100 TO 499]')
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.query_facets), 2)
# Test multiple query facets on a single field
sqs3 = self.msqs.query_facet('foo', '[bar TO *]').query_facet('bar', '[100 TO 499]').query_facet('foo', '[1000 TO 1499]')
self.assertTrue(isinstance(sqs3, SearchQuerySet))
self.assertEqual(len(sqs3.query.query_facets), 3)
def test_stats(self):
sqs = self.msqs.stats_facet('foo','bar')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.stats),1)
sqs2 = self.msqs.stats_facet('foo','bar').stats_facet('foo','baz')
self.assertTrue(isinstance(sqs2, SearchQuerySet))
self.assertEqual(len(sqs2.query.stats),1)
sqs3 = self.msqs.stats_facet('foo','bar').stats_facet('moof','baz')
self.assertTrue(isinstance(sqs3, SearchQuerySet))
self.assertEqual(len(sqs3.query.stats),2)
def test_narrow(self):
sqs = self.msqs.narrow('foo:moof')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.narrow_queries), 1)
def test_clone(self):
results = self.msqs.filter(foo='bar', foo__lt='10')
clone = results._clone()
self.assertTrue(isinstance(clone, SearchQuerySet))
self.assertEqual(str(clone.query), str(results.query))
self.assertEqual(clone._result_cache, [])
self.assertEqual(clone._result_count, None)
self.assertEqual(clone._cache_full, False)
self.assertEqual(clone._using, results._using)
def test_using(self):
sqs = SearchQuerySet(using='default')
self.assertNotEqual(sqs.query, None)
self.assertEqual(sqs.query._using, 'default')
def test_chaining(self):
sqs = self.msqs.filter(content='foo')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
# A second instance should inherit none of the changes from above.
sqs = self.msqs.filter(content='bar')
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 1)
def test_none(self):
sqs = self.msqs.none()
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
def test___and__(self):
sqs1 = self.msqs.filter(content='foo')
sqs2 = self.msqs.filter(content='bar')
sqs = sqs1 & sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
def test___or__(self):
sqs1 = self.msqs.filter(content='foo')
sqs2 = self.msqs.filter(content='bar')
sqs = sqs1 | sqs2
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.query_filter), 2)
def test_and_or(self):
"""
Combining AND queries with OR should give
AND(OR(a, b), OR(c, d))
"""
sqs1 = self.msqs.filter(content='foo').filter(content='oof')
sqs2 = self.msqs.filter(content='bar').filter(content='rab')
sqs = sqs1 | sqs2
self.assertEqual(sqs.query.query_filter.connector, 'OR')
self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter))
self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))
def test_or_and(self):
"""
Combining OR queries with AND should give
OR(AND(a, b), AND(c, d))
"""
sqs1 = self.msqs.filter(content='foo').filter_or(content='oof')
sqs2 = self.msqs.filter(content='bar').filter_or(content='rab')
sqs = sqs1 & sqs2
self.assertEqual(sqs.query.query_filter.connector, 'AND')
self.assertEqual(repr(sqs.query.query_filter.children[0]), repr(sqs1.query.query_filter))
self.assertEqual(repr(sqs.query.query_filter.children[1]), repr(sqs2.query.query_filter))
class ValuesQuerySetTestCase(SearchQuerySetTestCase):
def test_values_sqs(self):
sqs = self.msqs.auto_query("test").values("id")
self.assert_(isinstance(sqs, ValuesSearchQuerySet))
# We'll do a basic test to confirm that slicing works as expected:
self.assert_(isinstance(sqs[0], dict))
self.assert_(isinstance(sqs[0:5][0], dict))
def test_valueslist_sqs(self):
sqs = self.msqs.auto_query("test").values_list("id")
self.assert_(isinstance(sqs, ValuesListSearchQuerySet))
self.assert_(isinstance(sqs[0], (list, tuple)))
self.assert_(isinstance(sqs[0:1][0], (list, tuple)))
self.assertRaises(TypeError, self.msqs.auto_query("test").values_list, "id", "score", flat=True)
flat_sqs = self.msqs.auto_query("test").values_list("id", flat=True)
self.assert_(isinstance(sqs, ValuesListSearchQuerySet))
# Note that this will actually be None because a mocked sqs lacks
# anything else:
self.assert_(flat_sqs[0] is None)
self.assert_(flat_sqs[0:1][0] is None)
class EmptySearchQuerySetTestCase(TestCase):
def setUp(self):
super(EmptySearchQuerySetTestCase, self).setUp()
self.esqs = EmptySearchQuerySet()
def test_get_count(self):
self.assertEqual(self.esqs.count(), 0)
self.assertEqual(len(self.esqs.all()), 0)
def test_filter(self):
sqs = self.esqs.filter(content='foo')
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
def test_exclude(self):
sqs = self.esqs.exclude(content='foo')
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
def test_slice(self):
sqs = self.esqs.filter(content='foo')
self.assertTrue(isinstance(sqs, EmptySearchQuerySet))
self.assertEqual(len(sqs), 0)
self.assertEqual(sqs[:10], [])
try:
sqs[4]
self.fail()
except IndexError:
pass
def test_dictionary_lookup(self):
"""
Ensure doing a dictionary lookup raises a TypeError so
EmptySearchQuerySets can be used in templates.
"""
self.assertRaises(TypeError, lambda: self.esqs['count'])
@unittest.skipUnless(test_pickling, 'Skipping pickling tests')
@override_settings(DEBUG=True)
class PickleSearchQuerySetTestCase(TestCase):
def setUp(self):
super(PickleSearchQuerySetTestCase, self).setUp()
# Stow.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.cpkmmsi = CharPKMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
connections['default']._index = self.ui
# Update the "index".
backend = connections['default'].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
self.msqs = SearchQuerySet()
# Stow.
reset_search_queries()
def tearDown(self):
# Restore.
connections['default']._index = self.old_unified_index
super(PickleSearchQuerySetTestCase, self).tearDown()
def test_pickling(self):
results = self.msqs.all()
for res in results:
# Make sure the cache is full.
pass
in_a_pickle = pickle.dumps(results)
like_a_cuke = pickle.loads(in_a_pickle)
self.assertEqual(len(like_a_cuke), len(results))
self.assertEqual(like_a_cuke[0].id, results[0].id)
| {
"content_hash": "90766231deda1e70f2be67e1b4062f59",
"timestamp": "",
"source": "github",
"line_count": 888,
"max_line_length": 893,
"avg_line_length": 42.236486486486484,
"alnum_prop": 0.6397376419772837,
"repo_name": "comandrei/django-haystack",
"id": "c02edb20c58c5a781c4acba25aca55a2a0f5807b",
"size": "37530",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "test_haystack/test_query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1431"
},
{
"name": "Python",
"bytes": "752584"
},
{
"name": "Shell",
"bytes": "1809"
}
],
"symlink_target": ""
} |
import unittest
from pyspark.rdd import PythonEvalType
from pyspark.sql import Row
from pyspark.sql.functions import array, explode, col, lit, mean, sum, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.types import ArrayType, TimestampType
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return float(v + 1)
return plus_one
@property
def pandas_scalar_plus_two(self):
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
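    # Illustrative note (not part of the original file): a GROUPED_AGG pandas UDF
    # receives each aggregated column as a pandas.Series holding one group's values
    # and must reduce it to a single scalar. Calling the wrapped function directly,
    # e.g. self.pandas_agg_weighted_mean_udf.func(pd.Series([1.0, 3.0]),
    # pd.Series([1.0, 1.0])), returns 2.0.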
def test_manual(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_basic(self):
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
assert_frame_equal(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
assert_frame_equal(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
assert_frame_equal(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
@pandas_udf(ArrayType(TimestampType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use a
        # scalar pandas UDF in groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
def test_complex_groupby(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v)).sort('plus_one(id)')
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v)).sort('plus_one(id)')
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
# groupby one expression and one python UDF
result6 = (df.groupby(df.v % 2, plus_one(df.id))
.agg(sum_udf(df.v)).sort(['(v % 2)', 'plus_one(id)']))
expected6 = (df.groupby(df.v % 2, plus_one(df.id))
.agg(sum(df.v)).sort(['(v % 2)', 'plus_one(id)']))
# groupby one expression and one scalar pandas UDF
result7 = (df.groupby(df.v % 2, plus_two(df.id))
.agg(sum_udf(df.v)).sort(['sum(v)', 'plus_two(id)']))
expected7 = (df.groupby(df.v % 2, plus_two(df.id))
.agg(sum(df.v)).sort(['sum(v)', 'plus_two(id)']))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
assert_frame_equal(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
        # Test complex expressions with sql expression, scalar pandas UDF and
# group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
assert_frame_equal(expected2, result2)
assert_frame_equal(expected3, result3)
def test_retain_group_columns(self):
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
self.assertEqual(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
def test_register_vectorized_udf_basic(self):
sum_pandas_udf = pandas_udf(
lambda v: v.sum(), "integer", PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
self.assertEqual(sum_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
group_agg_pandas_udf = self.spark.udf.register("sum_pandas_udf", sum_pandas_udf)
self.assertEqual(group_agg_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
q = "SELECT sum_pandas_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
actual = sorted(map(lambda r: r[0], self.spark.sql(q).collect()))
expected = [1, 5]
self.assertEqual(actual, expected)
def test_grouped_with_empty_partition(self):
data = [Row(id=1, x=2), Row(id=1, x=3), Row(id=2, x=4)]
        expected = [Row(id=1, sum=5), Row(id=2, sum=4)]
num_parts = len(data) + 1
df = self.spark.createDataFrame(self.sc.parallelize(data, numSlices=num_parts))
f = pandas_udf(lambda x: x.sum(),
'int', PandasUDFType.GROUPED_AGG)
result = df.groupBy('id').agg(f(df['x']).alias('sum')).collect()
self.assertEqual(result, expected)
def test_grouped_without_group_by_clause(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max_udf(v):
return v.max()
df = self.spark.range(0, 100)
self.spark.udf.register('max_udf', max_udf)
with self.tempView("table"):
df.createTempView('table')
agg1 = df.agg(max_udf(df['id']))
agg2 = self.spark.sql("select max_udf(id) from table")
assert_frame_equal(agg1.toPandas(), agg2.toPandas())
def test_no_predicate_pushdown_through(self):
# SPARK-30921: We should not pushdown predicates of PythonUDFs through Aggregate.
import numpy as np
@pandas_udf('float', PandasUDFType.GROUPED_AGG)
def mean(x):
return np.mean(x)
df = self.spark.createDataFrame([
Row(id=1, foo=42), Row(id=2, foo=1), Row(id=2, foo=2)
])
agg = df.groupBy('id').agg(mean('foo').alias("mean"))
filtered = agg.filter(agg['mean'] > 40.0)
assert(filtered.collect()[0]["mean"] == 42.0)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_grouped_agg import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
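# Typical invocation (an assumption, not part of the original file): with pyspark on
# the Python path this module can be run directly, e.g.
#   python -m pyspark.sql.tests.test_pandas_udf_grouped_agg
# which hands control to the unittest runner configured above.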
| {
"content_hash": "5d89de351dcd03289f12954b89829a8c",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 98,
"avg_line_length": 39.86166007905138,
"alnum_prop": 0.5287555775904809,
"repo_name": "wangmiao1981/spark",
"id": "0c78855cd9123da03708c97bfe0c2720f86da7ed",
"size": "20955",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "50024"
},
{
"name": "Batchfile",
"bytes": "31352"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26836"
},
{
"name": "Dockerfile",
"bytes": "9014"
},
{
"name": "HTML",
"bytes": "41387"
},
{
"name": "HiveQL",
"bytes": "1890736"
},
{
"name": "Java",
"bytes": "4123699"
},
{
"name": "JavaScript",
"bytes": "203741"
},
{
"name": "Makefile",
"bytes": "7776"
},
{
"name": "PLpgSQL",
"bytes": "380540"
},
{
"name": "PowerShell",
"bytes": "3865"
},
{
"name": "Python",
"bytes": "3130521"
},
{
"name": "R",
"bytes": "1186948"
},
{
"name": "Roff",
"bytes": "21950"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "31710454"
},
{
"name": "Shell",
"bytes": "203944"
},
{
"name": "TSQL",
"bytes": "466993"
},
{
"name": "Thrift",
"bytes": "67584"
},
{
"name": "q",
"bytes": "79845"
}
],
"symlink_target": ""
} |
from distutils.core import setup, Extension
# matey extension module
matey_module = Extension(
    "_matey",
    extra_compile_args=["-Wno-unused-variable"],
    sources=["src/mateymodule.c", "src/matrix.c"]
)
# distribution
setup(name="matey",
      version="0.0",
      description="Numerical linear algebra for pyrates, aaarrggh!",
      author="Benjamin R. Bray",
      url="https://github.com/benrbray/matey",
      ext_modules=[matey_module],
      packages=["matey"])
| {
"content_hash": "013d831ab0d3e733fd32ece00d7f41e4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 65,
"avg_line_length": 26.647058823529413,
"alnum_prop": 0.6975717439293598,
"repo_name": "benrbray/matey",
"id": "e659fa554e3c56cab7cdf36143ecb3ed2793298f",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matey_pyc/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "23536"
},
{
"name": "C++",
"bytes": "748"
},
{
"name": "Jupyter Notebook",
"bytes": "9980"
},
{
"name": "Makefile",
"bytes": "262"
},
{
"name": "Python",
"bytes": "6762"
}
],
"symlink_target": ""
} |
"""Utility functions for X509.
"""
import calendar
import datetime
import errno
import logging
import re
import time
import OpenSSL
from ganeti import errors
from ganeti import constants
from ganeti import pathutils
from ganeti.utils import text as utils_text
from ganeti.utils import io as utils_io
from ganeti.utils import hash as utils_hash
HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
(re.escape(constants.X509_CERT_SIGNATURE_HEADER),
HEX_CHAR_RE, HEX_CHAR_RE),
re.S | re.I)
X509_CERT_SIGN_DIGEST = "SHA1"
# Certificate verification results
(CERT_WARNING,
CERT_ERROR) = range(1, 3)
#: ASN1 time regexp
_ASN1_TIME_REGEX = re.compile(r"^(\d+)([-+]\d\d)(\d\d)$")
def _ParseAsn1Generalizedtime(value):
"""Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
@type value: string
@param value: ASN1 GENERALIZEDTIME timestamp
@return: Seconds since the Epoch (1970-01-01 00:00:00 UTC)
"""
m = _ASN1_TIME_REGEX.match(value)
if m:
# We have an offset
asn1time = m.group(1)
hours = int(m.group(2))
minutes = int(m.group(3))
utcoffset = (60 * hours) + minutes
else:
if not value.endswith("Z"):
raise ValueError("Missing timezone")
asn1time = value[:-1]
utcoffset = 0
parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
return calendar.timegm(tt.utctimetuple())
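# Illustrative behaviour (not part of the original module), using made-up timestamps:
#   _ParseAsn1Generalizedtime("19700101000000Z")     -> 0
#   _ParseAsn1Generalizedtime("19700101010000+0100") -> 0 (the offset is subtracted)
#   _ParseAsn1Generalizedtime("19700101000000")      -> raises ValueError ("Missing timezone")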
def GetX509CertValidity(cert):
"""Returns the validity period of the certificate.
@type cert: OpenSSL.crypto.X509
@param cert: X509 certificate object
"""
# The get_notBefore and get_notAfter functions are only supported in
# pyOpenSSL 0.7 and above.
try:
get_notbefore_fn = cert.get_notBefore
except AttributeError:
not_before = None
else:
not_before_asn1 = get_notbefore_fn()
if not_before_asn1 is None:
not_before = None
else:
not_before = _ParseAsn1Generalizedtime(not_before_asn1)
try:
get_notafter_fn = cert.get_notAfter
except AttributeError:
not_after = None
else:
not_after_asn1 = get_notafter_fn()
if not_after_asn1 is None:
not_after = None
else:
not_after = _ParseAsn1Generalizedtime(not_after_asn1)
return (not_before, not_after)
def _VerifyCertificateInner(expired, not_before, not_after, now,
warn_days, error_days):
"""Verifies certificate validity.
@type expired: bool
@param expired: Whether pyOpenSSL considers the certificate as expired
@type not_before: number or None
@param not_before: Unix timestamp before which certificate is not valid
@type not_after: number or None
@param not_after: Unix timestamp after which certificate is invalid
@type now: number
@param now: Current time as Unix timestamp
@type warn_days: number or None
@param warn_days: How many days before expiration a warning should be reported
@type error_days: number or None
@param error_days: How many days before expiration an error should be reported
"""
if expired:
msg = "Certificate is expired"
if not_before is not None and not_after is not None:
msg += (" (valid from %s to %s)" %
(utils_text.FormatTime(not_before),
utils_text.FormatTime(not_after)))
elif not_before is not None:
msg += " (valid from %s)" % utils_text.FormatTime(not_before)
elif not_after is not None:
msg += " (valid until %s)" % utils_text.FormatTime(not_after)
return (CERT_ERROR, msg)
elif not_before is not None and not_before > now:
return (CERT_WARNING,
"Certificate not yet valid (valid from %s)" %
utils_text.FormatTime(not_before))
elif not_after is not None:
remaining_days = int((not_after - now) / (24 * 3600))
msg = "Certificate expires in about %d days" % remaining_days
if error_days is not None and remaining_days <= error_days:
return (CERT_ERROR, msg)
if warn_days is not None and remaining_days <= warn_days:
return (CERT_WARNING, msg)
return (None, None)
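# Illustrative results (not part of the original module), with now = time.time():
#   _VerifyCertificateInner(True, None, None, now, 30, 7)
#       -> (CERT_ERROR, "Certificate is expired")
#   _VerifyCertificateInner(False, None, now + 86400, now, 30, 7)
#       -> (CERT_ERROR, "Certificate expires in about 1 days")
#   _VerifyCertificateInner(False, None, None, now, 30, 7)
#       -> (None, None)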
def VerifyX509Certificate(cert, warn_days, error_days):
"""Verifies a certificate for LUClusterVerify.
@type cert: OpenSSL.crypto.X509
@param cert: X509 certificate object
@type warn_days: number or None
@param warn_days: How many days before expiration a warning should be reported
@type error_days: number or None
@param error_days: How many days before expiration an error should be reported
"""
# Depending on the pyOpenSSL version, this can just return (None, None)
(not_before, not_after) = GetX509CertValidity(cert)
now = time.time() + constants.NODE_MAX_CLOCK_SKEW
return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
now, warn_days, error_days)
def SignX509Certificate(cert, key, salt):
"""Sign a X509 certificate.
An RFC822-like signature header is added in front of the certificate.
@type cert: OpenSSL.crypto.X509
@param cert: X509 certificate object
@type key: string
@param key: Key for HMAC
@type salt: string
@param salt: Salt for HMAC
@rtype: string
@return: Serialized and signed certificate in PEM format
"""
if not VALID_X509_SIGNATURE_SALT.match(salt):
raise errors.GenericError("Invalid salt: %r" % salt)
# Dumping as PEM here ensures the certificate is in a sane format
cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
return ("%s: %s/%s\n\n%s" %
(constants.X509_CERT_SIGNATURE_HEADER, salt,
utils_hash.Sha1Hmac(key, cert_pem, salt=salt),
cert_pem))
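# Illustrative sketch of the output (not part of the original module): the result is
# the PEM dump of the certificate prefixed by a header line of the form
#   "<constants.X509_CERT_SIGNATURE_HEADER>: <salt>/<hmac-of-cert-pem>"
# followed by a blank line; LoadSignedX509Certificate() below strips and verifies
# this header again.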
def _ExtractX509CertificateSignature(cert_pem):
"""Helper function to extract signature from X509 certificate.
"""
# Extract signature from original PEM data
for line in cert_pem.splitlines():
if line.startswith("---"):
break
m = X509_SIGNATURE.match(line.strip())
if m:
return (m.group("salt"), m.group("sign"))
raise errors.GenericError("X509 certificate signature is missing")
def LoadSignedX509Certificate(cert_pem, key):
"""Verifies a signed X509 certificate.
@type cert_pem: string
@param cert_pem: Certificate in PEM format and with signature header
@type key: string
@param key: Key for HMAC
@rtype: tuple; (OpenSSL.crypto.X509, string)
@return: X509 certificate object and salt
"""
(salt, signature) = _ExtractX509CertificateSignature(cert_pem)
# Load and dump certificate to ensure it's in a sane format
(cert, sane_pem) = ExtractX509Certificate(cert_pem)
if not utils_hash.VerifySha1Hmac(key, sane_pem, signature, salt=salt):
raise errors.GenericError("X509 certificate signature is invalid")
return (cert, salt)
def GenerateSelfSignedX509Cert(common_name, validity, serial_no):
"""Generates a self-signed X509 certificate.
@type common_name: string
@param common_name: commonName value
@type validity: int
@param validity: Validity for certificate in seconds
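  @type serial_no: int
  @param serial_no: certificate serial number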
@return: a tuple of strings containing the PEM-encoded private key and
certificate
"""
# Create private and public key
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)
# Create self-signed certificate
cert = OpenSSL.crypto.X509()
if common_name:
cert.get_subject().CN = common_name
cert.set_serial_number(serial_no)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(validity)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(key)
cert.sign(key, constants.X509_CERT_SIGN_DIGEST)
key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
return (key_pem, cert_pem)
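# Illustrative usage (not part of the original module); the host name, validity and
# serial number below are made-up example values:
#   (key_pem, cert_pem) = GenerateSelfSignedX509Cert("node1.example.com",
#                                                    24 * 60 * 60, 1)
#   (cert, _) = ExtractX509Certificate(cert_pem)
#   assert cert.get_subject().CN == "node1.example.com"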
def GenerateSelfSignedSslCert(filename, serial_no,
common_name=constants.X509_CERT_CN,
validity=constants.X509_CERT_DEFAULT_VALIDITY,
uid=-1, gid=-1):
"""Legacy function to generate self-signed X509 certificate.
@type filename: str
@param filename: path to write certificate to
@type common_name: string
@param common_name: commonName value
@type validity: int
@param validity: validity of certificate in number of days
@type uid: int
@param uid: the user ID of the user who will be owner of the certificate file
@type gid: int
@param gid: the group ID of the group who will own the certificate file
@return: a tuple of strings containing the PEM-encoded private key and
certificate
"""
  # TODO: Investigate using the cluster name instead of X509_CERT_CN for
# common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
# and node daemon certificates have the proper Subject/Issuer.
(key_pem, cert_pem) = GenerateSelfSignedX509Cert(
common_name, validity * 24 * 60 * 60, serial_no)
utils_io.WriteFile(filename, mode=0440, data=key_pem + cert_pem,
uid=uid, gid=gid)
return (key_pem, cert_pem)
def GenerateSignedX509Cert(common_name, validity, serial_no,
signing_cert_pem):
"""Generates a signed (but not self-signed) X509 certificate.
@type common_name: string
@param common_name: commonName value, should be hostname of the machine
@type validity: int
@param validity: Validity for certificate in seconds
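  @type serial_no: int
  @param serial_no: certificate serial number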
  @type signing_cert_pem: string
  @param signing_cert_pem: PEM-encoded private key and certificate used to sign
    the new certificate (both must be present in the PEM data)
@return: a tuple of strings containing the PEM-encoded private key and
certificate
"""
# Create key pair with private and public key.
key_pair = OpenSSL.crypto.PKey()
key_pair.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)
  # Create certificate signing request.
req = OpenSSL.crypto.X509Req()
req.get_subject().CN = common_name
req.set_pubkey(key_pair)
req.sign(key_pair, X509_CERT_SIGN_DIGEST)
# Load the certificates used for signing.
signing_key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM, signing_cert_pem)
signing_cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, signing_cert_pem)
# Create a certificate and sign it.
cert = OpenSSL.crypto.X509()
cert.set_subject(req.get_subject())
cert.set_serial_number(serial_no)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(validity)
cert.set_issuer(signing_cert.get_subject())
cert.set_pubkey(req.get_pubkey())
cert.sign(signing_key, X509_CERT_SIGN_DIGEST)
# Encode the key and certificate in PEM format.
key_pem = OpenSSL.crypto.dump_privatekey(
OpenSSL.crypto.FILETYPE_PEM, key_pair)
cert_pem = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, cert)
return (key_pem, cert_pem)
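# Illustrative usage (not part of the original module); names and serial numbers are
# made-up example values. The signing PEM must contain both the signer's private key
# and its certificate, which is why the two parts are concatenated here:
#   (ca_key_pem, ca_cert_pem) = GenerateSelfSignedX509Cert("example-ca",
#                                                          365 * 24 * 3600, 1)
#   (key_pem, cert_pem) = GenerateSignedX509Cert("node1.example.com",
#                                                365 * 24 * 3600, 2,
#                                                ca_key_pem + ca_cert_pem)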
def GenerateSignedSslCert(filename_cert, serial_no,
filename_signing_cert,
common_name=constants.X509_CERT_CN,
validity=constants.X509_CERT_DEFAULT_VALIDITY,
uid=-1, gid=-1):
signing_cert_pem = utils_io.ReadFile(filename_signing_cert)
(key_pem, cert_pem) = GenerateSignedX509Cert(
common_name, validity * 24 * 60 * 60, serial_no, signing_cert_pem)
utils_io.WriteFile(filename_cert, mode=0440, data=key_pem + cert_pem,
uid=uid, gid=gid, backup=True)
return (key_pem, cert_pem)
def ExtractX509Certificate(pem):
"""Extracts the certificate from a PEM-formatted string.
@type pem: string
@rtype: tuple; (OpenSSL.X509 object, string)
@return: Certificate object and PEM-formatted certificate
"""
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
return (cert,
OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert))
def X509CertKeyCheck(cert, key):
"""Function for verifying certificate with a certain private key.
@type key: OpenSSL.crypto.PKey
@param key: Private key object
@type cert: OpenSSL.crypto.X509
@param cert: X509 certificate object
@raise OpenSSL.SSL.Error: if the certificate is not signed by the given
private key
"""
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
ctx.use_certificate(cert)
ctx.use_privatekey(key)
ctx.check_privatekey()
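# Illustrative sketch (not part of the original module): round-trip a freshly
# generated certificate/key pair through pyOpenSSL and verify it with
# X509CertKeyCheck. The common name, validity and serial number are arbitrary.
def _ExampleCertKeyCheck():
  key_pem, cert_pem = GenerateSelfSignedX509Cert("example", 3600, 1)
  key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, key_pem)
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
  # Raises OpenSSL.SSL.Error if the certificate was not signed with this key
  X509CertKeyCheck(cert, key)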
def CheckNodeCertificate(cert, _noded_cert_file=pathutils.NODED_CERT_FILE):
"""Checks the local node daemon certificate against given certificate.
Both certificates must be signed with the same key (as stored in the local
L{pathutils.NODED_CERT_FILE} file). No error is raised if no local
certificate can be found.
@type cert: OpenSSL.crypto.X509
@param cert: X509 certificate object
@raise errors.X509CertError: When an error related to X509 occurred
@raise errors.GenericError: When the verification failed
"""
try:
noded_pem = utils_io.ReadFile(_noded_cert_file)
except EnvironmentError, err:
if err.errno != errno.ENOENT:
raise
logging.debug("Node certificate file '%s' was not found", _noded_cert_file)
return
try:
noded_cert = \
OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, noded_pem)
except Exception, err:
raise errors.X509CertError(_noded_cert_file,
"Unable to load certificate: %s" % err)
try:
noded_key = \
OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, noded_pem)
except Exception, err:
raise errors.X509CertError(_noded_cert_file,
"Unable to load private key: %s" % err)
# Check consistency of server.pem file
try:
X509CertKeyCheck(noded_cert, noded_key)
except OpenSSL.SSL.Error:
# This should never happen as it would mean the certificate in server.pem
# is out of sync with the private key stored in the same file
raise errors.X509CertError(_noded_cert_file,
"Certificate does not match with private key")
# Check the supplied certificate against the local key
try:
X509CertKeyCheck(cert, noded_key)
except OpenSSL.SSL.Error:
raise errors.GenericError("Given cluster certificate does not match"
" local key")
| {
"content_hash": "b70bc92f7bddd9b7e1b244d32d102a38",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 80,
"avg_line_length": 32.272930648769574,
"alnum_prop": 0.688201857756828,
"repo_name": "yiannist/ganeti",
"id": "2e4aa818400af51fdf4162591450832a4c990692",
"size": "15791",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/utils/x509.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "2509723"
},
{
"name": "JavaScript",
"bytes": "8808"
},
{
"name": "M4",
"bytes": "31972"
},
{
"name": "Makefile",
"bytes": "96586"
},
{
"name": "Python",
"bytes": "6231906"
},
{
"name": "Shell",
"bytes": "151065"
}
],
"symlink_target": ""
} |
'''
Created by auto_sdk on 2015.01.20
'''
from top.api.base import RestApi
class UmpPromotionGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.channel_key = None
self.item_id = None
def getapiname(self):
return 'taobao.ump.promotion.get'
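# Illustrative usage sketch (not part of the generated file): fill in the two
# request fields and execute the call. The set_app_info/getResponse calls and
# the app key/secret are assumptions based on the usual top.api.base.RestApi
# pattern; check the SDK base class for the exact invocation.
#
# req = UmpPromotionGetRequest()
# req.set_app_info(top.appinfo("your-app-key", "your-app-secret"))
# req.item_id = 123456
# req.channel_key = "example-channel"
# resp = req.getResponse()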
| {
"content_hash": "3ed9d341cf7afa02e6195e1d4b575c9a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.6859756097560976,
"repo_name": "colaftc/webtool",
"id": "3d2c6c7fbb510bf20f1bc6f0a0867978ccbc910f",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "top/api/rest/UmpPromotionGetRequest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12208"
},
{
"name": "HTML",
"bytes": "16773"
},
{
"name": "JavaScript",
"bytes": "2571"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "258023"
},
{
"name": "Ruby",
"bytes": "861"
},
{
"name": "VimL",
"bytes": "401921"
}
],
"symlink_target": ""
} |
"""
This defines the cmdset for the red_button. Here we have defined
the commands and the cmdset in the same module, but if you
have many different commands to merge it is often better
to define the cmdset separately, picking and choosing from
among the available commands as to what should be included in the
cmdset - this way you can often re-use the commands too.
"""
import random
from ev import Command, CmdSet
# Some simple commands for the red button
#------------------------------------------------------------
# Commands defined on the red button
#------------------------------------------------------------
class CmdNudge(Command):
"""
Try to nudge the button's lid
Usage:
nudge lid
This command will have you try to
push the lid of the button away.
"""
key = "nudge lid" # two-word command name!
aliases = ["nudge"]
locks = "cmd:all()"
def func(self):
"""
nudge the lid. Random chance of success to open it.
"""
rand = random.random()
if rand < 0.5:
self.caller.msg("You nudge at the lid. It seems stuck.")
elif 0.5 <= rand < 0.7:
self.caller.msg("You move the lid back and forth. It won't budge.")
else:
self.caller.msg("You manage to get a nail under the lid.")
self.caller.execute_cmd("open lid")
class CmdPush(Command):
"""
Push the red button
Usage:
push button
"""
key = "push button"
aliases = ["push", "press button", "press"]
locks = "cmd:all()"
def func(self):
"""
Note that we choose to implement this with checking for
if the lid is open/closed. This is because this command
is likely to be tried regardless of the state of the lid.
An alternative would be to make two versions of this command
and tuck them into the cmdset linked to the Open and Closed
lid-state respectively.
"""
if self.obj.db.lid_open:
string = "You reach out to press the big red button ..."
string += "\n\nA BOOM! A bright light blinds you!"
string += "\nThe world goes dark ..."
self.caller.msg(string)
self.caller.location.msg_contents("%s presses the button. BOOM! %s is blinded by a flash!" %
(self.caller.name, self.caller.name), exclude=self.caller)
# the button's method will handle all setup of scripts etc.
self.obj.press_button(self.caller)
else:
string = "You cannot push the button - there is a glass lid covering it."
self.caller.msg(string)
class CmdSmashGlass(Command):
"""
smash glass
Usage:
smash glass
Try to smash the glass of the button.
"""
key = "smash glass"
aliases = ["smash lid", "break lid", "smash"]
locks = "cmd:all()"
def func(self):
"""
The lid won't open, but there is a small chance
of causing the lamp to break.
"""
rand = random.random()
if rand < 0.2:
string = "You smash your hand against the glass"
string += " with all your might. The lid won't budge"
string += " but you cause quite the tremor through the button's mount."
string += "\nIt looks like the button's lamp stopped working for the time being."
self.obj.lamp_works = False
elif rand < 0.6:
string = "You hit the lid hard. It doesn't move an inch."
else:
string = "You place a well-aimed fist against the glass of the lid."
string += " Unfortunately all you get is a pain in your hand. Maybe"
string += " you should just try to open the lid instead?"
self.caller.msg(string)
self.caller.location.msg_contents("%s tries to smash the glass of the button." %
(self.caller.name), exclude=self.caller)
class CmdOpenLid(Command):
"""
open lid
Usage:
open lid
"""
key = "open lid"
aliases = ["open button", 'open']
locks = "cmd:all()"
def func(self):
"simply call the right function."
if self.obj.db.lid_locked:
self.caller.msg("This lid seems locked in place for the moment.")
return
string = "\nA ticking sound is heard, like a winding mechanism. Seems "
string += "the lid will soon close again."
self.caller.msg(string)
self.caller.location.msg_contents("%s opens the lid of the button." %
(self.caller.name), exclude=self.caller)
# add the relevant cmdsets to button
self.obj.cmdset.add(LidClosedCmdSet)
# call object method
self.obj.open_lid()
class CmdCloseLid(Command):
"""
close the lid
Usage:
close lid
Closes the lid of the red button.
"""
key = "close lid"
aliases = ["close"]
locks = "cmd:all()"
def func(self):
"Close the lid"
self.obj.close_lid()
# this will clean out scripts dependent on lid being open.
self.caller.msg("You close the button's lid. It clicks back into place.")
self.caller.location.msg_contents("%s closes the button's lid." %
(self.caller.name), exclude=self.caller)
class CmdBlindLook(Command):
"""
Looking around in darkness
Usage:
look <obj>
... not that there's much to see in the dark.
"""
key = "look"
aliases = ["l", "get", "examine", "ex", "feel", "listen"]
locks = "cmd:all()"
def func(self):
"This replaces all the senses when blinded."
# we decide what to reply based on which command was
# actually tried
if self.cmdstring == "get":
string = "You fumble around blindly without finding anything."
elif self.cmdstring == "examine":
string = "You try to examine your surroundings, but can't see a thing."
elif self.cmdstring == "listen":
string = "You are deafened by the boom."
elif self.cmdstring == "feel":
string = "You fumble around, hands outstretched. You bump your knee."
else:
# trying to look
string = "You are temporarily blinded by the flash. "
string += "Until it wears off, all you can do is feel around blindly."
self.caller.msg(string)
self.caller.location.msg_contents("%s stumbles around, blinded." %
(self.caller.name), exclude=self.caller)
class CmdBlindHelp(Command):
"""
Help function while in the blinded state
Usage:
help
"""
key = "help"
aliases = "h"
locks = "cmd:all()"
def func(self):
"Give a message."
self.caller.msg("You are beyond help ... until you can see again.")
#---------------------------------------------------------------
# Command sets for the red button
#---------------------------------------------------------------
# We next tuck these commands into their respective command sets.
# (note that we are overdoing the cmdset separation a bit here
# to show how it works).
class DefaultCmdSet(CmdSet):
"""
The default cmdset always sits
on the button object. Whereas other
command sets may be added/merged onto it
and hide it, removing them will always
bring it back. It's added to the object
using obj.cmdset.add_default().
"""
key = "RedButtonDefault"
mergetype = "Union" # this is default, we don't really need to put it here.
def at_cmdset_creation(self):
"Init the cmdset"
self.add(CmdPush())
class LidClosedCmdSet(CmdSet):
"""
A simple cmdset tied to the redbutton object.
It contains the commands that launches the other
command sets, making the red button a self-contained
item (i.e. you don't have to manually add any
scripts etc to it when creating it).
"""
key = "LidClosedCmdSet"
# default Union is used *except* if we are adding to a
# cmdset named LidOpenCmdSet - this one we replace
# completely.
key_mergetype = {"LidOpenCmdSet": "Replace"}
def at_cmdset_creation(self):
"Populates the cmdset when it is instantiated."
self.add(CmdNudge())
self.add(CmdSmashGlass())
self.add(CmdOpenLid())
class LidOpenCmdSet(CmdSet):
"""
This is the opposite of the Closed cmdset.
"""
key = "LidOpenCmdSet"
# default Union is used *except* if we are adding to a
# cmdset named LidClosedCmdSet - this one we replace
# completely.
key_mergetype = {"LidClosedCmdSet": "Replace"}
def at_cmdset_creation(self):
"setup the cmdset (just one command)"
self.add(CmdCloseLid())
class BlindCmdSet(CmdSet):
"""
This is the cmdset added to the *player* when
the button is pushed.
"""
key = "BlindCmdSet"
# we want it to completely replace all normal commands
# until the timed script removes it again.
mergetype = "Replace"
# we want to stop the player from walking around
# in this blinded state, so we hide all exits too.
# (channel commands will still work).
no_exits = True # keep player in the same room
no_objs = True # don't allow object commands
def at_cmdset_creation(self):
"Setup the blind cmdset"
from src.commands.default.general import CmdSay
from src.commands.default.general import CmdPose
self.add(CmdSay())
self.add(CmdPose())
self.add(CmdBlindLook())
self.add(CmdBlindHelp())
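# Illustrative sketch (not part of the original example): a minimal typeclass
# showing how the default cmdset above would be attached to a button object.
# The `ev.Object` import follows the flat API used at the top of this module;
# the tutorial's real RedButton typeclass lives in a separate module.
from ev import Object

class ExampleRedButton(Object):
    "Sketch of an object hooking in DefaultCmdSet at creation time."
    def at_object_creation(self):
        self.db.lid_open = False
        self.db.lid_locked = False
        self.cmdset.add_default(DefaultCmdSet)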
| {
"content_hash": "ab7f2b980cc2c5783413e15661f4a4ae",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 104,
"avg_line_length": 31.105431309904155,
"alnum_prop": 0.5873048479868529,
"repo_name": "TaliesinSkye/evennia",
"id": "ba7c61b17cdccae6e62717929d6a23d840f864d0",
"size": "9736",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "wintersoasis-master/commands/examples/cmdset_red_button.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "59698"
},
{
"name": "D",
"bytes": "9343933"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "JavaScript",
"bytes": "91190"
},
{
"name": "Python",
"bytes": "2840755"
},
{
"name": "Shell",
"bytes": "4577"
}
],
"symlink_target": ""
} |
import pickle
import numpy as np
"""
Implementation of time and energy averaged responses from 2-d
transfer functions.
"""
class TransferFunction(object):
"""
Create or retrieve a transfer function, and form
time and energy averaged responses.
Parameters
----------
data : numpy 2-d array / 2-d list
inner/first dimension (the column index) represents time
and outer/second dimension (the row index) represents energy.
As an example, if you have your 2-d model defined by 'arr', then
arr[1][5] defines a time of 5(s) and energy of 1(keV) [assuming
'dt' and 'de' are 1 and 'tstart' and 'estart' are 0.]
Note that each row is a different energy channel starting from
the lowest to the highest.
dt : float, default 1
time interval
de : float, default 1
energy interval
tstart : float, default 0
initial time value across time axis
estart : float, default 0
initial energy value across energy axis
Attributes
----------
time : numpy.ndarray
energy-averaged/time-resolved response of 2-d transfer
function
energy : numpy.ndarray
time-averaged/energy-resolved response of 2-d transfer
function
"""
def __init__(self, data, dt=1, de=1, tstart=0, estart=0,
time=None, energy=None):
self.data = np.asarray(data)
self.dt = dt
self.de = de
self.tstart = tstart
self.estart = estart
self.time = None
self.energy = None
if len(data[0]) < 2:
raise ValueError('Number of columns should be greater than 1.')
if len(data[:]) < 2:
raise ValueError('Number of rows should be greater than 1.')
def time_response(self, e0=None, e1=None):
"""
Form an energy-averaged/time-resolved response of 2-d transfer
function.
Parameters
----------
e0 : int
start value of energy interval to be averaged
e1 : int
end value of energy interval to be averaged
Returns
-------
time : numpy.ndarray
energy-averaged/time-resolved response of the 2-d transfer function,
stored in the ``time`` attribute
"""
# Set start and stop values
if e0 is None:
start = 0
else:
start = int(self.estart + e0/self.de)
if e1 is None:
stop = len(self.data[:][0]) - 1
else:
stop = int(self.estart + e1/self.de)
# Ensure start and stop values are legal
if (start < 0) or (stop < 0):
raise ValueError('e0 and e1 must be positive.')
if (start > len(self.data[:][0])) or (stop > len(self.data[:][0])):
raise ValueError('One or both energy values are out of range.')
if start == stop:
raise ValueError('e0 and e1 must be separated by at least de.')
self.time = np.mean(self.data[start:stop, :], axis=0)
def energy_response(self):
"""
Form a time-averaged/energy-resolved response of 2-d transfer function.
Returns
-------
energy : numpy.ndarray
time-averaged/energy-resolved response of the 2-d transfer function,
stored in the ``energy`` attribute
"""
self.energy = np.mean(self.data, axis=1)
def plot(self, response='2d', save=False, filename=None, show=False):
"""
Plot 'time', 'energy' or 2-d response using matplotlib.
In case of 1-d response, 'time' and 'energy' would appear
along x-axis and corresponding flux across y-axis. In case
of 2-d response, a spectrograph would be formed with 'time'
along x-axis and 'energy' along y-axis.
Parameters
----------
response : str
type of response - accepts 'time', 'energy', '2d'
filename : str
the name of the file to save the plot to. Only used when ``save`` is
True; with the default of None the plot is written to 'out.png'.
"""
import matplotlib.pyplot as plt
fig = plt.figure()
if response == 'time':
t = np.linspace(self.tstart, len(self.data[0])*self.dt,
len(self.data[0]))
figure = plt.plot(t, self.time)
plt.xlabel('Time')
plt.ylabel('Flux')
plt.title('Time-resolved Response')
elif response == 'energy':
e = np.linspace(self.estart, len(self.data[:])*self.de,
len(self.data[:]))
figure = plt.plot(e, self.energy)
plt.xlabel('Energy')
plt.ylabel('Flux')
plt.title('Energy-resolved Response')
elif response == '2d':
figure = plt.imshow(self.data, interpolation='nearest',
cmap='Oranges', origin='lower')
plt.xlabel('Time')
plt.ylabel('Energy')
plt.title('2-d Transfer Function')
plt.colorbar()
else:
raise ValueError("Response value is not recognized. Available"
"response types are 'time', 'energy', and '2d'.")
if save:
if filename is None:
plt.savefig('out.png')
else:
plt.savefig(filename)
if show:
plt.show()
else:
plt.close()
@staticmethod
def read(filename, fmt='pickle', format_=None):
"""
Reads transfer function from a 'pickle' file.
Parameters
----------
filename : str
path of the file from which the transfer function is read
fmt : str
the format of the file to be retrieved - accepts 'pickle'.
Returns
-------
data : class instance
`TransferFunction` object
"""
if format_ is not None:
fmt = format_
if fmt == 'pickle':
with open(filename, "rb") as fobj:
return pickle.load(fobj)
else:
raise KeyError("Format not understood.")
def write(self, filename, fmt="pickle", format_=None):
"""
Writes a transfer function to 'pickle' file.
Parameters
----------
fmt : str
the format of the file to be saved - accepts 'pickle'
"""
if format_ is not None:
fmt = format_
if fmt == 'pickle':
with open(filename, "wb") as fobj:
pickle.dump(self, fobj)
else:
raise KeyError("Format not understood.")
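# Illustrative sketch (not part of the original module): build a small 2-d
# transfer function and form both averaged responses. The array values and
# bin widths are arbitrary example data.
def _example_transfer_function():
    data = np.arange(12.0).reshape(3, 4)   # 3 energy rows, 4 time columns
    transfer = TransferFunction(data, dt=0.5, de=1.0)
    transfer.time_response()     # energy-averaged curve, stored in .time
    transfer.energy_response()   # time-averaged curve, stored in .energy
    return transfer.time, transfer.energy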
"""
Implementation of artificial methods to create energy-averaged
responses for quick testing.
"""
def simple_ir(dt=0.125, start=0, width=1000, intensity=1):
"""
Construct a simple impulse response using start time,
width and scaling intensity.
To create a delta impulse response, set width to 1.
Parameters
----------
dt : float
Time resolution
start : int
start time of impulse response
width : int
width of impulse response
intensity : float
scaling parameter to set the intensity of delayed emission
corresponding to direct emission.
Returns
-------
h : numpy.ndarray
Constructed impulse response
"""
# Fill in 0 entries until the start time
h_zeros = np.zeros(int(start/dt))
# Define constant impulse response
h_ones = np.ones(int(width/dt)) * intensity
return np.append(h_zeros, h_ones)
def relativistic_ir(dt=0.125, t1=3, t2=4, t3=10, p1=1, p2=1.4, rise=0.6, decay=0.1):
"""
Construct a realistic impulse response considering the relativistic
effects.
Parameters
----------
dt : float
Time resolution
t1 : int
primary peak time
t2 : int
secondary peak time
t3 : int
end time
p1 : float
value of primary peak
p2 : float
value of secondary peak
rise : float
slope of rising exponential from primary peak to secondary peak
decay : float
slope of decaying exponential from secondary peak to end time
Returns
-------
h: numpy.ndarray
Constructed impulse response
"""
assert t2>t1, 'Secondary peak must be after primary peak.'
assert t3>t2, 'End time must be after secondary peak.'
assert p2>p1, 'Secondary peak must be greater than primary peak.'
# Append zeros before start time
h_primary = np.append(np.zeros(int(t1/dt)), p1)
# Create a rising exponential of user-provided slope
x = np.linspace(t1/dt, t2/dt, int((t2-t1)/dt))
h_rise = np.exp(rise*x)
# Evaluate a factor for scaling exponential
factor = np.max(h_rise)/(p2-p1)
h_secondary = (h_rise/factor) + p1
# Create a decaying exponential until the end time
x = np.linspace(t2/dt, t3/dt, int((t3-t2)/dt))
h_decay = (np.exp((-decay)*(x-4/dt)))
# Add the three responses
h = np.append(h_primary, h_secondary)
h = np.append(h, h_decay)
return h
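# Illustrative sketch (not part of the original module): the two helpers can
# be called directly to produce 1-d impulse responses; the argument values
# below are arbitrary examples.
def _example_impulse_responses():
    h_simple = simple_ir(dt=0.125, start=2, width=4, intensity=0.5)
    h_relativistic = relativistic_ir(dt=0.125, t1=3, t2=4, t3=10)
    return h_simple, h_relativistic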
| {
"content_hash": "a1c9b51126b81ab5e5aac21ed2f501b1",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 84,
"avg_line_length": 27.656346749226007,
"alnum_prop": 0.5652076570021269,
"repo_name": "StingraySoftware/stingray",
"id": "5dd577251c93a4e130b232fdaea4b7c98d1611df",
"size": "8933",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "stingray/simulator/transfer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1200124"
},
{
"name": "Python",
"bytes": "1465633"
},
{
"name": "TeX",
"bytes": "8716"
}
],
"symlink_target": ""
} |
from django.views import generic
from django.shortcuts import redirect
class IndexView(generic.ListView):
template_name = 'Dashboard.html'
def get_queryset(self):
pass
def home(request):
return redirect('/privacy/') | {
"content_hash": "4ed8ba74d0974a64bd9e750115f1dfd5",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 37,
"avg_line_length": 20.272727272727273,
"alnum_prop": 0.7713004484304933,
"repo_name": "JiaMingLin/de-identification",
"id": "f7de6ca651b4c265ec76a0917b323e1517f7c925",
"size": "224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "1675"
},
{
"name": "CSS",
"bytes": "48304"
},
{
"name": "HTML",
"bytes": "109133"
},
{
"name": "Java",
"bytes": "1349"
},
{
"name": "JavaScript",
"bytes": "219387"
},
{
"name": "Lasso",
"bytes": "2327"
},
{
"name": "PHP",
"bytes": "1398"
},
{
"name": "Perl",
"bytes": "2901"
},
{
"name": "Python",
"bytes": "116489"
},
{
"name": "R",
"bytes": "25501"
},
{
"name": "Ruby",
"bytes": "1337"
},
{
"name": "Shell",
"bytes": "735"
}
],
"symlink_target": ""
} |
"""Implementation of gcloud genomics datasets update.
"""
from googlecloudsdk.api_lib import genomics as lib
from googlecloudsdk.api_lib.genomics import genomics_util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
class Update(base.Command):
"""Updates a dataset name.
"""
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument('id',
type=int,
help='The ID of the dataset to be updated.')
parser.add_argument('--name',
help='The new name of the dataset.',
required=True)
@genomics_util.ReraiseHttpException
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace, All the arguments that were provided to this
command invocation.
Raises:
HttpException: An http error response was received while executing api
request.
Returns:
None
"""
apitools_client = self.context[lib.GENOMICS_APITOOLS_CLIENT_KEY]
genomics_messages = self.context[lib.GENOMICS_MESSAGES_MODULE_KEY]
request = genomics_messages.GenomicsDatasetsPatchRequest(
dataset=genomics_messages.Dataset(
name=args.name,
),
datasetId=str(args.id),
)
return apitools_client.datasets.Patch(request)
def Display(self, args, dataset):
"""This method is called to print the result of the Run() method.
Args:
args: The arguments that command was run with.
dataset: The value returned from the Run() method.
"""
if dataset:
log.Print('Updated dataset {0}, name: {1}'.format(
dataset.id, dataset.name))
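# Illustrative usage sketch (not part of the original module): with the flags
# registered in Args() above, the command would be invoked roughly as
#
#   gcloud genomics datasets update 1000000 --name new-dataset-name
#
# possibly behind an alpha/beta release track depending on the SDK version.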
| {
"content_hash": "d4a2893cb862a37c52e978179d90a161",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 29.915254237288135,
"alnum_prop": 0.6492917847025496,
"repo_name": "flgiordano/netcash",
"id": "e99de266c62b7fd60450b3601461fe2b63215728",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/surface/genomics/datasets/update.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
} |
from booster import app, db
import bcrypt
class User(db.Model):
''' The only user is an administrator, so there are no roles
'''
id = db.Column(db.Integer, primary_key=True)
login = db.Column(db.String(128), unique=True)
email = db.Column(db.String(256), unique=True)
smash = db.Column(db.String(64))
is_super = db.Column(db.Boolean)
def __init__(self, login, password, email, is_super=False):
self.login = login
self.email = email
self.smash = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt(12))
self.is_super = is_super
def authorize(self, password):
if self.smash == bcrypt.hashpw(password.encode('utf-8'), self.smash.encode('utf-8')):
return True
return False
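# Illustrative sketch (not part of the original module): create an admin user
# and check a password. Persisting through db.session follows the usual
# Flask-SQLAlchemy pattern assumed by the `db` import above.
def _example_create_and_check_user():
    admin = User('admin', 's3cret', 'admin@example.com', is_super=True)
    db.session.add(admin)
    db.session.commit()
    return admin.authorize('s3cret')   # True for the matching password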
| {
"content_hash": "bbf7669a9e1ad072daaf8f8ffd2bea68",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 93,
"avg_line_length": 32.25,
"alnum_prop": 0.6317829457364341,
"repo_name": "zknight/booster",
"id": "2c47e656030a8d61c3517671ef56d45a6eee7e53",
"size": "774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "booster/models/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8304"
},
{
"name": "PHP",
"bytes": "38"
},
{
"name": "Python",
"bytes": "37825"
}
],
"symlink_target": ""
} |
from PyQt5.QtCore import QSettings
from PyQt5.QtCore import QTextCodec
class QmSetting(QSettings):
def __init__(self, *args, **kwargs):
super(QmSetting, self).__init__(*args, **kwargs)
"""
def
## settings
code = QTextCodec.codecForName('utf-8')
self.settings = QSettings('.tmp/settings.ini', QSettings.IniFormat)
self.settings.setIniCodec(code)
self.settings.setValue('data/c1', 'test')
"""
def load_ini(path, encoding='utf-8'):
code = QTextCodec.codecForName(encoding)
settings = QSettings(path, QSettings.IniFormat)
settings.setIniCodec(code)
return settings
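# Illustrative sketch (not part of the original module): load an ini file with
# load_ini, store a value and read it back. The path and key are arbitrary
# example values.
def _example_read_setting():
    settings = load_ini('.tmp/settings.ini')
    settings.setValue('data/c1', 'test')
    return settings.value('data/c1')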
| {
"content_hash": "fbdf4f60cb06fcd717b7b811d5b11304",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 23.884615384615383,
"alnum_prop": 0.6827697262479872,
"repo_name": "kilfu0701/linne-tool",
"id": "35468affdcd76fc76cff8477db4b1faad634a66e",
"size": "645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gui/converter/util/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6569"
}
],
"symlink_target": ""
} |
import time
import json
import requests
from threading import Thread
from adnpy.models import (SimpleValueModel, Post, User, Channel, Message, Token, File, StreamingMeta)
class StreamListener(object):
"""
The StreamListener Object
for every message type (post, star, user_follow, mute, block, stream_marker, message, channel, channel_subscription,
token, file) you can define an on_<message_type> method to handle those messages.
Example::
class MyStreamListener(StreamListener):
def on_post(self, post, meta):
if meta.is_deleted:
return
print post.text
def on_star(self, post, user, meta):
...
"""
def __init__(self, api):
self.api = api
def prepare_post(self, data):
if not data:
return tuple()
return (
Post.from_response_data(data, self.api),
)
def prepare_star(self, data):
return (
Post.from_response_data(data.get('post', {}), self.api),
User.from_response_data(data.get('user', {}), self.api),
)
def prepare_user_follow(self, data):
return (
User.from_response_data(data.get('follows_user', {}), self.api),
User.from_response_data(data.get('user', {}), self.api),
)
def prepare_mute(self, data):
return (
User.from_response_data(data.get('muted_user', {}), self.api),
User.from_response_data(data.get('user', {}), self.api),
)
def prepare_block(self, data):
return (
User.from_response_data(data.get('blocked_user', {}), self.api),
User.from_response_data(data.get('user', {}), self.api),
)
def prepare_stream_marker(self, data):
return (
SimpleValueModel.from_response_data(data.get('marker', {}), self.api),
User.from_response_data(data.get('user', {}), self.api),
)
def prepare_message(self, data):
return (
Message.from_response_data(data, self.api),
)
def prepare_channel(self, data):
return (
Channel.from_response_data(data, self.api),
)
def prepare_channel_subscription(self, data):
return (
Channel.from_response_data(data.get('channel', {}), self.api),
User.from_response_data(data.get('user', {}), self.api),
)
def prepare_token(self, data):
return (
Token.from_response_data(data, self.api),
)
def prepare_file(self, data):
return (
File.from_response_data(data, self.api),
)
def prepare_fallback(self, data):
return (
SimpleValueModel.from_response_data(data, self.api),
)
def on_connect(self):
"""Called once connected to streaming server.
This will be invoked once a successful response
is received from the server. Allows the listener
to perform some work prior to entering the read loop.
"""
pass
def on_data(self, raw_data):
"""Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data. Return False to stop stream and close connection.
"""
data = json.loads(raw_data)
message_type = data['meta'].get('type')
prepare_method = 'prepare_%s' % (message_type)
args = getattr(self, prepare_method, self.prepare_fallback)(data.get('data'))
method_name = 'on_%s' % (message_type,)
func = getattr(self, method_name, self.on_fallback)
func(*args, meta=StreamingMeta.from_response_data(data.get('meta'), self.api))
def on_fallback(self, data, meta):
"""Called when there is no specific method for handling an object type"""
return
def on_error(self, status_code):
"""Called when a non-200 status code is returned"""
return False
def on_timeout(self):
"""Called when stream connection times out"""
return
class Stream(object):
"""
The Stream Object
Example::
from adnpy.stream import Stream, StreamListener
from adnpy.utils import get_app_access_token
app_access_token, token = get_app_access_token(client_id, client_secret)
# Define a stream
stream_def = {
"object_types": [
"post"
],
"type": "long_poll",
"key": "post_stream"
}
# Create a stream
class MyStreamListener(StreamListener):
def on_post(self, post, meta):
if meta.is_deleted:
return
print post.text
my_api = adnpy.API.build_api(access_token=app_access_token)
stream = Stream(my_api, stream_def, MyStreamListener)
stream.start()
"""
def __init__(self, api, stream_defenition, listener_class, **options):
self.api = api
self.listener = listener_class(api)
self.stream_defenition = stream_defenition
self.running = False
self.timeout = options.get("timeout", 600.0)
self.retry_count = options.get("retry_count", 10)
self.retry_time = options.get("retry_time", 10.0)
self.snooze_time = options.get("snooze_time", 5.0)
def get_streaming_endpoint(self, endpoint):
return endpoint
def _run(self):
app_stream = None
app_streams, meta = self.api.get_streams(key=self.stream_defenition['key'])
if app_streams:
app_stream = app_streams[0]
if not app_stream:
app_stream, meta = self.api.create_stream(data=self.stream_defenition)
# For altering the url
streaming_endpoint = self.get_streaming_endpoint(app_stream.endpoint)
# Connect and process the stream
error_counter = 0
exception = None
while self.running:
if self.retry_count is not None and error_counter > self.retry_count:
# quit if error count greater than retry count
break
try:
resp = requests.get(streaming_endpoint, stream=True, timeout=self.timeout)
resp.raise_for_status()
if resp.status_code != 200:
if self.listener.on_error(resp.status_code) is False:
break
error_counter += 1
time.sleep(self.retry_time)
else:
error_counter = 0
self.listener.on_connect()
self._read_loop(resp)
except Exception, exception:
# any other exception is fatal, so kill loop
break
# cleanup
self.running = False
if exception:
raise
def _data(self, data):
if self.listener.on_data(data) is False:
self.running = False
def _read_loop(self, resp):
while self.running:
for line in resp.iter_lines(chunk_size=1):
if line:
self._data(line)
def start(self, async=False):
self.running = True
if async:
Thread(target=self._run).start()
else:
self._run()
def disconnect(self):
if self.running is False:
return
self.running = False
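# Illustrative sketch (not part of the original module): run a stream in a
# background thread and shut it down later. The stream definition mirrors the
# class docstring above; `api` and `listener_class` are supplied by the caller.
def _example_async_stream(api, listener_class):
    stream_def = {
        "object_types": ["post"],
        "type": "long_poll",
        "key": "post_stream",
    }
    stream = Stream(api, stream_def, listener_class)
    stream.start(async=True)   # returns immediately, _run() happens in a Thread
    # ... do other work, then tear the connection down ...
    stream.disconnect()
    return stream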
| {
"content_hash": "8e53a6d7d5238569a9c192d38ce888ef",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 120,
"avg_line_length": 30.100401606425702,
"alnum_prop": 0.5607738492328219,
"repo_name": "appdotnet/ADNpy",
"id": "5b8d800bc79d4f11ef6de650acbd7e5575ccc43a",
"size": "7495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adnpy/stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72195"
},
{
"name": "Shell",
"bytes": "5266"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
from hashlib import md5
import os
from urllib import urlopen, urlencode, quote, unquote
from django.contrib import admin
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db.models import Model
from django.template import (Context, Node, TextNode, Template,
TemplateSyntaxError, TOKEN_TEXT, TOKEN_VAR,
TOKEN_COMMENT, TOKEN_BLOCK)
from django.template.loader import get_template
from django.utils.html import strip_tags
from django.utils.simplejson import loads
from django.utils.text import capfirst
from PIL import Image, ImageOps
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.forms import get_edit_form
from mezzanine.utils.cache import nevercache_token, cache_installed
from mezzanine.utils.html import decode_entities
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import admin_url
from mezzanine.utils.views import is_editable
from mezzanine import template
register = template.Library()
if "compressor" in settings.INSTALLED_APPS:
@register.tag
def compress(parser, token):
"""
Shadows django-compressor's compress tag so it can be
loaded from ``mezzanine_tags``, allowing us to provide
a dummy version when django-compressor isn't installed.
"""
from compressor.templatetags.compress import compress
return compress(parser, token)
else:
@register.to_end_tag
def compress(parsed, context, token):
"""
Dummy tag for fallback when django-compressor isn't installed.
"""
return parsed
if cache_installed():
@register.tag
def nevercache(parser, token):
"""
Tag for two phased rendering. Converts enclosed template
code and content into text, which gets rendered separately
in ``mezzanine.core.middleware.UpdateCacheMiddleware``.
This is to bypass caching for the enclosed code and content.
"""
text = []
end_tag = "endnevercache"
tag_mapping = {
TOKEN_TEXT: ("", ""),
TOKEN_VAR: ("{{", "}}"),
TOKEN_BLOCK: ("{%", "%}"),
TOKEN_COMMENT: ("{#", "#}"),
}
delimiter = nevercache_token()
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == end_tag:
return TextNode(delimiter + "".join(text) + delimiter)
start, end = tag_mapping[token.token_type]
text.append("%s%s%s" % (start, token.contents, end))
parser.unclosed_block_tag(end_tag)
else:
@register.to_end_tag
def nevercache(parsed, context, token):
"""
Dummy fallback ``nevercache`` for when caching is not
configured.
"""
return parsed
@register.inclusion_tag("includes/form_fields.html", takes_context=True)
def fields_for(context, form):
"""
Renders fields for a form.
"""
context["form_for_fields"] = form
return context
@register.filter
def is_installed(app_name):
"""
Returns ``True`` if the given app name is in the
``INSTALLED_APPS`` setting.
"""
from warnings import warn
warn("The is_installed filter is deprecated. Please use the tag "
"{% ifinstalled appname %}{% endifinstalled %}")
return app_name in settings.INSTALLED_APPS
@register.tag
def ifinstalled(parser, token):
"""
Old-style ``if`` tag that renders contents if the given app is
installed. The main use case is:
{% ifinstalled app_name %}
{% include "app_name/template.html" %}
{% endifinstalled %}
so we need to manually pull out all tokens if the app isn't
installed, since if we used a normal ``if`` tag with a False arg,
the include tag would still try to find the template to include.
"""
try:
tag, app = token.split_contents()
except ValueError:
raise TemplateSyntaxError("ifinstalled should be in the form: "
"{% ifinstalled app_name %}"
"{% endifinstalled %}")
end_tag = "end" + tag
if app.strip("\"'") not in settings.INSTALLED_APPS:
while True:
token = parser.tokens.pop(0)
if token.token_type == TOKEN_BLOCK and token.contents == end_tag:
parser.tokens.insert(0, token)
break
nodelist = parser.parse((end_tag,))
parser.delete_first_token()
class IfInstalledNode(Node):
def render(self, context):
return nodelist.render(context)
return IfInstalledNode()
@register.render_tag
def set_short_url_for(context, token):
"""
Sets the ``short_url`` attribute of the given model using the bit.ly
credentials if they have been specified and saves it.
"""
obj = context[token.split_contents()[1]]
request = context["request"]
if getattr(obj, "short_url") is None:
obj.short_url = request.build_absolute_uri(request.path)
args = {
"login": context["settings"].BLOG_BITLY_USER,
"apiKey": context["settings"].BLOG_BITLY_KEY,
"longUrl": obj.short_url,
}
if args["login"] and args["apiKey"]:
url = "http://api.bit.ly/v3/shorten?%s" % urlencode(args)
response = loads(urlopen(url).read())
if response["status_code"] == 200:
obj.short_url = response["data"]["url"]
obj.save()
return ""
@register.simple_tag
def gravatar_url(email, size=32):
"""
Return the full URL for a Gravatar given an email hash.
"""
email_hash = md5(email).hexdigest()
return "http://www.gravatar.com/avatar/%s?s=%s" % (email_hash, size)
@register.to_end_tag
def metablock(parsed):
"""
Remove HTML tags, entities and superfluous characters from meta blocks.
"""
parsed = " ".join(parsed.replace("\n", "").split()).replace(" ,", ",")
return strip_tags(decode_entities(parsed))
@register.inclusion_tag("includes/pagination.html", takes_context=True)
def pagination_for(context, current_page):
"""
Include the pagination template and data for persisting querystring in
pagination links.
"""
querystring = context["request"].GET.copy()
if "page" in querystring:
del querystring["page"]
querystring = querystring.urlencode()
return {"current_page": current_page, "querystring": querystring}
@register.simple_tag
def thumbnail(image_url, width, height, quality=95):
"""
Given the URL to an image, resizes the image using the given width and
height on the first time it is requested, and returns the URL to the new
resized image. If width or height is zero, the original aspect ratio is
maintained.
"""
if not image_url:
return ""
image_url = unquote(unicode(image_url))
if image_url.startswith(settings.MEDIA_URL):
image_url = image_url.replace(settings.MEDIA_URL, "", 1)
image_dir, image_name = os.path.split(image_url)
image_prefix, image_ext = os.path.splitext(image_name)
filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
thumb_name = "%s-%sx%s%s" % (image_prefix, width, height, image_ext)
thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
settings.THUMBNAILS_DIR_NAME)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
thumb_path = os.path.join(thumb_dir, thumb_name)
thumb_url = "%s/%s" % (settings.THUMBNAILS_DIR_NAME,
quote(thumb_name.encode("utf-8")))
image_url_path = os.path.dirname(image_url)
if image_url_path:
thumb_url = "%s/%s" % (image_url_path, thumb_url)
try:
thumb_exists = os.path.exists(thumb_path)
except UnicodeEncodeError:
# The image was saved to a filesystem with utf-8 support,
# but somehow the locale has changed and the filesystem does not
# support utf-8.
from mezzanine.core.exceptions import FileSystemEncodingChanged
raise FileSystemEncodingChanged()
if thumb_exists:
# Thumbnail exists, don't generate it.
return thumb_url
elif not default_storage.exists(image_url):
# Requested image does not exist, just return its URL.
return image_url
image = Image.open(default_storage.open(image_url))
image_info = image.info
width = int(width)
height = int(height)
# If already right size, don't do anything.
if width == image.size[0] and height == image.size[1]:
return image_url
# Set dimensions.
if width == 0:
width = image.size[0] * height / image.size[1]
elif height == 0:
height = image.size[1] * width / image.size[0]
if image.mode not in ("L", "RGBA"):
image = image.convert("RGBA")
try:
image = ImageOps.fit(image, (width, height), Image.ANTIALIAS)
image = image.save(thumb_path, filetype, quality=quality, **image_info)
# Push a remote copy of the thumbnail if MEDIA_URL is
# absolute.
if "://" in settings.MEDIA_URL:
with open(thumb_path, "r") as f:
default_storage.save(thumb_url, File(f))
except:
return image_url
return thumb_url
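# Illustrative usage sketch (not part of the original module): in a template
# the tag above is typically used along the lines of
#
#   <img src="{{ MEDIA_URL }}{% thumbnail image_path 90 60 %}">
#
# where ``image_path`` is a placeholder for whatever image path or URL the
# template has in context; the tag returns a path relative to MEDIA_URL.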
@register.inclusion_tag("includes/editable_loader.html", takes_context=True)
def editable_loader(context):
"""
Set up the required JS/CSS for the in-line editing toolbar and controls.
"""
t = get_template("includes/editable_toolbar.html")
context["REDIRECT_FIELD_NAME"] = REDIRECT_FIELD_NAME
context["toolbar"] = t.render(Context(context))
context["richtext_media"] = RichTextField().formfield().widget.media
return context
@register.filter
def richtext_filter(content):
"""
This template filter takes a string value and passes it through the
function specified by the RICHTEXT_FILTER setting.
"""
if settings.RICHTEXT_FILTER:
func = import_dotted_path(settings.RICHTEXT_FILTER)
else:
func = lambda s: s
return func(content)
@register.to_end_tag
def editable(parsed, context, token):
"""
Add the required HTML to the parsed content for in-line editing, such as
the icon and edit form if the object is deemed to be editable - either it
has an ``editable`` method which returns ``True``, or the logged in user
has change permissions for the model.
"""
def parse_field(field):
field = field.split(".")
obj = context[field.pop(0)]
attr = field.pop()
while field:
obj = getattr(obj, field.pop(0))
return obj, attr
fields = [parse_field(f) for f in token.split_contents()[1:]]
if fields:
fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]]
if not parsed.strip():
try:
parsed = "".join([unicode(getattr(*field)) for field in fields])
except AttributeError:
pass
if fields and "request" in context:
obj = fields[0][0]
if isinstance(obj, Model) and is_editable(obj, context["request"]):
field_names = ",".join([f[1] for f in fields])
context["form"] = get_edit_form(obj, field_names)
context["original"] = parsed
t = get_template("includes/editable_form.html")
return t.render(Context(context))
return parsed
@register.simple_tag
def try_url(url_name):
"""
Mimics Django's ``url`` template tag but fails silently. Used for url
names in admin templates as these won't resolve when admin tests are
running.
"""
from warnings import warn
warn("try_url is deprecated, use the url tag with the 'as' arg instead.")
try:
url = reverse(url_name)
except NoReverseMatch:
return ""
return url
def admin_app_list(request):
"""
Adopted from ``django.contrib.admin.sites.AdminSite.index``. Returns a
list of lists of models grouped and ordered according to
``mezzanine.conf.ADMIN_MENU_ORDER``. Called from the
``admin_dropdown_menu`` template tag as well as the ``app_list``
dashboard widget.
"""
app_dict = {}
menu_order = [(x[0], list(x[1])) for x in settings.ADMIN_MENU_ORDER]
found_items = set()
for (model, model_admin) in admin.site._registry.items():
opts = model._meta
in_menu = not hasattr(model_admin, "in_menu") or model_admin.in_menu()
if in_menu and request.user.has_module_perms(opts.app_label):
perms = model_admin.get_model_perms(request)
admin_url_name = ""
if perms["change"]:
admin_url_name = "changelist"
elif perms["add"]:
admin_url_name = "add"
if admin_url_name:
model_label = "%s.%s" % (opts.app_label, opts.object_name)
for (name, items) in menu_order:
try:
index = list(items).index(model_label)
except ValueError:
pass
else:
found_items.add(model_label)
app_title = name
break
else:
index = None
app_title = opts.app_label
model_dict = {
"index": index,
"perms": model_admin.get_model_perms(request),
"name": capfirst(model._meta.verbose_name_plural),
"admin_url": admin_url(model, admin_url_name),
}
app_title = app_title.title()
if app_title in app_dict:
app_dict[app_title]["models"].append(model_dict)
else:
try:
titles = [x[0] for x in settings.ADMIN_MENU_ORDER]
index = titles.index(app_title)
except ValueError:
index = None
app_dict[app_title] = {
"index": index,
"name": app_title,
"models": [model_dict],
}
for (i, (name, items)) in enumerate(menu_order):
name = unicode(name)
for unfound_item in set(items) - found_items:
if isinstance(unfound_item, (list, tuple)):
item_name, item_url = unfound_item[0], unfound_item[1]
try:
item_url = reverse(item_url)
except NoReverseMatch:
continue
if name not in app_dict:
app_dict[name] = {
"index": i,
"name": name,
"models": [],
}
app_dict[name]["models"].append({
"index": items.index(unfound_item),
"perms": {"custom": True},
"name": item_name,
"admin_url": item_url,
})
app_list = app_dict.values()
sort = lambda x: x["name"] if x["index"] is None else x["index"]
for app in app_list:
app["models"].sort(key=sort)
app_list.sort(key=sort)
return app_list
@register.inclusion_tag("admin/includes/dropdown_menu.html",
takes_context=True)
def admin_dropdown_menu(context):
"""
Renders the app list for the admin dropdown menu navigation.
"""
context["dropdown_menu_app_list"] = admin_app_list(context["request"])
context["dropdown_menu_sites"] = list(Site.objects.all())
context["dropdown_menu_selected_site_id"] = current_site_id()
return context
@register.inclusion_tag("admin/includes/app_list.html", takes_context=True)
def app_list(context):
"""
Renders the app list for the admin dashboard widget.
"""
context["dashboard_app_list"] = admin_app_list(context["request"])
return context
@register.inclusion_tag("admin/includes/recent_actions.html",
takes_context=True)
def recent_actions(context):
"""
Renders the recent actions list for the admin dashboard widget.
"""
return context
@register.render_tag
def dashboard_column(context, token):
"""
Takes an index for retrieving the sequence of template tags from
``mezzanine.conf.DASHBOARD_TAGS`` to render into the admin dashboard.
"""
column_index = int(token.split_contents()[1])
output = []
for tag in settings.DASHBOARD_TAGS[column_index]:
t = Template("{%% load %s %%}{%% %s %%}" % tuple(tag.split(".")))
output.append(t.render(Context(context)))
return "".join(output)
| {
"content_hash": "f332f9c67e364a2229acc9f0a7d8c8c3",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 79,
"avg_line_length": 35.51452282157676,
"alnum_prop": 0.6004790279238229,
"repo_name": "sachingupta006/Mezzanine",
"id": "5dd8e64ffc6b41c80ebd4a7738717881c4d8638c",
"size": "17119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mezzanine/core/templatetags/mezzanine_tags.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "63548"
},
{
"name": "Python",
"bytes": "754016"
}
],
"symlink_target": ""
} |
import numpy as np
def parse_raw_data(path):
input_sentences = []
target_sentences = []
with open(path) as f:
in_sentence = []
target_sentence = []
for line in f:
if line != "\n":
in_target = line.split('\t')
in_sentence.append(in_target[0])
target_sentence.append(in_target[1].strip())
else:
input_sentences.append(in_sentence)
target_sentences.append(target_sentence)
in_sentence = []
target_sentence = []
input_data = []
output_data = []
for sentence_idx in range(len(input_sentences)):
sentence = input_sentences[sentence_idx]
sentence_in_data = np.zeros([50, 70, 20], dtype=np.float32)
sentence_out_data = np.zeros([12, 50], dtype=np.float32)
word_idx = 0
for word in sentence:
if word_idx >= 50:
break
# handle target output
target_symbol_index = 0 # 0 PASS
if ("company" in target_sentences[sentence_idx][word_idx]) is True:
target_symbol_index = 1
elif ("facility" in target_sentences[sentence_idx][word_idx]) is True:
target_symbol_index = 2
elif ("geo-loc" in target_sentences[sentence_idx][word_idx]) is True:
target_symbol_index = 3
elif ("movie" in target_sentences[sentence_idx][word_idx]) is True:
target_symbol_index = 4
elif ("musicartist" in target_sentences[sentence_idx][word_idx]) is True:
target_symbol_index = 5
elif ("other" in target_sentences[sentence_idx][word_idx]) is True:
target_symbol_index = 6
elif ("person" in target_sentences[sentence_idx][word_idx]) is True:
target_symbol_index = 7
elif ("product" in target_sentences[sentence_idx][word_idx]) is True:
target_symbol_index = 8
elif ("sportsteam" in target_sentences[sentence_idx][word_idx]) is True:
target_symbol_index = 9
elif ("tvshow" in target_sentences[sentence_idx][word_idx]) is True:
target_symbol_index = 10
sentence_out_data[target_symbol_index, word_idx] = 1
# handle input word
col_idx = 0
for char in word.upper(): # upper this
if col_idx >= 20:
break
char_dec = ord(char)
row_idx = 68 # represent other unknown symbols
if 96 >= char_dec >= 33:
row_idx = char_dec - 33
elif 126 >= char_dec >= 123:
row_idx = char_dec - 33 - 26
sentence_in_data[word_idx, 0:row_idx, col_idx] = 1
col_idx += 1
word_idx += 1
sentence_in_data[word_idx:, 69:, :] = 1 # PAD
sentence_out_data[11, word_idx:] = 1 # PAD
input_data.append(sentence_in_data)
output_data.append(sentence_out_data)
return np.array(input_data), np.array(output_data)
def save_to_disk(train_data, evl_data):
train_in, train_out = parse_raw_data(train_data)
np.save(train_data + "_in_np_v4", train_in)
np.save(train_data + "_out_np_v4", train_out)
evl_in, evl_out = parse_raw_data(evl_data)
np.save(evl_data + "_in_np_v4", evl_in)
np.save(evl_data + "_out_np_v4", evl_out)
def final_evaluate(test_output, target_output):
total_token = 0
class_tokens_total = np.zeros([11], dtype=np.int64)
class_tokens_TP = np.zeros([11], dtype=np.int64)
class_tokens_TN = np.zeros([11], dtype=np.int64)
class_tokens_FP = np.zeros([11], dtype=np.int64)
class_tokens_FN = np.zeros([11], dtype=np.int64)
for s_index in range(len(test_output)):
sentence = test_output[s_index]
sentence_target = target_output[s_index]
for w_index in range(len(sentence)):
output_label = np.argmax(sentence[:, w_index])
target_label = np.argmax(sentence_target[:, w_index])
if target_label == 11:
break # skip left if reach PAD
total_token += 1 # add total token
class_tokens_total[target_label] += 1
if target_label == output_label:
class_tokens_TP[output_label] += 1
class_tokens_TN[:] += 1
class_tokens_TN[output_label] -= 1
if target_label != output_label:
class_tokens_FN[target_label] += 1
if output_label != 11:
class_tokens_FP[output_label] += 1
# Output Table
print ("--------------------------------------------------")
for i in range(11):
print ("%d TP: %d, TN: %d, FP: %d, FN: %d, Total: %d" % (i,
class_tokens_TP[i],
class_tokens_TN[i],
class_tokens_FP[i],
class_tokens_FN[i],
class_tokens_total[i]))
class DataManager(object):
def __init__(self, train_data_in, train_data_out, evl_data_in, evl_data_out, batch_size):
print ("Start loading data ...")
self._train_data_in = np.load(train_data_in)
self._train_data_out = np.load(train_data_out)
self._evl_data_in = np.load(evl_data_in)
self._evl_data_out = np.load(evl_data_out)
self._batch_size = batch_size
self._batch_index = 0
print ("Data loaded !")
def get_one_sample(self, index=0, source="test"):
if source != "test":
return self._train_data_in[index, :, :, :], self._train_data_out[index, :, :]
else:
return self._evl_data_in[index, :, :, :], self._evl_data_out[index, :, :]
def get_data(self, source="test"):
if source == "test":
return self._evl_data_in, self._evl_data_out
else:
return self._train_data_in, self._train_data_out
def get_batch(self):
epoch_end = False
self._batch_index += self._batch_size
if self._batch_index > len(self._train_data_in):
epoch_end = True
randomize = np.arange(len(self._train_data_in))
np.random.shuffle(randomize)
self._train_data_in = self._train_data_in[randomize]
self._train_data_out = self._train_data_out[randomize]
self._batch_index = self._batch_size
batch_input = self._train_data_in[self._batch_index - self._batch_size:self._batch_index]
batch_output = self._train_data_out[self._batch_index - self._batch_size:self._batch_index]
return batch_input, batch_output, epoch_end
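# Illustrative sketch (not part of the original module): convert the raw
# tab-separated files and pull one batch from the DataManager. "train" and
# "test" are placeholder paths; the *_np_v4.npy names come from save_to_disk.
def _example_pipeline():
    save_to_disk("train", "test")
    dm = DataManager("train_in_np_v4.npy", "train_out_np_v4.npy",
                     "test_in_np_v4.npy", "test_out_np_v4.npy",
                     batch_size=32)
    batch_in, batch_out, epoch_end = dm.get_batch()
    return batch_in.shape, batch_out.shape, epoch_end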
| {
"content_hash": "8750b90ebcd8617858b81e843e629ebf",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 99,
"avg_line_length": 43.5,
"alnum_prop": 0.5219827586206897,
"repo_name": "Lucklyric/NLP-NER-CNN",
"id": "b21b84758ee4c8ef717ef56ffacf71a79f61f787",
"size": "6960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/data_util_v4.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5211"
},
{
"name": "Python",
"bytes": "74752"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provides a common test case base for Python Spark tests"""
from .utils import add_pyspark_path, quiet_py4j
import unittest2
from pyspark.context import SparkContext
import os
class SparkTestingBaseTestCase(unittest2.TestCase):
"""Basic common test case for Spark. Provides a Spark context as sc.
For non-local mode testing you can either override sparkMaster
or set the environment variable SPARK_MASTER.
@classmethod
def getMaster(cls):
return os.getenv('SPARK_MASTER', "local[4]")
def setUp(self):
"""Setup a basic Spark context for testing"""
self.sc = SparkContext(self.getMaster())
quiet_py4j()
def tearDown(self):
"""
Tear down the basic Spark test case. This stops the running
context and does a hack to prevent Akka rebinding on the same port.
"""
self.sc.stop()
# To avoid Akka rebinding to the same port, since it doesn't unbind
# immediately on shutdown
self.sc._jvm.System.clearProperty("spark.driver.port")
class SparkTestingBaseReuse(unittest2.TestCase):
"""Basic common test case for Spark. Provides a Spark context as sc.
For non-local mode testing you can either override sparkMaster
or set the environment variable SPARK_MASTER.
@classmethod
def getMaster(cls):
return os.getenv('SPARK_MASTER', "local[4]")
@classmethod
def setUpClass(cls):
"""Setup a basic Spark context for testing"""
class_name = cls.__name__
cls.sc = SparkContext(cls.getMaster(), appName=class_name)
quiet_py4j()
@classmethod
def tearDownClass(cls):
"""
Tear down the basic Spark test case. This stops the running
context and does a hack to prevent Akka rebinding on the same port.
"""
print("stopping class")
cls.sc.stop()
# To avoid Akka rebinding to the same port, since it doesn't unbind
# immediately on shutdown
cls.sc._jvm.System.clearProperty("spark.driver.port")
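# Illustrative sketch (not part of the original module): a test case reusing
# the shared context provided by SparkTestingBaseReuse. The word-count data
# is arbitrary example input.
class ExampleWordCountTestCase(SparkTestingBaseReuse):
    def test_word_count(self):
        rdd = self.sc.parallelize(["a b", "a"])
        counts = dict(rdd.flatMap(lambda line: line.split())
                      .map(lambda word: (word, 1))
                      .reduceByKey(lambda a, b: a + b)
                      .collect())
        self.assertEqual(counts["a"], 2)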
if __name__ == "__main__":
unittest2.main()
| {
"content_hash": "725085389c396b25f6194f76aba10eec",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 78,
"avg_line_length": 35.38823529411765,
"alnum_prop": 0.6944813829787234,
"repo_name": "snithish/spark-testing-base",
"id": "cd622d85aa469b86dd0eb2133647f926e78476ab",
"size": "3008",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/sparktestingbase/testcase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "18606"
},
{
"name": "Perl",
"bytes": "2353"
},
{
"name": "Python",
"bytes": "23938"
},
{
"name": "Scala",
"bytes": "190906"
},
{
"name": "Shell",
"bytes": "2872"
}
],
"symlink_target": ""
} |
from unittest import skipIf
from django.conf import settings
skipIfSpatialite = skipIf('spatialite' in settings.DATABASES['default']['ENGINE'], "Spatialite not supported")
| {
"content_hash": "3ca2f4b31494a0f884787677364986f6",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 110,
"avg_line_length": 43.25,
"alnum_prop": 0.7976878612716763,
"repo_name": "ocadotechnology/django-tastypie",
"id": "264785d147c1498f06fca561010e3a400e23d505",
"size": "173",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "tests/gis/tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "988"
},
{
"name": "Python",
"bytes": "803225"
},
{
"name": "Shell",
"bytes": "1162"
}
],
"symlink_target": ""
} |
import os.path
import shutil
import tempfile
import lmdb
import numpy as np
from . import analyze_db
from digits import test_utils
# Must import after importing digits.config
import caffe.io
import caffe_pb2
test_utils.skipIfNotFramework('none')
class BaseTestWithDB(object):
SAME_SHAPE = True
PASS_DEFAULTS = True
PASS_FORCE = True
PASS_COUNT = True
@classmethod
def setUpClass(cls):
cls._data_dir = tempfile.mkdtemp()
cls.db = lmdb.open(os.path.join(cls._data_dir, 'db'))
for i in xrange(2):
if cls.SAME_SHAPE:
width = 10
else:
width = 10+i
datum = cls.create_datum(10,width,3)
with cls.db.begin(write=True) as txn:
txn.put(str(i), datum.SerializeToString())
@classmethod
def tearDownClass(cls):
cls.db.close()
shutil.rmtree(cls._data_dir)
@staticmethod
def create_datum(*shape):
"""
Creates a datum with an image of the given shape
"""
image = np.ones(shape, dtype='uint8')
return caffe.io.array_to_datum(image)
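    # Note (illustrative): caffe.io.array_to_datum serializes the 3-D uint8 array
    # into a caffe_pb2.Datum; the SAME_SHAPE flag above controls whether the two
    # stored datums share the same shape, which is what the force_same_shape
    # check in analyze_db is expected to detect (see TestDifferentShape below).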
def test_defaults(self):
assert analyze_db.analyze_db(self.db.path()) == self.PASS_DEFAULTS
def test_force_shape(self):
assert analyze_db.analyze_db(self.db.path(), force_same_shape=True) == self.PASS_FORCE
class TestSameShape(BaseTestWithDB):
pass
class TestDifferentShape(BaseTestWithDB):
SAME_SHAPE = False
PASS_FORCE = False
| {
"content_hash": "9fb829201181651f0ac17a8bc07f2f88",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 94,
"avg_line_length": 23.984126984126984,
"alnum_prop": 0.6254136333553938,
"repo_name": "TimZaman/DIGITS",
"id": "ddf7356fa83a6c5e6a43f26cbd12bb0fe4e403f8",
"size": "1581",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "digits/tools/test_analyze_db.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4032"
},
{
"name": "HTML",
"bytes": "285736"
},
{
"name": "JavaScript",
"bytes": "45826"
},
{
"name": "Lua",
"bytes": "110640"
},
{
"name": "Makefile",
"bytes": "87"
},
{
"name": "Protocol Buffer",
"bytes": "384"
},
{
"name": "Python",
"bytes": "933415"
},
{
"name": "Shell",
"bytes": "12431"
}
],
"symlink_target": ""
} |
import numpy as np
import visvis as vv
from visvis.pypoints import Pointset, is_Point, is_Pointset
from visvis import PolarLine
def makeArray(data):
if isinstance(data, np.ndarray):
return data
else:
# create numpy array
try:
l = len(data)
a = np.empty((l, 1))
for i in range(len(data)):
a[i] = data[i]
return a
except TypeError:
raise Exception("Cannot plot %s" % data.__class__.__name__)
def _SetLimitsAfterDraw(event):
""" To be able to set the limits after the first draw. """
# Set limits
fig = event.owner
for axis in fig.FindObjects(vv.axises.PolarAxis2D):
limits = axis.GetLimits()
axis.SetLimits(rangeTheta=limits[0], rangeR=limits[1])
# Unsubscribe and redraw
fig.eventAfterDraw.Unbind(_SetLimitsAfterDraw)
fig.Draw()
def polarplot(data1, data2=None, inRadians=False,
lw=1, lc='b', ls="-", mw=7, mc='b', ms='', mew=1, mec='k',
alpha=1, axesAdjust=True, axes=None, **kwargs):
""" polarplot(*args, inRadians=False,
lw=1, lc='b', ls="-", mw=7, mc='b', ms='', mew=1, mec='k',
alpha=1, axesAdjust=True, axes=None):
Plot 2D polar data, using a polar axis to draw a polar grid.
Usage
-----
    * polarplot(Y, ...) plots a 1D polar signal.
    * polarplot(X, Y, ...) also supplies angular coordinates.
    * polarplot(P, ...) plots using a Point or Pointset instance.
Keyword arguments
-----------------
(The longer names for the line properties can also be used)
lw : scalar
lineWidth. The width of the line. If zero, no line is drawn.
mw : scalar
markerWidth. The width of the marker. If zero, no marker is drawn.
mew : scalar
markerEdgeWidth. The width of the edge of the marker.
lc : 3-element tuple or char
lineColor. The color of the line. A tuple should represent the RGB
values between 0 and 1. If a char is given it must be
        one of 'rgbmcywk', for red, green, blue, magenta, cyan, yellow,
white, black, respectively.
mc : 3-element tuple or char
markerColor. The color of the marker. See lineColor.
mec : 3-element tuple or char
markerEdgeColor. The color of the edge of the marker.
ls : string
lineStyle. The style of the line. (See below)
ms : string
markerStyle. The style of the marker. (See below)
axesAdjust : bool
If axesAdjust==True, this function will call axes.SetLimits(), and set
the camera type to 2D.
axes : Axes instance
Display the image in this axes, or the current axes if not given.
Line styles
-----------
* Solid line: '-'
* Dotted line: ':'
* Dashed line: '--'
* Dash-dot line: '-.' or '.-'
* A line that is drawn between each pair of points: '+'
* No line: '' or None.
Marker styles
-------------
* Plus: '+'
* Cross: 'x'
* Square: 's'
* Diamond: 'd'
* Triangle (pointing up, down, left, right): '^', 'v', '<', '>'
* Pentagram star: 'p' or '*'
    * Hexagram: 'h'
    * Point/circle: 'o' or '.'
* No marker: '' or None
Polar axis
----------
This polar axis has a few specialized methods for adjusting the polar
plot. Access these via vv.gca().axis.
* SetLimits(thetaRange, radialRange)
* thetaRange, radialRange = GetLimits()
    * angularRefPos: Get and Set methods for the relative screen
      angle of the 0 degree polar reference. The default is 0 degrees,
      which corresponds to the positive x-axis (y = 0).
    * isCW: Get and Set methods for the sense of rotation, CW or
      CCW. These take/return a bool (True for the default CW rotation).
Interaction
-----------
* Drag mouse up/down to translate radial axis.
* Drag mouse left/right to rotate angular ref position.
* Drag mouse + shift key up/down to rescale radial axis (min R fixed).
"""
# create a dict from the properties and combine with kwargs
tmp = {'lineWidth': lw, 'lineColor': lc, 'lineStyle': ls,
'markerWidth': mw, 'markerColor': mc, 'markerStyle': ms,
'markerEdgeWidth': mew, 'markerEdgeColor': mec}
for i in tmp:
        if i not in kwargs:
kwargs[i] = tmp[i]
## create the data
if is_Pointset(data1):
pp = data1
elif is_Point(data1):
pp = Pointset(data1.ndim)
pp.append(data1)
else:
if data1 is None:
raise ValueError("The first argument cannot be None!")
data1 = makeArray(data1)
if data2 is None:
            # R data is given; theta data must be
            # a range starting from 0 degrees
data2 = data1
data1 = np.arange(0, data2.shape[0])
else:
data2 = makeArray(data2)
# check dimensions
L = data1.size
if L != data2.size:
raise ValueError("Array dimensions do not match! %i vs %i " %
(data1.size, data2.size))
# build points
data1 = data1.reshape((data1.size, 1))
data2 = data2.reshape((data2.size, 1))
if not inRadians:
data1 = np.pi * data1 / 180.0
## create the line
if axes is None:
axes = vv.gca()
axes.axisType = 'polar'
fig = axes.GetFigure()
l = PolarLine(axes, data1, data2)
l.lw = kwargs['lineWidth']
l.lc = kwargs['lineColor']
l.ls = kwargs['lineStyle']
l.mw = kwargs['markerWidth']
l.mc = kwargs['markerColor']
l.ms = kwargs['markerStyle']
l.mew = kwargs['markerEdgeWidth']
l.mec = kwargs['markerEdgeColor']
l.alpha = alpha
## almost done...
# Init axis
# axes.axis.SetLimits()
if axesAdjust:
if axes.daspectAuto is None:
axes.daspectAuto = True
axes.cameraType = '2d'
axes.SetLimits()
        # Subscribe the after-draw event handler
# (unsubscribe first in case we do multiple plots)
fig.eventAfterDraw.Unbind(_SetLimitsAfterDraw)
fig.eventAfterDraw.Bind(_SetLimitsAfterDraw)
# Return
axes.Draw()
return l
if __name__ == '__main__':
# Make data
import numpy as np
    angs = 0.1 + np.linspace(-90, 90, 181)  # 0.1 offset avoids the singularity at 0
angsRads = np.pi * angs / 180.0
mag = 10 * np.log10(np.abs(np.sin(10 * angsRads) / angsRads)) + angsRads
mag = mag - np.max(mag)
# Show data
vv.polarplot( angs, mag, lc='b')
vv.polarplot(angs+20, mag, lc='r', lw=2)
axes = vv.gca()
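    # Illustrative follow-up (hedged sketch, not in the original example): the polar
    # axis described in the docstring can be adjusted through vv.gca().axis, e.g.
    # re-applying its own limits as _SetLimitsAfterDraw does above:
    #
    # limits = axes.axis.GetLimits()
    # axes.axis.SetLimits(rangeTheta=limits[0], rangeR=limits[1])
    # app = vv.use()   # start the visvis event loop (assumed to be available here)
    # app.Run()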
| {
"content_hash": "94d21e933c34c1582846f64ce068f990",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 78,
"avg_line_length": 32.042857142857144,
"alnum_prop": 0.5706642888987963,
"repo_name": "chiluf/visvis.dev",
"id": "417c7b23d64ea7e584865ea9fd24cc236e76ac7a",
"size": "6979",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "functions/polarplot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1168090"
}
],
"symlink_target": ""
} |
"""WebHelpers used in tgapp-stroller2."""
#from webhelpers import date, feedgenerator, html, number, misc, text
from markupsafe import Markup
from tgext.pluggable import plug_url
def bold(text):
return Markup('<strong>%s</strong>' % text)
def stroller2_product_url(product):
return plug_url('stroller2', '/product/%s' % product.slug)
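# Illustrative usage sketch (the product object is hypothetical and the resulting
# URL prefix depends on where the pluggable app is mounted):
#
# >>> bold('On sale')
# Markup(u'<strong>On sale</strong>')
# >>> stroller2_product_url(product)
# '/stroller2/product/my-product'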
def stroller2_product_share_url(product):
return plug_url('stroller2', '/product/share/%s' % product.slug) | {
"content_hash": "c08a367528f303d59778f8ab624eab7b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 69,
"avg_line_length": 28.625,
"alnum_prop": 0.7248908296943232,
"repo_name": "gasbasd/tgapp-stroller2",
"id": "7424b077f1bdb42496f96baca2ea9f9159c1ab9b",
"size": "483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stroller2/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3359"
},
{
"name": "Python",
"bytes": "74545"
}
],
"symlink_target": ""
} |
conf_neutron_conf = """[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = True
# =========Start Global Config Option for Distributed L3 Router===============
# Setting the "router_distributed" flag to "True" will default to the creation
# of distributed tenant routers. The admin can override this flag by specifying
# the type of the router on the create request (admin-only attribute). Default
# value is "False" to support legacy mode (centralized) routers.
#
# router_distributed = False
#
# ===========End Global Config Option for Distributed L3 Router===============
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/neutron
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server to
# bind_host = 0.0.0.0
# Port to bind the API server to
# bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
core_plugin = ml2
# Example: core_plugin = ml2
# (StrOpt) Neutron IPAM (IP address management) driver to be loaded from the
# neutron.ipam_drivers namespace. See setup.cfg for the entry point names.
# If ipam_driver is not set (default behavior), no ipam driver is used.
# Example: ipam_driver =
# In order to use the reference implementation of neutron ipam driver, use
# 'internal'.
# Example: ipam_driver = internal
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering,qos
# Paste configuration file
# api_paste_config = api-paste.ini
# (StrOpt) Hostname to be used by the neutron server, agents and services
# running on this machine. All the agents and services running on this machine
# must use the same host value.
# The default value is hostname of the machine.
#
# host =
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# DVR Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be randomly
# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
# avoid mixing them up with MAC's allocated for tenant ports.
# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
# The default is 3 octet
# dvr_base_mac = fa:16:3f:00:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds). Use -1 to
# tell dnsmasq to use infinite lease times.
# dhcp_lease_duration = 86400
# Domain to use for building the hostnames
# dns_domain = openstacklocal
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet. For IPv6, validate only if
# gateway is not a link local address. Deprecated, to be removed during the
# K release, at which point the check will be mandatory.
# force_gateway_on_subnet = True
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# Maximum number of routes per router
# max_routes = 30
# Default Subnet Pool to be used for IPv4 subnet-allocation.
# Specifies by UUID the pool to be used in case of subnet-create being called
# without a subnet-pool ID. The default of None means that no pool will be
# used unless passed explicitly to subnet create. If no pool is used, then a
# CIDR must be passed to create a subnet and that subnet will not be allocated
# from any pool; it will be considered part of the tenant's private address
# space.
# default_ipv4_subnet_pool =
# Default Subnet Pool to be used for IPv6 subnet-allocation.
# Specifies by UUID the pool to be used in case of subnet-create being
# called without a subnet-pool ID. Set to "prefix_delegation"
# to enable IPv6 Prefix Delegation in a PD-capable environment.
# See the description for default_ipv4_subnet_pool for more information.
# default_ipv6_subnet_pool =
# =========== items for MTU selection and advertisement =============
# Advertise MTU. If True, effort is made to advertise MTU
# settings to VMs via network methods (ie. DHCP and RA MTU options)
# when the network's preferred MTU is known.
# advertise_mtu = False
# ======== end of items for MTU selection and advertisement =========
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# Agent starts with admin_state_up=False when enable_new_agents=False.
# In the case, user's resources will not be scheduled automatically to the
# agent until admin changes admin_state_up to True.
# enable_new_agents = True
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# (StrOpt) Representing the resource type whose load is being reported by
# the agent.
# This can be 'networks', 'subnets' or 'ports'. When specified (default is networks),
# the server will extract particular load sent as part of its agent configuration object
# from the agent report state, which is the number of resources being consumed, at
# every report_interval.
# dhcp_load_type can be used in combination with network_scheduler_driver =
# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
# When the network_scheduler_driver is WeightScheduler, dhcp_load_type can
# be configured to represent the choice for the resource being balanced.
# Example: dhcp_load_type = networks
# Values:
# networks - number of networks hosted on the agent
# subnets - number of subnets associated with the networks hosted on the agent
# ports - number of ports associated with the networks hosted on the agent
# dhcp_load_type = networks
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Allow automatic rescheduling of routers from dead L3 agents with
# admin_state_up set to True to alive agents.
# allow_automatic_l3agent_failover = False
# Allow automatic removal of networks from dead DHCP agents with
# admin_state_up set to True.
# Networks could then be rescheduled if network_auto_schedule is True
# allow_automatic_dhcp_failover = True
# Number of DHCP agents scheduled to host a tenant network.
# If this number is greater than 1, the scheduler automatically
# assigns multiple DHCP agents for a given tenant network,
# providing high availability for DHCP service.
# dhcp_agents_per_network = 1
# Enable services on agents with admin_state_up False.
# If this option is False, when admin_state_up of an agent is turned to
# False, services on it will be disabled. If this option is True, services
# on agents with admin_state_up False keep available and manual scheduling
# to such agents is available. Agents with admin_state_up False are not
# selected for automatic scheduling regardless of this option.
# enable_services_on_agents_with_admin_state_down = False
# =========== end of items for agent scheduler extension =====
# =========== items for l3 extension ==============
# Enable high availability for virtual routers.
# l3_ha = False
#
# Maximum number of l3 agents which a HA router will be scheduled on. If it
# is set to 0 the router will be scheduled on every agent.
# max_l3_agents_per_router = 3
#
# Minimum number of l3 agents which a HA router will be scheduled on. The
# default value is 2.
# min_l3_agents_per_router = 2
#
# CIDR of the administrative network if HA mode is enabled
# l3_ha_net_cidr = 169.254.192.0/18
#
# Enable snat by default on external gateway when available
# enable_snat_by_default = True
#
# The network type to use when creating the HA network for an HA router.
# By default or if empty, the first 'tenant_network_types'
# is used. This is helpful when the VRRP traffic should use a specific
# network which is not the default one.
# ha_network_type =
# Example: ha_network_type = flat
#
# The physical network name with which the HA network can be created.
# ha_network_physical_name =
# Example: ha_network_physical_name = physnet1
# =========== end of items for l3 extension =======
# =========== items for metadata proxy configuration ==============
# User (uid or name) running metadata proxy after its initialization
# (if empty: agent effective user)
# metadata_proxy_user =
# Group (gid or name) running metadata proxy after its initialization
# (if empty: agent effective group)
# metadata_proxy_group =
# Enable/Disable log watch by metadata proxy, it should be disabled when
# metadata_proxy_user/group is not allowed to read/write its log file and
# 'copytruncate' logrotate option must be used if logrotate is enabled on
# metadata proxy log files. Option default value is deduced from
# metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent
# effective user id/name.
# metadata_proxy_watch_log =
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# =========== end of items for metadata proxy configuration ==============
# ========== items for VLAN trunking networks ==========
# Setting this flag to True will allow plugins that support it to
# create VLAN transparent networks. This flag has no effect for
# plugins that do not support VLAN transparent networks.
# vlan_transparent = False
# ========== end of items for VLAN trunking networks ==========
# =========== WSGI parameters related to the API server ==============
# Number of separate API worker processes to spawn. If not specified or < 1,
# the default value is equal to the number of CPUs available.
# api_workers = <number of CPUs>
# Number of separate RPC worker processes to spawn. If not specified or < 1,
# a single RPC worker process is spawned by the parent process.
# rpc_workers = 1
# Timeout for client connections socket operations. If an
# incoming connection is idle for this number of seconds it
# will be closed. A value of '0' means wait forever. (integer
# value)
# client_socket_timeout = 900
# wsgi keepalive option. Determines if connections are allowed to be held open
# by clients after a request is fulfilled. A value of False will ensure that
# the socket connection will be explicitly closed once a response has been
# sent to the client.
# wsgi_keep_alive = True
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
# nova_url = http://127.0.0.1:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =
# Username for connection to nova in admin context
# nova_admin_username =
# The uuid of the admin nova tenant
# nova_admin_tenant_id =
# The name of the admin nova tenant. If the uuid of the admin nova tenant
# is set, this is optional. Useful for cases where the uuid of the admin
# nova tenant is not available when configuration is being done.
# nova_admin_tenant_name =
# Password for connection to nova in admin context.
# nova_admin_password =
# Authorization URL for connection to nova in admin context.
# nova_admin_auth_url =
# CA file for novaclient to verify server certificates
# nova_ca_certificates_file =
# Boolean to control ignoring SSL errors on the nova url
# nova_api_insecure = False
# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2
# ======== end of neutron nova interactions ==========
#
# Options defined in oslo.messaging
#
# Use durable queues in amqp. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
# amqp_durable_queues=false
# Auto-delete queues in amqp. (boolean value)
# amqp_auto_delete=false
# Size of RPC connection pool. (integer value)
# rpc_conn_pool_size=30
# Qpid broker hostname. (string value)
# qpid_hostname=localhost
# Qpid broker port. (integer value)
# qpid_port=5672
# Qpid HA cluster host:port pairs. (list value)
# qpid_hosts=$qpid_hostname:$qpid_port
# Username for Qpid connection. (string value)
# qpid_username=
# Password for Qpid connection. (string value)
# qpid_password=
# Space separated list of SASL mechanisms to use for auth.
# (string value)
# qpid_sasl_mechanisms=
# Seconds between connection keepalive heartbeats. (integer
# value)
# qpid_heartbeat=60
# Transport to use, either 'tcp' or 'ssl'. (string value)
# qpid_protocol=tcp
# Whether to disable the Nagle algorithm. (boolean value)
# qpid_tcp_nodelay=true
# The qpid topology version to use. Version 1 is what was
# originally used by impl_qpid. Version 2 includes some
# backwards-incompatible changes that allow broker federation
# to work. Users should update to version 2 when they are
# able to take everything down, as it requires a clean break.
# (integer value)
# qpid_topology_version=1
# SSL version to use (valid only if SSL enabled). valid values
# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
# distributions. (string value)
# kombu_ssl_version=
# SSL key file (valid only if SSL enabled). (string value)
# kombu_ssl_keyfile=
# SSL cert file (valid only if SSL enabled). (string value)
# kombu_ssl_certfile=
# SSL certification authority file (valid only if SSL
# enabled). (string value)
# kombu_ssl_ca_certs=
# How long to wait before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
# kombu_reconnect_delay=1.0
# The RabbitMQ broker address where a single node is used.
# (string value)
# rabbit_host=localhost
# The RabbitMQ broker port where a single node is used.
# (integer value)
# rabbit_port=5672
# RabbitMQ HA cluster host:port pairs. (list value)
# rabbit_hosts=$rabbit_host:$rabbit_port
# Connect over SSL for RabbitMQ. (boolean value)
# rabbit_use_ssl=false
# The RabbitMQ userid. (string value)
# rabbit_userid=guest
# The RabbitMQ password. (string value)
# rabbit_password=guest
# the RabbitMQ login method (string value)
# rabbit_login_method=AMQPLAIN
# The RabbitMQ virtual host. (string value)
# rabbit_virtual_host=/
# How frequently to retry connecting with RabbitMQ. (integer
# value)
# rabbit_retry_interval=1
# How long to backoff for between retries when connecting to
# RabbitMQ. (integer value)
# rabbit_retry_backoff=2
# Maximum number of RabbitMQ connection retries. Default is 0
# (infinite retry count). (integer value)
# rabbit_max_retries=0
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
# this option, you must wipe the RabbitMQ database. (boolean
# value)
# rabbit_ha_queues=false
# If passed, use a fake RabbitMQ provider. (boolean value)
# fake_rabbit=false
# ZeroMQ bind address. Should be a wildcard (*), an ethernet
# interface, or IP. The "host" option should point or resolve
# to this address. (string value)
# rpc_zmq_bind_address=*
# MatchMaker driver. (string value)
# rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
# ZeroMQ receiver listening port. (integer value)
# rpc_zmq_port=9501
# Number of ZeroMQ contexts, defaults to 1. (integer value)
# rpc_zmq_contexts=1
# Maximum number of ingress messages to locally buffer per
# topic. Default is unlimited. (integer value)
# rpc_zmq_topic_backlog=
# Directory for holding IPC sockets. (string value)
# rpc_zmq_ipc_dir=/var/run/openstack
# Name of this node. Must be a valid hostname, FQDN, or IP
# address. Must match "host" option, if running Nova. (string
# value)
# rpc_zmq_host=oslo
# Seconds to wait before a cast expires (TTL). Only supported
# by impl_zmq. (integer value)
# rpc_cast_timeout=30
# Heartbeat frequency. (integer value)
# matchmaker_heartbeat_freq=300
# Heartbeat time-to-live. (integer value)
# matchmaker_heartbeat_ttl=600
# Size of RPC greenthread pool. (integer value)
# rpc_thread_pool_size=64
# Driver or drivers to handle sending notifications. (multi
# valued)
# notification_driver=
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# notification_topics=notifications
# Seconds to wait for a response from a call. (integer value)
# rpc_response_timeout=60
# A URL representing the messaging driver to use and its full
# configuration. If not set, we fall back to the rpc_backend
# option and driver specific configuration. (string value)
# transport_url=
# The messaging driver to use, defaults to rabbit. Other
# drivers include qpid and zmq. (string value)
rpc_backend=rabbit
# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the
# transport_url option. (string value)
# control_exchange=openstack
[matchmaker_redis]
#
# Options defined in oslo.messaging
#
# Host to locate redis. (string value)
# host=127.0.0.1
# Use this port to connect to redis host. (integer value)
# port=6379
# Password for Redis server (optional). (string value)
# password=
[matchmaker_ring]
#
# Options defined in oslo.messaging
#
# Matchmaker ring file (JSON). (string value)
# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
# ringfile=/etc/oslo/matchmaker_ring.json
[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota.driver.DbQuotaDriver
# Resource name(s) that are supported in quota features
# This option is deprecated for removal in the M release, please refrain from using it
# quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
# default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
# quota_network = 10
# Number of subnets allowed per tenant. A negative value means unlimited.
# quota_subnet = 10
# Number of ports allowed per tenant. A negative value means unlimited.
# quota_port = 50
# Number of security groups allowed per tenant. A negative value means
# unlimited.
# quota_security_group = 10
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
# quota_security_group_rule = 100
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_health_monitor = -1
# Number of loadbalancers allowed per tenant. A negative value means unlimited.
# quota_loadbalancer = 10
# Number of listeners allowed per tenant. A negative value means unlimited.
# quota_listener = -1
# Number of v2 health monitors allowed per tenant. A negative value means
# unlimited. These health monitors exist under the lbaas v2 API
# quota_healthmonitor = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
# Number of firewalls allowed per tenant. A negative value means unlimited.
# quota_firewall = 1
# Number of firewall policies allowed per tenant. A negative value means
# unlimited.
# quota_firewall_policy = 1
# Number of firewall rules allowed per tenant. A negative value means
# unlimited.
# quota_firewall_rule = 100
# Default number of RBAC entries allowed per tenant. A negative value means
# unlimited.
# quota_rbac_policy = 10
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
# Set to true to add comments to generated iptables rules that describe
# each rule's purpose. (System must support the iptables comments module.)
# comment_iptables_rules = True
# Root helper daemon application to use when possible.
# root_helper_daemon =
# Use the root helper when listing the namespaces on a system. This may not
# be required depending on the security configuration. If the root helper is
# not required, set this to False for a performance improvement.
# use_helper_for_ns_read = True
# The interval to check external processes for failure in seconds (0=disabled)
# check_child_processes_interval = 60
# Action to take when an external process spawned by an agent dies
# Values:
# respawn - Respawns the external process
# exit - Exits the agent
# check_child_processes_action = respawn
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
# report_interval = 30
# =========== end of items for agent management extension =====
[cors]
#
# From oslo.middleware.cors
#
# Indicate whether this resource may be shared with the domain received in the
# requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
# Headers. (list value)
#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-Volume-microversion
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual request.
# (list value)
#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
[cors.subdomain]
#
# From oslo.middleware.cors
#
# Indicate whether this resource may be shared with the domain received in the
# requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
# Headers. (list value)
#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-Volume-microversion
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual request.
# (list value)
#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
[keystone_authtoken]
auth_uri = {{ auth_uri }}
auth_url = {{ auth_url }}
memcached_servers = {{ memcached_servers }}
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = {{ neutron_pass }}
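# Example rendered values (illustrative only; the real values come from the
# template variables above and from the target deployment):
# auth_uri = http://controller:5000
# auth_url = http://controller:35357
# memcached_servers = controller:11211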
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql+pymysql://root:[email protected]:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite://
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.
#connection = sqlite:////var/lib/neutron/neutron.sqlite
# Database engine for which script will be generated when using offline
# migration
# engine =
# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10
# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20
# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0
# Add python stack traces to SQL as comment strings
# connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
[nova]
# Name of the plugin to load
# auth_plugin =
# Config Section from which to load plugin specific options
# auth_section =
# PEM encoded Certificate Authority to use when verifying HTTPs connections.
# cafile =
# PEM encoded client certificate cert file
# certfile =
# Verify HTTPS connections.
# insecure = False
# PEM encoded client certificate key file
# keyfile =
# Name of nova region to use. Useful if keystone manages more than one region.
# region_name =
# Timeout value for http requests
# timeout =
[oslo_concurrency]
# Directory to use for lock files. For security, the specified directory should
# only be writable by the user running the processes that need locking.
# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
# a lock path must be set.
lock_path = $state_path/lock
# Enables or disables inter-process locks.
# disable_process_locking = False
[oslo_policy]
# The JSON file that defines policies.
# policy_file = policy.json
# Default rule. Enforced when a requested rule is not found.
# policy_default_rule = default
# Directories where policy configuration files are stored.
# They can be relative to any directory in the search path defined by the
# config_dir option, or absolute paths. The file defined by policy_file
# must exist for these directories to be searched. Missing or empty
# directories are ignored.
# policy_dirs = policy.d
[oslo_messaging_amqp]
#
# From oslo.messaging
#
# Address prefix used when sending to a specific server (string value)
# Deprecated group/name - [amqp1]/server_request_prefix
# server_request_prefix = exclusive
# Address prefix used when broadcasting to all servers (string value)
# Deprecated group/name - [amqp1]/broadcast_prefix
# broadcast_prefix = broadcast
# Address prefix when sending to any server in group (string value)
# Deprecated group/name - [amqp1]/group_request_prefix
# group_request_prefix = unicast
# Name for the AMQP container (string value)
# Deprecated group/name - [amqp1]/container_name
# container_name =
# Timeout for inactive connections (in seconds) (integer value)
# Deprecated group/name - [amqp1]/idle_timeout
# idle_timeout = 0
# Debug: dump AMQP frames to stdout (boolean value)
# Deprecated group/name - [amqp1]/trace
# trace = false
# CA certificate PEM file for verifying server certificate (string value)
# Deprecated group/name - [amqp1]/ssl_ca_file
# ssl_ca_file =
# Identifying certificate PEM file to present to clients (string value)
# Deprecated group/name - [amqp1]/ssl_cert_file
# ssl_cert_file =
# Private key PEM file used to sign cert_file certificate (string value)
# Deprecated group/name - [amqp1]/ssl_key_file
# ssl_key_file =
# Password for decrypting ssl_key_file (if encrypted) (string value)
# Deprecated group/name - [amqp1]/ssl_key_password
# ssl_key_password =
# Accept clients using either SSL or plain TCP (boolean value)
# Deprecated group/name - [amqp1]/allow_insecure_clients
# allow_insecure_clients = false
[oslo_messaging_notifications]
#
# From oslo.messaging
#
# The Drivers(s) to handle sending notifications. Possible values are
# messaging, messagingv2, routing, log, test, noop (multi valued)
# Deprecated group/name - [DEFAULT]/notification_driver
#driver =
# A URL representing the messaging driver to use for notifications. If not set,
# we fall back to the same configuration used for RPC. (string value)
# Deprecated group/name - [DEFAULT]/notification_transport_url
#transport_url = <None>
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# Deprecated group/name - [DEFAULT]/notification_topics
#topics = notifications
[oslo_messaging_qpid]
#
# From oslo.messaging
#
# Use durable queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
# amqp_durable_queues = false
# Auto-delete queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
# amqp_auto_delete = false
# Size of RPC connection pool. (integer value)
# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
# rpc_conn_pool_size = 30
# Qpid broker hostname. (string value)
# Deprecated group/name - [DEFAULT]/qpid_hostname
# qpid_hostname = localhost
# Qpid broker port. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_port
# qpid_port = 5672
# Qpid HA cluster host:port pairs. (list value)
# Deprecated group/name - [DEFAULT]/qpid_hosts
# qpid_hosts = $qpid_hostname:$qpid_port
# Username for Qpid connection. (string value)
# Deprecated group/name - [DEFAULT]/qpid_username
# qpid_username =
# Password for Qpid connection. (string value)
# Deprecated group/name - [DEFAULT]/qpid_password
# qpid_password =
# Space separated list of SASL mechanisms to use for auth. (string value)
# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
# qpid_sasl_mechanisms =
# Seconds between connection keepalive heartbeats. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_heartbeat
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'. (string value)
# Deprecated group/name - [DEFAULT]/qpid_protocol
# qpid_protocol = tcp
# Whether to disable the Nagle algorithm. (boolean value)
# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
# qpid_tcp_nodelay = true
# The number of prefetched messages held by receiver. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
# qpid_receiver_capacity = 1
# The qpid topology version to use. Version 1 is what was originally used by
# impl_qpid. Version 2 includes some backwards-incompatible changes that allow
# broker federation to work. Users should update to version 2 when they are
# able to take everything down, as it requires a clean break. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_topology_version
# qpid_topology_version = 1
[oslo_messaging_rabbit]
rabbit_hosts = {{ rabbit_hosts }}
rabbit_userid = {{ rabbit_user }}
rabbit_password = {{ rabbit_password }}
#
# From oslo.messaging
#
# Use durable queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
# amqp_durable_queues = false
# Auto-delete queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
# amqp_auto_delete = false
# Size of RPC connection pool. (integer value)
# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
# rpc_conn_pool_size = 30
# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
# distributions. (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_version
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled). (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled). (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled). (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
# kombu_ssl_ca_certs =
# How long to wait before reconnecting in response to an AMQP consumer cancel
# notification. (floating point value)
# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
# kombu_reconnect_delay = 1.0
# The RabbitMQ broker address where a single node is used. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_host
# rabbit_host = localhost
# The RabbitMQ broker port where a single node is used. (integer value)
# Deprecated group/name - [DEFAULT]/rabbit_port
# rabbit_port = 5672
# RabbitMQ HA cluster host:port pairs. (list value)
# Deprecated group/name - [DEFAULT]/rabbit_hosts
# rabbit_hosts = $rabbit_host:$rabbit_port
# Connect over SSL for RabbitMQ. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
# rabbit_use_ssl = false
# The RabbitMQ userid. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_userid
# rabbit_userid = guest
# The RabbitMQ password. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_password
# rabbit_password = guest
# The RabbitMQ login method. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_login_method
# rabbit_login_method = AMQPLAIN
# The RabbitMQ virtual host. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
# rabbit_virtual_host = /
# How frequently to retry connecting with RabbitMQ. (integer value)
# rabbit_retry_interval = 1
# How long to backoff for between retries when connecting to RabbitMQ. (integer
# value)
# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
# rabbit_retry_backoff = 2
# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
# count). (integer value)
# Deprecated group/name - [DEFAULT]/rabbit_max_retries
# rabbit_max_retries = 0
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
# must wipe the RabbitMQ database. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
# rabbit_ha_queues = false
# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
# Deprecated group/name - [DEFAULT]/fake_rabbit
# fake_rabbit = false
[qos]
# Drivers list to use to send the update notification
# notification_drivers = message_queue
[ssl]
#
# From oslo.service.sslutils
#
# CA certificate file to use to verify connecting clients. (string value)
# Deprecated group/name - [DEFAULT]/ssl_ca_file
#ca_file = <None>
# Certificate file to use when starting the server securely. (string value)
# Deprecated group/name - [DEFAULT]/ssl_cert_file
#cert_file = <None>
# Private key file to use when starting the server securely. (string value)
# Deprecated group/name - [DEFAULT]/ssl_key_file
#key_file = <None>
# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
# distributions. (string value)
#version = <None>
# Sets the list of available ciphers. The value should be a string in the OpenSSL
# cipher list format. (string value)
#ciphers = <None>
"""
| {
"content_hash": "7a0dca9bf87fe62686c4c01826aaf29a",
"timestamp": "",
"source": "github",
"line_count": 1156,
"max_line_length": 118,
"avg_line_length": 34.24134948096886,
"alnum_prop": 0.744132582169113,
"repo_name": "jiasir/playback",
"id": "89858f1a3c790c1b55823b30a5949f98ef9f87d1",
"size": "39583",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "playback/templates/neutron_conf_for_agent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "766232"
}
],
"symlink_target": ""
} |
import json
import random
f = open('databases/reviewsAll.json', 'r')
reviewers = json.loads(f.read())
print "Analyzing results"
#----------------BASIC STATS------------------------------
duplicates = {}
train_size = 22000
train_reviews = []
test_reviews = []
total_set = []
for reviewer in reviewers:
for review in reviewers[reviewer]:
total_set.append(review)
random.shuffle(total_set)
c = train_size
for review in total_set:
if(c > 0):
train_reviews.append(review)
c-=1
else:
test_reviews.append(review)
print str(len(test_reviews) + len(train_reviews)) + " reviews found."
for reviewer in reviewers:
if(len(reviewers[reviewer]) > 4):
duplicates[reviewer] = reviewers[reviewer]
print str(len(duplicates)) + " reviewers with more than 4 reviews"
print "\n"
#---------------------AVERAGES---------------------------
print "Let's start by just checking to see review averages."
total = 0
num = 0
for reviewer in reviewers:
for review in reviewers[reviewer]:
total += int(review['score'])
num += 1
mean = float(total)/num
print "Mean: " + str(mean)
#print "Median: " + str()
varience = 0
mode = {}
modePercentage = []
import math
for reviewer in reviewers:
for review in reviewers[reviewer]:
if(review['score'] not in mode): mode[review['score']] = 0
mode[review['score']] += 1
varience += math.pow(float(review['score']) - mean, 2)
dev = math.sqrt(varience / num)  # standard deviation: divide by the sample count, not the score sum
print "Standard Deviation: " + str(dev)
for score in range(0,11):
print(str(score) + " stars: " + str(mode[str(score)]) + ", " + str(int(float(mode[str(score)])/num*100)))
# Score distribution as fractions; used below by evaluate_rigorous_dist.
for score in range(0,11):
modePercentage.append(float(mode[str(score)])/num)
print '\n'
from Model import Model
test_model = Model()
test_model.UpdateBaselines(train_reviews)
print "Mean: " + str(test_model.mean)
print('training model')
from nltk.corpus import stopwords
test_model.exclude = stopwords.words('english')
test_model.Train(train_reviews)
print('done, running evaluations.\n')
from Evaluation import evaluate_rigorous_dist
evaluate_rigorous_dist(test_reviews, test_model.wordReviewNet, modePercentage, test_model)
#test_model.Guess(test_reviews[0]["review"], test_reviews[0]["score"], True)
### Generate confusion matrix of scores
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
score_true = []
score_guess = []
for i, review in enumerate(test_reviews):
score_true.append(int(review['score']))
score_guess.append(test_model.GuessScore(review['review']))
cm = confusion_matrix(score_true, score_guess)
plt.matshow(cm)
plt.colorbar()
plt.xlabel('Predicted Score')
plt.ylabel('True Score')
### Precision Recall F1
from sklearn.metrics import precision_recall_fscore_support
prfs = precision_recall_fscore_support(score_true, score_guess)
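# Illustrative follow-up (hedged sketch, not part of the original script): unpack
# the precision/recall/F1/support arrays and show the plots. sklearn orders the
# rows by the sorted set of labels seen in the true and predicted scores.
#
# precision, recall, f1, support = prfs
# for label, p, r in zip(sorted(set(score_true) | set(score_guess)), precision, recall):
#     print("%d stars: precision=%.2f recall=%.2f" % (label, p, r))
# plt.show()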
| {
"content_hash": "0b264b000f2b1cd9f6349698adba5144",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 106,
"avg_line_length": 22.48,
"alnum_prop": 0.6914590747330961,
"repo_name": "danShumway/Game-Review-Analyzation",
"id": "19788e8318eb6d2f0f3099ae1295f3c1ec3d7cac",
"size": "2810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AnalyzeResults.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15844"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import time
import logging
import threading
import logging.config
import numpy as np
from datetime import datetime
from collections import defaultdict
from .io import discover_hosts, io_from_host, Ws
from .modules import name2mod
from anytree import AnyNode, RenderTree
def run_from_unittest():
return 'unittest' in sys.modules
known_host = {
'ergo': ['/dev/cu.usbserial-DN2AAOVK', '/dev/cu.usbserial-DN2YEFLN'],
'handy': ['/dev/cu.usbserial-DN2X236E'],
'eddy': ['pi-gate.local'],
}
class moduList(list):
def __repr__(self):
s = '-------------------------------------------------\n'
s += '{:<20s}{:<20s}{:<5s}\n'.format("Type", "Alias", "ID")
s += '-------------------------------------------------\n'
for elem in self:
s += '{:<20s}{:<20s}{:<5d}\n'.format(elem.type, elem.alias, elem.id)
return s
class nodeList(list):
def __repr__(self):
# Display the topology
s = ''
for pre, fill, node in RenderTree(self[0]):
if (node.parent == None):
branch = " root : "
else:
l_port_id = [i for i,x in enumerate(node.parent.port_table) if x == node.modules[0].id]
r_port_id = node.port_table.index(min(node.port_table))
branch = str(l_port_id[0]) + "<=>" + str(r_port_id) + " : "
s += "%s%s%s\n" % (pre, branch, node.id)
s += fill + " | " + '{:<20s}{:<20s}{:<5s}\n'.format("Type", "Alias", "ID")
for y,elem in enumerate(node.modules):
s += fill + " └> " + '{:<20s}{:<20s}{:<5d}\n'.format(elem.type, elem.alias, elem.id)
return s
class Device(object):
_heartbeat_timeout = 5 # in sec.
_max_alias_length = 15
_base_log_conf = os.path.join(os.path.dirname(__file__),
'logging_conf.json')
@classmethod
def discover(cls):
hosts = discover_hosts()
possibilities = {
k: [h for h in v if h in hosts]
for k, v in known_host.items()
}
return possibilities
def __init__(self, host,
IO=None,
log_conf=_base_log_conf,
test_mode=False,
*args, **kwargs):
if IO is not None:
self._io = IO(host=host, *args, **kwargs)
else:
self._io = io_from_host(host=host,
*args, **kwargs)
if os.path.exists(log_conf):
with open(log_conf) as f:
config = json.load(f)
logging.config.dictConfig(config)
self.logger = logging.getLogger(__name__)
self.logger.info('Connected to "{}".'.format(host))
self._send_lock = threading.Lock()
self._cmd_lock = threading.Lock()
        # We force a first poll to set up our model.
self._setup()
self.logger.info('Device setup.')
self._last_update = time.time()
self._running = True
self._pause = False
# Setup both poll/push synchronization loops.
self._poll_bg = threading.Thread(target=self._poll_and_up)
self._poll_bg.daemon = True
self._poll_bg.start()
self._baudrate = 1000000
def close(self):
self._running = False
self._poll_bg.join()
self._io.close()
@property
def baudrate(self):
return self._baudrate
@baudrate.setter
def baudrate(self, baudrate):
self._send({'baudrate': baudrate})
self._baudrate = baudrate
time.sleep(0.01)
def benchmark(self, target_id, data, repetition):
data = np.array(data, dtype=np.uint8)
self._bench_settings = {'benchmark': {'target': target_id, 'repetitions': repetition, 'data': [len(data)]}}
self._bench_Data = data.tobytes()
self._write( json.dumps(self._bench_settings).encode() + '\r'.encode() + self._bench_Data)
state = self._poll_once()
while ('benchmark' not in state):
state = self._poll_once()
#self._pause = False
return (state['benchmark']['data_rate'], state['benchmark']['fail_rate'])
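    # Illustrative usage (hedged; the target id, payload and repetition count
    # below are assumptions for documentation only):
    #
    #     data_rate, fail_rate = device.benchmark(target_id=2,
    #                                             data=[0] * 64,
    #                                             repetition=10)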
def pause(self):
self._pause = True
time.sleep(1)
def play(self):
self._pause = False
def _setup(self):
self.logger.info('Sending detection signal.')
self._send({'detection': {}})
self.logger.info('Waiting for route table...')
startTime = time.time()
state = self._poll_once()
while ('route_table' not in state):
state = self._poll_once()
if (time.time()-startTime > 1):
self._send({'detection': {}})
startTime = time.time()
# Create nodes
self._modules = []
self._nodes = []
for i, node in enumerate(state['route_table']):
parent_elem = None
# find a parent and create a link
if (min(node["port_table"]) < node["modules"][0]["id"]):
parent_id = min(node["port_table"])
for elem in self._nodes:
for module in elem.modules:
if (module.id == parent_id):
parent_elem = elem
break;
# create the node
self._nodes.append(AnyNode(id=node["uuid"], parent=parent_elem, port_table=node["port_table"]))
filtered_modules = moduList([mod for mod in node["modules"]
if 'type' in mod and mod['type'] in name2mod.keys()])
# Create a list of modules in the node
self._nodes[i].modules = [
name2mod[mod['type']](id=mod['id'],
alias=mod['alias'],
device=self)
for mod in filtered_modules
if 'type' in mod and 'id' in mod and 'alias' in mod
]
# Create a list of modules of the entire device
self._modules = self._modules + self._nodes[i].modules
for mod in self._nodes[i].modules:
setattr(self, mod.alias, mod)
self._cmd = defaultdict(lambda: defaultdict(lambda: None))
self._cmd_data = []
self._binary = []
# We push our current state to make sure that
# both our model and the hardware are synced.
self._push_once()
@property
def modules(self):
return moduList(self._modules)
@property
def nodes(self):
return nodeList(self._nodes)
# Poll state from hardware.
def _poll_once(self):
self._state = self._io.read()
self._state['timestamp'] = time.time()
return self._state
def _poll_and_up(self):
while self._running:
            if not self._pause:
state = self._poll_once()
self._update(state)
self._push_once()
            else:
time.sleep(0.1)
# Update our model with the new state.
def _update(self, new_state):
        if 'dead_module' in new_state:
            # we have lost a module; flag it as dead on our model
alias = new_state['dead_module']
if hasattr(self, alias):
getattr(self, alias)._kill()
if 'modules' not in new_state:
return
for alias, mod in new_state['modules'].items():
if hasattr(self, alias):
getattr(self, alias)._update(mod)
self._last_update = time.time()
def update_cmd(self, alias, key, val):
with self._cmd_lock:
self._cmd[alias][key] = val
def update_data(self, alias, key, val, data):
with self._cmd_lock:
self._cmd_data.append({alias: {key: val}})
self._binary.append(data.tobytes())
def _push_once(self):
with self._cmd_lock:
if self._cmd:
self._write( json.dumps({'modules': self._cmd}).encode())
self._cmd = defaultdict(lambda: defaultdict(lambda: None))
for cmd, binary in zip(self._cmd_data, self._binary):
time.sleep(0.01)
self._write( json.dumps({'modules': cmd}).encode() + '\r'.encode() + binary)
self._cmd_data = []
self._binary = []
def _send(self, msg):
with self._send_lock:
self._io.send(msg)
def _write(self, data):
with self._send_lock:
self._io.write(data)
| {
"content_hash": "828d9acd25ea338c0c91ea1c96e790c5",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 115,
"avg_line_length": 32.665399239543724,
"alnum_prop": 0.5133279012920499,
"repo_name": "pollen/pyrobus",
"id": "25bef08bd19409f19a96c78f80ece9e16b4109c5",
"size": "8593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyluos/device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40451"
}
],
"symlink_target": ""
} |
import os
from converter.avcodecs import video_codec_list, audio_codec_list, subtitle_codec_list
from converter.formats import format_list
from converter.ffmpeg import FFMpeg, FFMpegError, FFMpegConvertError
class ConverterError(Exception):
pass
class Converter(object):
"""
Converter class, encapsulates formats and codecs.
>>> c = Converter()
"""
def __init__(self, ffmpeg_path=None, ffprobe_path=None):
"""
Initialize a new Converter object.
"""
self.ffmpeg = FFMpeg(ffmpeg_path=ffmpeg_path,
ffprobe_path=ffprobe_path)
self.video_codecs = {}
self.audio_codecs = {}
self.subtitle_codecs = {}
self.formats = {}
for cls in audio_codec_list:
name = cls.codec_name
self.audio_codecs[name] = cls
for cls in video_codec_list:
name = cls.codec_name
self.video_codecs[name] = cls
for cls in subtitle_codec_list:
name = cls.codec_name
self.subtitle_codecs[name] = cls
for cls in format_list:
name = cls.format_name
self.formats[name] = cls
def parse_options(self, opt, twopass=None):
"""
Parse format/codec options and prepare raw ffmpeg option list.
"""
format_options = None
audio_options = []
video_options = []
subtitle_options = []
if not isinstance(opt, dict):
raise ConverterError('Invalid output specification')
if 'format' not in opt:
raise ConverterError('Format not specified')
f = opt['format']
if f not in self.formats:
raise ConverterError('Requested unknown format: ' + str(f))
format_options = self.formats[f]().parse_options(opt)
if format_options is None:
raise ConverterError('Unknown container format error')
if 'audio' not in opt and 'video' not in opt and 'subtitle' not in opt:
raise ConverterError('Neither audio nor video nor subtitle streams requested')
if 'audio' not in opt:
opt['audio'] = {'codec': None}
if 'subtitle' not in opt:
opt['subtitle'] = {'codec': None}
# Audio
y = opt['audio']
        # Creates the new nested dictionary to preserve backwards compatibility
try:
first = list(y.values())[0]
if not isinstance(first, dict):
y = {0: y}
except IndexError:
pass
for n in y:
x = y[n]
if not isinstance(x, dict) or 'codec' not in x:
raise ConverterError('Invalid audio codec specification')
if 'path' in x and 'source' not in x:
raise ConverterError('Cannot specify audio path without FFMPEG source number')
if 'source' in x and 'path' not in x:
raise ConverterError('Cannot specify alternate input source without a path')
c = x['codec']
if c not in self.audio_codecs:
raise ConverterError('Requested unknown audio codec ' + str(c))
audio_options.extend(self.audio_codecs[c]().parse_options(x, n))
if audio_options is None:
raise ConverterError('Unknown audio codec error')
# Subtitle
y = opt['subtitle']
        # Creates the new nested dictionary to preserve backwards compatibility
try:
first = list(y.values())[0]
if not isinstance(first, dict):
y = {0: y}
except IndexError:
pass
for n in y:
x = y[n]
if not isinstance(x, dict) or 'codec' not in x:
raise ConverterError('Invalid subtitle codec specification')
if 'path' in x and 'source' not in x:
raise ConverterError('Cannot specify subtitle path without FFMPEG source number')
if 'source' in x and 'path' not in x:
raise ConverterError('Cannot specify alternate input source without a path')
c = x['codec']
if c not in self.subtitle_codecs:
raise ConverterError('Requested unknown subtitle codec ' + str(c))
subtitle_options.extend(self.subtitle_codecs[c]().parse_options(x, n))
if subtitle_options is None:
raise ConverterError('Unknown subtitle codec error')
if 'video' in opt:
x = opt['video']
if not isinstance(x, dict) or 'codec' not in x:
raise ConverterError('Invalid video codec specification')
c = x['codec']
if c not in self.video_codecs:
raise ConverterError('Requested unknown video codec ' + str(c))
video_options = self.video_codecs[c]().parse_options(x)
if video_options is None:
raise ConverterError('Unknown video codec error')
# aggregate all options
optlist = video_options + audio_options + subtitle_options + format_options
if twopass == 1:
optlist.extend(['-pass', '1'])
elif twopass == 2:
optlist.extend(['-pass', '2'])
return optlist
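    # Hedged example (not part of the original module, and assuming ffmpeg and
    # ffprobe are available): a minimal options dict of the shape
    # parse_options() expects, mirroring the one shown in the convert()
    # docstring below; the returned optlist is the raw option list handed to
    # FFMpeg.convert().
    #
    #   opts = {'format': 'mkv',
    #           'audio': {'codec': 'aac'},
    #           'video': {'codec': 'h264'}}
    #   optlist = Converter().parse_options(opts)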
def convert(self, infile, outfile, options, twopass=False, timeout=10, preopts=None, postopts=None):
"""
Convert media file (infile) according to specified options, and
save it to outfile. For two-pass encoding, specify the pass (1 or 2)
in the twopass parameter.
Options should be passed as a dictionary. The keys are:
* format (mandatory, string) - container format; see
formats.BaseFormat for list of supported formats
* audio (optional, dict) - audio codec and options; see
avcodecs.AudioCodec for list of supported options
* video (optional, dict) - video codec and options; see
avcodecs.VideoCodec for list of supported options
* map (optional, int) - can be used to map all content of stream 0
Multiple audio/video streams are not supported. The output has to
have at least an audio or a video stream (or both).
Convert returns a generator that needs to be iterated to drive the
        conversion process. The generator periodically yields the timecode
        of the part of the file currently being processed (i.e. how many
        seconds into the content the conversion currently is).
        The optional timeout argument specifies how long the operation may
        block in case ffmpeg gets stuck and doesn't report back. This
        doesn't limit the total conversion time, just the amount of time
        Converter will wait for each update from ffmpeg. As updates usually
        arrive more often than once per second, the default of 10 is
        reasonable. To disable the timeout, set it to None. You may need to
        do this if using Converter in a threaded environment, since the way
        the timeout is handled (using signals) has special restrictions when
        used with threads.
>>> conv = Converter().convert('test1.ogg', '/tmp/output.mkv', {
... 'format': 'mkv',
... 'audio': { 'codec': 'aac' },
... 'video': { 'codec': 'h264' }
... })
>>> for timecode in conv:
... pass # can be used to inform the user about the progress
"""
if not isinstance(options, dict):
raise ConverterError('Invalid options')
if not os.path.exists(infile):
raise ConverterError("Source file doesn't exist: " + infile)
info = self.ffmpeg.probe(infile)
if info is None:
raise ConverterError("Can't get information about source file")
if not info.video and not info.audio:
raise ConverterError('Source file has no audio or video streams')
if info.video and 'video' in options:
options = options.copy()
v = options['video'] = options['video'].copy()
v['src_width'] = info.video.video_width
v['src_height'] = info.video.video_height
if info.format.duration < 0.01:
raise ConverterError('Zero-length media')
if twopass:
optlist1 = self.parse_options(options, 1)
for timecode in self.ffmpeg.convert(infile, outfile, optlist1,
timeout=timeout, preopts=preopts, postopts=postopts):
yield int((50.0 * timecode) / info.format.duration)
optlist2 = self.parse_options(options, 2)
for timecode in self.ffmpeg.convert(infile, outfile, optlist2,
timeout=timeout, preopts=preopts, postopts=postopts):
yield int(50.0 + (50.0 * timecode) / info.format.duration)
else:
optlist = self.parse_options(options, twopass)
for timecode in self.ffmpeg.convert(infile, outfile, optlist,
timeout=timeout, preopts=preopts, postopts=postopts):
yield int((100.0 * timecode) / info.format.duration)
def probe(self, fname, posters_as_video=True):
"""
Examine the media file. See the documentation of
converter.FFMpeg.probe() for details.
:param posters_as_video: Take poster images (mainly for audio files) as
            a video stream, defaults to True
"""
return self.ffmpeg.probe(fname, posters_as_video)
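    # Hedged usage sketch (not in the original source): probe() returns whatever
    # FFMpeg.probe() produces; the attributes shown below are the ones convert()
    # itself relies on, and the file name is made up.
    #
    #   info = Converter().probe('test1.ogg')
    #   if info is not None:
    #       print(info.format.duration, bool(info.video), bool(info.audio))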
def thumbnail(self, fname, time, outfile, size=None, quality=FFMpeg.DEFAULT_JPEG_QUALITY):
"""
Create a thumbnail of the media file. See the documentation of
converter.FFMpeg.thumbnail() for details.
"""
return self.ffmpeg.thumbnail(fname, time, outfile, size, quality)
def thumbnails(self, fname, option_list):
"""
        Create one or more thumbnails of the media file. See the documentation
of converter.FFMpeg.thumbnails() for details.
"""
return self.ffmpeg.thumbnails(fname, option_list)
| {
"content_hash": "abaff9ebd87669064fccb308f3121603",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 104,
"avg_line_length": 38.22932330827068,
"alnum_prop": 0.590520208476743,
"repo_name": "Filechaser/sickbeard_mp4_automator",
"id": "5036b350f6aed880dabe861d8c5dcb600b624a7b",
"size": "10188",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "converter/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "639"
},
{
"name": "Python",
"bytes": "431318"
},
{
"name": "Shell",
"bytes": "4590"
}
],
"symlink_target": ""
} |
import json
import logging as log
import urllib, urllib2
from httplib import HTTPException
from cookielib import CookieJar
from bs4 import BeautifulSoup
from google.appengine.api import urlfetch
from pprint import pprint
class Binary():
def __init__(self, auto_login):
urlfetch.set_default_fetch_deadline(60)
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(CookieJar()))
self.opener.addheaders = [
('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:36.0) Gecko/20100101 Firefox/36.0')]
self.url_login = 'https://www.binary.com/login?l=EN'
self.url_statement = 'https://www.binary.com/user/statement?l=EN'
self.url_profit_table = 'https://www.binary.com/d/profit_table.cgi?l=EN'
self.url_prices = 'https://www.binary.com/d/trade_price.cgi'
self.url_purchase = 'https://vr-deal01.binary.com/c/trade.cgi'
self.username = 'VRTC609286'
self.password = 'binary2com'
if auto_login:
self.login()
def login(self):
log.info('Binary logging in...')
formdata = {
'loginid': self.username,
'password': self.password,
}
data_encoded = urllib.urlencode(formdata)
for _ in range(5):
try:
response = self.opener.open(self.url_login, data_encoded)
log.info('Binary auth response {0}'.format(response.getcode()))
if '<span class="clientid">VRTC609286</span>' not in response.read():
raise Exception('Could not log into Binary.com')
break
except HTTPException as e:
log.warn('Could not log in...')
else:
raise e
log.info('Binary logged in')
def getStatement(self):
log.info('Binary: statement retrieving...')
response = self.opener.open(self.url_statement)
html = BeautifulSoup(response.read())
table = html.find('div', id='statement-table')
if not table:
raise Exception('No html table in statement')
statement = {}
for row in table.find_all('div', class_='table-body'):
divs = row.find_all('div', recursive=False)
# log.info('{0} divs in row'.format(len(divs)))
ref = divs[0].find_all('div')[-1].text
# log.info('{0}'.format(ref))
payout = divs[2].find('span').text
# log.info('payout div {0}'.format(divs[2]))
# log.info('Ref {0} payout {1}'.format(ref, payout))
statement[ref] = payout
log.info('Binary: statement retrieved {0}'.format(len(statement)))
return statement
def getProfitTable(self):
log.info('Binary: profit table retrieving...')
response = self.opener.open(self.url_profit_table)
html = BeautifulSoup(response.read())
table = html.find('div', id='profit-table')
if not table:
raise Exception('No html table found')
profit_table = {}
for row in table.find_all('div', class_='table-body'):
divs = row.find_all('div', recursive=False)
# log.info('{0} divs in row'.format(len(divs)))
ref = divs[0].find_all('div')[1].text
# log.info('{0}'.format(ref))
profit_loss = float(divs[2].find_all('div')[-1].text.replace(',', '').strip())
# log.info('payout div {0}'.format(divs[2]))
# log.info('Ref {0} profit/loss {1}'.format(ref, profit_loss))
profit_table[ref] = profit_loss
log.info('Binary: profit table retrieved {0}'.format(len(profit_table)))
return profit_table
def createNew(self, run):
log.info('Binary trade creating...')
for _ in xrange(5):
# get prices
prices = self.getPrices(run)
item = self.filterTradeFromPrices(run, prices)
# update payout if martingale
if run.step > 1:
profit_required = abs(run.profit_parent) + (1 / float(run.step))
# calculate correct payout with probabilities
run.payout = round(profit_required / (1 - item['payload']['prob']), 2)
log.info('Payout updated to {0:.2f} for required profit of {1:.2f}'.format(run.payout, profit_required))
# get price with updated payout
prices = self.getPrices(run)
item = self.filterTradeFromPrices(run, prices)
run.probability = item['payload']['prob']
# create request
req = urllib2.Request(item['url'], data=urllib.urlencode(item['payload']))
# submit
res = self.purchaseTrade(req)
if not res['success']:
log.info('Create purchase try {0} failed'.format(_))
continue
# finished
run.binary_ref = res['ref']
run.stake = res['stake']
return True
log.info('Binary trade creation failed')
return False
def getPrices(self, run):
log.info('Binary prices retrieving...')
payload = {
'l': 'EN',
'submarket': 'major_pairs',
'date_start': 'now',
'expiry_type': 'duration',
'duration_units': 'm',
'expiry_date': '',
'expiry_time': '',
'pip_size': '',
'amount_type': 'payout',
'currency': 'USD',
'barrier_type': 'relative',
'market': 'forex',
'showohlc': 'yes',
'controller_action': 'price_box',
'form_name': 'risefall',
'form_parent': 'risefall',
'extratab': 'intradayprices',
't': '%PREFIX%',
'ajax_only': 1,
'price_only': 1,
}
payload['underlying_symbol'] = 'frx{0}'.format('EURUSD')
payload['st'] = 'frx{0}'.format('EURUSD')
payload['duration_amount'] = run.time_frame
payload['expiry'] = '{0}m'.format(run.time_frame)
payload['amount'] = run.payout
log.info('Params: {0}'.format(payload))
data_encoded = urllib.urlencode(payload)
res = self.opener.open(self.url_prices, data_encoded)
# log.info(res.read())
html = BeautifulSoup(res.read())
html_forms = html.find_all('form', class_='orderform')
# log.info('html forms {0}'.format(html_forms))
data = []
for form in html_forms:
item = {
'url': form['action'],
'payload': {
'ajax_only': 1,
},
}
for input in form.find_all('input'):
val = input['value'] if input['name'] not in ['payout', 'price', 'prob', 'opposite_prob'] else float(input['value'])
item['payload'][input['name']] = val
log.info('Binary prices form {0}'.format(item))
data.append(item)
log.info('Binary {0} prices retrieved'.format(len(data)))
return data
def purchaseTrade(self, req):
log.info('Binary trade purchasing...')
res = self.opener.open(req).read()
# pprint(res)
# decode
res = json.loads(res)
# pprint(res)
# error?
if 'error' in res:
log.warn(res['error'])
return {'success': False, 'error': res['error']}
ref = res['trade_ref']
html = BeautifulSoup(res['display'])
stake = float(html.find('span', id='contract-outcome-buyprice').text)
log.info('Binary trade purchased {0} with stake {1:.2f}'.format(ref, stake))
return {'success': True, 'ref': ref, 'stake': stake}
def filterTradeFromPrices(self, run, prices):
log.info('Binary filtering prices...')
payouts = [prices[0]['payload']['payout'], prices[1]['payload']['payout']]
# selection is based on trade (base and aim)
if run.trade_base == 'payout':
            # aim: highest => payout
if run.trade_aim == 'higher':
if payouts[0] > payouts[1]:
item = prices[0]
log.info('{0} & {1} = {2:.2f} on rise'.format(run.trade_base, run.trade_aim, payouts[0]))
else:
item = prices[1]
log.info('{0} & {1} = {2:.2f} on fall'.format(run.trade_base, run.trade_aim, payouts[1]))
else:
if payouts[0] > payouts[1]:
item = prices[1]
log.info('{0} & {1} = {2:.2f} on rise'.format(run.trade_base, run.trade_aim, payouts[1]))
else:
item = prices[0]
log.info('{0} & {1} = {2:.2f} on fall'.format(run.trade_base, run.trade_aim, payouts[0]))
elif run.trade_base == 'directional':
# aim: highest => rise
if run.trade_aim == 'higher':
item = prices[0]
log.info('{0} & {1} = {2:.2f} on rise'.format(run.trade_base, run.trade_aim, payouts[0]))
else:
item = prices[1]
log.info('{0} & {1} = {2:.2f} on fall'.format(run.trade_base, run.trade_aim, payouts[1]))
else:
raise Exception('Unknown trade base {0}'.format(run.trade_base))
log.info('Binary filtered prices')
return item
| {
"content_hash": "c3dc457f239cb5bacb43851884fa76de",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 132,
"avg_line_length": 37.29644268774704,
"alnum_prop": 0.5292496820686732,
"repo_name": "Tjorriemorrie/trading",
"id": "307a9d64554f902b62ba7d9c491b10928b311b4f",
"size": "9436",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "21_gae_kelly/binary/binary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "586"
},
{
"name": "HTML",
"bytes": "10059"
},
{
"name": "JavaScript",
"bytes": "1812"
},
{
"name": "Jupyter Notebook",
"bytes": "682876"
},
{
"name": "Less",
"bytes": "671"
},
{
"name": "M4",
"bytes": "18975"
},
{
"name": "Python",
"bytes": "636401"
},
{
"name": "Shell",
"bytes": "670"
},
{
"name": "q",
"bytes": "478327533"
}
],
"symlink_target": ""
} |
import functools
from django.http import Http404
from django.views.defaults import page_not_found, server_error
from django.views.generic import TemplateView
from .data import EXTRA_DATA
from .utils import (
TemplateExistanceStatusResponse,
collect_language_codes,
)
class IndexView(TemplateView):
template_name = 'index.html'
class FlatPageView(TemplateView):
response_class = TemplateExistanceStatusResponse
def get(self, request, *args, **kwargs):
path = self.kwargs['path'].strip('/')
if not path or any(c.startswith('_') for c in path.split('/')):
raise Http404
self.path = path
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
data.update(EXTRA_DATA.get(self.path, {}))
return data
def get_template_names(self):
"""Look up template from path.
Template name is built from the path, with leading and trailing
slashes stripped, language code and "contents/" prepended,
and ".html" appended.
Examples:
* "/speaking/cfp/" -> "contents/<lang>/speaking/cfp.html"
* "overview/pycontw/" -> "contents/<lang>/overview/pycontw.html"
If a matching template is not found, HTTP 404 will be raised.
To get a URL to a particular page, use something like
``{% url 'page' path='speaking/cfp' %}`` or
``reverse('page', kwargs={'path': 'speaking/cfp'})``
        For implementation convenience, template paths with any component
        starting with an underscore are ignored and cannot be accessed
        here. This prevents visitors from accidentally seeing pages like
        "speaking/base.html".
"""
template_names = [
'/'.join(['contents', code, self.path + '.html'])
for code in collect_language_codes(self.request.LANGUAGE_CODE)
]
return template_names
index = IndexView.as_view()
flat_page = FlatPageView.as_view()
def error_page(request, code):
"""A proxy view displaying error pages.
"""
try:
view_func = {
'404': functools.partial(page_not_found, exception=Http404()),
'500': server_error,
}[code]
except KeyError:
raise Http404
return view_func(request)
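# Hedged wiring sketch (not part of the original file): error_page expects the
# error code captured from the URL, so a URLconf entry along these lines would
# route it; the exact pattern and the Django 1.x-style url() import are
# assumptions, not taken from this project.
#
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^(?P<code>404|500)/$', error_page, name='error_page'),
#   ]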
| {
"content_hash": "3ca8e33b7d62e0a93e334acb8f22e401",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 75,
"avg_line_length": 30.307692307692307,
"alnum_prop": 0.6324027072758037,
"repo_name": "uranusjr/pycontw2016",
"id": "edfad4c9183726ea2795ba80e3a351cb8f4db84e",
"size": "2364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "59719"
},
{
"name": "HTML",
"bytes": "141751"
},
{
"name": "JavaScript",
"bytes": "4475"
},
{
"name": "Python",
"bytes": "229546"
},
{
"name": "Shell",
"bytes": "389"
}
],
"symlink_target": ""
} |
import json
import networkx as nx
import matplotlib.pyplot as plt
def get_structures(fname):
with open(fname) as struct_files:
structures = json.loads(struct_files.read())
return structures
def get_colors():
colors = {
'blood_pressure' : '#57B4E6',
'blood_glucose' : '#57B4E6',
'full_blood_count' : '#57B4E6',
'lipids' : '#57B4E6',
'liver_function': '#57B4E6',
'thyroid' : '#57B4E6',
'urea_and_electrolytes' : '#57B4E6',
'urin_analysis' : '#57B4E6',
'root' : '#464F4F'
}
for i in xrange(20):
colors['composition.lbl-%05d' % i] = '#943B3B'
return colors
def create_graph(graph, parent, structure, colors, child=0):
try:
parent_index = parent.split('@')[1]
except (IndexError, AttributeError):
parent_index = 0
if isinstance(structure, dict):
for node_name, node_children in structure.iteritems():
node_type = node_name
if parent is None:
node_name = 'composition_root'
node_type = 'root'
else:
node_name = "%s@%s.%s" % (node_name, parent_index, child)
if not node_name.startswith('composition'):
raise ValueError('Container type %s unknown' % node_name)
graph.add_node(node_name, color=colors[node_type])
if parent:
graph.add_edge(parent, node_name)
for i, child in enumerate(node_children):
create_graph(graph, node_name, child, colors, i)
return graph
else:
node_type = structure
node_name = "%s@%s.%s" % (structure, parent_index, child)
graph.add_node(node_name, color=colors[node_type])
if parent:
graph.add_edge(parent, node_name)
return graph
def draw_graph(data, label):
#p=nx.single_source_shortest_path_length(G,ncenter)
#for i in xrange(0, graphs_num):
G = nx.DiGraph()
create_graph(G, None, data, get_colors(), 0)
node_colors = [G.node[k]['color'] for k in G.nodes()]
# same layout using matplotlib with no labels
plt.title("draw_networkx")
prog = 'dot'
pos = nx.graphviz_layout(G, prog=prog)
nx.draw(G, pos=pos, node_size=1000, with_labels=False,
arrows=False, node_color=node_colors, K=10)
    nx.write_dot(G, 'test%s.dot' % i)  # NOTE: 'i' is the module-level loop variable below, not a parameter of this function
# plt.show()
fig = plt.gcf()
fig.set_size_inches(18.5,10.5)
plt.savefig('nx_test%s%s.png' % (label, prog))
plt.clf()
if __name__ == '__main__':
strs = get_structures('structures_file.json')
graphs_num = 10
for i, g in enumerate(strs[:graphs_num]):
draw_graph(g, i)
# draw_graph(strs[1], 1)
# draw_graph(strs[50], 50) | {
"content_hash": "d6105953a166a32a28bbc775a379d25b",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 73,
"avg_line_length": 32.21176470588235,
"alnum_prop": 0.5774287801314828,
"repo_name": "lucalianas/pyEHR",
"id": "bb1077ca56721c2bad5df420a2c5ab0b054c9c30",
"size": "2738",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "test/misc/test_query_performance/old_stuff/graph_drawer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "489304"
},
{
"name": "Shell",
"bytes": "2461"
}
],
"symlink_target": ""
} |
import os
import shutil
import zipfile
from prefs.zippref import ZipPref
from log import logger
log = logger.getLogger(__name__)
class ZipBuild:
__zipPref = ZipPref()
def __init__(self):
# init logger
self.__logger = log
def setPref(self, pref):
self.__zipPref = pref
def __getAllFilePath(self, folderPath):
if not os.path.exists(folderPath):
            self.__logger.error('path does not exist: ' + folderPath)
return []
allFilePath = []
allFilePath.append(folderPath)
fileNames = os.listdir(folderPath)
if len(fileNames) == 0:
return allFilePath
for fileName in fileNames:
filePath = os.path.join(folderPath, fileName)
if os.path.isfile(filePath):
allFilePath.append(filePath)
elif os.path.isdir(filePath):
                if fileName == '.svn':  # skip .svn metadata folders
continue
subFiles = self.__getAllFilePath(filePath)
if len(subFiles) > 0:
for f in subFiles:
allFilePath.append(f)
return allFilePath
def __zipFolderFiles(self, zipFilePath, folderPath):
if not os.path.exists(folderPath):
            self.__logger.error('path does not exist: ' + folderPath)
return False
if folderPath.endswith('/') or folderPath.endswith('\\'):
folderPath = folderPath[:-1]
if os.path.exists(zipFilePath):
os.remove(zipFilePath)
filePaths = self.__getAllFilePath(folderPath)
zipfiles = zipfile.ZipFile(zipFilePath, 'w', compression=zipfile.ZIP_DEFLATED)
for filePath in filePaths:
arcFilePath = filePath[len(folderPath):]
zipfiles.write(filePath, arcFilePath)
zipfiles.close()
        self.__logger.debug('zip file created successfully')
return True
def build(self):
self.__logger.debug('begin build process')
srcCodePath = self.__zipPref.getSourceCodeRootPath()
if not os.path.exists(srcCodePath):
            self.__logger.error('source code path does not exist: ' + srcCodePath)
return False
# clean old
self.__logger.info('\nclean old builds...')
buildPath = self.__zipPref.getRestoreBuildPath()
if len(buildPath) == 0:
self.__logger.error('build path is empty')
return False
if not os.path.exists(buildPath):
os.makedirs(buildPath)
else:
shutil.rmtree(buildPath)
os.makedirs(buildPath)
# build new
self.__logger.info('\npack zip...')
for module in self.__zipPref.getModulesByDeployIndex():
folderPath = self.__zipPref.getModuleSourceFolderByModuleName(module['module'])
zipFilePath = self.__zipPref.getZipFilePathByModuleName(module['module'])
if not self.__zipFolderFiles(zipFilePath, folderPath):
self.__logger.error('pack zip file error: ' + module['module'])
return False
self.__logger.info('build process done...')
return True
| {
"content_hash": "7204c0eceebd7e193ff94c5fe4e211a2",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 91,
"avg_line_length": 34,
"alnum_prop": 0.5850727387729285,
"repo_name": "yu757371316/MySQL",
"id": "0bfcdc2d229ae168612708056181f1c4a44e7ec5",
"size": "3264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user_manager/materials/script/comp/zipbuild.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1042"
},
{
"name": "Java",
"bytes": "10982"
},
{
"name": "PLpgSQL",
"bytes": "802726"
},
{
"name": "Python",
"bytes": "54730"
},
{
"name": "SQLPL",
"bytes": "579866"
},
{
"name": "Shell",
"bytes": "22615"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HM5_then2_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HM5_then2_CompleteLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HM5_then2_CompleteLHS, self).__init__(name='HM5_then2_CompleteLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = []
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'M5_then2')
# Nodes that represent match classes
#Nodes that represent apply classes
# match class System() node
self.add_node()
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["MT_subtypes__"] = []
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__System"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class SoftwareComposition() node
self.add_node()
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_subtypes__"] = []
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__SoftwareComposition"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class SoftwareComposition() node
self.add_node()
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_subtypes__"] = []
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__SoftwareComposition"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# Nodes that represent the match associations of the property.
# Nodes that represent the apply associations of the property.
# apply association System--softwareComposition-->SoftwareComposition node
self.add_node()
self.vs[3]["MT_subtypeMatching__"] = False
self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "softwareComposition"
"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["MT_subtypes__"] = []
self.vs[3]["MT_dirty__"] = False
self.vs[3]["mm__"] = """MT_pre__directLink_T"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc3')
# apply association System--softwareComposition-->SoftwareComposition node
self.add_node()
self.vs[4]["MT_subtypeMatching__"] = False
self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "softwareComposition"
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_subtypes__"] = []
self.vs[4]["MT_dirty__"] = False
self.vs[4]["mm__"] = """MT_pre__directLink_T"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc4')
# Nodes that represent trace relations
# Add the edges
self.add_edges([
(0,3), # apply_class System() -> association softwareComposition
(3,1), # association softwareComposition -> apply_class SoftwareComposition()
(0,4), # apply_class System() -> association softwareComposition
(4,2), # association softwareComposition -> apply_class SoftwareComposition()
])
# Add the attribute equations
self["equations"] = [((0,'pivot'),('constant','ELEMENT1')), ((1,'pivot'),('constant','ELEMENT2')), ]
def eval_attr11(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr12(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr13(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr14(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "softwareComposition"
def eval_attr15(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "softwareComposition"
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
| {
"content_hash": "e2b608ca91e2fdd6df394e32d23b17c0",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 125,
"avg_line_length": 52.58536585365854,
"alnum_prop": 0.4653679653679654,
"repo_name": "levilucio/SyVOLT",
"id": "2a3c40f8f07cbe43e2dcb5af636b99fd4c95d6f3",
"size": "12936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GM2AUTOSAR_MM/Properties/from_eclipse/HM5_then2_CompleteLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import unittest
import logging
from airflow.exceptions import AirflowException
try:
from airflow.operators.docker_operator import DockerOperator
from airflow.hooks.docker_hook import DockerHook
from docker import APIClient
except ImportError:
pass
from tests.compat import mock
class DockerOperatorTestCase(unittest.TestCase):
@mock.patch('airflow.utils.file.mkdtemp')
@mock.patch('airflow.operators.docker_operator.APIClient')
def test_execute(self, client_class_mock, mkdtemp_mock):
host_config = mock.Mock()
mkdtemp_mock.return_value = '/mkdtemp'
client_mock = mock.Mock(spec=APIClient)
client_mock.create_container.return_value = {'Id': 'some_id'}
client_mock.create_host_config.return_value = host_config
client_mock.images.return_value = []
client_mock.attach.return_value = ['container log']
client_mock.logs.return_value = ['container log']
client_mock.pull.return_value = [b'{"status":"pull log"}']
client_mock.wait.return_value = {"StatusCode": 0}
client_class_mock.return_value = client_mock
operator = DockerOperator(api_version='1.19', command='env', environment={'UNIT': 'TEST'},
image='ubuntu:latest', network_mode='bridge', owner='unittest',
task_id='unittest', volumes=['/host/path:/container/path'],
working_dir='/container/path', shm_size=1000,
host_tmp_dir='/host/airflow', container_name='test_container',
tty=True)
operator.execute(None)
client_class_mock.assert_called_with(base_url='unix://var/run/docker.sock', tls=None,
version='1.19')
client_mock.create_container.assert_called_with(command='env',
name='test_container',
environment={
'AIRFLOW_TMP_DIR': '/tmp/airflow',
'UNIT': 'TEST'
},
host_config=host_config,
image='ubuntu:latest',
user=None,
working_dir='/container/path',
tty=True
)
client_mock.create_host_config.assert_called_with(binds=['/host/path:/container/path',
'/mkdtemp:/tmp/airflow'],
network_mode='bridge',
shm_size=1000,
cpu_shares=1024,
mem_limit=None,
auto_remove=False,
dns=None,
dns_search=None)
mkdtemp_mock.assert_called_with(dir='/host/airflow', prefix='airflowtmp', suffix='')
client_mock.images.assert_called_with(name='ubuntu:latest')
client_mock.attach.assert_called_with(container='some_id', stdout=True,
stderr=True, stream=True)
client_mock.pull.assert_called_with('ubuntu:latest', stream=True,
decode=True)
client_mock.wait.assert_called_with('some_id')
@mock.patch('airflow.operators.docker_operator.tls.TLSConfig')
@mock.patch('airflow.operators.docker_operator.APIClient')
def test_execute_tls(self, client_class_mock, tls_class_mock):
client_mock = mock.Mock(spec=APIClient)
client_mock.create_container.return_value = {'Id': 'some_id'}
client_mock.create_host_config.return_value = mock.Mock()
client_mock.images.return_value = []
client_mock.attach.return_value = []
client_mock.pull.return_value = []
client_mock.wait.return_value = {"StatusCode": 0}
client_class_mock.return_value = client_mock
tls_mock = mock.Mock()
tls_class_mock.return_value = tls_mock
operator = DockerOperator(docker_url='tcp://127.0.0.1:2376', image='ubuntu',
owner='unittest', task_id='unittest', tls_client_cert='cert.pem',
tls_ca_cert='ca.pem', tls_client_key='key.pem')
operator.execute(None)
tls_class_mock.assert_called_with(assert_hostname=None, ca_cert='ca.pem',
client_cert=('cert.pem', 'key.pem'),
ssl_version=None, verify=True)
client_class_mock.assert_called_with(base_url='https://127.0.0.1:2376',
tls=tls_mock, version=None)
@mock.patch('airflow.operators.docker_operator.APIClient')
def test_execute_unicode_logs(self, client_class_mock):
client_mock = mock.Mock(spec=APIClient)
client_mock.create_container.return_value = {'Id': 'some_id'}
client_mock.create_host_config.return_value = mock.Mock()
client_mock.images.return_value = []
client_mock.attach.return_value = ['unicode container log 😁']
client_mock.pull.return_value = []
client_mock.wait.return_value = {"StatusCode": 0}
client_class_mock.return_value = client_mock
originalRaiseExceptions = logging.raiseExceptions # pylint: disable=invalid-name
logging.raiseExceptions = True
operator = DockerOperator(image='ubuntu', owner='unittest', task_id='unittest')
with mock.patch('traceback.print_exception') as print_exception_mock:
operator.execute(None)
logging.raiseExceptions = originalRaiseExceptions
print_exception_mock.assert_not_called()
@mock.patch('airflow.operators.docker_operator.APIClient')
def test_execute_container_fails(self, client_class_mock):
client_mock = mock.Mock(spec=APIClient)
client_mock.create_container.return_value = {'Id': 'some_id'}
client_mock.create_host_config.return_value = mock.Mock()
client_mock.images.return_value = []
client_mock.attach.return_value = []
client_mock.pull.return_value = []
client_mock.wait.return_value = {"StatusCode": 1}
client_class_mock.return_value = client_mock
operator = DockerOperator(image='ubuntu', owner='unittest', task_id='unittest')
with self.assertRaises(AirflowException):
operator.execute(None)
@staticmethod
def test_on_kill():
client_mock = mock.Mock(spec=APIClient)
operator = DockerOperator(image='ubuntu', owner='unittest', task_id='unittest')
operator.cli = client_mock
operator.container = {'Id': 'some_id'}
operator.on_kill()
client_mock.stop.assert_called_with('some_id')
@mock.patch('airflow.operators.docker_operator.APIClient')
def test_execute_no_docker_conn_id_no_hook(self, operator_client_mock):
# Mock out a Docker client, so operations don't raise errors
client_mock = mock.Mock(name='DockerOperator.APIClient mock', spec=APIClient)
client_mock.images.return_value = []
client_mock.create_container.return_value = {'Id': 'some_id'}
client_mock.attach.return_value = []
client_mock.pull.return_value = []
client_mock.wait.return_value = {"StatusCode": 0}
operator_client_mock.return_value = client_mock
# Create the DockerOperator
operator = DockerOperator(
image='publicregistry/someimage',
owner='unittest',
task_id='unittest'
)
# Mock out the DockerHook
hook_mock = mock.Mock(name='DockerHook mock', spec=DockerHook)
hook_mock.get_conn.return_value = client_mock
operator.get_hook = mock.Mock(
name='DockerOperator.get_hook mock',
spec=DockerOperator.get_hook,
return_value=hook_mock
)
operator.execute(None)
self.assertEqual(
operator.get_hook.call_count, 0,
'Hook called though no docker_conn_id configured'
)
@mock.patch('airflow.operators.docker_operator.DockerHook')
@mock.patch('airflow.operators.docker_operator.APIClient')
def test_execute_with_docker_conn_id_use_hook(self, operator_client_mock,
operator_docker_hook):
# Mock out a Docker client, so operations don't raise errors
client_mock = mock.Mock(name='DockerOperator.APIClient mock', spec=APIClient)
client_mock.images.return_value = []
client_mock.create_container.return_value = {'Id': 'some_id'}
client_mock.attach.return_value = []
client_mock.pull.return_value = []
client_mock.wait.return_value = {"StatusCode": 0}
operator_client_mock.return_value = client_mock
# Create the DockerOperator
operator = DockerOperator(
image='publicregistry/someimage',
owner='unittest',
task_id='unittest',
docker_conn_id='some_conn_id'
)
# Mock out the DockerHook
hook_mock = mock.Mock(name='DockerHook mock', spec=DockerHook)
hook_mock.get_conn.return_value = client_mock
operator_docker_hook.return_value = hook_mock
operator.execute(None)
self.assertEqual(
operator_client_mock.call_count, 0,
'Client was called on the operator instead of the hook'
)
self.assertEqual(
operator_docker_hook.call_count, 1,
'Hook was not called although docker_conn_id configured'
)
self.assertEqual(
client_mock.pull.call_count, 1,
'Image was not pulled using operator client'
)
@mock.patch('airflow.operators.docker_operator.TemporaryDirectory')
@mock.patch('airflow.operators.docker_operator.APIClient')
def test_execute_xcom_behavior(self, client_class_mock, tempdir_mock):
tempdir_mock.return_value.__enter__.return_value = '/mkdtemp'
client_mock = mock.Mock(spec=APIClient)
client_mock.images.return_value = []
client_mock.create_container.return_value = {'Id': 'some_id'}
client_mock.attach.return_value = ['container log']
client_mock.pull.return_value = [b'{"status":"pull log"}']
client_mock.wait.return_value = {"StatusCode": 0}
client_class_mock.return_value = client_mock
kwargs = {
'api_version': '1.19',
'command': 'env',
'environment': {'UNIT': 'TEST'},
'image': 'ubuntu:latest',
'network_mode': 'bridge',
'owner': 'unittest',
'task_id': 'unittest',
'volumes': ['/host/path:/container/path'],
'working_dir': '/container/path',
'shm_size': 1000,
'host_tmp_dir': '/host/airflow',
'container_name': 'test_container',
'tty': True,
}
xcom_push_operator = DockerOperator(xcom_push=True, **kwargs)
no_xcom_push_operator = DockerOperator(xcom_push=False, **kwargs)
xcom_push_result = xcom_push_operator.execute(None)
no_xcom_push_result = no_xcom_push_operator.execute(None)
self.assertEqual(xcom_push_result, 'container log')
self.assertIs(no_xcom_push_result, None)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "162d740615839d050cff56e885802a1f",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 99,
"avg_line_length": 45.28947368421053,
"alnum_prop": 0.5588943305387233,
"repo_name": "owlabs/incubator-airflow",
"id": "ca4709ab9b1aace9d3ab53f61ffe140cf33f24f2",
"size": "12862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/operators/test_docker_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57045"
},
{
"name": "HTML",
"bytes": "147187"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1647566"
},
{
"name": "Shell",
"bytes": "18823"
}
],
"symlink_target": ""
} |
from ._service_operations import ServiceOperations
from ._file_system_operations import FileSystemOperations
from ._path_operations import PathOperations
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"ServiceOperations",
"FileSystemOperations",
"PathOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| {
"content_hash": "a855317f6b8976de2c09accfa3b8170b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 32.2,
"alnum_prop": 0.7204968944099379,
"repo_name": "Azure/azure-sdk-for-python",
"id": "1ecb9f322a1b7bad4dadc1b00b4f4b170d54e4dd",
"size": "951",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import os
import sys
import shutil
import tempfile
import subprocess
from distutils.core import setup, Command
from distutils.dir_util import remove_tree
MODULE_NAME = "binwalk"
SCRIPT_NAME = MODULE_NAME
# Python2/3 compliance
try:
raw_input
except NameError:
raw_input = input
# cd into the src directory, no matter where setup.py was invoked from
#os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), "src"))
def which(command):
# /usr/local/bin is usually the default install path, though it may not be in $PATH
usr_local_bin = os.path.sep.join([os.path.sep, 'usr', 'local', 'bin', command])
try:
location = subprocess.Popen(["which", command], shell=False, stdout=subprocess.PIPE).communicate()[0].strip()
except KeyboardInterrupt as e:
raise e
except Exception as e:
pass
if not location and os.path.exists(usr_local_bin):
location = usr_local_bin
return location
def find_binwalk_module_paths():
paths = []
try:
import binwalk
paths = binwalk.__path__
except KeyboardInterrupt as e:
raise e
except Exception:
pass
return paths
def remove_binwalk_module(pydir=None, pybin=None):
if pydir:
module_paths = [pydir]
else:
module_paths = find_binwalk_module_paths()
for path in module_paths:
try:
remove_tree(path)
except OSError as e:
pass
if not pybin:
pybin = which(MODULE_NAME)
if pybin:
try:
sys.stdout.write("removing '%s'\n" % pybin)
os.remove(pybin)
except KeyboardInterrupt as e:
pass
except Exception as e:
pass
class IDAUnInstallCommand(Command):
description = "Uninstalls the binwalk IDA plugin module"
user_options = [
('idadir=', None, 'Specify the path to your IDA install directory.'),
]
def initialize_options(self):
self.idadir = None
self.mydir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "src")
def finalize_options(self):
pass
def run(self):
if self.idadir is None:
sys.stderr.write("Please specify the path to your IDA install directory with the '--idadir' option!\n")
return
binida_dst_path = os.path.join(self.idadir, 'plugins', 'binida.py')
binwalk_dst_path = os.path.join(self.idadir, 'python', 'binwalk')
if os.path.exists(binida_dst_path):
sys.stdout.write("removing %s\n" % binida_dst_path)
os.remove(binida_dst_path)
if os.path.exists(binwalk_dst_path):
sys.stdout.write("removing %s\n" % binwalk_dst_path)
shutil.rmtree(binwalk_dst_path)
class IDAInstallCommand(Command):
description = "Installs the binwalk IDA plugin module"
user_options = [
('idadir=', None, 'Specify the path to your IDA install directory.'),
]
def initialize_options(self):
self.idadir = None
self.mydir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "src")
def finalize_options(self):
pass
def run(self):
if self.idadir is None:
sys.stderr.write("Please specify the path to your IDA install directory with the '--idadir' option!\n")
return
binida_src_path = os.path.join(self.mydir, 'scripts', 'binida.py')
binida_dst_path = os.path.join(self.idadir, 'plugins')
if not os.path.exists(binida_src_path):
sys.stderr.write("ERROR: could not locate IDA plugin file '%s'!\n" % binida_src_path)
return
if not os.path.exists(binida_dst_path):
sys.stderr.write("ERROR: could not locate the IDA plugins directory '%s'! Check your --idadir option.\n" % binida_dst_path)
return
binwalk_src_path = os.path.join(self.mydir, 'binwalk')
binwalk_dst_path = os.path.join(self.idadir, 'python')
if not os.path.exists(binwalk_src_path):
sys.stderr.write("ERROR: could not locate binwalk source directory '%s'!\n" % binwalk_src_path)
return
if not os.path.exists(binwalk_dst_path):
sys.stderr.write("ERROR: could not locate the IDA python directory '%s'! Check your --idadir option.\n" % binwalk_dst_path)
return
binida_dst_path = os.path.join(binida_dst_path, 'binida.py')
binwalk_dst_path = os.path.join(binwalk_dst_path, 'binwalk')
if os.path.exists(binida_dst_path):
os.remove(binida_dst_path)
if os.path.exists(binwalk_dst_path):
shutil.rmtree(binwalk_dst_path)
sys.stdout.write("copying %s -> %s\n" % (binida_src_path, binida_dst_path))
shutil.copyfile(binida_src_path, binida_dst_path)
sys.stdout.write("copying %s -> %s\n" % (binwalk_src_path, binwalk_dst_path))
shutil.copytree(binwalk_src_path, binwalk_dst_path)
class UninstallCommand(Command):
description = "Uninstalls the Python module"
user_options = [
('pydir=', None, 'Specify the path to the binwalk python module to be removed.'),
('pybin=', None, 'Specify the path to the binwalk executable to be removed.'),
]
def initialize_options(self):
self.pydir = None
self.pybin = None
def finalize_options(self):
pass
def run(self):
remove_binwalk_module(self.pydir, self.pybin)
class CleanCommand(Command):
description = "Clean Python build directories"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
remove_tree("build")
except KeyboardInterrupt as e:
raise e
except Exception:
pass
try:
remove_tree("dist")
except KeyboardInterrupt as e:
raise e
except Exception:
pass
# The data files to install along with the module
install_data_files = []
for data_dir in ["magic", "config", "plugins", "modules", "core"]:
install_data_files.append("%s%s*" % (data_dir, os.path.sep))
# Install the module, script, and support files
setup(name = MODULE_NAME,
version = "2.1.2b",
description = "Firmware analysis tool",
author = "Craig Heffner",
url = "https://github.com/devttys0/%s" % MODULE_NAME,
requires = [],
package_dir = {"" : "src"},
packages = [MODULE_NAME],
package_data = {MODULE_NAME : install_data_files},
scripts = [os.path.join("src", "scripts", SCRIPT_NAME)],
cmdclass = {'clean' : CleanCommand, 'uninstall' : UninstallCommand, 'idainstall' : IDAInstallCommand, 'idauninstall' : IDAUnInstallCommand}
)
| {
"content_hash": "fa6f606731646ee835e5f4dac1a6a1d7",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 145,
"avg_line_length": 31.59259259259259,
"alnum_prop": 0.6131301289566237,
"repo_name": "cappiewu/binwalk",
"id": "6673f36e3ae2387c17882a57c95116227c833b49",
"size": "6846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "283820"
},
{
"name": "Shell",
"bytes": "4763"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from pdbuddy.matchers.base import BaseMatcher
class AnyMatcher(BaseMatcher):
"""A Matcher that matches any trace"""
def __call__(self, context):
return True
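    # Hedged note (not in the original file): because __call__ ignores the
    # context it is given, AnyMatcher()(context=object()) evaluates to True
    # for every trace context.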
| {
"content_hash": "d50471457eace04378cf2cd6ec1b3285",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 45,
"avg_line_length": 21.6,
"alnum_prop": 0.6990740740740741,
"repo_name": "emou/pdbuddy",
"id": "32ef61f18b9a125e4969fa900314c5a96f0b937b",
"size": "216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdbuddy/matchers/match_any.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17288"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
} |
import pytest
from f5_openstack_agent.lbaasv2.drivers.bigip.esd_filehandler import \
EsdTagProcessor
@pytest.fixture(scope='session')
def bigips(mgmt_root):
return [mgmt_root]
class TestEsd(object):
def test_invalid_esd_name(self, bigips):
processor = EsdTagProcessor('tests/functional/esd/json/')
processor.process_esd(bigips)
# validate that invalid ESD name is handled correctly
assert processor.get_esd('abc') is None
def test_valid_esd_name(self, bigips):
processor = EsdTagProcessor('tests/functional/esd/json/')
processor.process_esd(bigips)
# app_type_1 should be valid
app_type_1 = processor.get_esd('app_type_1')
assert app_type_1 is not None
assert 'lbaas_cssl_profile' in app_type_1
def test_invalid_tag_value(self, bigips):
processor = EsdTagProcessor('tests/functional/esd/json/')
processor.process_esd(bigips)
# app_type_2 only has one tag with an invalid value; should not be
# in final set of ESDs
assert processor.get_esd('app_type_2') is None
def test_invalid_tag_name(self, bigips):
processor = EsdTagProcessor('tests/functional/esd/json/')
processor.process_esd(bigips)
# app_type_4 has a mix of both valid and invalid tag values
app_type_4 = processor.get_esd('app_type_4')
assert app_type_4 is not None
# invalid tag name
assert 'lbaas_invalid_tag_name' not in app_type_4
def test_valid_tag_values(self, bigips):
processor = EsdTagProcessor('tests/functional/esd/json/')
processor.process_esd(bigips)
# app_type_4 has a mix of both valid and invalid tag values
app_type_4 = processor.get_esd('app_type_4')
assert app_type_4 is not None
# valid tag value
assert 'lbaas_sssl_profile' in app_type_4
assert 'lbaas_cssl_chain_cert' in app_type_4
| {
"content_hash": "23708eea614397c8695861d703bd5a85",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 74,
"avg_line_length": 33.58620689655172,
"alnum_prop": 0.6570841889117043,
"repo_name": "richbrowne/f5-openstack-agent",
"id": "62d5c8c11a5123338a6c5a610b18c6ddee67d006",
"size": "2545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f5_openstack_agent/tests/functional/esd/test_esd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "853"
},
{
"name": "Python",
"bytes": "1388158"
},
{
"name": "Ruby",
"bytes": "78"
},
{
"name": "Shell",
"bytes": "20290"
}
],
"symlink_target": ""
} |
from .function_base import *
from .stride_tricks import *
from .shape_base import *
from .scimath import *
from .type_check import *
| {
"content_hash": "9358824a721b5166dd4dd9b265b842a8",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 28,
"avg_line_length": 26.6,
"alnum_prop": 0.7443609022556391,
"repo_name": "daurer/afnumpy",
"id": "cbbfa18c83824f398e810c51de463d64a733e1dc",
"size": "133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "afnumpy/lib/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "104"
},
{
"name": "Python",
"bytes": "145243"
}
],
"symlink_target": ""
} |
from mcinfo import cli, normal_info, nbt, recipes
__all__ = ["cli", "normal_info", "nbt", "recipes"]
| {
"content_hash": "d761c464d156264aa2efff3b63370ecc",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 50,
"avg_line_length": 34,
"alnum_prop": 0.6372549019607843,
"repo_name": "randomdude999/mcinfo",
"id": "70a7c3bb1ef7e4791df77bff38a16e9ea90df0cf",
"size": "102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcinfo/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27351"
}
],
"symlink_target": ""
} |
import datetime
from django.contrib.auth import get_user_model
from factory import DjangoModelFactory, Sequence, \
SubFactory, fuzzy, PostGenerationMethodCall, post_generation, compat
from arctic.models import Role, UserRole
from articles.models import Article, Category, Tag
class CategoryFactory(DjangoModelFactory):
name = Sequence(lambda n: 'Category {}'.format(n))
class Meta:
model = Category
class TagFactory(DjangoModelFactory):
term = Sequence(lambda n: 'Tag {}'.format(n))
class Meta:
model = Tag
class ArticleFactory(DjangoModelFactory):
title = Sequence(lambda n: 'Article {}'.format(n))
category = SubFactory(CategoryFactory)
updated_at = fuzzy.FuzzyDateTime(datetime.datetime(2016, 1, 1,
tzinfo=compat.UTC))
class Meta:
model = Article
class UserFactory(DjangoModelFactory):
email = Sequence(lambda n: 'user{}@test.com'.format(n))
username = Sequence(lambda n: 'user{}'.format(n))
password = PostGenerationMethodCall('set_password', 'password')
is_active = True
@post_generation
def set_urole(self, create, *args, **kwargs):
if create:
UserRole.objects.create(
user=self, role=Role.objects.get(name='editor'))
class Meta:
model = get_user_model()
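# Usage sketch (comments only, not part of the original file). Assumes a Django
# test database and that a Role named 'editor' already exists, as required by
# UserFactory.set_urole above.
#
#   article = ArticleFactory()               # also creates a Category via SubFactory
#   article = ArticleFactory(title='Fixed')  # declared attributes can be overridden
#   user = UserFactory()                     # password is set to 'password'
#   user.check_password('password')          # -> True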
| {
"content_hash": "ec8609771d4dea4a23f9c77d1495ce85",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 74,
"avg_line_length": 28.395833333333332,
"alnum_prop": 0.6588407923697726,
"repo_name": "dgbc/django-arctic",
"id": "4a46dc38677656e0003419d2bc84a4769d0b3427",
"size": "1363",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/factories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65213"
},
{
"name": "HTML",
"bytes": "59271"
},
{
"name": "JavaScript",
"bytes": "32924"
},
{
"name": "Python",
"bytes": "152886"
}
],
"symlink_target": ""
} |
"""
Handles all requests relating to volumes.
"""
import collections
import datetime
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from cinder import context
from cinder.db import base
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import glance
from cinder import keymgr
from cinder import objects
from cinder.objects import base as objects_base
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import utils
from cinder.volume.flows.api import create_volume
from cinder.volume import qos_specs
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
allow_force_upload = cfg.BoolOpt('enable_force_upload',
default=False,
help='Enables the Force option on '
'upload_to_image. This enables '
'running upload_volume on in-use '
'volumes for backends that support it.')
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
default=True,
help='Create volume from snapshot at the host '
'where snapshot resides')
volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az',
default=True,
help='Ensure that the new volumes are the '
'same AZ as snapshot or source volume')
az_cache_time_opt = cfg.IntOpt('az_cache_duration',
default=3600,
help='Cache volume availability zones in '
'memory for the provided duration in '
'seconds')
CONF = cfg.CONF
CONF.register_opt(allow_force_upload)
CONF.register_opt(volume_host_opt)
CONF.register_opt(volume_same_az_opt)
CONF.register_opt(az_cache_time_opt)
CONF.import_opt('glance_core_properties', 'cinder.image.glance')
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution
This decorator requires the first 3 args of the wrapped function
to be (self, context, volume)
"""
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
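# Illustrative expansion (comments only, not part of the original module): for a
# decorated API method, the method name becomes the policy action enforced by
# check_policy() below, e.g. API.delete is checked as 'volume:delete'.
#
#   @wrap_check_policy
#   def delete(self, context, volume, ...):
#       ...
#   # behaves roughly like:
#   def delete(self, context, volume, ...):
#       check_policy(context, 'delete', volume)   # enforces 'volume:delete'
#       ...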
def check_policy(context, action, target_obj=None):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
if isinstance(target_obj, objects_base.CinderObject):
# Turn object into dict so target.update can work
target.update(objects_base.obj_to_primitive(target_obj) or {})
else:
target.update(target_obj or {})
_action = 'volume:%s' % action
cinder.policy.enforce(context, _action, target)
class API(base.Base):
"""API for interacting with the volume manager."""
def __init__(self, db_driver=None, image_service=None):
self.image_service = (image_service or
glance.get_default_image_service())
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
self.availability_zones = []
self.availability_zones_last_fetched = None
self.key_manager = keymgr.API()
super(API, self).__init__(db_driver)
def list_availability_zones(self, enable_cache=False):
"""Describe the known availability zones
:retval tuple of dicts, each with a 'name' and 'available' key
"""
refresh_cache = False
if enable_cache:
if self.availability_zones_last_fetched is None:
refresh_cache = True
else:
cache_age = timeutils.delta_seconds(
self.availability_zones_last_fetched,
timeutils.utcnow())
if cache_age >= CONF.az_cache_duration:
refresh_cache = True
if refresh_cache or not enable_cache:
topic = CONF.volume_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
az_data = [(s['availability_zone'], s['disabled'])
for s in services]
disabled_map = {}
for (az_name, disabled) in az_data:
tracked_disabled = disabled_map.get(az_name, True)
disabled_map[az_name] = tracked_disabled and disabled
azs = [{'name': name, 'available': not disabled}
for (name, disabled) in disabled_map.items()]
if refresh_cache:
now = timeutils.utcnow()
self.availability_zones = azs
self.availability_zones_last_fetched = now
LOG.debug("Availability zone cache updated, next update will"
" occur around %s.", now + datetime.timedelta(
seconds=CONF.az_cache_duration))
else:
azs = self.availability_zones
LOG.info(_LI("Availability Zones retrieved successfully."))
return tuple(azs)
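    # Shape of the value returned above (hypothetical zone names, comments
    # only):
    #
    #   ({'name': 'nova', 'available': True},
    #    {'name': 'zone-2', 'available': False})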
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None, source_volume=None,
scheduler_hints=None,
source_replica=None, consistencygroup=None,
cgsnapshot=None, multiattach=False):
# NOTE(jdg): we can have a create without size if we're
# doing a create from snap or volume. Currently
# the taskflow api will handle this and pull in the
# size from the source.
# NOTE(jdg): cinderclient sends in a string representation
# of the size value. BUT there is a possibility that somebody
# could call the API directly so the is_int_like check
# handles both cases (string representation of true float or int).
if size and (not utils.is_int_like(size) or int(size) <= 0):
msg = _('Invalid volume size provided for create request: %s '
'(size argument must be an integer (or string '
'representation of an integer) and greater '
'than zero).') % size
raise exception.InvalidInput(reason=msg)
if consistencygroup and not cgsnapshot:
if not volume_type:
msg = _("volume_type must be provided when creating "
"a volume in a consistency group.")
raise exception.InvalidInput(reason=msg)
cg_voltypeids = consistencygroup.get('volume_type_id')
if volume_type.get('id') not in cg_voltypeids:
msg = _("Invalid volume_type provided: %s (requested "
"type must be supported by this consistency "
"group).") % volume_type
raise exception.InvalidInput(reason=msg)
if source_volume and volume_type:
if volume_type['id'] != source_volume['volume_type_id']:
msg = _("Invalid volume_type provided: %s (requested type "
"must match source volume, "
"or be omitted).") % volume_type
raise exception.InvalidInput(reason=msg)
# When cloning replica (for testing), volume type must be omitted
if source_replica and volume_type:
msg = _("No volume_type should be provided when creating test "
"replica.")
raise exception.InvalidInput(reason=msg)
if snapshot and volume_type:
if volume_type['id'] != snapshot['volume_type_id']:
msg = _("Invalid volume_type provided: %s (requested "
"type must match source snapshot, or be "
"omitted).") % volume_type
raise exception.InvalidInput(reason=msg)
# Determine the valid availability zones that the volume could be
# created in (a task in the flow will/can use this information to
# ensure that the availability zone requested is valid).
raw_zones = self.list_availability_zones(enable_cache=True)
availability_zones = set([az['name'] for az in raw_zones])
if CONF.storage_availability_zone:
availability_zones.add(CONF.storage_availability_zone)
create_what = {
'context': context,
'raw_size': size,
'name': name,
'description': description,
'snapshot': snapshot,
'image_id': image_id,
'raw_volume_type': volume_type,
'metadata': metadata,
'raw_availability_zone': availability_zone,
'source_volume': source_volume,
'scheduler_hints': scheduler_hints,
'key_manager': self.key_manager,
'source_replica': source_replica,
'optional_args': {'is_quota_committed': False},
'consistencygroup': consistencygroup,
'cgsnapshot': cgsnapshot,
'multiattach': multiattach,
}
try:
if cgsnapshot:
flow_engine = create_volume.get_flow_no_rpc(self.db,
self.image_service,
availability_zones,
create_what)
else:
flow_engine = create_volume.get_flow(self.scheduler_rpcapi,
self.volume_rpcapi,
self.db,
self.image_service,
availability_zones,
create_what)
except Exception:
msg = _('Failed to create api volume flow.')
LOG.exception(msg)
raise exception.CinderException(msg)
# Attaching this listener will capture all of the notifications that
# taskflow sends out and redirect them to a more useful log for
# cinders debugging (or error reporting) usage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
vref = flow_engine.storage.fetch('volume')
LOG.info(_LI("Volume created successfully."), resource=vref)
return vref
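    # Call sketch (comments only, hypothetical values): creating a 10 GB volume.
    # As noted above, `size` may be an int or a string representation of one.
    #
    #   volume_api = API()
    #   vol = volume_api.create(ctxt, 10, 'my-volume', 'a description',
    #                           availability_zone='nova')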
@wrap_check_policy
def delete(self, context, volume, force=False, unmanage_only=False):
if context.is_admin and context.project_id != volume['project_id']:
project_id = volume['project_id']
else:
project_id = context.project_id
volume_id = volume['id']
if not volume['host']:
volume_utils.notify_about_volume_usage(context,
volume, "delete.start")
# NOTE(vish): scheduling failed, so delete it
# Note(zhiteng): update volume quota reservation
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume['volume_type_id'])
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update quota while "
"deleting volume."))
self.db.volume_destroy(context.elevated(), volume_id)
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
volume_utils.notify_about_volume_usage(context,
volume, "delete.end")
LOG.info(_LI("Delete volume request issued successfully."),
resource={'type': 'volume',
'id': volume_id})
return
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
LOG.info(_LI('Unable to delete volume: %s, '
'volume is attached.'), volume['id'])
raise exception.VolumeAttached(volume_id=volume_id)
if not force and volume['status'] not in ["available", "error",
"error_restoring",
"error_extending"]:
msg = _("Volume status must be available or error, "
"but current status is: %s.") % volume['status']
LOG.info(_LI('Unable to delete volume: %(vol_id)s, '
'volume must be available or '
'error, but is %(vol_status)s.'),
{'vol_id': volume['id'],
'vol_status': volume['status']})
raise exception.InvalidVolume(reason=msg)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
LOG.info(_LI('Unable to delete volume: %s, '
'volume is currently migrating.'), volume['id'])
msg = _("Volume cannot be deleted while migrating")
raise exception.InvalidVolume(reason=msg)
if volume['consistencygroup_id'] is not None:
msg = _("Volume cannot be deleted while in a consistency group.")
LOG.info(_LI('Unable to delete volume: %s, '
'volume is currently part of a '
'consistency group.'), volume['id'])
raise exception.InvalidVolume(reason=msg)
snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
if len(snapshots):
LOG.info(_LI('Unable to delete volume: %s, '
'volume currently has snapshots.'), volume['id'])
msg = _("Volume still has %d dependent "
"snapshots.") % len(snapshots)
raise exception.InvalidVolume(reason=msg)
# If the volume is encrypted, delete its encryption key from the key
# manager. This operation makes volume deletion an irreversible process
# because the volume cannot be decrypted without its key.
encryption_key_id = volume.get('encryption_key_id', None)
if encryption_key_id is not None:
self.key_manager.delete_key(context, encryption_key_id)
now = timeutils.utcnow()
vref = self.db.volume_update(context,
volume_id,
{'status': 'deleting',
'terminated_at': now})
self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
LOG.info(_LI("Delete volume request issued successfully."),
resource=vref)
@wrap_check_policy
def update(self, context, volume, fields):
vref = self.db.volume_update(context, volume['id'], fields)
LOG.info(_LI("Volume updated successfully."), resource=vref)
def get(self, context, volume_id, viewable_admin_meta=False):
if viewable_admin_meta:
ctxt = context.elevated()
else:
ctxt = context
rv = self.db.volume_get(ctxt, volume_id)
volume = dict(rv.iteritems())
try:
check_policy(context, 'get', volume)
except exception.PolicyNotAuthorized:
# raise VolumeNotFound instead to make sure Cinder behaves
# as it used to
raise exception.VolumeNotFound(volume_id=volume_id)
LOG.info(_LI("Volume info retrieved successfully."), resource=rv)
return volume
def _get_all_tenants_value(self, filters):
"""Returns a Boolean for the value of filters['all_tenants'].
False is returned if 'all_tenants' is not in the filters dictionary.
An InvalidInput exception is thrown for invalid values.
"""
b = False
if 'all_tenants' in filters:
val = six.text_type(filters['all_tenants']).lower()
if val in ['true', '1']:
b = True
elif val in ['false', '0']:
b = False
else:
msg = _('all_tenants param must be 0 or 1')
raise exception.InvalidInput(reason=msg)
return b
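    # Behaviour of the helper above, illustrated (comments only):
    #
    #   {'all_tenants': '1'}     -> True
    #   {'all_tenants': 'false'} -> False
    #   {}                       -> False (key absent)
    #   {'all_tenants': 'maybe'} -> raises exception.InvalidInput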
def get_all(self, context, marker=None, limit=None, sort_keys=None,
sort_dirs=None, filters=None, viewable_admin_meta=False):
check_policy(context, 'get_all')
if filters is None:
filters = {}
allTenants = self._get_all_tenants_value(filters)
try:
if limit is not None:
limit = int(limit)
if limit < 0:
msg = _('limit param must be positive')
raise exception.InvalidInput(reason=msg)
except ValueError:
msg = _('limit param must be an integer')
raise exception.InvalidInput(reason=msg)
# Non-admin shouldn't see temporary target of a volume migration, add
# unique filter data to reflect that only volumes with a NULL
# 'migration_status' or a 'migration_status' that does not start with
# 'target:' should be returned (processed in db/sqlalchemy/api.py)
if not context.is_admin:
filters['no_migration_targets'] = True
if filters:
LOG.debug("Searching by: %s.", six.text_type(filters))
if context.is_admin and allTenants:
# Need to remove all_tenants to pass the filtering below.
del filters['all_tenants']
volumes = self.db.volume_get_all(context, marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters)
else:
if viewable_admin_meta:
context = context.elevated()
volumes = self.db.volume_get_all_by_project(context,
context.project_id,
marker, limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
filters=filters)
LOG.info(_LI("Get all volumes completed successfully."))
return volumes
def get_snapshot(self, context, snapshot_id):
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
# FIXME(jdg): The objects don't have the db name entries
# so build the resource tag manually for now.
LOG.info(_LI("Snapshot retrieved successfully."),
resource={'type': 'snapshot',
'id': snapshot['id']})
return snapshot
def get_volume(self, context, volume_id):
check_policy(context, 'get_volume')
vref = self.db.volume_get(context, volume_id)
LOG.info(_LI("Volume retrieved successfully."), resource=vref)
return dict(vref.iteritems())
def get_all_snapshots(self, context, search_opts=None):
check_policy(context, 'get_all_snapshots')
search_opts = search_opts or {}
if (context.is_admin and 'all_tenants' in search_opts):
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
snapshots = self.db.snapshot_get_all(context)
else:
snapshots = self.db.snapshot_get_all_by_project(
context, context.project_id)
if search_opts:
LOG.debug("Searching by: %s", search_opts)
results = []
not_found = object()
for snapshot in snapshots:
for opt, value in search_opts.iteritems():
if snapshot.get(opt, not_found) != value:
break
else:
results.append(snapshot)
snapshots = results
LOG.info(_LI("Get all snaphsots completed successfully."))
return snapshots
@wrap_check_policy
def reserve_volume(self, context, volume):
# NOTE(jdg): check for Race condition bug 1096983
# explicitly get updated ref and check
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'available':
self.update(context, volume, {"status": "attaching"})
elif volume['status'] == 'in-use':
if volume['multiattach']:
self.update(context, volume, {"status": "attaching"})
else:
msg = _("Volume must be multiattachable to reserve again.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
else:
msg = _("Volume status must be available to reserve.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Reserve volume completed successfully."),
resource=volume)
@wrap_check_policy
def unreserve_volume(self, context, volume):
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'attaching':
attaches = self.db.volume_attachment_get_used_by_volume_id(
context, volume['id'])
if attaches:
self.update(context, volume, {"status": "in-use"})
else:
self.update(context, volume, {"status": "available"})
LOG.info(_LI("Unreserve volume completed successfully."),
resource=volume)
@wrap_check_policy
def begin_detaching(self, context, volume):
# If we are in the middle of a volume migration, we don't want the user
# to see that the volume is 'detaching'. Having 'migration_status' set
# will have the same effect internally.
if volume['migration_status']:
return
if (volume['status'] != 'in-use' or
volume['attach_status'] != 'attached'):
msg = (_("Unable to detach volume. Volume status must be 'in-use' "
"and attach_status must be 'attached' to detach. "
"Currently: status: '%(status)s', "
"attach_status: '%(attach_status)s.'") %
{'status': volume['status'],
'attach_status': volume['attach_status']})
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
self.update(context, volume, {"status": "detaching"})
LOG.info(_LI("Begin detaching volume completed successfully."),
resource=volume)
@wrap_check_policy
def roll_detaching(self, context, volume):
if volume['status'] == "detaching":
self.update(context, volume, {"status": "in-use"})
LOG.info(_LI("Roll detaching of volume completed successfully."),
resource=volume)
@wrap_check_policy
def attach(self, context, volume, instance_uuid, host_name,
mountpoint, mode):
volume_metadata = self.get_volume_admin_metadata(context.elevated(),
volume)
if 'readonly' not in volume_metadata:
# NOTE(zhiyan): set a default value for read-only flag to metadata.
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': 'False'})
volume_metadata['readonly'] = 'False'
if volume_metadata['readonly'] == 'True' and mode != 'ro':
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume['id'])
attach_results = self.volume_rpcapi.attach_volume(context,
volume,
instance_uuid,
host_name,
mountpoint,
mode)
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return attach_results
@wrap_check_policy
def detach(self, context, volume, attachment_id):
detach_results = self.volume_rpcapi.detach_volume(context, volume,
attachment_id)
LOG.info(_LI("Detach volume completed successfully."),
resource=volume)
return detach_results
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
init_results = self.volume_rpcapi.initialize_connection(context,
volume,
connector)
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return init_results
@wrap_check_policy
def terminate_connection(self, context, volume, connector, force=False):
self.unreserve_volume(context, volume)
results = self.volume_rpcapi.terminate_connection(context,
volume,
connector,
force)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume)
return results
@wrap_check_policy
def accept_transfer(self, context, volume, new_user, new_project):
results = self.volume_rpcapi.accept_transfer(context,
volume,
new_user,
new_project)
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume)
return results
def _create_snapshot(self, context,
volume, name, description,
force=False, metadata=None,
cgsnapshot_id=None):
snapshot = self.create_snapshot_in_db(
context, volume, name,
description, force, metadata, cgsnapshot_id)
self.volume_rpcapi.create_snapshot(context, volume, snapshot)
return snapshot
def create_snapshot_in_db(self, context,
volume, name, description,
force, metadata,
cgsnapshot_id):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating.")
raise exception.InvalidVolume(reason=msg)
if volume['status'].startswith('replica_'):
# Can't snapshot secondary replica
msg = _("Snapshot of secondary replica is not allowed.")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("Volume %(vol_id)s status must be available, "
"but current status is: "
"%(vol_status)s.") % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context, **reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed).")
LOG.warn(msg, {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'snapshots' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed).")
LOG.warn(msg, {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
self._check_metadata_properties(metadata)
snapshot = None
try:
kwargs = {
'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id'],
'metadata': metadata or {}
}
snapshot = objects.Snapshot(context=context, **kwargs)
snapshot.create()
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
if hasattr(snapshot, 'id'):
snapshot.destroy()
finally:
QUOTAS.rollback(context, reservations)
return snapshot
def create_snapshots_in_db(self, context,
volume_list,
name, description,
force, cgsnapshot_id):
snapshot_list = []
for volume in volume_list:
self._create_snapshot_in_db_validate(context, volume, force)
reservations = self._create_snapshots_in_db_reserve(
context, volume_list)
options_list = []
for volume in volume_list:
options = self._create_snapshot_in_db_options(
context, volume, name, description, cgsnapshot_id)
options_list.append(options)
try:
for options in options_list:
snapshot = self.db.snapshot_create(context, options)
snapshot_list.append(snapshot)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
for snap in snapshot_list:
self.db.snapshot_destroy(context, snap['id'])
finally:
QUOTAS.rollback(context, reservations)
return snapshot_list
def _create_snapshot_in_db_validate(self, context, volume, force):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating.")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("Snapshot cannot be created because volume %(vol_id)s "
"is not available, current volume status: "
"%(vol_status)s.") % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
def _create_snapshots_in_db_reserve(self, context, volume_list):
reserve_opts_list = []
total_reserve_opts = {}
try:
for volume in volume_list:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1,
'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reserve_opts_list.append(reserve_opts)
for reserve_opts in reserve_opts_list:
for (key, value) in reserve_opts.items():
if key not in total_reserve_opts.keys():
total_reserve_opts[key] = value
else:
total_reserve_opts[key] = \
total_reserve_opts[key] + value
reservations = QUOTAS.reserve(context, **total_reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'snapshots' in over:
msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed).")
LOG.warning(msg, {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
return reservations
def _create_snapshot_in_db_options(self, context, volume,
name, description,
cgsnapshot_id):
options = {'volume_id': volume['id'],
'cgsnapshot_id': cgsnapshot_id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id']}
return options
def create_snapshot(self, context,
volume, name, description,
metadata=None, cgsnapshot_id=None):
result = self._create_snapshot(context, volume, name, description,
False, metadata, cgsnapshot_id)
LOG.info(_LI("Snapshot create request issued successfully."),
resource=result)
return result
def create_snapshot_force(self, context,
volume, name,
description, metadata=None):
result = self._create_snapshot(context, volume, name, description,
True, metadata)
LOG.info(_LI("Snapshot force create request issued successfully."),
resource=result)
return result
@wrap_check_policy
def delete_snapshot(self, context, snapshot, force=False):
if not force and snapshot['status'] not in ["available", "error"]:
LOG.error(_LE('Unable to delete snapshot: %(snap_id)s, '
'due to invalid status. '
'Status must be available or '
'error, not %(snap_status)s.'),
{'snap_id': snapshot['id'],
'snap_status': snapshot['status']})
msg = _("Volume Snapshot status must be available or error.")
raise exception.InvalidSnapshot(reason=msg)
cgsnapshot_id = snapshot.get('cgsnapshot_id', None)
if cgsnapshot_id:
msg = _('Unable to delete snapshot %s because it is part of a '
'consistency group.') % snapshot['id']
LOG.error(msg)
raise exception.InvalidSnapshot(reason=msg)
snapshot_obj = self.get_snapshot(context, snapshot['id'])
snapshot_obj.status = 'deleting'
snapshot_obj.save(context)
volume = self.db.volume_get(context, snapshot_obj.volume_id)
self.volume_rpcapi.delete_snapshot(context, snapshot_obj,
volume['host'])
LOG.info(_LI("Snapshot delete request issued successfully."),
resource=snapshot)
@wrap_check_policy
def update_snapshot(self, context, snapshot, fields):
snapshot.update(fields)
snapshot.save(context)
@wrap_check_policy
def get_volume_metadata(self, context, volume):
"""Get all metadata associated with a volume."""
rv = self.db.volume_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume metadata completed successfully."),
resource=volume)
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_metadata(self, context, volume, key):
"""Delete the given metadata item from a volume."""
self.db.volume_metadata_delete(context, volume['id'], key)
LOG.info(_LI("Delete volume metadata completed successfully."),
resource=volume)
def _check_metadata_properties(self, metadata=None):
if not metadata:
metadata = {}
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank.")
LOG.warn(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters.")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters.")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
@wrap_check_policy
def update_volume_metadata(self, context, volume, metadata, delete=False):
"""Updates or creates volume metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.volume_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update volume metadata completed successfully."),
resource=volume)
return db_meta
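    # Merge vs. replace semantics of the method above, with hypothetical values
    # (comments only):
    #
    #   existing metadata: {'a': '1', 'b': '2'}
    #   update_volume_metadata(ctx, vol, {'b': '9'})               -> {'a': '1', 'b': '9'}
    #   update_volume_metadata(ctx, vol, {'b': '9'}, delete=True)  -> {'b': '9'}   # 'a' removed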
def get_volume_metadata_value(self, volume, key):
"""Get value of particular metadata key."""
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if i['key'] == key:
return i['value']
LOG.info(_LI("Get volume metadata key completed successfully."),
resource=volume)
return None
@wrap_check_policy
def get_volume_admin_metadata(self, context, volume):
"""Get all administration metadata associated with a volume."""
rv = self.db.volume_admin_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume admin metadata completed successfully."),
resource=volume)
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_admin_metadata(self, context, volume, key):
"""Delete the given administration metadata item from a volume."""
self.db.volume_admin_metadata_delete(context, volume['id'], key)
LOG.info(_LI("Delete volume admin metadata completed successfully."),
resource=volume)
@wrap_check_policy
def update_volume_admin_metadata(self, context, volume, metadata,
delete=False):
"""Updates or creates volume administration metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_admin_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
self.db.volume_admin_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update volume admin metadata completed successfully."),
resource=volume)
return _metadata
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
snapshot_obj = self.get_snapshot(context, snapshot['id'])
LOG.info(_LI("Get snapshot metadata completed successfully."),
resource=snapshot)
return snapshot_obj.metadata
def delete_snapshot_metadata(self, context, snapshot, key):
"""Delete the given metadata item from a snapshot."""
snapshot_obj = self.get_snapshot(context, snapshot['id'])
snapshot_obj.delete_metadata_key(context, key)
LOG.info(_LI("Delete snapshot metadata completed successfully."),
resource=snapshot)
def update_snapshot_metadata(self, context,
snapshot, metadata,
delete=False):
"""Updates or creates snapshot metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = snapshot.metadata
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
snapshot.metadata = _metadata
snapshot.save(context)
# TODO(jdg): Implement an RPC call for drivers that may use this info
LOG.info(_LI("Update snapshot metadata completed successfully."),
resource=snapshot)
return snapshot.metadata
def get_snapshot_metadata_value(self, snapshot, key):
LOG.info(_LI("Get snapshot metadata value not implemented."),
resource=snapshot)
# FIXME(jdg): Huh? Pass?
pass
def get_volumes_image_metadata(self, context):
check_policy(context, 'get_volumes_image_metadata')
db_data = self.db.volume_glance_metadata_get_all(context)
results = collections.defaultdict(dict)
for meta_entry in db_data:
results[meta_entry['volume_id']].update({meta_entry['key']:
meta_entry['value']})
return results
@wrap_check_policy
def get_volume_image_metadata(self, context, volume):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
LOG.info(_LI("Get volume image-metadata completed successfully."),
resource=volume)
return dict(
(meta_entry.key, meta_entry.value) for meta_entry in db_data
)
def _check_volume_availability(self, volume, force):
"""Check if the volume can be used."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume %(vol_id)s status must be '
'available or in-use, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
if not force and 'in-use' == volume['status']:
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def copy_volume_to_image(self, context, volume, metadata, force):
"""Create a new image from the specified volume."""
if not CONF.enable_force_upload and force:
LOG.info(_LI("Force upload to image is disabled, "
"Force option will be ignored."),
resource={'type': 'volume', 'id': volume['id']})
force = False
self._check_volume_availability(volume, force)
glance_core_properties = CONF.glance_core_properties
if glance_core_properties:
try:
volume_image_metadata = self.get_volume_image_metadata(context,
volume)
custom_property_set = (set(volume_image_metadata).difference
(set(glance_core_properties)))
if custom_property_set:
metadata.update(dict(properties=dict((custom_property,
volume_image_metadata
[custom_property])
for custom_property
in custom_property_set)))
except exception.GlanceMetadataNotFound:
                # If the volume was not created from an image, no glance
                # metadata will be available for it in the volume glance
                # metadata table.
pass
recv_metadata = self.image_service.create(context, metadata)
self.update(context, volume, {'status': 'uploading'})
self.volume_rpcapi.copy_volume_to_image(context,
volume,
recv_metadata)
response = {"id": volume['id'],
"updated_at": volume['updated_at'],
"status": 'uploading',
"display_description": volume['display_description'],
"size": volume['size'],
"volume_type": volume['volume_type'],
"image_id": recv_metadata['id'],
"container_format": recv_metadata['container_format'],
"disk_format": recv_metadata['disk_format'],
"image_name": recv_metadata.get('name', None)}
LOG.info(_LI("Copy image to volume completed successfully."),
resource=volume)
return response
@wrap_check_policy
def extend(self, context, volume, new_size):
if volume['status'] != 'available':
msg = _('Volume %(vol_id)s status must be available '
'to extend, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
size_increase = (int(new_size)) - volume['size']
if size_increase <= 0:
msg = (_("New size for extend must be greater "
"than current size. (current: %(size)s, "
"extended: %(new_size)s).") % {'new_size': new_size,
'size': volume['size']})
raise exception.InvalidInput(reason=msg)
try:
reserve_opts = {'gigabytes': size_increase}
QUOTAS.add_volume_type_opts(context, reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=volume['project_id'],
**reserve_opts)
except exception.OverQuota as exc:
usages = exc.kwargs['usages']
quotas = exc.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
msg = _LE("Quota exceeded for %(s_pid)s, tried to extend volume "
"by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
"already consumed).")
LOG.error(msg, {'s_pid': context.project_id,
's_size': size_increase,
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=size_increase,
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
self.update(context, volume, {'status': 'extending'})
self.volume_rpcapi.extend_volume(context, volume, new_size,
reservations)
LOG.info(_LI("Extend volume request issued successfully."),
resource=volume)
@wrap_check_policy
def migrate_volume(self, context, volume, host, force_host_copy):
"""Migrate the volume to the specified host."""
# We only handle "available" volumes for now
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume %(vol_id)s status must be available or in-use, '
'but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure volume is not part of a migration
if volume['migration_status'] is not None:
msg = _("Volume %s is already part of an active "
"migration.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# We only handle volumes without snapshots for now
snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
if snaps:
msg = _("Volume %s must not have snapshots.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# We only handle non-replicated volumes for now
rep_status = volume['replication_status']
if rep_status is not None and rep_status != 'disabled':
msg = _("Volume %s must not be replicated.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume %s must not be part of a consistency "
"group.") % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure the host is in the list of available hosts
elevated = context.elevated()
topic = CONF.volume_topic
services = self.db.service_get_all_by_topic(elevated,
topic,
disabled=False)
found = False
for service in services:
svc_host = volume_utils.extract_host(host, 'backend')
if utils.service_is_up(service) and service['host'] == svc_host:
found = True
if not found:
msg = _('No available service named %s') % host
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
# Make sure the destination host is different than the current one
if host == volume['host']:
msg = _('Destination host must be different '
'than the current host.')
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
self.update(context, volume, {'migration_status': 'starting'})
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume_type = {}
volume_type_id = volume['volume_type_id']
if volume_type_id:
volume_type = volume_types.get_volume_type(context, volume_type_id)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id']}
self.scheduler_rpcapi.migrate_volume_to_host(context,
CONF.volume_topic,
volume['id'],
host,
force_host_copy,
request_spec)
LOG.info(_LI("Migrate volume request issued successfully."),
resource=volume)
@wrap_check_policy
def migrate_volume_completion(self, context, volume, new_volume, error):
# This is a volume swap initiated by Nova, not Cinder. Nova expects
# us to return the new_volume_id.
if not (volume['migration_status'] or new_volume['migration_status']):
return new_volume['id']
if not volume['migration_status']:
msg = _('Source volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
if not new_volume['migration_status']:
msg = _('Destination volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
expected_status = 'target:%s' % volume['id']
if not new_volume['migration_status'] == expected_status:
msg = (_('Destination has migration_status %(stat)s, expected '
'%(exp)s.') % {'stat': new_volume['migration_status'],
'exp': expected_status})
raise exception.InvalidVolume(reason=msg)
LOG.info(_LI("Migrate volume completion issued successfully."),
resource=volume)
return self.volume_rpcapi.migrate_volume_completion(context, volume,
new_volume, error)
@wrap_check_policy
def update_readonly_flag(self, context, volume, flag):
if volume['status'] != 'available':
msg = _('Volume %(vol_id)s status must be available '
'to update readonly flag, but current status is: '
'%(vol_status)s.') % {'vol_id': volume['id'],
'vol_status': volume['status']}
raise exception.InvalidVolume(reason=msg)
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': six.text_type(flag)})
LOG.info(_LI("Update readonly setting on volume "
"completed successfully."),
resource=volume)
@wrap_check_policy
def retype(self, context, volume, new_type, migration_policy=None):
"""Attempt to modify the type associated with an existing volume."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Unable to update type due to incorrect status: '
'%(vol_status)s on volume: %(vol_id)s. Volume status '
'must be available or '
'in-use.') % {'vol_status': volume['status'],
'vol_id': volume['id']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume['migration_status'] is not None:
msg = (_("Volume %s is already part of an active migration.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if migration_policy and migration_policy not in ['on-demand', 'never']:
msg = _('migration_policy must be \'on-demand\' or \'never\', '
'passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
msg = _("Volume must not be part of a consistency group.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Support specifying volume type by ID or name
try:
if uuidutils.is_uuid_like(new_type):
vol_type = volume_types.get_volume_type(context, new_type)
else:
vol_type = volume_types.get_volume_type_by_name(context,
new_type)
except exception.InvalidVolumeType:
msg = _('Invalid volume_type passed: %s.') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
vol_type_id = vol_type['id']
vol_type_qos_id = vol_type['qos_specs_id']
old_vol_type = None
old_vol_type_id = volume['volume_type_id']
old_vol_type_qos_id = None
# Error if the original and new type are the same
if volume['volume_type_id'] == vol_type_id:
msg = _('New volume_type same as original: %s.') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if volume['volume_type_id']:
old_vol_type = volume_types.get_volume_type(
context, old_vol_type_id)
old_vol_type_qos_id = old_vol_type['qos_specs_id']
# We don't support changing encryption requirements yet
old_enc = volume_types.get_volume_type_encryption(context,
old_vol_type_id)
new_enc = volume_types.get_volume_type_encryption(context,
vol_type_id)
if old_enc != new_enc:
msg = _('Retype cannot change encryption requirements.')
raise exception.InvalidInput(reason=msg)
# We don't support changing QoS at the front-end yet for in-use volumes
# TODO(avishay): Call Nova to change QoS setting (libvirt has support
# - virDomainSetBlockIoTune() - Nova does not have support yet).
if (volume['status'] != 'available' and
old_vol_type_qos_id != vol_type_qos_id):
for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
if qos_id:
specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
if specs['consumer'] != 'back-end':
msg = _('Retype cannot change front-end qos specs for '
'in-use volume: %s.') % volume['id']
raise exception.InvalidInput(reason=msg)
        # We check the quota here so that we can report any quota issues as
        # early as possible, but we won't commit until we change the type. We
        # pass the reservations onward in case we need to roll back.
reservations = quota_utils.get_volume_type_reservation(context, volume,
vol_type_id)
self.update(context, volume, {'status': 'retyping'})
request_spec = {'volume_properties': volume,
'volume_id': volume['id'],
'volume_type': vol_type,
'migration_policy': migration_policy,
'quota_reservations': reservations}
self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'],
request_spec=request_spec,
filter_properties={})
LOG.info(_LI("Retype volume request issued successfully."),
resource=volume)
def manage_existing(self, context, host, ref, name=None, description=None,
volume_type=None, metadata=None,
availability_zone=None, bootable=False):
if availability_zone is None:
elevated = context.elevated()
try:
svc_host = volume_utils.extract_host(host, 'backend')
service = self.db.service_get_by_host_and_topic(
elevated, svc_host, CONF.volume_topic)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to find service for given host.'))
availability_zone = service.get('availability_zone')
volume_type_id = volume_type['id'] if volume_type else None
volume_properties = {
'size': 0,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': 'detached',
# Rename these to the internal name.
'display_description': description,
'display_name': name,
'host': host,
'availability_zone': availability_zone,
'volume_type_id': volume_type_id,
'metadata': metadata,
'bootable': bootable
}
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume = self.db.volume_create(context, volume_properties)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id'],
'ref': ref}
self.scheduler_rpcapi.manage_existing(context, CONF.volume_topic,
volume['id'],
request_spec=request_spec)
LOG.info(_LI("Manage volume request issued successfully."),
resource=volume)
return volume
class HostAPI(base.Base):
    """Sub-set of the Volume Manager API for managing host operations."""
    def __init__(self):
        super(HostAPI, self).__init__()
def set_host_enabled(self, context, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
raise NotImplementedError()
def get_host_uptime(self, context, host):
"""Returns the result of calling "uptime" on the target host."""
raise NotImplementedError()
def host_power_action(self, context, host, action):
raise NotImplementedError()
def set_host_maintenance(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
volume evacuation.
"""
raise NotImplementedError()
| {
"content_hash": "7cb47e91e31b6cd2cadd943a87fb9cb2",
"timestamp": "",
"source": "github",
"line_count": 1473,
"max_line_length": 79,
"avg_line_length": 44.57841140529531,
"alnum_prop": 0.5293311403508771,
"repo_name": "rakeshmi/cinder",
"id": "d4bde31fc1a3afe1d4c27fd4b6c5cf0d422ddd40",
"size": "66396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10782777"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
} |
'''
Created on Oct 4, 2013
@author: athiessen
'''
from brisa.core.network import parse_url
from brisa.core.threaded_call import run_async_function
from brisa.upnp.control_point.control_point import ControlPoint
service = ('u', 'urn:schemas-upnp-org:service:SwitchPower:1')
binary_light_type = 'urn:schemas-upnp-org:device:BinaryLight:1'
class AthaControlPoint(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
| {
"content_hash": "028c0b87e082c948e5b5292f0a239966",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 63,
"avg_line_length": 18.576923076923077,
"alnum_prop": 0.6625258799171843,
"repo_name": "AlanDThiessen/atha",
"id": "391a27a45723bcce209eb387cdbb461f9f4cb11c",
"size": "483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atha/ControlPoint/AthaControlPoint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49644"
},
{
"name": "HTML",
"bytes": "5565"
},
{
"name": "JavaScript",
"bytes": "1371"
},
{
"name": "Python",
"bytes": "17424"
}
],
"symlink_target": ""
} |
from ..core import WesternCalendar
from ..registry_tools import iso_register
@iso_register('ES')
class Spain(WesternCalendar):
'Spain'
# Christian holidays
include_epiphany = True
include_good_friday = True
include_assumption = True
include_all_saints = True
include_immaculate_conception = True
# Civil holidays
include_labour_day = True
labour_day_label = "Día del trabajador"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(10, 12, "Fiesta nacional de España"),
(12, 6, "Día de la Constitución Española")
)
@iso_register('ES-AN')
class Andalusia(Spain):
"Andalusia"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(2, 28, "Andalusian National Day"),
)
# Christian holiday
include_holy_thursday = True # Also called Maundy thursday
@iso_register('ES-AR')
class Aragon(Spain):
"Aragon"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(4, 23, "Aragonese National Day"),
(12, 20, "Aragon Ombudsman Day"),
)
# Christian holiday
include_holy_thursday = True # Also called Maundy thursday
@iso_register('ES-CL')
class CastileAndLeon(Spain):
"Castile and León"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(4, 23, "Día de Castilla y León"),
)
# Christian holiday
include_holy_thursday = True # Also called Maundy thursday
@iso_register('ES-CM')
class CastillaLaMancha(Spain):
"Castilla-La Mancha"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(5, 31, "Día de la Región Castilla-La Mancha"),
)
# Christian holiday
include_holy_thursday = True # Also called Maundy thursday
@iso_register('ES-CN')
class CanaryIslands(Spain):
"Canary Islands"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(5, 30, "Día de Canarias"),
)
# Christian holiday
include_holy_thursday = True # Also called Maundy thursday
@iso_register('ES-CT')
class Catalonia(Spain):
"Catalonia"
include_easter_monday = True
include_boxing_day = True
boxing_day_label = "Sant Esteve"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(9, 11, "Diada nacional de Catalunya"),
(6, 24, "La revetlla de Sant Joan, Nit de Sant Joan"),
)
@iso_register('ES-EX')
class Extremadura(Spain):
"Extremadura"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(9, 8, "Día de Extremadura"),
)
# Christian holiday
include_holy_thursday = True # Also called Maundy thursday
@iso_register('ES-GA')
class Galicia(Spain):
"Galicia"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(5, 17, "Día das Letras Galegas"),
(7, 25, "Santiago Apóstol o Día da Patria Galega"),
)
# Christian holiday
include_holy_thursday = True # Also called Maundy thursday
@iso_register('ES-IB')
class BalearicIslands(Spain):
"Balearic Islands"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(3, 1, "Dia de les Illes Balears"),
)
# Christian holidays
include_holy_thursday = True # Also called Maundy thursday
include_easter_monday = True
@iso_register('ES-RI')
class LaRioja(Spain):
"La Rioja"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(6, 9, "Dia de La Rioja"),
)
# Christian holiday
include_holy_thursday = True # Also called Maundy thursday
@iso_register('ES-MD')
class CommunityofMadrid(Spain):
"Community of Madrid"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(5, 2, "Fiesta de la Comunidad de Madrid"),
)
# Christian holiday
include_holy_thursday = True # Also called Maundy thursday
@iso_register('ES-MC')
class Murcia(Spain):
"Region of Murcia"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(6, 9, "Día de la Región de Murcia"),
(3, 19, "San José"),
)
# Christian holiday
include_holy_thursday = True # Also called Maundy thursday
@iso_register('ES-NA')
class Navarre(Spain):
"Navarre"
# Christian holidays
include_holy_thursday = True # Also called Maundy thursday
include_easter_monday = True
@iso_register('ES-AS')
class Asturias(Spain):
"Asturias"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(9, 8, "Día de Asturias"),
)
# Christian holiday
    include_holy_thursday = True  # Also called Maundy Thursday
@iso_register('ES-PV')
class BasqueCountry(Spain):
"Basque Country"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(10, 25, "Euskadi Eguna"),
)
# Christian holidays
    include_holy_thursday = True  # Also called Maundy Thursday
include_easter_monday = True
@iso_register('ES-CB')
class Cantabria(Spain):
"Cantabria"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(9, 15, "Día de Cantabria o Día de La Montaña"),
)
# Christian holiday
    include_holy_thursday = True  # Also called Maundy Thursday
@iso_register('ES-VC')
class ValencianCommunity(Spain):
"Valencian Community"
FIXED_HOLIDAYS = Spain.FIXED_HOLIDAYS + (
(3, 19, "San José"),
(10, 9, "Dia de la Comunitat Valenciana"),
)
# Christian holiday
include_easter_monday = True
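# Usage sketch (illustrative only; it assumes the standard workalendar API,
# e.g. holidays() and is_working_day() inherited from WesternCalendar, and
# uses hypothetical dates):
#
#   from datetime import date
#   cal = Catalonia()
#   cal.holidays(2020)                     # [(date(2020, 1, 1), "New year"), ...]
#   cal.is_working_day(date(2020, 9, 11))  # False -- Diada nacional de Catalunya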
| {
"content_hash": "c4e84f773e49fec907a0f14bfb46b5b7",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 63,
"avg_line_length": 25.575,
"alnum_prop": 0.6439882697947215,
"repo_name": "novapost/workalendar",
"id": "77fc5bb342073f55c723e2857339344fe41b2213",
"size": "5138",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "workalendar/europe/spain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "268634"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from journal.models import Student, Advisor, Coordinator, School
class StudentSerializer(serializers.ModelSerializer):
"""Student model serializer"""
class Meta:
model = Student
fields = ('first_name', 'last_name', 'personal_code', 'student_id',
'grad_month', 'grad_year', 'advisor', 'coordinator', 'school')
class AdvisorSerializer(serializers.ModelSerializer):
"""Advisor model serializer"""
class Meta:
model = Advisor
fields = ('first_name', 'last_name', 'advisor_type', 'coordinator',
'school')
class CoordinatorSerializer(serializers.ModelSerializer):
"""Coordinator model serializer"""
class Meta:
model = Coordinator
fields = ('first_name', 'last_name', 'coordinator_type', 'school')
class SchoolSerializer(serializers.ModelSerializer):
"""School model serializer"""
class Meta:
model = School
        fields = ('name', 'school_id', 'address', 'city', 'state', 'country')
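# Usage sketch (hypothetical data; assumes a saved Student instance named
# `student` and standard DRF ModelSerializer behaviour):
#
#   serializer = StudentSerializer(student)
#   serializer.data            # {'first_name': ..., 'last_name': ..., ...}
#
#   incoming = StudentSerializer(data=payload)
#   if incoming.is_valid():
#       incoming.save()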
| {
"content_hash": "9192a6424506aee69782f5b316ca25d4",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 33.83870967741935,
"alnum_prop": 0.6491897044804575,
"repo_name": "WildCAS/CASCategorization",
"id": "ee9583d41df56069913ca14264028716a8a4c779",
"size": "1049",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "journal/serializers/person_serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cucumber",
"bytes": "148"
},
{
"name": "HTML",
"bytes": "280"
},
{
"name": "Python",
"bytes": "34472"
},
{
"name": "Shell",
"bytes": "5566"
}
],
"symlink_target": ""
} |
from flask import current_app
from flask.ext.celery import CELERY_LOCK
import pytest
from redis.exceptions import LockError
from pypi_portal.extensions import db, redis
from pypi_portal.models.pypi import Package
from pypi_portal.models.redis import POLL_SIMPLE_THROTTLE
from pypi_portal.tasks import pypi
class FakeDelay(object):
@staticmethod
def ready():
return False
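# FakeDelay stands in for update_package_list.delay in test_sync_timeout below;
# its ready() always returns False, so the view under test sees a queued task
# that never reports completion (the timeout path).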
def test_index():
assert '200 OK' == current_app.test_client().get('/pypi/').status
def test_sync_empty(alter_xmlrpc):
alter_xmlrpc(set())
redis.delete(POLL_SIMPLE_THROTTLE)
Package.query.delete()
db.session.commit()
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
assert [] == db.session.query(Package.name, Package.summary, Package.latest_version).all()
def test_sync_few(alter_xmlrpc):
alter_xmlrpc([dict(name='packageB', summary='Test package.', version='3.0.0'), ])
redis.delete(POLL_SIMPLE_THROTTLE)
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
expected = [('packageB', 'Test package.', '3.0.0'), ]
actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
assert expected == actual
def test_sync_rate_limit(alter_xmlrpc):
alter_xmlrpc([dict(name='packageC', summary='Test package.', version='3.0.0'), ])
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
expected = [('packageB', 'Test package.', '3.0.0'), ]
actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
assert expected == actual
def test_sync_parallel(alter_xmlrpc):
alter_xmlrpc([dict(name='packageD', summary='Test package.', version='3.0.0'), ])
redis.delete(POLL_SIMPLE_THROTTLE)
redis_key = CELERY_LOCK.format(task_name='pypi_portal.tasks.pypi.update_package_list')
lock = redis.lock(redis_key, timeout=1)
assert lock.acquire(blocking=False)
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
expected = [('packageB', 'Test package.', '3.0.0'), ]
actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
assert expected == actual
try:
lock.release()
except LockError:
pass
def test_sync_many(alter_xmlrpc):
alter_xmlrpc([
dict(name='packageB1', summary='Test package.', version='3.0.0'),
dict(name='packageB2', summary='Test package.', version='3.0.0'),
dict(name='packageB3', summary='Test package.', version='3.0.0'),
dict(name='packageB4', summary='Test package.', version='3.0.0'),
dict(name='packageB5', summary='Test package.', version='3.0.0'),
])
redis.delete(POLL_SIMPLE_THROTTLE)
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
expected = [
('packageB', 'Test package.', '3.0.0'), ('packageB1', 'Test package.', '3.0.0'),
('packageB2', 'Test package.', '3.0.0'), ('packageB3', 'Test package.', '3.0.0'),
('packageB4', 'Test package.', '3.0.0'), ('packageB5', 'Test package.', '3.0.0'),
]
actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
assert sorted(expected) == sorted(actual)
def test_sync_unhandled_exception():
old_throttle = pypi.THROTTLE
pypi.THROTTLE = 'nan'
redis.delete(POLL_SIMPLE_THROTTLE)
with pytest.raises(ValueError):
        current_app.test_client().get('/pypi/sync').status
pypi.THROTTLE = old_throttle
def test_sync_timeout():
old_delay = pypi.update_package_list.delay
pypi.update_package_list.delay = FakeDelay
redis.delete(POLL_SIMPLE_THROTTLE)
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
expected = [
('packageB', 'Test package.', '3.0.0'), ('packageB1', 'Test package.', '3.0.0'),
('packageB2', 'Test package.', '3.0.0'), ('packageB3', 'Test package.', '3.0.0'),
('packageB4', 'Test package.', '3.0.0'), ('packageB5', 'Test package.', '3.0.0'),
]
actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
assert sorted(expected) == sorted(actual)
pypi.update_package_list.delay = old_delay
| {
"content_hash": "c7f1b52b979e4ab13c53e78cfe695ad6",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 94,
"avg_line_length": 35.325,
"alnum_prop": 0.6551073366359991,
"repo_name": "Robpol86/Flask-Large-Application-Example",
"id": "27394594cc76c8ccde073c14c83e1f2757b0f036",
"size": "4239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/views/test_pypi_packages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16906"
},
{
"name": "Makefile",
"bytes": "1674"
},
{
"name": "Python",
"bytes": "57237"
}
],
"symlink_target": ""
} |
from rctk.tests.base import BaseTest
from rctk.task import Task
from rctk.widgets import StaticText, Control
class BaseContainerTest(BaseTest):
"""
Test basic container type behaviour
"""
container = None
def create_widgets(self):
c = self.container(self.tk)
w = StaticText(self.tk, 'Hello World')
self.tk.clear()
return (c, w)
def test_append(self):
c, w = self.create_widgets()
c.append(w)
task = self.tk._queue.pop()
assert task == Task('Append %d to %d' % (w.id, c.id),
{'action': 'append', 'id': c.id, 'child': w.id})
assert w in c._controls
def test_remove(self):
c, w = self.create_widgets()
c.append(w)
self.tk.clear() # ignore append task
c.remove(w)
task = self.tk._queue.pop()
assert task == Task('Remove %d from %d' % (w.id, c.id),
{'action': 'remove', 'id': c.id, 'child': w.id})
assert w not in c._controls
def test_do_not_append_self(self):
c, w = self.create_widgets()
c.append(c)
assert len(self.tk._queue) == 0
assert c not in c._controls
def test_do_not_remove_if_not_appended(self):
c, w = self.create_widgets()
assert w not in c._controls
c.remove(w)
assert len(self.tk._queue) == 0
assert w not in c._controls
def test_remove_before_reappending(self):
c1, w = self.create_widgets()
c2 = self.container(self.tk)
self.tk.clear()
c1.append(w)
self.tk._queue.pop() # ignore first append task
c2.append(w)
remove_task = self.tk._queue.pop(0)
append_task = self.tk._queue.pop()
assert remove_task == Task('Remove %d from %d' % (w.id, c1.id),
{'action': 'remove', 'id': c1.id, 'child': w.id})
assert append_task == Task('Append %d to %d' % (w.id, c2.id),
{'action': 'append', 'id': c2.id, 'child': w.id})
def test_destroy_widget(self):
c, w = self.create_widgets()
c.append(w)
self.tk.clear()
w.destroy()
self.tk._queue.pop(0) # ignore layout remove task
assert w not in c._controls
assert w not in c._controls_args
assert w.parent is None
assert w._append_args is None
def test_destroy(self):
c, w = self.create_widgets()
c.append(w)
self.tk.clear()
c.destroy()
assert len(c._controls) == 0
assert len(c._controls_args) == 0
from rctk.widgets.panel import Panel
class TestPanelContainer(BaseContainerTest):
container = Panel
from rctk.widgets.window import Window
class TestWindowContainer(BaseContainerTest):
container = Window
def test_do_not_append_self(self):
pass
from rctk.widgets.root import Root
class TestRootContainer(BaseContainerTest):
container = Root
def test_do_not_append_self(self):
pass
| {
"content_hash": "d8f3481b75847b6d2861f79ead51e8c7",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 71,
"avg_line_length": 30.653061224489797,
"alnum_prop": 0.5702396804260985,
"repo_name": "rctk/rctk",
"id": "5c3693705138d51489c65de9981de82e903b2c15",
"size": "3004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rctk/tests/test_container.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "25764"
},
{
"name": "Python",
"bytes": "214120"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings_test")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
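# Typical invocations (project layout is assumed, not shown here):
#
#   python manage.py test
#   python manage.py migrate
#
# DJANGO_SETTINGS_MODULE falls back to "settings_test" unless already set.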
| {
"content_hash": "e55bcb98dfd4751c84b4ab5c2f5d9741",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 68,
"avg_line_length": 25,
"alnum_prop": 0.7066666666666667,
"repo_name": "foundertherapy/django-users-plus",
"id": "6808838e56ab3964d2ffc02ec1a1467aec2f0448",
"size": "247",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2029"
},
{
"name": "Python",
"bytes": "104516"
}
],
"symlink_target": ""
} |
import arcpy, os
#variables
directory = "C:\\TxDOT\\CountyRoadInventory"
#issue list
issues = []
def re_source_admin():
#walk through each directory
for root, dirs, files in os.walk(directory):
#ignore file and personal geodatabases
specDir = root.split("\\")[-1]
dbsuffix = specDir.split(".")[-1]
if dbsuffix == "gdb" or dbsuffix == "mdb" or dbsuffix == "tbx":
pass
else:
for n in files:
#identify the mxds
if str(n).split(".")[-1] == "mxd":
print "working on: " + str(os.path.join(root, n))
map = arcpy.mapping.MapDocument(os.path.join(root, n))
dataframes = arcpy.mapping.ListDataFrames(map)
for df in dataframes:
layers = arcpy.mapping.ListLayers(map, "", df)
for lyr in layers:
try:
if "TPP_GIS.MCHAMB1." in lyr.dataSource:
print "lyr source: " + lyr.dataSource
newsource = lyr.dataSource.replace("TPP_GIS.MCHAMB1.", "TPP_GIS.APP_TPP_GIS_ADMIN.")
location = newsource.split("\\")[:-2]
locationFixed = "\\".join(location)
print locationFixed
newname = newsource.split("\\")[-1]
print newname
lyr.replaceDataSource(locationFixed, "SDE_WORKSPACE", newname)
print "lyr replaced: " + newsource
except:
if os.path.join(root, n) not in issues:
issues.append(os.path.join(root, n))
print lyr.name + " is not a feature layer"
tables = arcpy.mapping.ListTableViews(map, "", df)
for tbl in tables:
try:
if "TPP_GIS.MCHAMB1." in tbl.dataSource:
print "tbl source: " + tbl.dataSource
newsource = tbl.dataSource.replace("TPP_GIS.MCHAMB1.", "TPP_GIS.APP_TPP_GIS_ADMIN.")
location = newsource.split("\\")[:-2]
locationFixed = "\\".join(location)
print locationFixed
newname = newsource.split("\\")[-1]
print newname
tbl.replaceDataSource(locationFixed, "SDE_WORKSPACE", newname)
print "tbl replaced: " + newsource
except:
if os.path.join(root, n) not in issues:
issues.append(os.path.join(root, n))
print tbl.name + " is not a feature layer"
map.save()
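# Illustration of the owner-prefix swap performed above (hypothetical paths):
#   before: Database Connections\TPP.sde\TPP_GIS.MCHAMB1.Roadways
#   after:  Database Connections\TPP.sde\TPP_GIS.APP_TPP_GIS_ADMIN.Roadways
# Only the schema owner changes; the workspace and dataset name are kept, and
# replaceDataSource() re-points the layer or table at the renamed owner.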
re_source_admin()
print "success!"
print "the following MXDs contained issues with a layer having not a dataSource (e.g. a non-feature layer):"
for i in issues:
    print str(i)
| {
"content_hash": "4043ec235fd37c75a11ec4f19f520c61",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 120,
"avg_line_length": 50.04477611940298,
"alnum_prop": 0.43155383238890543,
"repo_name": "adambreznicky/python",
"id": "3156267f47ecdd5514eb28258349ffae93803b34",
"size": "3730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AdminPrefix_Resourcer_v1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2498272"
},
{
"name": "Visual Basic",
"bytes": "40594"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import colors
from scipy import misc
import argparse
import os
parser = argparse.ArgumentParser(description='Display recorded Kinect data', fromfile_prefix_chars="@")
parser.add_argument('-p', dest='recording_path', action='store', type=str, default="", help="Which recording do you want to display?")
parser.add_argument('-f', dest='frame', action='store', type=int, default=0, help='Which frame do you want to display')
args = parser.parse_args()
frame = args.frame
recording_path = args.recording_path.strip(os.sep)
#COLOR
colorFile = os.path.join(recording_path, 'COLOR', '%d.uint8' % frame)
if (os.path.isfile(colorFile)):
color = np.fromfile(colorFile, dtype=np.uint8)
color.shape = (1080, 1920, 4)
plt.subplot(2, 2, 1)
plt.imshow(color)
plt.title('Original color 1920x1080')
#DEPTH
depthFile = os.path.join(recording_path, 'DEPTH', '%d.uint16' % frame)
if (os.path.isfile(depthFile)):
min_depth = 500 # Min reliable depth
max_depth = 4500 # Max reliable depth
max_value = np.iinfo(np.uint16).max
depth = np.fromfile(depthFile, dtype=np.uint16)
depth.shape = (424, 512)
plt.subplot(2, 2, 2)
plt.imshow(depth, interpolation='nearest', cmap=cm.gist_heat)
plt.title('Depth values 512x424')
#INDEX
indexFile = os.path.join(recording_path, 'INDEX', '%d.uint8' % frame)
if (os.path.isfile(indexFile)):
index = np.fromfile(indexFile, dtype=np.uint8)
index.shape = (424, 512)
index[index == 255] = 8
index_color_map = colors.ListedColormap(['red', 'green', 'blue', 'cyan', 'magenta', 'yellow', '#ff7700', 'white', 'black'])
plt.subplot(2, 2, 3)
plt.imshow(index, interpolation='nearest', cmap=index_color_map)
plt.title('Segmentation data 512x424')
#TRACKEDCOLOR
trackedFile = os.path.join(recording_path, 'TRACKEDCOLOR', '%d.uint8' % frame)
if (os.path.isfile(trackedFile)):
    tracked = np.fromfile(trackedFile, dtype=np.uint8)
tracked.shape = (424, 512, 4)
plt.subplot(2, 2, 4)
plt.imshow(tracked)
plt.title('Color mapped to tracked depth space 512x424')
plt.show()
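# Example invocation (recording path and frame number are hypothetical):
#
#   python show.py -p C:\Recordings\session01 -f 42
#
# Up to four panels are drawn, depending on which of the COLOR, DEPTH, INDEX
# and TRACKEDCOLOR files exist for the requested frame.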
| {
"content_hash": "656c6b965a562efd00f4d6ef3d35ef06",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 134,
"avg_line_length": 33.953125,
"alnum_prop": 0.6916705016106764,
"repo_name": "dotKokott/KinectV2Recorder",
"id": "2e0823debe71f68add68083c5e2d38be772d5b46",
"size": "2173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/show.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "544192"
},
{
"name": "GLSL",
"bytes": "910"
},
{
"name": "Python",
"bytes": "2173"
}
],
"symlink_target": ""
} |
import datetime
import pickle
import unittest
import uuid
from collections import namedtuple
from copy import deepcopy
from decimal import Decimal
from unittest import mock
from django.core.exceptions import FieldError
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import (
AutoField,
Avg,
BinaryField,
BooleanField,
Case,
CharField,
Count,
DateField,
DateTimeField,
DecimalField,
DurationField,
Exists,
Expression,
ExpressionList,
ExpressionWrapper,
F,
FloatField,
Func,
IntegerField,
Max,
Min,
Model,
OrderBy,
OuterRef,
Q,
StdDev,
Subquery,
Sum,
TimeField,
UUIDField,
Value,
Variance,
When,
)
from django.db.models.expressions import (
Col,
Combinable,
CombinedExpression,
RawSQL,
Ref,
)
from django.db.models.functions import (
Coalesce,
Concat,
Left,
Length,
Lower,
Substr,
Upper,
)
from django.db.models.sql import constants
from django.db.models.sql.datastructures import Join
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import (
Approximate,
CaptureQueriesContext,
isolate_apps,
register_lookup,
)
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import SimpleLazyObject
from .models import (
UUID,
UUIDPK,
Company,
Employee,
Experiment,
Manager,
Number,
RemoteEmployee,
Result,
SimulationRun,
Time,
)
class BasicExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.example_inc = Company.objects.create(
name="Example Inc.",
num_employees=2300,
num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10),
)
cls.foobar_ltd = Company.objects.create(
name="Foobar Ltd.",
num_employees=3,
num_chairs=4,
based_in_eu=True,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20),
)
cls.max = Employee.objects.create(
firstname="Max", lastname="Mustermann", salary=30
)
cls.gmbh = Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1, ceo=cls.max
)
def setUp(self):
self.company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by("name", "num_employees", "num_chairs")
def test_annotate_values_aggregate(self):
companies = (
Company.objects.annotate(
salaries=F("ceo__salary"),
)
.values("num_employees", "salaries")
.aggregate(
result=Sum(
F("salaries") + F("num_employees"), output_field=IntegerField()
),
)
)
self.assertEqual(companies["result"], 2395)
def test_annotate_values_filter(self):
companies = (
Company.objects.annotate(
foo=RawSQL("%s", ["value"]),
)
.filter(foo="value")
.order_by("name")
)
self.assertSequenceEqual(
companies,
[self.example_inc, self.foobar_ltd, self.gmbh],
)
def test_annotate_values_count(self):
companies = Company.objects.annotate(foo=RawSQL("%s", ["value"]))
self.assertEqual(companies.count(), 3)
@skipUnlessDBFeature("supports_boolean_expr_in_select_clause")
def test_filtering_on_annotate_that_uses_q(self):
self.assertEqual(
Company.objects.annotate(
num_employees_check=ExpressionWrapper(
Q(num_employees__gt=3), output_field=BooleanField()
)
)
.filter(num_employees_check=True)
.count(),
2,
)
def test_filtering_on_q_that_is_boolean(self):
self.assertEqual(
Company.objects.filter(
ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField())
).count(),
2,
)
def test_filtering_on_rawsql_that_is_boolean(self):
self.assertEqual(
Company.objects.filter(
RawSQL("num_employees > %s", (3,), output_field=BooleanField()),
).count(),
2,
)
def test_filter_inter_attribute(self):
# We can filter on attribute relationships on same model obj, e.g.
# find companies where the number of employees is greater
# than the number of chairs.
self.assertSequenceEqual(
self.company_query.filter(num_employees__gt=F("num_chairs")),
[
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{"num_chairs": 1, "name": "Test GmbH", "num_employees": 32},
],
)
def test_update(self):
# We can set one field to have the value of another field
# Make sure we have enough chairs
self.company_query.update(num_chairs=F("num_employees"))
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 32, "name": "Test GmbH", "num_employees": 32},
],
)
def test_arithmetic(self):
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
self.company_query.update(num_chairs=F("num_employees") + 2)
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 2302, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 5, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 34, "name": "Test GmbH", "num_employees": 32},
],
)
def test_order_of_operations(self):
        # The standard order of operations is followed.
self.company_query.update(
num_chairs=F("num_employees") + 2 * F("num_employees")
)
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 6900, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 9, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 96, "name": "Test GmbH", "num_employees": 32},
],
)
def test_parenthesis_priority(self):
        # The order of operations can be overridden with parentheses.
self.company_query.update(
num_chairs=(F("num_employees") + 2) * F("num_employees")
)
self.assertSequenceEqual(
self.company_query,
[
{"num_chairs": 5294600, "name": "Example Inc.", "num_employees": 2300},
{"num_chairs": 15, "name": "Foobar Ltd.", "num_employees": 3},
{"num_chairs": 1088, "name": "Test GmbH", "num_employees": 32},
],
)
def test_update_with_fk(self):
        # A ForeignKey can be updated with the value of another ForeignKey.
self.assertEqual(Company.objects.update(point_of_contact=F("ceo")), 3)
self.assertQuerysetEqual(
Company.objects.all(),
["Joe Smith", "Frank Meyer", "Max Mustermann"],
lambda c: str(c.point_of_contact),
ordered=False,
)
def test_update_with_none(self):
Number.objects.create(integer=1, float=1.0)
Number.objects.create(integer=2)
Number.objects.filter(float__isnull=False).update(float=Value(None))
self.assertQuerysetEqual(
Number.objects.all(), [None, None], lambda n: n.float, ordered=False
)
def test_filter_with_join(self):
# F Expressions can also span joins
Company.objects.update(point_of_contact=F("ceo"))
c = Company.objects.first()
c.point_of_contact = Employee.objects.create(
firstname="Guido", lastname="van Rossum"
)
c.save()
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")),
["Foobar Ltd.", "Test GmbH"],
lambda c: c.name,
ordered=False,
)
Company.objects.exclude(ceo__firstname=F("point_of_contact__firstname")).update(
name="foo"
)
self.assertEqual(
Company.objects.exclude(ceo__firstname=F("point_of_contact__firstname"))
.get()
.name,
"foo",
)
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name=F("point_of_contact__lastname"))
def test_object_update(self):
# F expressions can be used to update attributes on single objects
self.gmbh.num_employees = F("num_employees") + 4
self.gmbh.save()
self.gmbh.refresh_from_db()
self.assertEqual(self.gmbh.num_employees, 36)
def test_new_object_save(self):
# We should be able to use Funcs when inserting new data
test_co = Company(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1, ceo=self.max
)
test_co.save()
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_new_object_create(self):
test_co = Company.objects.create(
name=Lower(Value("UPPER")), num_employees=32, num_chairs=1, ceo=self.max
)
test_co.refresh_from_db()
self.assertEqual(test_co.name, "upper")
def test_object_create_with_aggregate(self):
# Aggregates are not allowed when inserting new data
msg = (
"Aggregate functions are not allowed in this query "
"(num_employees=Max(Value(1)))."
)
with self.assertRaisesMessage(FieldError, msg):
Company.objects.create(
name="Company",
num_employees=Max(Value(1)),
num_chairs=1,
ceo=Employee.objects.create(
firstname="Just", lastname="Doit", salary=30
),
)
def test_object_update_fk(self):
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh = Company.objects.get(pk=self.gmbh.pk)
msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.'
with self.assertRaisesMessage(ValueError, msg):
test_gmbh.point_of_contact = F("ceo")
test_gmbh.point_of_contact = self.gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__lastname")
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
test_gmbh.save()
def test_update_inherited_field_value(self):
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
RemoteEmployee.objects.update(adjusted_salary=F("salary") * 5)
def test_object_update_unsaved_objects(self):
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5, ceo=self.max
)
acme.num_employees = F("num_employees") + 16
msg = (
'Failed to insert expression "Col(expressions_company, '
'expressions.Company.num_employees) + Value(16)" on '
"expressions.Company.num_employees. F() expressions can only be "
"used to update, not to insert."
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
acme.num_employees = 12
acme.name = Lower(F("name"))
msg = (
'Failed to insert expression "Lower(Col(expressions_company, '
'expressions.Company.name))" on expressions.Company.name. F() '
"expressions can only be used to update, not to insert."
)
with self.assertRaisesMessage(ValueError, msg):
acme.save()
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
test = Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F("lastname"))
self.assertSequenceEqual(queryset, [test])
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertSequenceEqual(
Employee.objects.filter(lastname__startswith=F("firstname")),
[e2, e3] if connection.features.has_case_insensitive_like else [e2],
)
qs = Employee.objects.filter(lastname__istartswith=F("firstname")).order_by(
"pk"
)
self.assertSequenceEqual(qs, [e2, e3])
def test_ticket_18375_join_reuse(self):
# Reverse multijoin F() references and the lookup target the same join.
# Pre #18375 the F() join was generated first and the lookup couldn't
# reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F("company_ceo_set__num_employees")
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_kwarg_ordering(self):
        # The next query was dict-randomization dependent: if the "gte=1"
        # lookup was seen first, the F() reused the join generated by the
        # gte lookup; if the F() was seen first, it generated a join the
        # other lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F("company_ceo_set__num_employees"),
company_ceo_set__num_chairs__gte=1,
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_kwarg_ordering_2(self):
        # Another case similar to the one above. Now the same join appears
        # in two filter kwargs: one in the lhs lookup, one in the F(). Pre
        # #18375, the number of joins generated was random when dict
        # randomization was enabled, i.e. the generated query depended on
        # which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F("pk"),
pk=F("company_ceo_set__num_employees"),
)
self.assertEqual(str(qs.query).count("JOIN"), 1)
def test_ticket_18375_chained_filters(self):
# F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(company_ceo_set__num_employees=F("pk")).filter(
company_ceo_set__num_employees=F("company_ceo_set__num_employees")
)
self.assertEqual(str(qs.query).count("JOIN"), 2)
def test_order_by_exists(self):
mary = Employee.objects.create(
firstname="Mary", lastname="Mustermann", salary=20
)
mustermanns_by_seniority = Employee.objects.filter(
lastname="Mustermann"
).order_by(
# Order by whether the employee is the CEO of a company
Exists(Company.objects.filter(ceo=OuterRef("pk"))).desc()
)
self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary])
def test_order_by_multiline_sql(self):
raw_order_by = (
RawSQL(
"""
CASE WHEN num_employees > 1000
THEN num_chairs
ELSE 0 END
""",
[],
).desc(),
RawSQL(
"""
CASE WHEN num_chairs > 1
THEN 1
ELSE 0 END
""",
[],
).asc(),
)
for qs in (
Company.objects.all(),
Company.objects.distinct(),
):
with self.subTest(qs=qs):
self.assertSequenceEqual(
qs.order_by(*raw_order_by),
[self.example_inc, self.gmbh, self.foobar_ltd],
)
def test_outerref(self):
inner = Company.objects.filter(point_of_contact=OuterRef("pk"))
msg = (
"This queryset contains a reference to an outer query and may only "
"be used in a subquery."
)
with self.assertRaisesMessage(ValueError, msg):
inner.exists()
outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
self.assertIs(outer.exists(), True)
def test_exist_single_field_output_field(self):
queryset = Company.objects.values("pk")
self.assertIsInstance(Exists(queryset).output_field, BooleanField)
def test_subquery(self):
Company.objects.filter(name="Example Inc.").update(
point_of_contact=Employee.objects.get(firstname="Joe", lastname="Smith"),
ceo=self.max,
)
Employee.objects.create(firstname="Bob", lastname="Brown", salary=40)
qs = (
Employee.objects.annotate(
is_point_of_contact=Exists(
Company.objects.filter(point_of_contact=OuterRef("pk"))
),
is_not_point_of_contact=~Exists(
Company.objects.filter(point_of_contact=OuterRef("pk"))
),
is_ceo_of_small_company=Exists(
Company.objects.filter(num_employees__lt=200, ceo=OuterRef("pk"))
),
is_ceo_small_2=~~Exists(
Company.objects.filter(num_employees__lt=200, ceo=OuterRef("pk"))
),
largest_company=Subquery(
Company.objects.order_by("-num_employees")
.filter(Q(ceo=OuterRef("pk")) | Q(point_of_contact=OuterRef("pk")))
.values("name")[:1],
output_field=CharField(),
),
)
.values(
"firstname",
"is_point_of_contact",
"is_not_point_of_contact",
"is_ceo_of_small_company",
"is_ceo_small_2",
"largest_company",
)
.order_by("firstname")
)
results = list(qs)
# Could use Coalesce(subq, Value('')) instead except for the bug in
# cx_Oracle mentioned in #23843.
bob = results[0]
if (
bob["largest_company"] == ""
and connection.features.interprets_empty_strings_as_nulls
):
bob["largest_company"] = None
self.assertEqual(
results,
[
{
"firstname": "Bob",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": False,
"is_ceo_small_2": False,
"largest_company": None,
},
{
"firstname": "Frank",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": True,
"is_ceo_small_2": True,
"largest_company": "Foobar Ltd.",
},
{
"firstname": "Joe",
"is_point_of_contact": True,
"is_not_point_of_contact": False,
"is_ceo_of_small_company": False,
"is_ceo_small_2": False,
"largest_company": "Example Inc.",
},
{
"firstname": "Max",
"is_point_of_contact": False,
"is_not_point_of_contact": True,
"is_ceo_of_small_company": True,
"is_ceo_small_2": True,
"largest_company": "Example Inc.",
},
],
)
# A less elegant way to write the same query: this uses a LEFT OUTER
# JOIN and an IS NULL, inside a WHERE NOT IN which is probably less
# efficient than EXISTS.
self.assertCountEqual(
qs.filter(is_point_of_contact=True).values("pk"),
Employee.objects.exclude(company_point_of_contact_set=None).values("pk"),
)
def test_subquery_eq(self):
qs = Employee.objects.annotate(
is_ceo=Exists(Company.objects.filter(ceo=OuterRef("pk"))),
is_point_of_contact=Exists(
Company.objects.filter(point_of_contact=OuterRef("pk")),
),
small_company=Exists(
queryset=Company.objects.filter(num_employees__lt=200),
),
).filter(is_ceo=True, is_point_of_contact=False, small_company=True)
self.assertNotEqual(
qs.query.annotations["is_ceo"],
qs.query.annotations["is_point_of_contact"],
)
self.assertNotEqual(
qs.query.annotations["is_ceo"],
qs.query.annotations["small_company"],
)
def test_subquery_sql(self):
employees = Employee.objects.all()
employees_subquery = Subquery(employees)
self.assertIs(employees_subquery.query.subquery, True)
self.assertIs(employees.query.subquery, False)
compiler = employees_subquery.query.get_compiler(connection=connection)
sql, _ = employees_subquery.as_sql(compiler, connection)
self.assertIn("(SELECT ", sql)
def test_in_subquery(self):
# This is a contrived test (and you really wouldn't write this query),
# but it is a succinct way to test the __in=Subquery() construct.
small_companies = Company.objects.filter(num_employees__lt=200).values("pk")
subquery_test = Company.objects.filter(pk__in=Subquery(small_companies))
self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh])
subquery_test2 = Company.objects.filter(
pk=Subquery(small_companies.filter(num_employees=3))
)
self.assertCountEqual(subquery_test2, [self.foobar_ltd])
def test_uuid_pk_subquery(self):
u = UUIDPK.objects.create()
UUID.objects.create(uuid_fk=u)
qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values("uuid_fk__id")))
self.assertCountEqual(qs, [u])
def test_nested_subquery(self):
inner = Company.objects.filter(point_of_contact=OuterRef("pk"))
outer = Employee.objects.annotate(is_point_of_contact=Exists(inner))
contrived = Employee.objects.annotate(
is_point_of_contact=Subquery(
outer.filter(pk=OuterRef("pk")).values("is_point_of_contact"),
output_field=BooleanField(),
),
)
self.assertCountEqual(contrived.values_list(), outer.values_list())
def test_nested_subquery_join_outer_ref(self):
inner = Employee.objects.filter(pk=OuterRef("ceo__pk")).values("pk")
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
ceo__in=inner,
ceo__pk=OuterRef("pk"),
).values("pk"),
),
)
self.assertSequenceEqual(
qs.values_list("ceo_company", flat=True),
[self.example_inc.pk, self.foobar_ltd.pk, self.gmbh.pk],
)
def test_nested_subquery_outer_ref_2(self):
first = Time.objects.create(time="09:00")
second = Time.objects.create(time="17:00")
third = Time.objects.create(time="21:00")
SimulationRun.objects.bulk_create(
[
SimulationRun(start=first, end=second, midpoint="12:00"),
SimulationRun(start=first, end=third, midpoint="15:00"),
SimulationRun(start=second, end=first, midpoint="00:00"),
]
)
inner = Time.objects.filter(
time=OuterRef(OuterRef("time")), pk=OuterRef("start")
).values("time")
middle = SimulationRun.objects.annotate(other=Subquery(inner)).values("other")[
:1
]
outer = Time.objects.annotate(other=Subquery(middle, output_field=TimeField()))
# This is a contrived example. It exercises the double OuterRef form.
self.assertCountEqual(outer, [first, second, third])
def test_nested_subquery_outer_ref_with_autofield(self):
first = Time.objects.create(time="09:00")
second = Time.objects.create(time="17:00")
SimulationRun.objects.create(start=first, end=second, midpoint="12:00")
inner = SimulationRun.objects.filter(start=OuterRef(OuterRef("pk"))).values(
"start"
)
middle = Time.objects.annotate(other=Subquery(inner)).values("other")[:1]
outer = Time.objects.annotate(
other=Subquery(middle, output_field=IntegerField())
)
# This exercises the double OuterRef form with AutoField as pk.
self.assertCountEqual(outer, [first, second])
def test_annotations_within_subquery(self):
Company.objects.filter(num_employees__lt=50).update(
ceo=Employee.objects.get(firstname="Frank")
)
inner = (
Company.objects.filter(ceo=OuterRef("pk"))
.values("ceo")
.annotate(total_employees=Sum("num_employees"))
.values("total_employees")
)
outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(
salary__lte=Subquery(inner)
)
self.assertSequenceEqual(
outer.order_by("-total_employees").values("salary", "total_employees"),
[
{"salary": 10, "total_employees": 2300},
{"salary": 20, "total_employees": 35},
],
)
def test_subquery_references_joined_table_twice(self):
inner = Company.objects.filter(
num_chairs__gte=OuterRef("ceo__salary"),
num_employees__gte=OuterRef("point_of_contact__salary"),
)
# Another contrived example (there is no need to have a subquery here)
outer = Company.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertFalse(outer.exists())
def test_subquery_filter_by_aggregate(self):
Number.objects.create(integer=1000, float=1.2)
Employee.objects.create(salary=1000)
qs = Number.objects.annotate(
min_valuable_count=Subquery(
Employee.objects.filter(
salary=OuterRef("integer"),
)
.annotate(cnt=Count("salary"))
.filter(cnt__gt=0)
.values("cnt")[:1]
),
)
self.assertEqual(qs.get().float, 1.2)
def test_subquery_filter_by_lazy(self):
self.max.manager = Manager.objects.create(name="Manager")
self.max.save()
max_manager = SimpleLazyObject(
lambda: Manager.objects.get(pk=self.max.manager.pk)
)
qs = Company.objects.annotate(
ceo_manager=Subquery(
Employee.objects.filter(
lastname=OuterRef("ceo__lastname"),
).values("manager"),
),
).filter(ceo_manager=max_manager)
self.assertEqual(qs.get(), self.gmbh)
def test_aggregate_subquery_annotation(self):
with self.assertNumQueries(1) as ctx:
aggregate = Company.objects.annotate(
ceo_salary=Subquery(
Employee.objects.filter(
id=OuterRef("ceo_id"),
).values("salary")
),
).aggregate(
ceo_salary_gt_20=Count("pk", filter=Q(ceo_salary__gt=20)),
)
self.assertEqual(aggregate, {"ceo_salary_gt_20": 1})
# Aggregation over a subquery annotation doesn't annotate the subquery
# twice in the inner query.
sql = ctx.captured_queries[0]["sql"]
self.assertLessEqual(sql.count("SELECT"), 3)
# GROUP BY isn't required to aggregate over a query that doesn't
# contain nested aggregates.
self.assertNotIn("GROUP BY", sql)
@skipUnlessDBFeature("supports_over_clause")
def test_aggregate_rawsql_annotation(self):
with self.assertNumQueries(1) as ctx:
aggregate = Company.objects.annotate(
salary=RawSQL("SUM(num_chairs) OVER (ORDER BY num_employees)", []),
).aggregate(
count=Count("pk"),
)
self.assertEqual(aggregate, {"count": 3})
sql = ctx.captured_queries[0]["sql"]
self.assertNotIn("GROUP BY", sql)
def test_explicit_output_field(self):
class FuncA(Func):
output_field = CharField()
class FuncB(Func):
pass
expr = FuncB(FuncA())
self.assertEqual(expr.output_field, FuncA.output_field)
def test_outerref_mixed_case_table_name(self):
inner = Result.objects.filter(result_time__gte=OuterRef("experiment__assigned"))
outer = Result.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertFalse(outer.exists())
def test_outerref_with_operator(self):
inner = Company.objects.filter(num_employees=OuterRef("ceo__salary") + 2)
outer = Company.objects.filter(pk__in=Subquery(inner.values("pk")))
self.assertEqual(outer.get().name, "Test GmbH")
def test_nested_outerref_with_function(self):
self.gmbh.point_of_contact = Employee.objects.get(lastname="Meyer")
self.gmbh.save()
inner = Employee.objects.filter(
lastname__startswith=Left(OuterRef(OuterRef("lastname")), 1),
)
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
point_of_contact__in=inner,
ceo__pk=OuterRef("pk"),
).values("name"),
),
).filter(ceo_company__isnull=False)
self.assertEqual(qs.get().ceo_company, "Test GmbH")
def test_annotation_with_outerref(self):
gmbh_salary = Company.objects.annotate(
max_ceo_salary_raise=Subquery(
Company.objects.annotate(
salary_raise=OuterRef("num_employees") + F("num_employees"),
)
.order_by("-salary_raise")
.values("salary_raise")[:1],
output_field=IntegerField(),
),
).get(pk=self.gmbh.pk)
self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332)
def test_annotation_with_nested_outerref(self):
self.gmbh.point_of_contact = Employee.objects.get(lastname="Meyer")
self.gmbh.save()
inner = Employee.objects.annotate(
outer_lastname=OuterRef(OuterRef("lastname")),
).filter(lastname__startswith=Left("outer_lastname", 1))
qs = Employee.objects.annotate(
ceo_company=Subquery(
Company.objects.filter(
point_of_contact__in=inner,
ceo__pk=OuterRef("pk"),
).values("name"),
),
).filter(ceo_company__isnull=False)
self.assertEqual(qs.get().ceo_company, "Test GmbH")
def test_pickle_expression(self):
expr = Value(1)
expr.convert_value # populate cached property
self.assertEqual(pickle.loads(pickle.dumps(expr)), expr)
def test_incorrect_field_in_F_expression(self):
with self.assertRaisesMessage(
FieldError, "Cannot resolve keyword 'nope' into field."
):
list(Employee.objects.filter(firstname=F("nope")))
def test_incorrect_joined_field_in_F_expression(self):
with self.assertRaisesMessage(
FieldError, "Cannot resolve keyword 'nope' into field."
):
list(Company.objects.filter(ceo__pk=F("point_of_contact__nope")))
def test_exists_in_filter(self):
inner = Company.objects.filter(ceo=OuterRef("pk")).values("pk")
qs1 = Employee.objects.filter(Exists(inner))
qs2 = Employee.objects.annotate(found=Exists(inner)).filter(found=True)
self.assertCountEqual(qs1, qs2)
self.assertFalse(Employee.objects.exclude(Exists(inner)).exists())
self.assertCountEqual(qs2, Employee.objects.exclude(~Exists(inner)))
def test_subquery_in_filter(self):
inner = Company.objects.filter(ceo=OuterRef("pk")).values("based_in_eu")
self.assertSequenceEqual(
Employee.objects.filter(Subquery(inner)),
[self.foobar_ltd.ceo],
)
def test_subquery_group_by_outerref_in_filter(self):
inner = (
Company.objects.annotate(
employee=OuterRef("pk"),
)
.values("employee")
.annotate(
min_num_chairs=Min("num_chairs"),
)
.values("ceo")
)
self.assertIs(Employee.objects.filter(pk__in=Subquery(inner)).exists(), True)
def test_case_in_filter_if_boolean_output_field(self):
is_ceo = Company.objects.filter(ceo=OuterRef("pk"))
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
qs = Employee.objects.filter(
Case(
When(Exists(is_ceo), then=True),
When(Exists(is_poc), then=True),
default=False,
output_field=BooleanField(),
),
)
self.assertCountEqual(qs, [self.example_inc.ceo, self.foobar_ltd.ceo, self.max])
def test_boolean_expression_combined(self):
is_ceo = Company.objects.filter(ceo=OuterRef("pk"))
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) | Exists(is_poc)),
[self.example_inc.ceo, self.foobar_ltd.ceo, self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) & Exists(is_poc)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_ceo) & Q(salary__gte=30)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)),
[self.example_inc.ceo, self.max],
)
self.assertCountEqual(
Employee.objects.filter(Q(salary__gte=30) & Exists(is_ceo)),
[self.max],
)
self.assertCountEqual(
Employee.objects.filter(Q(salary__lt=15) | Exists(is_poc)),
[self.example_inc.ceo, self.max],
)
def test_boolean_expression_combined_with_empty_Q(self):
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
tests = [
Exists(is_poc) & Q(),
Q() & Exists(is_poc),
Exists(is_poc) | Q(),
Q() | Exists(is_poc),
Q(Exists(is_poc)) & Q(),
Q() & Q(Exists(is_poc)),
Q(Exists(is_poc)) | Q(),
Q() | Q(Exists(is_poc)),
]
for conditions in tests:
with self.subTest(conditions):
self.assertCountEqual(Employee.objects.filter(conditions), [self.max])
def test_boolean_expression_in_Q(self):
is_poc = Company.objects.filter(point_of_contact=OuterRef("pk"))
self.gmbh.point_of_contact = self.max
self.gmbh.save()
self.assertCountEqual(Employee.objects.filter(Q(Exists(is_poc))), [self.max])
class IterableLookupInnerExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
ceo = Employee.objects.create(firstname="Just", lastname="Doit", salary=30)
# MySQL requires that the values calculated for expressions don't pass
# outside of the field's range, so it's inconvenient to use the values
# in the more general tests.
cls.c5020 = Company.objects.create(
name="5020 Ltd", num_employees=50, num_chairs=20, ceo=ceo
)
cls.c5040 = Company.objects.create(
name="5040 Ltd", num_employees=50, num_chairs=40, ceo=ceo
)
cls.c5050 = Company.objects.create(
name="5050 Ltd", num_employees=50, num_chairs=50, ceo=ceo
)
cls.c5060 = Company.objects.create(
name="5060 Ltd", num_employees=50, num_chairs=60, ceo=ceo
)
cls.c99300 = Company.objects.create(
name="99300 Ltd", num_employees=99, num_chairs=300, ceo=ceo
)
def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self):
# __in lookups can use F() expressions for integers.
queryset = Company.objects.filter(num_employees__in=([F("num_chairs") - 10]))
self.assertSequenceEqual(queryset, [self.c5060])
self.assertCountEqual(
Company.objects.filter(
num_employees__in=([F("num_chairs") - 10, F("num_chairs") + 10])
),
[self.c5040, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(
num_employees__in=(
[F("num_chairs") - 10, F("num_chairs"), F("num_chairs") + 10]
)
),
[self.c5040, self.c5050, self.c5060],
)
def test_expressions_in_lookups_join_choice(self):
midpoint = datetime.time(13, 0)
t1 = Time.objects.create(time=datetime.time(12, 0))
t2 = Time.objects.create(time=datetime.time(14, 0))
s1 = SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint)
SimulationRun.objects.create(start=None, end=None, midpoint=midpoint)
queryset = SimulationRun.objects.filter(
midpoint__range=[F("start__time"), F("end__time")]
)
self.assertSequenceEqual(queryset, [s1])
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.INNER)
queryset = SimulationRun.objects.exclude(
midpoint__range=[F("start__time"), F("end__time")]
)
self.assertQuerysetEqual(queryset, [], ordered=False)
for alias in queryset.query.alias_map.values():
if isinstance(alias, Join):
self.assertEqual(alias.join_type, constants.LOUTER)
def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self):
# Range lookups can use F() expressions for integers.
Company.objects.filter(num_employees__exact=F("num_chairs"))
self.assertCountEqual(
Company.objects.filter(num_employees__range=(F("num_chairs"), 100)),
[self.c5020, self.c5040, self.c5050],
)
self.assertCountEqual(
Company.objects.filter(
num_employees__range=(F("num_chairs") - 10, F("num_chairs") + 10)
),
[self.c5040, self.c5050, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(num_employees__range=(F("num_chairs") - 10, 100)),
[self.c5020, self.c5040, self.c5050, self.c5060],
)
self.assertCountEqual(
Company.objects.filter(num_employees__range=(1, 100)),
[self.c5020, self.c5040, self.c5050, self.c5060, self.c99300],
)
def test_range_lookup_namedtuple(self):
EmployeeRange = namedtuple("EmployeeRange", ["minimum", "maximum"])
qs = Company.objects.filter(
num_employees__range=EmployeeRange(minimum=51, maximum=100),
)
self.assertSequenceEqual(qs, [self.c99300])
@unittest.skipUnless(
connection.vendor == "sqlite",
"This defensive test only works on databases that don't validate parameter "
"types",
)
def test_expressions_not_introduce_sql_injection_via_untrusted_string_inclusion(
self,
):
"""
This tests that SQL injection isn't possible using compilation of
expressions in iterable filters, as their compilation happens before
the main query compilation. It's limited to SQLite, as PostgreSQL,
Oracle and other vendors have defense in depth against this by type
checking. Testing against SQLite (the most permissive of the built-in
databases) demonstrates that the problem doesn't exist while keeping
the test simple.
"""
queryset = Company.objects.filter(name__in=[F("num_chairs") + "1)) OR ((1==1"])
self.assertQuerysetEqual(queryset, [], ordered=False)
def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self):
start = datetime.datetime(2016, 2, 3, 15, 0, 0)
end = datetime.datetime(2016, 2, 5, 15, 0, 0)
experiment_1 = Experiment.objects.create(
name="Integrity testing",
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
experiment_2 = Experiment.objects.create(
name="Taste testing",
assigned=start.date(),
start=start,
end=end,
completed=end.date(),
estimated_time=end - start,
)
r1 = Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 2, 4, 15, 0, 0),
)
Result.objects.create(
experiment=experiment_1,
result_time=datetime.datetime(2016, 3, 10, 2, 0, 0),
)
Result.objects.create(
experiment=experiment_2,
result_time=datetime.datetime(2016, 1, 8, 5, 0, 0),
)
within_experiment_time = [F("experiment__start"), F("experiment__end")]
queryset = Result.objects.filter(result_time__range=within_experiment_time)
self.assertSequenceEqual(queryset, [r1])
class FTests(SimpleTestCase):
def test_deepcopy(self):
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_deconstruct(self):
f = F("name")
path, args, kwargs = f.deconstruct()
self.assertEqual(path, "django.db.models.F")
self.assertEqual(args, (f.name,))
self.assertEqual(kwargs, {})
def test_equal(self):
f = F("name")
same_f = F("name")
other_f = F("username")
self.assertEqual(f, same_f)
self.assertNotEqual(f, other_f)
def test_hash(self):
d = {F("name"): "Bob"}
self.assertIn(F("name"), d)
self.assertEqual(d[F("name")], "Bob")
def test_not_equal_Value(self):
f = F("name")
value = Value("name")
self.assertNotEqual(f, value)
self.assertNotEqual(value, f)
class ExpressionsTests(TestCase):
def test_F_reuse(self):
f = F("id")
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.",
num_employees=2300,
num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith"),
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
def test_patterns_escape(self):
r"""
        Special characters (e.g. %, _ and \) stored in the database are
        properly escaped when using a pattern lookup with an expression --
        refs #16731.
"""
Employee.objects.bulk_create(
[
Employee(firstname="Johnny", lastname="%John"),
Employee(firstname="Jean-Claude", lastname="Claud_"),
Employee(firstname="Jean-Claude", lastname="Claude%"),
Employee(firstname="Johnny", lastname="Joh\\n"),
Employee(firstname="Johnny", lastname="_ohn"),
]
)
claude = Employee.objects.create(firstname="Jean-Claude", lastname="Claude")
john = Employee.objects.create(firstname="Johnny", lastname="John")
john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%Joh\\n")
self.assertCountEqual(
Employee.objects.filter(firstname__contains=F("lastname")),
[john_sign, john, claude],
)
self.assertCountEqual(
Employee.objects.filter(firstname__startswith=F("lastname")),
[john_sign, john],
)
self.assertSequenceEqual(
Employee.objects.filter(firstname__endswith=F("lastname")),
[claude],
)
def test_insensitive_patterns_escape(self):
r"""
        Special characters (e.g. %, _ and \) stored in the database are
        properly escaped when using a case-insensitive pattern lookup with
        an expression -- refs #16731.
"""
Employee.objects.bulk_create(
[
Employee(firstname="Johnny", lastname="%john"),
Employee(firstname="Jean-Claude", lastname="claud_"),
Employee(firstname="Jean-Claude", lastname="claude%"),
Employee(firstname="Johnny", lastname="joh\\n"),
Employee(firstname="Johnny", lastname="_ohn"),
]
)
claude = Employee.objects.create(firstname="Jean-Claude", lastname="claude")
john = Employee.objects.create(firstname="Johnny", lastname="john")
john_sign = Employee.objects.create(firstname="%Joh\\nny", lastname="%joh\\n")
self.assertCountEqual(
Employee.objects.filter(firstname__icontains=F("lastname")),
[john_sign, john, claude],
)
self.assertCountEqual(
Employee.objects.filter(firstname__istartswith=F("lastname")),
[john_sign, john],
)
self.assertSequenceEqual(
Employee.objects.filter(firstname__iendswith=F("lastname")),
[claude],
)
@isolate_apps("expressions")
class SimpleExpressionTests(SimpleTestCase):
def test_equal(self):
self.assertEqual(Expression(), Expression())
self.assertEqual(
Expression(IntegerField()), Expression(output_field=IntegerField())
)
self.assertEqual(Expression(IntegerField()), mock.ANY)
self.assertNotEqual(Expression(IntegerField()), Expression(CharField()))
class TestModel(Model):
field = IntegerField()
other_field = IntegerField()
self.assertNotEqual(
Expression(TestModel._meta.get_field("field")),
Expression(TestModel._meta.get_field("other_field")),
)
def test_hash(self):
self.assertEqual(hash(Expression()), hash(Expression()))
self.assertEqual(
hash(Expression(IntegerField())),
hash(Expression(output_field=IntegerField())),
)
self.assertNotEqual(
hash(Expression(IntegerField())),
hash(Expression(CharField())),
)
class TestModel(Model):
field = IntegerField()
other_field = IntegerField()
self.assertNotEqual(
hash(Expression(TestModel._meta.get_field("field"))),
hash(Expression(TestModel._meta.get_field("other_field"))),
)
class ExpressionsNumericTests(TestCase):
@classmethod
def setUpTestData(cls):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
Number.objects.update(float=F("integer"))
def test_fill_with_value_from_same_object(self):
"""
        We can fill a field on every object with the value of another
        field on the same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[(-1, -1), (42, 42), (1337, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_increment_value(self):
"""
        We can increment a field on all objects in a queryset.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0).update(integer=F("integer") + 1), 2
)
self.assertQuerysetEqual(
Number.objects.all(),
[(-1, -1), (43, 42), (1338, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_filter_not_equals_other_field(self):
"""
        We can filter for objects where one field is not equal to the
        value of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0).update(integer=F("integer") + 1), 2
)
self.assertQuerysetEqual(
Number.objects.exclude(float=F("integer")),
[(43, 42), (1338, 1337)],
lambda n: (n.integer, round(n.float)),
ordered=False,
)
def test_filter_decimal_expression(self):
obj = Number.objects.create(integer=0, float=1, decimal_value=Decimal("1"))
qs = Number.objects.annotate(
x=ExpressionWrapper(Value(1), output_field=DecimalField()),
).filter(Q(x=1, integer=0) & Q(x=Decimal("1")))
self.assertSequenceEqual(qs, [obj])
def test_complex_expressions(self):
"""
        Complex expressions combining different arithmetic operators are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(
Number.objects.filter(pk=n.pk).update(float=F("integer") + F("float") * 2),
1,
)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(
Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3)
)
def test_decimal_expression(self):
n = Number.objects.create(integer=1, decimal_value=Decimal("0.5"))
n.decimal_value = F("decimal_value") - Decimal("0.4")
n.save()
n.refresh_from_db()
self.assertEqual(n.decimal_value, Decimal("0.1"))
class ExpressionOperatorTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.n = Number.objects.create(integer=42, float=15.5)
cls.n1 = Number.objects.create(integer=-42, float=-15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") + 15, float=F("float") + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)
)
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") - 15, float=F("float") - 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3)
)
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") * 15, float=F("float") * 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)
)
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") / 2, float=F("float") / 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3)
)
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F("integer") % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
def test_lefthand_modulo_null(self):
# LH Modulo arithmetic on integers.
Employee.objects.create(firstname="John", lastname="Doe", salary=None)
qs = Employee.objects.annotate(modsalary=F("salary") % 20)
        self.assertIsNone(qs.get().modsalary)
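    # Bitwise operations go through the explicit .bitand()/.bitor()/.bitxor()/
    # .bitleftshift()/.bitrightshift() methods because the &, |, and ^
    # operators are reserved for combining Q objects and raise
    # NotImplementedError on expressions (see CombinableTests below).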
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F("integer").bitand(56))
Number.objects.filter(pk=self.n1.pk).update(integer=F("integer").bitand(-56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64)
def test_lefthand_bitwise_left_shift_operator(self):
Number.objects.update(integer=F("integer").bitleftshift(2))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168)
def test_lefthand_bitwise_right_shift_operator(self):
Number.objects.update(integer=F("integer").bitrightshift(2))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11)
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.update(integer=F("integer").bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10)
def test_lefthand_transformed_field_bitwise_or(self):
Employee.objects.create(firstname="Max", lastname="Mustermann")
with register_lookup(CharField, Length):
qs = Employee.objects.annotate(bitor=F("lastname__length").bitor(48))
self.assertEqual(qs.get().bitor, 58)
def test_lefthand_power(self):
# LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F("integer") ** 2, float=F("float") ** 1.5
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2)
)
def test_lefthand_bitwise_xor(self):
Number.objects.update(integer=F("integer").bitxor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 26)
self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -26)
def test_lefthand_bitwise_xor_null(self):
employee = Employee.objects.create(firstname="John", lastname="Doe")
Employee.objects.update(salary=F("salary").bitxor(48))
employee.refresh_from_db()
self.assertIsNone(employee.salary)
def test_lefthand_bitwise_xor_right_null(self):
employee = Employee.objects.create(firstname="John", lastname="Doe", salary=48)
Employee.objects.update(salary=F("salary").bitxor(None))
employee.refresh_from_db()
self.assertIsNone(employee.salary)
@unittest.skipUnless(
connection.vendor == "oracle", "Oracle doesn't support bitwise XOR."
)
def test_lefthand_bitwise_xor_not_supported(self):
msg = "Bitwise XOR is not supported in Oracle."
with self.assertRaisesMessage(NotSupportedError, msg):
Number.objects.update(integer=F("integer").bitxor(48))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(
integer=15 + F("integer"), float=42.7 + F("float")
)
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)
)
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(
integer=15 - F("integer"), float=42.7 - F("float")
)
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3)
)
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=15 * F("integer"), float=42.7 * F("float")
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)
)
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=640 / F("integer"), float=42.7 / F("float")
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3)
)
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F("integer"))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
def test_righthand_power(self):
# RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=2 ** F("integer"), float=1.5 ** F("float")
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(
Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3)
)
class FTimeDeltaTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.sday = sday = datetime.date(2010, 6, 25)
cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
delta5 = datetime.timedelta(days=90)
# Test data is set so that deltas and delays will be
# strictly increasing.
cls.deltas = []
cls.delays = []
cls.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
cls.e0 = Experiment.objects.create(
name="e0",
assigned=sday,
start=stime,
end=end,
completed=end.date(),
estimated_time=delta0,
)
cls.deltas.append(delta0)
cls.delays.append(
cls.e0.start - datetime.datetime.combine(cls.e0.assigned, midnight)
)
cls.days_long.append(cls.e0.completed - cls.e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite.
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(
name="e1",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta1,
)
cls.deltas.append(delta1)
cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight))
cls.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(
name="e2",
assigned=sday - datetime.timedelta(3),
start=stime,
end=end,
completed=end.date(),
estimated_time=datetime.timedelta(hours=1),
)
cls.deltas.append(delta2)
cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight))
cls.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(
name="e3",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta3,
)
cls.deltas.append(delta3)
cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight))
cls.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(
name="e4",
assigned=sday - datetime.timedelta(10),
start=stime,
end=end,
completed=end.date(),
estimated_time=delta4 - datetime.timedelta(1),
)
cls.deltas.append(delta4)
cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight))
cls.days_long.append(e4.completed - e4.assigned)
# e5: started a month after assignment, very long duration
delay = datetime.timedelta(30)
end = stime + delay + delta5
e5 = Experiment.objects.create(
name="e5",
assigned=sday,
start=stime + delay,
end=end,
completed=end.date(),
estimated_time=delta5,
)
cls.deltas.append(delta5)
cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight))
cls.days_long.append(e5.completed - e5.assigned)
cls.expnames = [e.name for e in Experiment.objects.all()]
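    # The deltas, delays, and days_long lists are built in strictly increasing
    # order and follow the same e0..e5 ordering as expnames, so the comparison
    # tests below can express their expected results as slices such as
    # self.expnames[:i] and self.expnames[: i + 1].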
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(
end__lt=F("start") + datetime.timedelta(hours=1)
)
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643 - Crash when compiling query more than once
qs = Experiment.objects.filter(end__lt=F("start") + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
# Intentionally no assert
def test_delta_add(self):
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.filter(end__lt=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(end__lt=delta + F("start"))
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(end__lte=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_delta_subtract(self):
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.filter(start__gt=F("end") - delta)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name for e in Experiment.objects.filter(start__gte=F("end") - delta)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_exclude(self):
for i, delta in enumerate(self.deltas):
test_set = [
e.name for e in Experiment.objects.exclude(end__lt=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[i:])
test_set = [
e.name for e in Experiment.objects.exclude(end__lte=F("start") + delta)
]
self.assertEqual(test_set, self.expnames[i + 1 :])
def test_date_comparison(self):
for i, days in enumerate(self.days_long):
test_set = [
e.name
for e in Experiment.objects.filter(completed__lt=F("assigned") + days)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(completed__lte=F("assigned") + days)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_datetime_and_durationfield_addition_with_filter(self):
test_set = Experiment.objects.filter(end=F("start") + F("estimated_time"))
self.assertGreater(test_set.count(), 0)
self.assertEqual(
[e.name for e in test_set],
[
e.name
for e in Experiment.objects.all()
if e.end == e.start + e.estimated_time
],
)
def test_datetime_and_duration_field_addition_with_annotate_and_no_output_field(
self,
):
test_set = Experiment.objects.annotate(
estimated_end=F("start") + F("estimated_time")
)
self.assertEqual(
[e.estimated_end for e in test_set],
[e.start + e.estimated_time for e in test_set],
)
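    # The subtraction tests below are gated on supports_temporal_subtraction:
    # computing a DurationField result from expressions such as end - start is
    # only exercised on backends that advertise native support for it.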
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subtraction_with_annotate_and_no_output_field(self):
test_set = Experiment.objects.annotate(
calculated_duration=F("end") - F("start")
)
self.assertEqual(
[e.calculated_duration for e in test_set],
[e.end - e.start for e in test_set],
)
def test_mixed_comparisons1(self):
for i, delay in enumerate(self.delays):
test_set = [
e.name
for e in Experiment.objects.filter(assigned__gt=F("start") - delay)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(assigned__gte=F("start") - delay)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_mixed_comparisons2(self):
for i, delay in enumerate(self.delays):
delay = datetime.timedelta(delay.days)
test_set = [
e.name
for e in Experiment.objects.filter(start__lt=F("assigned") + delay)
]
self.assertEqual(test_set, self.expnames[:i])
test_set = [
e.name
for e in Experiment.objects.filter(
start__lte=F("assigned") + delay + datetime.timedelta(1)
)
]
self.assertEqual(test_set, self.expnames[: i + 1])
def test_delta_update(self):
for delta in self.deltas:
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F("start") + delta, end=F("end") + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_invalid_operator(self):
with self.assertRaises(DatabaseError):
list(Experiment.objects.filter(start=F("start") * datetime.timedelta(0)))
def test_durationfield_add(self):
zeros = [
e.name
for e in Experiment.objects.filter(start=F("start") + F("estimated_time"))
]
self.assertEqual(zeros, ["e0"])
end_less = [
e.name
for e in Experiment.objects.filter(end__lt=F("start") + F("estimated_time"))
]
self.assertEqual(end_less, ["e2"])
delta_math = [
e.name
for e in Experiment.objects.filter(
end__gte=F("start") + F("estimated_time") + datetime.timedelta(hours=1)
)
]
self.assertEqual(delta_math, ["e4"])
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("start") + Value(None, output_field=DurationField()),
output_field=DateTimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
def test_durationfield_multiply_divide(self):
Experiment.objects.update(scalar=2)
tests = [
(Decimal("2"), 2),
(F("scalar"), 2),
(2, 2),
(3.2, 3.2),
]
for expr, scalar in tests:
with self.subTest(expr=expr):
qs = Experiment.objects.annotate(
multiplied=ExpressionWrapper(
expr * F("estimated_time"),
output_field=DurationField(),
),
divided=ExpressionWrapper(
F("estimated_time") / expr,
output_field=DurationField(),
),
)
for experiment in qs:
self.assertEqual(
experiment.multiplied,
experiment.estimated_time * scalar,
)
self.assertEqual(
experiment.divided,
experiment.estimated_time / scalar,
)
def test_duration_expressions(self):
for delta in self.deltas:
qs = Experiment.objects.annotate(duration=F("estimated_time") + delta)
for obj in qs:
self.assertEqual(obj.duration, obj.estimated_time + delta)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_subtraction(self):
queryset = Experiment.objects.annotate(
completion_duration=F("completed") - F("assigned"),
)
at_least_5_days = {
e.name
for e in queryset.filter(
completion_duration__gte=datetime.timedelta(days=5)
)
}
self.assertEqual(at_least_5_days, {"e3", "e4", "e5"})
at_least_120_days = {
e.name
for e in queryset.filter(
completion_duration__gte=datetime.timedelta(days=120)
)
}
self.assertEqual(at_least_120_days, {"e5"})
less_than_5_days = {
e.name
for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))
}
self.assertEqual(less_than_5_days, {"e0", "e1", "e2"})
queryset = Experiment.objects.annotate(
difference=F("completed") - Value(None, output_field=DateField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("completed") - Value(None, output_field=DurationField()),
output_field=DateField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_subquery_subtraction(self):
subquery = Experiment.objects.filter(pk=OuterRef("pk")).values("completed")
queryset = Experiment.objects.annotate(
difference=subquery - F("completed"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_date_case_subtraction(self):
queryset = Experiment.objects.annotate(
date_case=Case(
When(Q(name="e0"), then=F("completed")),
output_field=DateField(),
),
completed_value=Value(
self.e0.completed,
output_field=DateField(),
),
difference=F("date_case") - F("completed_value"),
).filter(difference=datetime.timedelta())
self.assertEqual(queryset.get(), self.e0)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_time_subtraction(self):
Time.objects.create(time=datetime.time(12, 30, 15, 2345))
queryset = Time.objects.annotate(
difference=F("time") - Value(datetime.time(11, 15, 0)),
)
self.assertEqual(
queryset.get().difference,
datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345),
)
queryset = Time.objects.annotate(
difference=F("time") - Value(None, output_field=TimeField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Time.objects.annotate(
shifted=ExpressionWrapper(
F("time") - Value(None, output_field=DurationField()),
output_field=TimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_time_subquery_subtraction(self):
Time.objects.create(time=datetime.time(12, 30, 15, 2345))
subquery = Time.objects.filter(pk=OuterRef("pk")).values("time")
queryset = Time.objects.annotate(
difference=subquery - F("time"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subtraction(self):
under_estimate = [
e.name
for e in Experiment.objects.filter(estimated_time__gt=F("end") - F("start"))
]
self.assertEqual(under_estimate, ["e2"])
over_estimate = [
e.name
for e in Experiment.objects.filter(estimated_time__lt=F("end") - F("start"))
]
self.assertEqual(over_estimate, ["e4"])
queryset = Experiment.objects.annotate(
difference=F("start") - Value(None, output_field=DateTimeField()),
)
self.assertIsNone(queryset.first().difference)
queryset = Experiment.objects.annotate(
shifted=ExpressionWrapper(
F("start") - Value(None, output_field=DurationField()),
output_field=DateTimeField(),
)
)
self.assertIsNone(queryset.first().shifted)
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subquery_subtraction(self):
subquery = Experiment.objects.filter(pk=OuterRef("pk")).values("start")
queryset = Experiment.objects.annotate(
difference=subquery - F("start"),
).filter(difference=datetime.timedelta())
self.assertTrue(queryset.exists())
@skipUnlessDBFeature("supports_temporal_subtraction")
def test_datetime_subtraction_microseconds(self):
delta = datetime.timedelta(microseconds=8999999999999999)
Experiment.objects.update(end=F("start") + delta)
qs = Experiment.objects.annotate(delta=F("end") - F("start"))
for e in qs:
self.assertEqual(e.delta, delta)
def test_duration_with_datetime(self):
        # Exclude e1 which has very high precision so we can test this on all
        # backends regardless of whether they support microsecond precision.
over_estimate = (
Experiment.objects.exclude(name="e1")
.filter(
completed__gt=self.stime + F("estimated_time"),
)
.order_by("name")
)
self.assertQuerysetEqual(over_estimate, ["e3", "e4", "e5"], lambda e: e.name)
def test_duration_with_datetime_microseconds(self):
delta = datetime.timedelta(microseconds=8999999999999999)
qs = Experiment.objects.annotate(
dt=ExpressionWrapper(
F("start") + delta,
output_field=DateTimeField(),
)
)
for e in qs:
self.assertEqual(e.dt, e.start + delta)
def test_date_minus_duration(self):
more_than_4_days = Experiment.objects.filter(
assigned__lt=F("completed") - Value(datetime.timedelta(days=4))
)
self.assertQuerysetEqual(more_than_4_days, ["e3", "e4", "e5"], lambda e: e.name)
def test_negative_timedelta_update(self):
# subtract 30 seconds, 30 minutes, 2 hours and 2 days
experiments = (
Experiment.objects.filter(name="e0")
.annotate(
start_sub_seconds=F("start") + datetime.timedelta(seconds=-30),
)
.annotate(
start_sub_minutes=F("start_sub_seconds")
+ datetime.timedelta(minutes=-30),
)
.annotate(
start_sub_hours=F("start_sub_minutes") + datetime.timedelta(hours=-2),
)
.annotate(
new_start=F("start_sub_hours") + datetime.timedelta(days=-2),
)
)
expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0)
# subtract 30 microseconds
experiments = experiments.annotate(
new_start=F("new_start") + datetime.timedelta(microseconds=-30)
)
expected_start += datetime.timedelta(microseconds=+746970)
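        # Sanity check on the arithmetic: e0 starts at 12:15:30.747000 on
        # 2010-06-25; subtracting 30s, 30m, 2h, and 2d gives 09:45:00.747000
        # on 2010-06-23, and the extra 30 microseconds leave .746970, which is
        # why expected_start is adjusted above.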
experiments.update(start=F("new_start"))
e0 = Experiment.objects.get(name="e0")
self.assertEqual(e0.start, expected_start)
class ValueTests(TestCase):
def test_update_TimeField_using_Value(self):
Time.objects.create()
Time.objects.update(time=Value(datetime.time(1), output_field=TimeField()))
self.assertEqual(Time.objects.get().time, datetime.time(1))
def test_update_UUIDField_using_Value(self):
UUID.objects.create()
UUID.objects.update(
uuid=Value(
uuid.UUID("12345678901234567890123456789012"), output_field=UUIDField()
)
)
self.assertEqual(
UUID.objects.get().uuid, uuid.UUID("12345678901234567890123456789012")
)
def test_deconstruct(self):
value = Value("name")
path, args, kwargs = value.deconstruct()
self.assertEqual(path, "django.db.models.Value")
self.assertEqual(args, (value.value,))
self.assertEqual(kwargs, {})
def test_deconstruct_output_field(self):
value = Value("name", output_field=CharField())
path, args, kwargs = value.deconstruct()
self.assertEqual(path, "django.db.models.Value")
self.assertEqual(args, (value.value,))
self.assertEqual(len(kwargs), 1)
self.assertEqual(
kwargs["output_field"].deconstruct(), CharField().deconstruct()
)
def test_repr(self):
tests = [
(None, "Value(None)"),
("str", "Value('str')"),
(True, "Value(True)"),
(42, "Value(42)"),
(
datetime.datetime(2019, 5, 15),
"Value(datetime.datetime(2019, 5, 15, 0, 0))",
),
(Decimal("3.14"), "Value(Decimal('3.14'))"),
]
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(repr(Value(value)), expected)
def test_equal(self):
value = Value("name")
self.assertEqual(value, Value("name"))
self.assertNotEqual(value, Value("username"))
def test_hash(self):
d = {Value("name"): "Bob"}
self.assertIn(Value("name"), d)
self.assertEqual(d[Value("name")], "Bob")
def test_equal_output_field(self):
value = Value("name", output_field=CharField())
same_value = Value("name", output_field=CharField())
other_value = Value("name", output_field=TimeField())
no_output_field = Value("name")
self.assertEqual(value, same_value)
self.assertNotEqual(value, other_value)
self.assertNotEqual(value, no_output_field)
def test_raise_empty_expressionlist(self):
msg = "ExpressionList requires at least one expression"
with self.assertRaisesMessage(ValueError, msg):
ExpressionList()
def test_compile_unresolved(self):
# This test might need to be revisited later on if #25425 is enforced.
compiler = Time.objects.all().query.get_compiler(connection=connection)
value = Value("foo")
self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"]))
value = Value("foo", output_field=CharField())
self.assertEqual(value.as_sql(compiler, connection), ("%s", ["foo"]))
def test_output_field_decimalfield(self):
Time.objects.create()
time = Time.objects.annotate(one=Value(1, output_field=DecimalField())).first()
self.assertEqual(time.one, 1)
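    # Value infers its output_field from the Python type of the wrapped value
    # (str -> CharField, int -> IntegerField, and so on); unrecognised types
    # raise FieldError, as the next two tests demonstrate.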
def test_resolve_output_field(self):
value_types = [
("str", CharField),
(True, BooleanField),
(42, IntegerField),
(3.14, FloatField),
(datetime.date(2019, 5, 15), DateField),
(datetime.datetime(2019, 5, 15), DateTimeField),
(datetime.time(3, 16), TimeField),
(datetime.timedelta(1), DurationField),
(Decimal("3.14"), DecimalField),
(b"", BinaryField),
(uuid.uuid4(), UUIDField),
]
for value, output_field_type in value_types:
with self.subTest(type=type(value)):
expr = Value(value)
self.assertIsInstance(expr.output_field, output_field_type)
def test_resolve_output_field_failure(self):
msg = "Cannot resolve expression type, unknown output_field"
with self.assertRaisesMessage(FieldError, msg):
Value(object()).output_field
def test_output_field_does_not_create_broken_validators(self):
"""
        The output field for a given Value doesn't get cleaned & validated;
        however, validators may still be instantiated for a given field type,
        and this demonstrates that they don't throw an exception.
"""
value_types = [
"str",
True,
42,
3.14,
datetime.date(2019, 5, 15),
datetime.datetime(2019, 5, 15),
datetime.time(3, 16),
datetime.timedelta(1),
Decimal("3.14"),
b"",
uuid.uuid4(),
]
for value in value_types:
with self.subTest(type=type(value)):
field = Value(value)._resolve_output_field()
field.clean(value, model_instance=None)
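# Exists() wraps a queryset in an EXISTS(...) subquery. The test below checks
# the SQL-level trimming Django applies to it: the inner select columns and
# ORDER BY are dropped and a LIMIT 1 is added.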
class ExistsTests(TestCase):
def test_optimizations(self):
with CaptureQueriesContext(connection) as context:
list(
Experiment.objects.values(
exists=Exists(
Experiment.objects.order_by("pk"),
)
).order_by()
)
captured_queries = context.captured_queries
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(
connection.ops.quote_name(Experiment._meta.pk.column),
captured_sql,
)
self.assertIn(
connection.ops.limit_offset_sql(None, 1),
captured_sql,
)
self.assertNotIn("ORDER BY", captured_sql)
def test_negated_empty_exists(self):
manager = Manager.objects.create()
qs = Manager.objects.filter(~Exists(Manager.objects.none()) & Q(pk=manager.pk))
self.assertSequenceEqual(qs, [manager])
def test_select_negated_empty_exists(self):
manager = Manager.objects.create()
qs = Manager.objects.annotate(
not_exists=~Exists(Manager.objects.none())
).filter(pk=manager.pk)
self.assertSequenceEqual(qs, [manager])
self.assertIs(qs.get().not_exists, True)
class FieldTransformTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.sday = sday = datetime.date(2010, 6, 25)
cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
cls.ex1 = Experiment.objects.create(
name="Experiment 1",
assigned=sday,
completed=sday + datetime.timedelta(2),
estimated_time=datetime.timedelta(2),
start=stime,
end=stime + datetime.timedelta(2),
)
def test_month_aggregation(self):
self.assertEqual(
Experiment.objects.aggregate(month_count=Count("assigned__month")),
{"month_count": 1},
)
def test_transform_in_values(self):
self.assertSequenceEqual(
Experiment.objects.values("assigned__month"),
[{"assigned__month": 6}],
)
def test_multiple_transforms_in_values(self):
self.assertSequenceEqual(
Experiment.objects.values("end__date__month"),
[{"end__date__month": 6}],
)
class ReprTests(SimpleTestCase):
def test_expressions(self):
self.assertEqual(
repr(Case(When(a=1))),
"<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>",
)
self.assertEqual(
repr(When(Q(age__gte=18), then=Value("legal"))),
"<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value('legal')>",
)
self.assertEqual(repr(Col("alias", "field")), "Col(alias, field)")
self.assertEqual(repr(F("published")), "F(published)")
self.assertEqual(
repr(F("cost") + F("tax")), "<CombinedExpression: F(cost) + F(tax)>"
)
self.assertEqual(
repr(ExpressionWrapper(F("cost") + F("tax"), IntegerField())),
"ExpressionWrapper(F(cost) + F(tax))",
)
self.assertEqual(
repr(Func("published", function="TO_CHAR")),
"Func(F(published), function=TO_CHAR)",
)
self.assertEqual(repr(OrderBy(Value(1))), "OrderBy(Value(1), descending=False)")
self.assertEqual(repr(RawSQL("table.col", [])), "RawSQL(table.col, [])")
self.assertEqual(
repr(Ref("sum_cost", Sum("cost"))), "Ref(sum_cost, Sum(F(cost)))"
)
self.assertEqual(repr(Value(1)), "Value(1)")
self.assertEqual(
repr(ExpressionList(F("col"), F("anothercol"))),
"ExpressionList(F(col), F(anothercol))",
)
self.assertEqual(
repr(ExpressionList(OrderBy(F("col"), descending=False))),
"ExpressionList(OrderBy(F(col), descending=False))",
)
def test_functions(self):
self.assertEqual(repr(Coalesce("a", "b")), "Coalesce(F(a), F(b))")
self.assertEqual(repr(Concat("a", "b")), "Concat(ConcatPair(F(a), F(b)))")
self.assertEqual(repr(Length("a")), "Length(F(a))")
self.assertEqual(repr(Lower("a")), "Lower(F(a))")
self.assertEqual(repr(Substr("a", 1, 3)), "Substr(F(a), Value(1), Value(3))")
self.assertEqual(repr(Upper("a")), "Upper(F(a))")
def test_aggregates(self):
self.assertEqual(repr(Avg("a")), "Avg(F(a))")
self.assertEqual(repr(Count("a")), "Count(F(a))")
self.assertEqual(repr(Count("*")), "Count('*')")
self.assertEqual(repr(Max("a")), "Max(F(a))")
self.assertEqual(repr(Min("a")), "Min(F(a))")
self.assertEqual(repr(StdDev("a")), "StdDev(F(a), sample=False)")
self.assertEqual(repr(Sum("a")), "Sum(F(a))")
self.assertEqual(
repr(Variance("a", sample=True)), "Variance(F(a), sample=True)"
)
def test_distinct_aggregates(self):
self.assertEqual(repr(Count("a", distinct=True)), "Count(F(a), distinct=True)")
self.assertEqual(repr(Count("*", distinct=True)), "Count('*', distinct=True)")
def test_filtered_aggregates(self):
filter = Q(a=1)
self.assertEqual(
repr(Avg("a", filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Count("a", filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Max("a", filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Min("a", filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(StdDev("a", filter=filter)),
"StdDev(F(a), filter=(AND: ('a', 1)), sample=False)",
)
self.assertEqual(
repr(Sum("a", filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))"
)
self.assertEqual(
repr(Variance("a", sample=True, filter=filter)),
"Variance(F(a), filter=(AND: ('a', 1)), sample=True)",
)
self.assertEqual(
repr(Count("a", filter=filter, distinct=True)),
"Count(F(a), distinct=True, filter=(AND: ('a', 1)))",
)
class CombinableTests(SimpleTestCase):
bitwise_msg = (
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def test_negation(self):
c = Combinable()
self.assertEqual(-c, c * -1)
def test_and(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() & Combinable()
def test_or(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() | Combinable()
def test_xor(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
Combinable() ^ Combinable()
def test_reversed_and(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() & Combinable()
def test_reversed_or(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() | Combinable()
def test_reversed_xor(self):
with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg):
object() ^ Combinable()
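# CombinedExpression resolves the output_field of lhs <op> rhs from the types
# of both sides: numeric pairs promote (integer with float gives float,
# integer with decimal gives decimal), while date/time arithmetic follows the
# table in test_resolve_output_field_dates. Mixing in a NULL Value with no
# output_field, or combining incompatible types, raises FieldError.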
class CombinedExpressionTests(SimpleTestCase):
def test_resolve_output_field_number(self):
tests = [
(IntegerField, AutoField, IntegerField),
(AutoField, IntegerField, IntegerField),
(IntegerField, DecimalField, DecimalField),
(DecimalField, IntegerField, DecimalField),
(IntegerField, FloatField, FloatField),
(FloatField, IntegerField, FloatField),
]
connectors = [Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV]
for lhs, rhs, combined in tests:
for connector in connectors:
with self.subTest(
lhs=lhs, connector=connector, rhs=rhs, combined=combined
):
expr = CombinedExpression(
Expression(lhs()),
connector,
Expression(rhs()),
)
self.assertIsInstance(expr.output_field, combined)
def test_resolve_output_field_with_null(self):
def null():
return Value(None)
tests = [
# Numbers.
(AutoField, Combinable.ADD, null),
(DecimalField, Combinable.ADD, null),
(FloatField, Combinable.ADD, null),
(IntegerField, Combinable.ADD, null),
(IntegerField, Combinable.SUB, null),
(null, Combinable.ADD, IntegerField),
# Dates.
(DateField, Combinable.ADD, null),
(DateTimeField, Combinable.ADD, null),
(DurationField, Combinable.ADD, null),
(TimeField, Combinable.ADD, null),
(TimeField, Combinable.SUB, null),
(null, Combinable.ADD, DateTimeField),
(DateField, Combinable.SUB, null),
]
for lhs, connector, rhs in tests:
msg = (
f"Cannot infer type of {connector!r} expression involving these types: "
)
with self.subTest(lhs=lhs, connector=connector, rhs=rhs):
expr = CombinedExpression(
Expression(lhs()),
connector,
Expression(rhs()),
)
with self.assertRaisesMessage(FieldError, msg):
expr.output_field
def test_resolve_output_field_dates(self):
tests = [
# Add - same type.
(DateField, Combinable.ADD, DateField, FieldError),
(DateTimeField, Combinable.ADD, DateTimeField, FieldError),
(TimeField, Combinable.ADD, TimeField, FieldError),
(DurationField, Combinable.ADD, DurationField, DurationField),
# Add - different type.
(DateField, Combinable.ADD, DurationField, DateTimeField),
(DateTimeField, Combinable.ADD, DurationField, DateTimeField),
(TimeField, Combinable.ADD, DurationField, TimeField),
(DurationField, Combinable.ADD, DateField, DateTimeField),
(DurationField, Combinable.ADD, DateTimeField, DateTimeField),
(DurationField, Combinable.ADD, TimeField, TimeField),
# Subtract - same type.
(DateField, Combinable.SUB, DateField, DurationField),
(DateTimeField, Combinable.SUB, DateTimeField, DurationField),
(TimeField, Combinable.SUB, TimeField, DurationField),
(DurationField, Combinable.SUB, DurationField, DurationField),
# Subtract - different type.
(DateField, Combinable.SUB, DurationField, DateTimeField),
(DateTimeField, Combinable.SUB, DurationField, DateTimeField),
(TimeField, Combinable.SUB, DurationField, TimeField),
(DurationField, Combinable.SUB, DateField, FieldError),
(DurationField, Combinable.SUB, DateTimeField, FieldError),
            (DurationField, Combinable.SUB, TimeField, FieldError),
]
for lhs, connector, rhs, combined in tests:
msg = (
f"Cannot infer type of {connector!r} expression involving these types: "
)
with self.subTest(lhs=lhs, connector=connector, rhs=rhs, combined=combined):
expr = CombinedExpression(
Expression(lhs()),
connector,
Expression(rhs()),
)
if issubclass(combined, Exception):
with self.assertRaisesMessage(combined, msg):
expr.output_field
else:
self.assertIsInstance(expr.output_field, combined)
def test_mixed_char_date_with_annotate(self):
queryset = Experiment.objects.annotate(nonsense=F("name") + F("assigned"))
msg = (
"Cannot infer type of '+' expression involving these types: CharField, "
"DateField. You must set output_field."
)
with self.assertRaisesMessage(FieldError, msg):
list(queryset)
class ExpressionWrapperTests(SimpleTestCase):
def test_empty_group_by(self):
expr = ExpressionWrapper(Value(3), output_field=IntegerField())
self.assertEqual(expr.get_group_by_cols(alias=None), [])
def test_non_empty_group_by(self):
value = Value("f")
value.output_field = None
expr = ExpressionWrapper(Lower(value), output_field=IntegerField())
group_by_cols = expr.get_group_by_cols(alias=None)
self.assertEqual(group_by_cols, [expr.expression])
self.assertEqual(group_by_cols[0].output_field, expr.output_field)
class OrderByTests(SimpleTestCase):
def test_equal(self):
self.assertEqual(
OrderBy(F("field"), nulls_last=True),
OrderBy(F("field"), nulls_last=True),
)
self.assertNotEqual(
OrderBy(F("field"), nulls_last=True),
OrderBy(F("field")),
)
def test_hash(self):
self.assertEqual(
hash(OrderBy(F("field"), nulls_last=True)),
hash(OrderBy(F("field"), nulls_last=True)),
)
self.assertNotEqual(
hash(OrderBy(F("field"), nulls_last=True)),
hash(OrderBy(F("field"))),
)
def test_nulls_false(self):
# These tests will catch ValueError in Django 5.0 when passing False to
# nulls_first and nulls_last becomes forbidden.
# msg = "nulls_first and nulls_last values must be True or None."
msg = (
"Passing nulls_first=False or nulls_last=False is deprecated, use None "
"instead."
)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
OrderBy(F("field"), nulls_first=False)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
OrderBy(F("field"), nulls_last=False)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
F("field").asc(nulls_first=False)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
F("field").desc(nulls_last=False)
| {
"content_hash": "cef5c9e0894733589e5b77381c0da1f7",
"timestamp": "",
"source": "github",
"line_count": 2569,
"max_line_length": 88,
"avg_line_length": 38.43090696769171,
"alnum_prop": 0.5738536802763119,
"repo_name": "dsanders11/django",
"id": "39e6c18b1a126fabfc17881655ed57de9187bf2f",
"size": "98729",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "tests/expressions/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "87587"
},
{
"name": "HTML",
"bytes": "236871"
},
{
"name": "JavaScript",
"bytes": "146496"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "15995318"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import curses
from collections import OrderedDict
import six
import pytest
from rtv import __version__
from rtv.subreddit_page import SubredditPage
from rtv.packages.praw.errors import NotFound, HTTPException
from requests.exceptions import ReadTimeout
try:
from unittest import mock
except ImportError:
import mock
PROMPTS = OrderedDict([
('prompt_1', 'comments/571dw3'),
('prompt_2', '///comments/571dw3'),
('prompt_3', '/comments/571dw3'),
('prompt_4', '/r/pics/comments/571dw3/'),
('prompt_5', 'https://www.reddit.com/r/pics/comments/571dw3/at_disneyland'),
])
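# All of the prompts above are different ways of referring to the same
# submission (id 571dw3); test_subreddit_prompt_submission verifies that each
# one resolves to the canonical comments URL.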
def test_subreddit_page_construct(reddit, terminal, config, oauth):
window = terminal.stdscr.subwin
with terminal.loader():
page = SubredditPage(reddit, terminal, config, oauth, '/r/python')
assert terminal.loader.exception is None
page.draw()
# Title
title = '/r/python'.encode('utf-8')
window.addstr.assert_any_call(0, 0, title)
# Banner
menu = '[1]hot [2]top [3]rising [4]new [5]controversial [6]gilded'.encode('utf-8')
window.addstr.assert_any_call(0, 0, menu)
# Submission
text = page.content.get(0)['split_title'][0].encode('utf-8')
window.subwin.addstr.assert_any_call(0, 1, text, 2097152)
# Cursor should have been drawn
window.subwin.addch.assert_any_call(0, 0, ' ', curses.A_REVERSE)
# Reload with a smaller terminal window
terminal.stdscr.ncols = 20
terminal.stdscr.nlines = 10
with terminal.loader():
page = SubredditPage(reddit, terminal, config, oauth, '/r/python')
assert terminal.loader.exception is None
page.draw()
def test_subreddit_refresh(subreddit_page, terminal):
# Refresh the page with default values
subreddit_page.controller.trigger('r')
assert subreddit_page.content.order is None
assert subreddit_page.content.name == '/r/python'
assert terminal.loader.exception is None
# Refresh with the order in the name
subreddit_page.refresh_content(order='ignore', name='/r/front/hot')
assert subreddit_page.content.order == 'hot'
assert subreddit_page.content.name == '/r/front'
assert terminal.loader.exception is None
def test_subreddit_reload_page(subreddit_page, terminal, reddit):
cache = reddit.handler.cache
assert len(cache) == 1
# A plain refresh_content() will use whatever is in the praw cache
# instead of making a new request to reddit
list(cache.values())[0].status_code = 503
subreddit_page.refresh_content()
assert isinstance(terminal.loader.exception, HTTPException)
cache = reddit.handler.cache
assert len(cache) == 1
# But if we manually trigger a page refresh, it should clear the cache
# and reload the page instead of returning the cached 503 response
list(cache.values())[0].status_code = 503
subreddit_page.controller.trigger('r')
assert terminal.loader.exception is None
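# The expected output below is the xterm OSC escape sequence that sets the
# terminal window title ("\x1b]2;<title>\x07"). In ascii mode the non-ASCII
# heart is downgraded to "?", and nothing is emitted when DISPLAY is unset or
# when running inside Emacs.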
def test_subreddit_title(subreddit_page, terminal, capsys):
subreddit_page.content.name = 'hello ❤'
with mock.patch.dict('os.environ', {'DISPLAY': ':1'}):
terminal.config['ascii'] = True
subreddit_page.draw()
out, _ = capsys.readouterr()
assert isinstance(out, six.text_type)
assert out == '\x1b]2;hello ? - rtv {}\x07'.format(__version__)
terminal.config['ascii'] = False
subreddit_page.draw()
out, _ = capsys.readouterr()
assert isinstance(out, six.text_type)
assert out == '\x1b]2;hello ❤ - rtv {}\x07'.format(__version__)
with mock.patch.dict('os.environ', {'DISPLAY': ''}):
subreddit_page.draw()
out, _ = capsys.readouterr()
assert not out
with mock.patch.dict('os.environ', {'INSIDE_EMACS': '25.3.1,term:0.96'}):
subreddit_page.draw()
out, _ = capsys.readouterr()
assert not out
def test_subreddit_search(subreddit_page, terminal):
window = terminal.stdscr.subwin
# Search the current subreddit
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = 'search term'
subreddit_page.controller.trigger('f')
assert subreddit_page.content.name == '/r/python'
assert terminal.prompt_input.called
assert not terminal.loader.exception
# The page title should display the query
subreddit_page.draw()
title = 'Searching /r/python: search term'.encode('utf-8')
window.addstr.assert_any_call(0, 0, title)
# Ordering the results should preserve the query
window.addstr.reset_mock()
subreddit_page.refresh_content(order='hot')
subreddit_page.refresh_content(order='top-all')
subreddit_page.refresh_content(order='new')
assert subreddit_page.content.name == '/r/python'
assert subreddit_page.content.query == 'search term'
assert not terminal.loader.exception
# Searching with an empty query shouldn't crash
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = None
subreddit_page.controller.trigger('f')
assert not terminal.loader.exception
# Changing to a new subreddit should clear the query
window.addstr.reset_mock()
subreddit_page.refresh_content(name='/r/learnpython')
assert subreddit_page.content.query is None
def test_subreddit_prompt(subreddit_page, terminal):
# Prompt for a different subreddit
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = 'front/top'
subreddit_page.controller.trigger('/')
subreddit_page.handle_selected_page()
assert not subreddit_page.active
assert subreddit_page.selected_page
assert subreddit_page.selected_page.content.name == '/r/front'
assert subreddit_page.selected_page.content.order == 'top'
@pytest.mark.parametrize('prompt', PROMPTS.values(), ids=list(PROMPTS))
def test_subreddit_prompt_submission(subreddit_page, terminal, prompt):
url = 'https://www.reddit.com/comments/571dw3'
with mock.patch.object(subreddit_page, 'open_submission_page'), \
mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = prompt
subreddit_page.open_submission_page.return_value = 'MockPage'
subreddit_page.controller.trigger('/')
subreddit_page.open_submission_page.assert_called_with(url)
assert not terminal.loader.exception
assert subreddit_page.selected_page == 'MockPage'
def test_subreddit_prompt_submission_invalid(subreddit_page, terminal):
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = 'comments/571dw3fakeid'
subreddit_page.controller.trigger('/')
assert isinstance(terminal.loader.exception, NotFound)
def test_subreddit_order(subreddit_page):
# /r/python doesn't always have rising submissions, so use a larger sub
subreddit_page.refresh_content(name='all')
subreddit_page.content.query = ''
subreddit_page.controller.trigger('1')
assert subreddit_page.content.order == 'hot'
subreddit_page.controller.trigger('3')
assert subreddit_page.content.order == 'rising'
subreddit_page.controller.trigger('4')
assert subreddit_page.content.order == 'new'
subreddit_page.controller.trigger('6')
assert subreddit_page.content.order == 'gilded'
subreddit_page.content.query = 'search text'
subreddit_page.controller.trigger('1')
assert subreddit_page.content.order == 'relevance'
subreddit_page.controller.trigger('4')
assert subreddit_page.content.order == 'new'
# Shouldn't be able to sort queries by gilded
subreddit_page.controller.trigger('6')
assert curses.flash.called
assert subreddit_page.content.order == 'new'
def test_subreddit_order_top(subreddit_page, terminal):
# Sort by top
with mock.patch.object(terminal, 'show_notification'):
# Invalid selection
terminal.show_notification.return_value = ord('x')
subreddit_page.controller.trigger('2')
terminal.show_notification.assert_called_with('Invalid option')
assert subreddit_page.content.order is None
# Valid selection - sort by week
terminal.show_notification.reset_mock()
terminal.show_notification.return_value = ord('3')
subreddit_page.controller.trigger('2')
assert subreddit_page.content.order == 'top-week'
def test_subreddit_order_controversial(subreddit_page, terminal):
# Sort by controversial
with mock.patch.object(terminal, 'show_notification'):
# Invalid selection
terminal.show_notification.return_value = ord('x')
subreddit_page.controller.trigger('5')
terminal.show_notification.assert_called_with('Invalid option')
assert subreddit_page.content.order is None
# Valid selection - sort by default
terminal.show_notification.reset_mock()
terminal.show_notification.return_value = ord('\n')
subreddit_page.controller.trigger('5')
assert subreddit_page.content.order == 'controversial'
def test_subreddit_order_search(subreddit_page, terminal):
# Search the current subreddit
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = 'search term'
subreddit_page.controller.trigger('f')
assert subreddit_page.content.name == '/r/python'
assert terminal.prompt_input.called
assert not terminal.loader.exception
# Sort by relevance
subreddit_page.controller.trigger('1')
assert subreddit_page.content.order == 'relevance'
# Sort by top
with mock.patch.object(terminal, 'show_notification'):
terminal.show_notification.reset_mock()
terminal.show_notification.return_value = ord('6')
subreddit_page.controller.trigger('2')
assert subreddit_page.content.order == 'top-all'
# Sort by comments
with mock.patch.object(terminal, 'show_notification'):
terminal.show_notification.reset_mock()
terminal.show_notification.return_value = ord('6')
subreddit_page.controller.trigger('3')
assert subreddit_page.content.order == 'comments-all'
# Sort by new
subreddit_page.controller.trigger('4')
assert subreddit_page.content.order == 'new'
def test_subreddit_open(subreddit_page, terminal, config):
# Open the selected submission
data = subreddit_page.content.get(subreddit_page.nav.absolute_index)
with mock.patch.object(config.history, 'add'):
data['url_type'] = 'selfpost'
subreddit_page.controller.trigger('l')
assert not terminal.loader.exception
assert subreddit_page.selected_page
assert subreddit_page.active
config.history.add.assert_called_with(data['url_full'])
# Open the selected link externally
data = subreddit_page.content.get(subreddit_page.nav.absolute_index)
with mock.patch.object(terminal, 'open_link'), \
mock.patch.object(config.history, 'add'):
data['url_type'] = 'external'
subreddit_page.controller.trigger('o')
assert terminal.open_link.called
config.history.add.assert_called_with(data['url_full'])
# Open the selected link within rtv
data = subreddit_page.content.get(subreddit_page.nav.absolute_index)
with mock.patch.object(subreddit_page, 'open_submission'), \
mock.patch.object(config.history, 'add'):
data['url_type'] = 'selfpost'
subreddit_page.controller.trigger('o')
assert subreddit_page.open_submission.called
def test_subreddit_open_xpost(subreddit_page, config):
data = subreddit_page.content.get(subreddit_page.nav.absolute_index)
# Open an x-post subreddit, see /r/TinySubredditoftheDay for an example
with mock.patch.object(subreddit_page, 'refresh_content'):
data['url_type'] = 'x-post subreddit'
data['xpost_subreddit'] = 'goodbye'
subreddit_page.controller.trigger('o')
subreddit_page.refresh_content.assert_called_with(
name='goodbye', order='ignore')
# Open an x-post submission, see /r/bestof for an example
with mock.patch.object(subreddit_page, 'open_submission'):
data['url_type'] = 'x-post submission'
data['url_full'] = 'www.test.com'
subreddit_page.controller.trigger('o')
subreddit_page.open_submission.assert_called_with(url='www.test.com')
def test_subreddit_unauthenticated(subreddit_page, terminal):
# Unauthenticated commands
methods = [
'a', # Upvote
'z', # Downvote
'c', # Post
'e', # Edit
'd', # Delete
's', # Subscriptions
]
for ch in methods:
subreddit_page.controller.trigger(ch)
text = 'Not logged in'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_called_with(1, 1, text)
def test_subreddit_post(subreddit_page, terminal, reddit, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
# Post a submission to an invalid subreddit
subreddit_page.refresh_content(name='front')
subreddit_page.controller.trigger('c')
text = "Can't post to /r/front".encode('utf-8')
terminal.stdscr.subwin.addstr.assert_called_with(1, 1, text)
# Post a submission with a title but with no body
subreddit_page.refresh_content(name='python')
with mock.patch.object(terminal, 'open_editor'):
terminal.open_editor.return_value.__enter__.return_value = 'title'
subreddit_page.controller.trigger('c')
text = 'Missing body'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_called_with(1, 1, text)
# Post a fake submission
url = 'https://www.reddit.com/r/Python/comments/2xmo63/'
submission = reddit.get_submission(url)
with mock.patch.object(terminal, 'open_editor'), \
mock.patch.object(reddit, 'submit'), \
mock.patch('time.sleep'):
terminal.open_editor.return_value.__enter__.return_value = 'test\ncont'
reddit.submit.return_value = submission
subreddit_page.controller.trigger('c')
assert reddit.submit.called
assert subreddit_page.selected_page.content._submission == submission
assert subreddit_page.active
def test_subreddit_open_subscriptions(subreddit_page, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
# Open subscriptions
subreddit_page.controller.trigger('s')
assert subreddit_page.selected_page
assert subreddit_page.active
with mock.patch('rtv.page.Page.loop') as loop:
subreddit_page.handle_selected_page()
assert loop.called
def test_subreddit_get_inbox_timeout(subreddit_page, refresh_token, terminal, vcr):
if vcr.record_mode == 'none':
pytest.skip('Unable to test ReadTimeout exceptions using a cassette')
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
subreddit_page.reddit.config.timeout = 0.00000001
subreddit_page.controller.trigger('i')
text = 'HTTP request timed out'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_called_with(1, 1, text)
assert isinstance(terminal.loader.exception, ReadTimeout)
def test_subreddit_open_multireddits(subreddit_page, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
# Open multireddits
subreddit_page.controller.trigger('S')
assert subreddit_page.selected_page
assert subreddit_page.active
with mock.patch('rtv.page.Page.loop') as loop:
subreddit_page.handle_selected_page()
assert loop.called
def test_subreddit_private_user_pages(subreddit_page, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
subreddit_page.refresh_content(name='/u/me/saved')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/hidden')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/upvoted')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/downvoted')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/overview')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/submitted')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/me/comments')
subreddit_page.draw()
def test_subreddit_user_pages(subreddit_page, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
# Pick a user that has a lot of recent comments, so we can make sure that
# SavedComment objects have all of the properties necessary to be drawn
# on the submission page.
# Should default to the overview page
subreddit_page.refresh_content(name='/u/spez')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/spez/overview')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/spez/submitted')
subreddit_page.draw()
subreddit_page.refresh_content(name='/u/spez/comments')
subreddit_page.draw()
def test_subreddit_draw_header(subreddit_page, refresh_token, terminal):
# /r/front alias should be renamed in the header
subreddit_page.refresh_content(name='/r/front')
subreddit_page.draw()
text = 'Front Page'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
subreddit_page.refresh_content(name='/r/front/new')
subreddit_page.draw()
text = 'Front Page'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
# Log in to check the user submissions page
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
# /u/me alias should be renamed in the header
subreddit_page.refresh_content(name='/u/me')
subreddit_page.draw()
text = 'My Overview'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
subreddit_page.refresh_content(name='/u/me/new')
subreddit_page.draw()
text = 'My Overview'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
# /u/saved alias should be renamed in the header
subreddit_page.refresh_content(name='/u/me/saved')
subreddit_page.draw()
text = 'My Saved Content'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
# /u/upvoted alias should be renamed in the header
subreddit_page.refresh_content(name='/u/me/upvoted')
subreddit_page.draw()
text = 'My Upvoted Content'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
# /u/downvoted alias should be renamed in the header
subreddit_page.refresh_content(name='/u/me/downvoted')
subreddit_page.draw()
text = 'My Downvoted Content'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
# /u/hidden alias should be renamed in the header
subreddit_page.refresh_content(name='/u/me/hidden')
subreddit_page.draw()
text = 'My Hidden Content'.encode('utf-8')
terminal.stdscr.subwin.addstr.assert_any_call(0, 0, text)
def test_subreddit_frontpage_toggle(subreddit_page, terminal):
with mock.patch.object(terminal, 'prompt_input'):
terminal.prompt_input.return_value = 'aww'
subreddit_page.controller.trigger('/')
subreddit_page.handle_selected_page()
new_page = subreddit_page.selected_page
assert new_page is not None
assert new_page.content.name == '/r/aww'
new_page.controller.trigger('p')
assert new_page.toggled_subreddit == '/r/aww'
assert new_page.content.name == '/r/front'
def test_subreddit_hide_submission(subreddit_page, refresh_token):
# Log in
subreddit_page.config.refresh_token = refresh_token
subreddit_page.oauth.authorize()
    # The API won't return hidden posts in the submission listing, so the
    # first post should always have hidden set to False
data = subreddit_page.get_selected_item()
assert data['hidden'] is False
# Hide the first submission by pressing the space key
subreddit_page.controller.trigger(0x20)
assert subreddit_page.term.loader.exception is None
data = subreddit_page.get_selected_item()
assert data['hidden'] is True
# Make sure that the status was actually updated on the server side
data['object'].refresh()
assert data['object'].hidden is True
# Now undo the hide by pressing space again
subreddit_page.controller.trigger(0x20)
assert subreddit_page.term.loader.exception is None
data = subreddit_page.get_selected_item()
assert data['hidden'] is False
# Make sure that the status was actually updated on the server side
data['object'].refresh()
assert data['object'].hidden is False
def test_subreddit_handle_selected_page(subreddit_page, subscription_page):
# Method should be a no-op if selected_page is unset
subreddit_page.active = True
subreddit_page.handle_selected_page()
assert subreddit_page.selected_page is None
assert subreddit_page.active
# Open the subscription page and select a subreddit from the list of
# subscriptions
with mock.patch.object(subscription_page, 'loop', return_value=subreddit_page):
subreddit_page.selected_page = subscription_page
subreddit_page.handle_selected_page()
assert subreddit_page.selected_page == subreddit_page
assert subreddit_page.active
    # Now when handle_selected_page() is called again, the current subreddit
# should be closed so the selected page can be opened
subreddit_page.handle_selected_page()
assert subreddit_page.selected_page == subreddit_page
assert not subreddit_page.active
def test_subreddit_page_loop_pre_select(subreddit_page, submission_page):
# Set the selected_page before entering the loop(). This will cause the
# selected page to immediately open. If the selected page returns a
# different subreddit page (e.g. the user enters a subreddit into the
# prompt before they hit the `h` key), the initial loop should be closed
# immediately
subreddit_page.selected_page = submission_page
with mock.patch.object(submission_page, 'loop', return_value=subreddit_page):
selected_page = subreddit_page.loop()
assert not subreddit_page.active
assert selected_page == subreddit_page
def test_subreddit_page_loop(subreddit_page, stdscr, terminal):
stdscr.getch.return_value = ord('/')
with mock.patch.object(terminal, 'prompt_input', return_value='all'):
new_page = subreddit_page.loop()
assert new_page.content.name == '/r/all'
| {
"content_hash": "768c5170a93827d15c4241d1ec99f04f",
"timestamp": "",
"source": "github",
"line_count": 639,
"max_line_length": 106,
"avg_line_length": 36.05790297339593,
"alnum_prop": 0.6882079770843279,
"repo_name": "michael-lazar/rtv",
"id": "f6d57399ee88f6479dec45f53f8214026ecb3bc3",
"size": "23069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_subreddit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "698"
},
{
"name": "Python",
"bytes": "717852"
},
{
"name": "Roff",
"bytes": "2696"
},
{
"name": "Shell",
"bytes": "765"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('wagtailredirects', '0006_redirect_increase_max_length'),
('wagtailmenus', '0022_auto_20170913_2125'),
('wagtailforms', '0003_capitalizeverbose'),
('content', '0040_auto_20180911_1600'),
]
operations = [
migrations.RemoveField(
model_name='staffdepartment',
name='page_ptr',
),
migrations.RemoveField(
model_name='staffmember',
name='department',
),
migrations.RemoveField(
model_name='staffmember',
name='photo',
),
migrations.RemoveField(
model_name='staffsection',
name='page_ptr',
),
migrations.AlterModelOptions(
name='staffmembersnippet',
options={'verbose_name': 'Job Role', 'verbose_name_plural': 'Job Roles'},
),
migrations.AlterField(
model_name='kbcategorypage',
name='page_icon',
field=models.FileField(blank=True, default=None, null=True, upload_to=''),
),
migrations.AlterField(
model_name='staffmembersnippet',
name='job_title',
field=models.CharField(max_length=255, null=True),
),
migrations.DeleteModel(
name='StaffDepartment',
),
migrations.DeleteModel(
name='StaffMember',
),
migrations.DeleteModel(
name='StaffSection',
),
]
| {
"content_hash": "98739c581927d5b1e0dbcb9a4bf5d5e5",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 86,
"avg_line_length": 30.25925925925926,
"alnum_prop": 0.5452876376988984,
"repo_name": "sussexstudent/falmer",
"id": "e51dab265d16b7f678d8f5ec9477ac90a055d154",
"size": "1683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "falmer/content/migrations/0041_auto_20180912_1149.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2142"
},
{
"name": "Dockerfile",
"bytes": "1035"
},
{
"name": "HTML",
"bytes": "8269"
},
{
"name": "JavaScript",
"bytes": "817"
},
{
"name": "Python",
"bytes": "513792"
},
{
"name": "Shell",
"bytes": "8120"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from .version import __version__
| {
"content_hash": "68aecaa7236970531affc09f74c6d077",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 38,
"avg_line_length": 27.75,
"alnum_prop": 0.7477477477477478,
"repo_name": "blubberdiblub/eztemplate",
"id": "15c87f7fd5ae965bacc64ca7e2bf0c5d9b6e8e85",
"size": "134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eztemplate/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1207"
},
{
"name": "Python",
"bytes": "52876"
}
],
"symlink_target": ""
} |
"""
This is part of WebScout software
Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Class with common functions for Spider module
"""
import copy
import time
import hashlib
import re
import os
import shutil
from urlparse import urlparse
from urlparse import ParseResult
import pymongo
from libs.common import md5, mongo_result_to_list
from classes.Registry import Registry
from classes.models.UrlsBaseModel import UrlsBaseModel
from classes.kernel.WSCounter import WSCounter
from classes.models.UrlsModel import UrlsModel
from classes.models.HostsModel import HostsModel
class SpiderCommon(object):
""" Class with common functions for Spider module """
_link_object = {
'hash': '',
'path': '',
'query': '',
'time': 0,
'code':0,
'checked': 0,
'referer': '',
'founder': '',
'size': 0,
'getted': 0
}
ignore_regexp = None
_external_hosts = []
denied_schemas = None
@staticmethod
def make_full_new_scan(project_id):
""" Mark all links as no scanned """
Registry().get('ndb').q("UPDATE urls SET spidered = 0 WHERE project_id = {0}".format(int(project_id)))
@staticmethod
def _clear_link_obj(link):
""" Clear dict with link data from excess parts """
original = copy.copy(link)
for item in original:
if item not in SpiderCommon._link_object:
del link[item]
return link
@staticmethod
def links_checked(links):
""" Mark links as checked """
for link in links:
link = SpiderCommon._clear_link_obj(link)
link['checked'] = 1
Registry().get('mongo').spider_urls.update({'hash': link['hash']}, {'$set': link})
@staticmethod
def gen_url(link, host, protocol):
""" Generate URL by host and dict of link data """
url = protocol + '://' + host + link['path']
if link['query']:
url += '?' + link['query']
return url
@staticmethod
def prepare_links_for_insert(links, url, site):
""" Get links dicts and prepare it to insert in MongoDB """
links_to_insert = []
for link in links:
if not link:
continue
link = urlparse(link)
if not link.scheme and \
not link.netloc and \
not link.path and \
not link.query:
continue
if link.netloc \
and link.netloc != site \
and 'www.' + link.netloc != site \
and link.netloc != 'www.' + site:
SpiderCommon._external_hosts.append(link.netloc)
continue
link = SpiderCommon.clear_link(link)
link = SpiderCommon.build_path(link, url.path)
link = SpiderCommon.clear_link(link)
links_to_insert.append(link)
separated_links = []
for link in links_to_insert:
paths = link.path.split("/")
while len(paths) != 1:
del paths[-1]
separated_links.append(
ParseResult(
scheme='',
netloc='',
path="/".join(paths) + '/',
params='',
query='',
fragment=''
)
)
return links_to_insert + separated_links
@staticmethod
def get_denied_schemas():
""" Get list of denied schemas """
if SpiderCommon.denied_schemas is None:
denied_schemas = Registry().get('config')['spider']['denied_schemes'].split(',')
for dschema in denied_schemas:
index = denied_schemas.index(dschema)
denied_schemas[index] = dschema.encode('utf8')
SpiderCommon.denied_schemas = list(map(str.strip, denied_schemas))
return SpiderCommon.denied_schemas
@staticmethod
def get_url_hash(path, query):
""" Build md5-hash for url """
path = path.strip()
query = query.strip()
url = str(path + query).decode('utf-8', errors='ignore')
#url = url.encode()
return hashlib.md5(url.encode('utf-8')).hexdigest()
@staticmethod
def insert_links(links, referer, site):
""" Put links data in MongoDB """
links = SpiderCommon.prepare_links_for_insert(links, urlparse(referer), site)
if not len(links):
return
denied_schemas = SpiderCommon.denied_schemas
for link in links:
if 'scheme' in link and link['scheme'] in denied_schemas:
continue
insert = {
'hash': SpiderCommon.get_url_hash(link.path, link.query),
'path': link.path.strip(),
'query': link.query.strip(),
'referer': referer,
'founder': 'spider',
'checked': 0 if SpiderCommon._link_allowed(link) else 1,
'getted': 0,
'code': 0,
'time': 0,
'size': 0
}
try:
Registry().get('mongo').spider_urls.insert(insert)
except pymongo.errors.DuplicateKeyError:
pass
except BaseException as e:
Registry().get('logger').log(
"Can`t insert link " + insert['path'] + " " + insert['query'] + ") in db. "
"May be it have non-utf8 symbols or somethink else. Exception message:"
)
Registry().get('logger').ex(e)
@staticmethod
def _link_allowed(link):
""" Are link match to allow_regexp ? """
return Registry().get('allow_regexp').search(link.path) if link.path[len(link.path)-5:].count('.') else True
@staticmethod
def build_path(link, url_path):
""" Build link with full path (for relatively links) """
if link.path[0:1] == '/':
return link
path = link.path
path = SpiderCommon.del_file_from_path(url_path) + "/" + path
return ParseResult(
scheme=link.scheme,
netloc=link.netloc,
path=path,
params=link.params,
query=link.query,
fragment=link.fragment
)
@staticmethod
def del_file_from_path(path):
""" Method delete file from path """
if path.find("/") == -1:
return ""
path = path.split("/")
if path[-1].find("."):
del path[-1]
if len(path) == 1 and not path[0]:
return "/"
return "/".join(path)
@staticmethod
def clear_link(link):
""" Clear link from some trash """
path = link.path
while path and (path.find("\\") > -1 or path.find("//") > -1 or path.find("/./") > -1):
path = path.replace("\\", "/")
path = path.replace("//", "/")
path = path.replace("/./", "/")
        query = link.query.replace('&amp;', '&') if link.query else ""
        back_regex = re.compile(r"(.*|)/(.*)/\.\./")  # property
reg_res = back_regex.findall(path)
while reg_res and len(reg_res[0]) == 2:
path = path.replace(reg_res[0][1] + "/../", "")
reg_res = back_regex.findall(path)
return ParseResult(
scheme=link.scheme,
netloc=link.netloc,
path=path,
params=link.params,
query=query,
fragment=link.fragment
)
# ==============================================================
_header_object = {'url': '', 'header': '', 'value': ''}
_pages = []
_sitemap_name = ''
def get_link_data_by_hash(self, _hash):
""" Return link data from MongoDB by hash """
return Registry().get('mongo').spider_urls.find_one({'hash': _hash})
@staticmethod
def clear_old_data(host):
""" Clear data from old scans of current host """
Registry().get('mongo').spider_urls.drop()
if os.path.exists(Registry().get('data_path') + host):
shutil.rmtree(Registry().get('data_path') + host)
def _get_pages_list(self, _map):
""" Get list of pages with scan data of current host """
expr = re.compile('^[a-z0-9]{32}$')
if not len(self._pages):
for page in os.listdir(Registry().get('data_path') + _map):
if expr.match(page):
self._pages.append(page)
return self._pages
@staticmethod
def prepare_first_pages(host):
""" Prepare link on first page in MongoDB. Add root url if urls for this host not exists. """
pid = Registry().get('pData')['id']
coll = Registry().get('mongo').spider_urls
coll.drop()
Urls = UrlsModel()
urls = Urls.list_by_host_name_for_spider(pid, host)
if not len(urls):
Registry().get('logger').log("Spider: Root URL was added automaticaly")
Urls.add(
pid, HostsModel().get_id_by_name(pid, host), '/', who_add='spider'
)
urls = Urls.list_by_host_name_for_spider(pid, host)
for url in urls:
url = urlparse(url['url'])
data = {
'hash': md5(str(url.path + url.query)),
'path': url.path,
'query': url.query,
'time': 0,
'code':0,
'checked': 0,
'getted' : 0,
'referer': '',
'size': 0,
'founder': 'spider'
}
coll.insert(data)
coll.create_index([('hash', 1)], unique=True, dropDups=True)
coll.create_index([('checked', 1)])
@staticmethod
def links_in_spider_base(pid, host):
""" Put found links in MySQL """
links_per_time_limit = 50
c = WSCounter(1, 60, int(Registry().get('mongo').spider_urls.count()/links_per_time_limit))
Urls = UrlsModel()
host_id = HostsModel().get_id_by_name(pid, host)
urls_add = []
skip = 0
while True:
links = mongo_result_to_list(
Registry().get('mongo').spider_urls.find().skip(skip).limit(links_per_time_limit)
)
for link in links:
url = link['path'] + '?' + link['query'] if len(link['query']) else link['path']
urls_add.append({
'url': url,
'referer': link['referer'],
'response_code': link['code'],
'response_time': link['time'],
'size': link['size'],
'who_add': 'spider',
'spidered': link['checked']
})
Urls.add_mass(pid, host_id, urls_add)
urls_add = []
to_update = {
'spidered': [],
'code': [],
'time': [],
'size': []
}
for link in links:
url = link['path'] + '?' + link['query'] if len(link['query']) else link['path']
if link['checked']:
to_update['spidered'].append({'url': url, 'value': 1})
to_update['code'].append({'url': url, 'value': link['code']})
to_update['time'].append({'url': url, 'value': link['time']})
to_update['size'].append({'url': url, 'value': link['size']})
Urls.update_url_field_mass(pid, host, 'spidered', to_update['spidered'])
Urls.update_url_field_mass(pid, host, 'response_code', to_update['code'])
Urls.update_url_field_mass(pid, host, 'response_time', to_update['time'])
Urls.update_url_field_mass(pid, host, 'size', to_update['size'])
skip += len(links)
c.up()
if len(links) < links_per_time_limit:
break
@staticmethod
def links_in_urls_base(pid, host):
""" Put links in url_base table (MySQL) for site tree build """
links_per_time_limit = 50
c = WSCounter(1, 60, Registry().get('mongo').spider_urls.count()/links_per_time_limit)
UrlsBase = UrlsBaseModel()
host_id = HostsModel().get_id_by_name(pid, host)
skip = 0
while True:
links = mongo_result_to_list(
Registry().get('mongo').spider_urls.find().skip(skip).limit(links_per_time_limit)
)
for link in links:
url = link['path'] + '?' + link['query'] if len(link['query']) else link['path']
UrlsBase.add_url(
host_id,
url
)
skip += len(links)
c.up()
if len(links) < links_per_time_limit:
break
@staticmethod
def links_in_database(pid, host):
""" Method for insert all found links in MySQL in work end """
Registry().get('logger').log(
"\nInsert links in DB..." + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
)
SpiderCommon.links_in_spider_base(pid, host)
Registry().get('logger').log(
"\nInsert links in DB (base)..." + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
)
SpiderCommon.links_in_urls_base(pid, host)
#print "\nMysql Done " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
| {
"content_hash": "d5fcefde9c36ac70435741d162a88c23",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 116,
"avg_line_length": 33.915841584158414,
"alnum_prop": 0.5064224200846592,
"repo_name": "hack4sec/ws-cli",
"id": "7726c5045bf620123c880b07901423efb810124e",
"size": "13734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classes/SpiderCommon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "680434"
}
],
"symlink_target": ""
} |
def setup_module(module):
module.TestStateFullThing.classcount = 0
class TestStateFullThing(object):
def setup_class(cls):
cls.classcount += 1
def teardown_class(cls):
cls.classcount -= 1
def setup_method(self, method):
self.id = eval(method.__name__[5:])
def test_42(self):
assert self.classcount == 1
assert self.id == 42
def test_23(self):
assert self.classcount == 1
assert self.id == 23
def teardown_module(module):
assert module.TestStateFullThing.classcount == 0
""" For this example the control flow happens as follows::
import test_setup_flow_example
setup_module(test_setup_flow_example)
setup_class(TestStateFullThing)
instance = TestStateFullThing()
setup_method(instance, instance.test_42)
instance.test_42()
setup_method(instance, instance.test_23)
instance.test_23()
teardown_class(TestStateFullThing)
teardown_module(test_setup_flow_example)
Note that ``setup_class(TestStateFullThing)`` is called and not
``TestStateFullThing.setup_class()`` which would require you
to insert ``setup_class = classmethod(setup_class)`` to make
your setup function callable.
"""
| {
"content_hash": "f3b6f5db6ee405fa0c97f2f904296f07",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 63,
"avg_line_length": 29.952380952380953,
"alnum_prop": 0.6629570747217806,
"repo_name": "flub/pytest",
"id": "100effa499f3e4ba8e42c95f50ec11288ca5c0f3",
"size": "1258",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "doc/en/example/assertion/test_setup_flow_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "568"
},
{
"name": "Python",
"bytes": "1424649"
}
],
"symlink_target": ""
} |
"""
:mod:`sense` -- Word Sense Annotation
------------------------------------------------
See:
- :class:`on.corpora.sense.on_sense`
- :class:`on.corpora.sense.sense_bank`
- :class:`on.corpora.sense.on_sense_type`
Word sense annotation consists of specifying which sense a word is
being used in. In the ``.sense`` file format, a word sense would
be annotated as:
.. code-block:: bash
bn/cnn/00/cnn_0001@all@cnn@bn@en@on 6 9 fire-n ?,? 4
This tells us that word 9 of sentence 6 in broadcast news document
cnn_0001 has the lemma "fire", is a noun, and has sense 4. The
sense numbers, such as 4, are defined in the sense inventory files.
Looking up sense 4 of fire-n in
``data/english/metadata/sense-inventories/fire-n.xml``, we see:
.. code-block:: xml
<sense n="4" type="Event" name="the discharge of a gun" group="1">
<commentary>
FIRE[+event][+physical][+discharge][+gun]
The event of a gun going off.
</commentary>
<examples>
Hold your fire until you see the whites of their eyes.
He ran straight into enemy fire.
The marines came under heavy fire when they stormed the hill.
</examples>
<mappings><wn version="2.1">2</wn><omega></omega><pb></pb></mappings>
<SENSE_META clarity=""/>
</sense>
Just knowing that word 9 of sentence 6 in some document has some
sense is not very useful on its own. We need to match this data
with the document it was annotated against. The python code can do
this for you. First, load the data you're interested in, to memory
with :mod:`on.corpora.tools.load_to_memory`. Then we can
iterate over all the leaves to look for cases where a_leaf was
tagged with a noun sense "fire":
.. code-block:: python
fire_n_leaves = []
for a_subcorpus in a_ontonotes:
for a_tree_document in a_subcorpus["tree"]:
for a_tree in a_tree_document:
for a_leaf in a_tree.leaves():
if a_leaf.on_sense: # whether the leaf is sense tagged
if a_leaf.on_sense.lemma == "fire" and a_leaf.on_sense.pos == "n":
fire_n_leaves.append(a_leaf)
Now say we want to print the sentences for each tagged example of
"fire-n":
.. code-block:: python
# first we collect all the sentences for each sense of fire
sense_to_sentences = defaultdict(list)
for a_leaf in fire_n_leaves:
a_sense = a_leaf.on_sense.sense
a_sentence = a_leaf.get_root().get_word_string()
sense_to_sentences[a_sense].append(a_sentence)
# then we print them
for a_sense, sentences in sense_to_sentences.iteritems():
a_sense_name = on_sense_type.get_name("fire", "n", a_sense)
print "Sense %s: %s" % (a_sense, a_sense_name)
for a_sentence in sentences:
print " ", a_sentence
print ""
Correspondences:
=============================== ============================== ====================================================================================
**Database Tables** **Python Objects** **File Elements**
=============================== ============================== ====================================================================================
``sense_bank`` :class:`sense_bank` All ``.sense`` files in a :class:`on.corpora.subcorpus`
None :class:`sense_tagged_document` A single ``.sense`` file
``on_sense`` :class:`on_sense` A line in a ``.sense`` file
None :class:`sense_inventory` A sense inventory xml file (SI)
``on_sense_type`` :class:`on_sense_type` Fields four and six of a sense line and the ``inventory/sense`` element of a SI
``on_sense_lemma_type`` :class:`on_sense_lemma_type` The ``inventory/ita`` element of a SI
``wn_sense_type`` :class:`wn_sense_type` The ``inventory/sense/mappings/wn`` element of a SI
``pb_sense_type`` :class:`pb_sense_type` The ``inventory/sense/mappings/pb`` element of a SI
``tree`` :class:`on.corpora.tree.tree` The first three fields of a sense line
=============================== ============================== ====================================================================================
Classes:
.. autoclass:: sense_bank
.. autoclass:: sense_tagged_document
.. autoclass:: on_sense
.. autoclass:: on_sense_type
.. autoclass:: on_sense_lemma_type
.. autoclass:: sense_inventory
.. autoclass:: pb_sense_type
.. autoclass:: wn_sense_type
"""
#---- standard python imports ----#
from __future__ import with_statement
import operator
import os.path
try:
import MySQLdb
except ImportError:
pass
import string
import sys
import re
import exceptions
import codecs
#---- xml specific imports ----#
try:
    import xml.etree.cElementTree as ElementTree
except ImportError:
    from xml.etree import ElementTree
#---- custom package imports ----#
import on
import on.common.log
import on.common.util
import on.corpora
import on.corpora.tree
import on.corpora.proposition
import on.corpora.coreference
import on.corpora.name
from collections import defaultdict
from on.common.util import is_db_ref, is_not_loaded, insert_ignoring_dups, esc
from on.corpora import abstract_bank
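#--------------------------------------------------------------------------------#
# illustrative sketch only (not part of the original module): splitting one
# adjudicated six-field ``.sense`` line of the form shown in the module
# docstring into named fields. the full parser, with all of its edge cases,
# is sense_tagged_document.__init__ further below; the helper name here is
# hypothetical.
#--------------------------------------------------------------------------------#
def _split_sense_line_sketch(enc_sense):
    """ split 'doc tree_index word_index lemma-pos ann1,ann2 adj_sense' """
    document_id, tree_index, word_index, lemma_pos, senses, adj_sense = enc_sense.split()
    lemma, pos = lemma_pos.rsplit("-", 1)
    ann_1_sense, ann_2_sense = senses.split(",")
    return (document_id, int(tree_index), int(word_index),
            lemma, pos, ann_1_sense, ann_2_sense, adj_sense)
# e.g. _split_sense_line_sketch("bn/cnn/00/cnn_0001@all@cnn@bn@en@on 6 9 fire-n ?,? 4")
# -> ("bn/cnn/00/cnn_0001@all@cnn@bn@en@on", 6, 9, "fire", "n", "?", "?", "4")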
#--------------------------------------------------------------------------------#
# this is a class of objects that contain just enough information to
# locate the actual object and, if required, change its properties --
# such as the wordnet and frame sense mappings. these are not really
# class variables, but we also don't want to replicate them all over
# the place, so we need to be able to manipulate that information
# through back-pointers into the pool of actual objects
#--------------------------------------------------------------------------------#
class on_sense(object):
""" A sense annotation; a line in a ``.sense`` file.
Contained by: :class:`sense_tagged_document`
Attributes:
.. attribute:: lemma
Together with the :attr:`pos` , a reference to a
:class:`sense_inventory` .
.. attribute:: pos
Either ``n`` or ``v``. Indicates whether this leaf was annotated
by people who primarily tagged nouns or verbs. This should
agree with :meth:`on.corpora.tree.tree.is_noun` and
:meth:`~on.corpora.tree.tree.is_verb` methods for English and Arabic,
but not Chinese.
.. attribute:: sense
Which sense in the :class:`sense_inventory` the annotators gave
this leaf.
"""
def __init__(self, document_id, tree_index, word_index, lemma, pos, ann_1_sense,
ann_2_sense, adj_sense, sense, adjudicated_flag, a_cursor=None, indexing="word"):
self.lemma = lemma
self.pos = pos
self.ann_1_sense = ann_1_sense
self.ann_2_sense = ann_2_sense
self.adj_sense = adj_sense
self.adjudicated_flag = adjudicated_flag
self.enc_sense = None
self.valid = True
#--------------------------------------------------------------#
# this is the final sense associated with this lemma. it is
# either the adjudicated sense, or the sense agreed upon by
# both annotators. if it is None, that means this is an
# unadjudicated disagreement, and should probably not ever be
# a part of the corpus.
#--------------------------------------------------------------#
self.sense = sense
self._word_index = None
self._token_index = None
self._tree_index = tree_index
self._document_id = document_id
self._leaf = None # none until successful enrichment
if(self.sense == None):
on.common.log.error("a None sense should not be added to the corpus")
self.sense_list = self.sense.split("&&") #---- we will always consider a sense to be a part of multiple senses
if indexing == "word" or (indexing == "ntoken_vword" and pos == 'v') or (indexing == "nword_vtoken" and pos == 'n'):
self.word_index = word_index
elif indexing == "token" or (indexing == "ntoken_vword" and pos == 'n') or (indexing == "nword_vtoken" and pos == 'v'):
self.token_index = word_index
else:
raise Exception("sense indexing must be 'word', 'token', 'ntoken_vword', or 'ntoken_vword'. Given %s" % lemma_index)
def _get_word_index(self):
if self.leaf:
return self.leaf.get_word_index()
return self._word_index
def _set_word_index(self, idx):
if idx is not None:
idx = int(idx)
if self.leaf:
raise Exception("Cannot set on_sense's word index after enrichment. Set on_sense.leaf instead.")
elif self.token_index is not None:
raise Exception("Tried to set on_sense.word_index when on_sense.token_index was already set.")
self._word_index = idx
word_index = property(_get_word_index, _set_word_index)
def _get_token_index(self):
if self.leaf:
return self.leaf.get_token_index()
return self._token_index
def _set_token_index(self, idx):
if idx is not None:
idx = int(idx)
if self.leaf:
raise Exception("Cannot set on_sense's token index after enrichment. Set on_sense.leaf instead.")
elif self.word_index is not None:
raise Exception("Tried to set on_sense.token_index when on_sense.word_index was already set.")
self._token_index = idx
token_index = property(_get_token_index, _set_token_index)
def _get_tree_index(self):
if self.leaf:
return self.leaf.get_sentence_index()
return self._tree_index
def _set_tree_index(self, idx):
if self.leaf:
raise Exception("Cannot set on_sense's tree index after enrichment. Set on_sense.leaf instead.")
self._tree_index = idx
tree_index = property(_get_tree_index, _set_tree_index)
def _get_leaf(self):
return self._leaf
def _set_leaf(self, new_leaf):
if new_leaf is None and self.leaf is not None:
self._token_index = None
self._word_index = self.word_index
self._tree_index = self.tree_index
self._document_id = self.document_id
if new_leaf is not None:
new_leaf.on_sense = self
if self.leaf is not None:
self.leaf.on_sense = None
self._leaf = new_leaf
leaf = property(_get_leaf, _set_leaf)
def _get_document_id(self):
if self.leaf:
return self.leaf.get_document_id()
return self._document_id
def _set_document_id(self, new_document_id):
if self.leaf:
raise Exception("Cannot set on_sense's document_id after enrichment. Set on_sense.leaf instead.")
self._document_id = new_document_id
document_id = property(_get_document_id, _set_document_id)
@property
def id(self):
return "%s.%s@%s@%s@%s@%s" % (self.lemma, self.sense, self.pos, self.word_index, self.tree_index, self.document_id)
def sense_annotation(self, preserve_ita=False):
""" return all sense components after the pointer """
if not preserve_ita or not self.enc_sense:
lp = "%s-%s" % (self.lemma, self.pos)
if self.adjudicated_flag:
return "%s ?,? %s" % (lp, self.sense)
elif self.ann_2_sense != "?":
return "%s %s,%s" % (lp, self.sense, self.sense)
else:
return "%s %s" % (lp, self.sense)
return " ".join(self.enc_sense.split()[3:])
@property
def primary_index(self):
if self.word_index is not None:
return self.word_index
return self.token_index
def enrich_tree(self, a_tree):
try:
if self.word_index is not None:
self.leaf = a_tree.get_leaf_by_word_index(self.word_index)
elif self.token_index is not None:
self.leaf = a_tree.get_leaf_by_token_index(self.token_index)
else:
raise KeyError("No index available")
except KeyError, e:
self.valid = False
def __repr__(self):
return "<on_sense object: id: %s; tree_index: %s; index: %s; lemma: %s; pos: %s; ann_1_sense: %s; ann_2_sense: %s; adj_sense: %s sense: %s>" % (
self.id, self.tree_index, self.primary_index, self.lemma, self.pos, self.ann_1_sense, self.ann_2_sense, self.adj_sense, self.sense)
sql_table_name = "on_sense"
sql_create_statement = \
"""
create table on_sense
(
id varchar(255) not null,
lemma varchar(255),
pos varchar(255),
ann_1_sense varchar(255),
ann_2_sense varchar(255),
adj_sense varchar(255),
sense varchar(255),
adjudicated_flag int,
word_index int,
tree_index int,
document_id varchar(255),
foreign key (document_id) references document.id
)
default character set utf8;
"""
sql_insert_statement = \
"""insert into on_sense
(
id,
lemma,
pos,
ann_1_sense,
ann_2_sense,
adj_sense,
sense,
adjudicated_flag,
word_index,
tree_index,
document_id
) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
"""
def write_to_db(self, cursor):
data = []
for a_sense in self.sense_list:
if self.word_index is None:
if self.token_index is not None:
raise Exception("Cannot write token indexed sense files to db until after enrichment")
raise Exception("Cannot write senses to db if they are not indexed")
a_tuple = (self.id, \
self.lemma, \
self.pos, \
"?", \
"?", \
"?", \
a_sense, \
self.adjudicated_flag, \
self.word_index, \
self.tree_index, \
self.document_id)
data.append(a_tuple)
#---- insert the value in the table ----#
cursor.executemany("%s" % (self.__class__.sql_insert_statement), data)
def pb_mappings(self, a_sense_bank):
""" -> [(pb_lemma, pb_sense_num), ...] or None if no mapping """
return a_sense_bank.pb_mappings(self.lemma, self.pos, self.sense)
class wn_sense_type(on.corpora.abstract_open_type_table):
""" a wordnet sense, for mapping ontonotes senses to wordnet senses
Contained by: :class:`on_sense_type`
"""
type_hash = defaultdict(int)
wn_sense_num_hash = {}
wn_version_hash = {}
pos_hash = {}
lemma_hash = {}
def __init__(self, lemma, wn_sense_num, pos, wn_version):
self.lemma = lemma #---- this is the lemma for which the sense is defined
self.wn_sense_num = wn_sense_num #---- the wn sense number
self.wn_version = wn_version
self.pos = pos
on.corpora.abstract_open_type_table.__init__(self, "%s.%s@%s@%s" % (self.lemma, self.wn_sense_num, self.pos, self.wn_version))
self.wn_sense_num_hash[self.id] = wn_sense_num
self.wn_version_hash[self.id] = wn_version
self.pos_hash[self.id] = pos
self.lemma_hash[self.id] = lemma
def __repr__(self):
return "<wn_sense_type object: lemma: %s; wn_sense_num: %s; pos: %s; wn_version: %s>" % (self.lemma, self.wn_sense_num, self.pos, self.wn_version)
sql_table_name = "wn_sense_type"
sql_create_statement = \
"""
create table wn_sense_type
(
id varchar(255) not null collate utf8_bin primary key,
lemma varchar(255) not null,
wn_sense varchar(255) not null,
pos varchar(255) not null,
wn_version varchar(255) not null
)
default character set utf8;
"""
sql_insert_statement = \
"""insert into wn_sense_type
(
id,
lemma,
wn_sense,
pos,
wn_version
) values (%s, %s, %s, %s, %s)
"""
@classmethod
def write_to_db(cls, cursor):
for a_type in cls.type_hash.keys():
insert_ignoring_dups(cls, cursor, a_type, cls.lemma_hash[a_type],
cls.wn_sense_num_hash[a_type],
cls.pos_hash[a_type], cls.wn_version_hash[a_type])
class pb_sense_type(on.corpora.abstract_open_type_table):
""" A frame sense
Contained by: :class:`on.corpora.proposition.frame_set`, :class:`on_sense_type`
"""
type_hash = defaultdict(int)
def __init__(self, lemma, num):
self.lemma = lemma #---- this is the lemma for which the sense is defined
self.num = num #---- this is the frame sense number
on.corpora.abstract_open_type_table.__init__(self, "%s.%s" % (self.lemma, self.num))
def __repr__(self):
return "<pb_sense_type object: lemma: %s; num: %s>" % (self.lemma, self.num)
sql_table_name = "pb_sense_type"
sql_create_statement = \
"""
create table pb_sense_type
(
id varchar(255) not null collate utf8_bin primary key
)
default character set utf8;
"""
sql_insert_statement = \
"""insert into pb_sense_type
(
id
) values (%s)
"""
class on_sense_type(on.corpora.abstract_open_type_table):
""" Information to interpret :class:`on_sense` annotations
Contained by: :class:`sense_inventory`
Attributes:
.. attribute:: lemma
.. attribute:: sense_num
.. attribute:: pos
Either 'n' or 'v', depending on whether this is a noun sense or a verb sense.
.. attribute:: wn_sense_types
list of :class:`wn_sense_type` instances
.. attribute:: pb_sense_types
list of :class:`pb_sense_type` instances (frame senses)
.. attribute:: sense_type
the type of the sense, such as 'Event'
Methods:
.. automethod:: get_name
"""
type_hash = defaultdict(int)
eventive_hash = {}
nominalization_hash = {}
name_hash = {}
sense_number_hash = {}
sense_pos_hash = {}
sense_lemma_hash = {}
def __init__(self, lemma, pos, group, sense_num, name, sense_type):
self.lemma = lemma #---- this is the lemma for which this sense is defined
self.pos = pos #---- pos of the lemma
self.group = group
self.sense_num = sense_num #---- this is the sense number or id
self.sense_type = sense_type
self.name = name
if(self.lemma == None or
self.pos == None or
self.group == None or
self.sense_num == None or
self.sense_type == None or
self.name == None):
on.common.log.warning("problem with sense definition for %s-%s" % (self.lemma, self.pos))
self.wn_sense_types = [] #---- a list of wn sense objects that form this sense
self.pb_sense_types = [] #---- a list of frame sense objects that form this sense
self.commentary = "" #---- a commentary
self.examples = [] #---- a list of examples
on.corpora.abstract_open_type_table.__init__(self, "%s@%s@%s" % (self.lemma, self.sense_num, self.pos))
self.eventive_flag = False
if(self.sense_type == "Event"):
self.eventive_flag = True
self.eventive_hash[self.id] = 1
else:
self.eventive_hash[self.id] = None
self.nominalization_hash[self.id] = None
self.sense_number_hash[self.id] = sense_num
self.sense_pos_hash[self.id] = pos
self.sense_lemma_hash[self.id] = lemma
self.name_hash[self.id] = name
@classmethod
def get_name(cls, a_lemma, a_pos, a_sense):
""" given a lemma, pos, and sense number, return the name from the sense inventory """
candidate_ids = [a_id for (a_id, lemma) in cls.sense_lemma_hash.iteritems()
if (lemma == a_lemma and cls.sense_pos_hash[a_id] == a_pos and \
cls.sense_number_hash[a_id] == a_sense)]
if not candidate_ids:
return None
if len(candidate_ids) > 1:
on.common.log.report("sense", "duplicate on_sense_types",
lemma=a_lemma, pos=a_pos, sense=a_sense)
return cls.name_hash[candidate_ids[0]]
def __repr__(self):
wn_sense_type_string = ""
for wn_sense_type in self.wn_sense_types:
wn_sense_type_string = wn_sense_type_string + "\n\t\t" + "%s" % (wn_sense_type)
if(wn_sense_type_string == ""):
wn_sense_type_string = "<None>"
pb_sense_type_string = ""
for pb_sense_type in self.pb_sense_types:
pb_sense_type_string = pb_sense_type_string + "\n\t\t" + "%s" % (pb_sense_type)
if(pb_sense_type_string == ""):
pb_sense_type_string = "<None>"
return "<on_sense_type object:\n\tlemma: %s;\n\tpos: %s;\n\tgroup: %s;\n\tsense_num: %s;\n\tname: %s;\n\tsense_type: %s;\n\twn_sense_types: %s;\n\tpb_sense_types: %s;\n\tcommentary: %s....;\n\texamples: %s....>" % (self.lemma, self.pos, self.group, self.sense_num, self.name, self.sense_type, wn_sense_type_string, pb_sense_type_string, self.commentary.strip()[0:10], str(self.examples).strip()[0:10])
sql_table_name = "on_sense_type"
sql_create_statement = \
"""
create table on_sense_type
(
id varchar(255) not null collate utf8_bin primary key,
lemma varchar(255) not null,
sense_num varchar(255) not null,
pos varchar(255) not null,
eventive_flag varchar(255) default null,
nominalization_flag varchar(255) default null,
name text
)
default character set utf8;
"""
wn_sql_table_name = "on_sense_type_wn_sense_type"
wn_sql_create_statement = \
"""
create table on_sense_type_wn_sense_type
(
on_sense_type varchar(255) not null,
wn_sense_type varchar(255),
unique key(on_sense_type),
foreign key (on_sense_type) references on_sense_type.id,
foreign key (wn_sense_type) references wn_sense_type.id
)
default character set utf8;
"""
pb_sql_table_name = "on_sense_type_pb_sense_type"
pb_sql_create_statement = \
"""
create table on_sense_type_pb_sense_type
(
on_sense_type varchar(127) not null,
pb_sense_type varchar(127),
unique key(on_sense_type, pb_sense_type),
foreign key (on_sense_type) references on_sense_type.id,
foreign key (pb_sense_type) references pb_sense_type.id
)
default character set utf8;
"""
sql_insert_statement = \
"""insert into on_sense_type
(
id,
lemma,
sense_num,
pos,
eventive_flag,
nominalization_flag,
name
) values (%s, %s, %s, %s, %s, %s, %s)
"""
wn_sql_insert_statement = \
"""insert into on_sense_type_wn_sense_type
(
on_sense_type,
wn_sense_type
) values (%s, %s)
"""
pb_sql_insert_statement = \
"""insert into on_sense_type_pb_sense_type
(
on_sense_type,
pb_sense_type
) values (%s, %s)
"""
@classmethod
def write_to_db(cls, cursor):
for a_type in cls.type_hash.keys():
insert_ignoring_dups(cls, cursor,
a_type, cls.sense_lemma_hash[a_type],
cls.sense_number_hash[a_type],
cls.sense_pos_hash[a_type],
cls.eventive_hash[a_type],
cls.nominalization_hash[a_type],
cls.name_hash[a_type])
def write_instance_to_db(self, b_key, cursor):
for a_pb_sense_type in self.pb_sense_types:
insert_ignoring_dups(self.__class__.pb_sql_insert_statement, cursor, b_key, a_pb_sense_type.id)
for a_wn_sense_type in self.wn_sense_types:
insert_ignoring_dups(self.__class__.wn_sql_insert_statement, cursor,
b_key, a_wn_sense_type.id)
#a_wn_sense_type.write_to_db(cursor)
class on_sense_lemma_type(on.corpora.abstract_open_type_table):
""" computes and holds ita statistics for a lemma/pos combination
"""
type_hash = defaultdict(int)
count = defaultdict(int)
ann_1_2_agreement = defaultdict(int)
lemma_pos_hash = {}
def __init__(self, a_on_sense):
lemma = a_on_sense.lemma
pos = a_on_sense.pos
on.corpora.abstract_open_type_table.__init__(self, "%s@%s" % (lemma, pos))
if a_on_sense.ann_1_sense != '?' and a_on_sense.ann_2_sense != '?':
""" if we have the individual annotator information, update the other counts -- otherwise, make no changes """
ann_1_2_agree = False
ann_1_2_agree = (a_on_sense.ann_1_sense == a_on_sense.ann_2_sense == a_on_sense.sense)
if ann_1_2_agree:
self.__class__.ann_1_2_agreement[self.id] += 1
self.__class__.count[self.id] += 1
self.__class__.lemma_pos_hash[self.id] = lemma, pos
@classmethod
def write_to_db(cls, cursor):
for id, (lemma, pos) in cls.lemma_pos_hash.iteritems():
insert_ignoring_dups(cls, cursor, id, lemma, pos, 0, 0)
for counter, c_name in [[cls.count, "count"],
[cls.ann_1_2_agreement, "ann_1_2_agreement"]]:
for id, val in counter.iteritems():
cursor.execute("update on_sense_lemma_type set %s=%s+%s where id='%s'" % esc(
c_name, c_name, val, id))
@classmethod
def write_to_db_quick(cls, lemma, pos, ita_dict, cursor):
id = lemma + "@" + pos
cursor.execute("select * from on_sense_lemma_type where id='%s'" % id)
if cursor.fetchall():
on.common.log.report("sense", "ita already in db", lemma=lemma, pos=pos)
return
cursor.executemany("%s" % (cls.sql_insert_statement), [
esc(id, lemma, pos,
ita_dict["count"],
ita_dict["ann_1_2_agreement"])])
@staticmethod
def update_sense_inventory_detail(a_sense_inventory_fname, count, agreement):
count = int(count)
agreement = float(agreement)
if agreement >= 1:
agreement = agreement/count
ita_attributes = 'count="%s" ann_1_2_agreement="%s"' % (count, agreement)
with codecs.open(a_sense_inventory_fname, "r", "utf8") as inf:
in_lines = inf.readlines()
out_lines = []
set_ita = False
for line in in_lines:
if "<ita " in line:
if set_ita:
on.common.log.report("sense", "duplicate sense inventory ita field", fname=a_sense_inventory_fname, line=line)
return
before, ita_and_after = line.split("<ita")
ita, after = ita_and_after.split(" />", 1)
line = "%s<ita %s />%s" % (before, ita_attributes, after)
set_ita = True
if "</inventory>" in line and not set_ita:
before, after = line.split("</inventory>")
out_lines.append(before + "\n")
out_lines.append("<ita %s />\n" % ita_attributes)
line = "</inventory>" + after
set_ita = True
out_lines.append(line)
if not set_ita:
on.common.log.report("sense", "no close inventory tag in file; no ita info stored", fname=a_sense_inventory_fname)
with codecs.open(a_sense_inventory_fname, "w", "utf8") as outf:
outf.writelines(out_lines)
@staticmethod
def update_sense_inventory(a_sense_inventory_fname, a_cursor):
""" add sections to the sense_inventory file that correspond
to the fields in this table, or update them if already
present.
The general form is:
.. code-block:: xml
<inventory lemma="foo">
...
<ita (fieldname="value")* />
</inventory>
That is, we're adding an ita element at the very end of the
inventory which has an attribute for each numeric field
in this table. With the current set of fields, this would
be:
.. code-block:: xml
<ita count="N_1" ann_1_2_agreement="N_2" />
"""
lemma, pos = sense_inventory.extract_lemma_pos(a_sense_inventory_fname)
if not lemma or not pos:
return
try:
a_cursor.execute("""SELECT count, ann_1_2_agreement FROM on_sense_lemma_type WHERE lemma = '%s' AND pos = '%s';""" % esc(lemma, pos))
except Exception:
raise
rows = a_cursor.fetchall()
if not rows:
on.common.log.report("sense", "unused sense inventory file", lemma=lemma, pos=pos, fname=a_sense_inventory_fname)
return
if len(rows) != 1:
raise Exception("Multiple rows found for lemma %s with pos %s in on_sense_lemma_type" % (lemma, pos))
a_row = rows[0]
on_sense_lemma_type.update_sense_inventory_detail(a_sense_inventory_fname, a_row["count"], a_row["ann_1_2_agreement"])
sql_table_name = "on_sense_lemma_type"
sql_create_statement = \
"""
create table on_sense_lemma_type
(
id varchar(255) not null collate utf8_bin primary key,
lemma varchar(255) not null,
pos varchar(255) not null,
count int not null,
ann_1_2_agreement float not null
)
default character set utf8;
"""
sql_insert_statement = \
"""insert into on_sense_lemma_type
(
id,
lemma,
pos,
count,
ann_1_2_agreement
) values (%s, %s, %s, %s, %s)
"""
class sense_inventory:
""" Contains: :class:`on_sense_type` """
IS_VN_REF_RE = re.compile("[a-z_]+(-[0-9]+(\.[0-9]+)*)+") # things that should be in the <vn> element
def __init__(self, a_fname, a_xml_string, a_lang_id, a_frame_set_hash={}):
self.sense_hash = {}
self.lemma = None
self.pos = None
self.lang_id = a_lang_id
def complain(reason, *args):
pos = "n" if a_fname.endswith("-n.xml") else "v"
def drop(reason, *args):
complain(reason, *args)
raise Exception(reason)
#---- create a DOM object for the xml string ----#
try:
a_inventory_tree = ElementTree.fromstring(a_xml_string)
except Exception, e:
drop("problem reading sense inventory xml file", ["error", e])
a_lemma_attribute = on.common.util.make_sgml_unsafe(on.common.util.get_attribute(a_inventory_tree, "lemma"))
a_lemma_attribute = a_lemma_attribute.split()[-1]
if "-" not in a_lemma_attribute:
#drop("lemma attribute of sense inventory doesn't have 2 bits",
# ["lemma", a_lemma_attribute])
self.lemma = a_lemma_attribute
self.pos = "v"
else:
self.lemma, self.pos = a_lemma_attribute.rsplit("-", 1)
if(self.lemma == ""):
drop("sense inv lemma is undefined")
match_fname = os.path.splitext(a_fname)[0]
lemma_pos = '-'.join([self.lemma, self.pos])
if lemma_pos != match_fname:
complain("sense inv lemma doesn't match fname", ["lemma_pos", lemma_pos], ["match_fname", match_fname])
self.ita_dict = {}
ita_trees = a_inventory_tree.findall(".//ita")
if ita_trees:
if len(ita_trees) != 1:
complain("sense inv has mulitple ita segments")
else:
self.ita_dict = ita_trees[0].attrib
on.common.log.debug("processing inventory element: %s" % (a_lemma_attribute),
on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
#---- get the sense elements in this inventory ----#
i=0
for a_sense_tree in a_inventory_tree.findall(".//sense"):
#---- lets get the attributes of this sense element ----#
a_group = on.common.util.get_attribute(a_sense_tree, "group")
n = on.common.util.get_attribute(a_sense_tree, "n")
a_name = on.common.util.get_attribute(a_sense_tree, "name")
a_sense_type = on.common.util.get_attribute(a_sense_tree, "type")
a_on_sense_type = on_sense_type(self.lemma, self.pos, a_group, n, a_name, a_sense_type)
#---- get the on sense commentary ----#
for a_commentary_tree in a_sense_tree.findall(".//commentary"):
a_commentary = a_commentary_tree.text
a_on_sense_type.commentary = a_commentary
#---- get the on sense examples ----#
for a_examples_tree in a_sense_tree.findall(".//examples"):
if a_examples_tree.text:
a_examples = a_examples_tree.text.split("\n")
else:
a_examples = []
a_on_sense_type.examples = a_examples
#--------------------------------------------------------------------------------#
# as all the three -- wn, omega and pb mapping are in
# this one element, we will process them one by one.
# however, there is no mapping with omega elements at
# this point
#--------------------------------------------------------------------------------#
k=0
for a_mapping_tree in a_sense_tree.findall(".//mappings"):
if self.lang_id.startswith("en"):
#---- now get the wn elements ----#
for a_wn_tree in a_mapping_tree.findall(".//wn"):
if(a_wn_tree.attrib.has_key("lemma")):
a_wn_lemma = on.common.util.get_attribute(a_wn_tree, "lemma")
else:
#---- using the default lemma ----#
#---- there is an assumption that the wordnet lemma is the same as the one for the inventory, if it is not defined ----#
a_wn_lemma = self.lemma
if(a_wn_tree.attrib.has_key("version")):
a_wn_version = on.common.util.get_attribute(a_wn_tree, "version")
#---- check if it is indeed wordnet and not something else ----#
if(a_wn_version not in ["1.7", "2.0", "2.1"]):
continue
else:
a_wn_version = ""
complain("undefined wn version", ["wn_version", a_wn_version])
a_wn_description = a_wn_tree.text
if a_wn_description:
a_wn_description = a_wn_description.strip()
if a_wn_description:
if(a_wn_description.find(",") != -1):
a_wn_sense_nums = a_wn_description.split(",")
else:
a_wn_sense_nums = a_wn_description.split(" ")
for a_wn_sense_num in a_wn_sense_nums:
a_on_sense_type.wn_sense_types.append(wn_sense_type(
a_wn_lemma, a_wn_sense_num, self.pos, a_wn_version))
#---- now get the pb elements ----#
for a_pb_tree in a_mapping_tree.findall(".//pb"):
a_pb_description = a_pb_tree.text
if a_pb_description and a_pb_description.strip():
for a_pb_sense_description in a_pb_description.split(","):
if self.IS_VN_REF_RE.match(a_pb_sense_description):
continue # ignore verbnet mappings
period_separated_bits = len(a_pb_sense_description.split("."))
if period_separated_bits == 2:
a_pb_lemma, a_pb_sense_num = a_pb_sense_description.split(".")
a_pb_lemma = a_pb_lemma.strip()
if on.corpora.proposition.proposition_bank.is_valid_frameset_helper(
a_frame_set_hash, a_pb_lemma, "v", a_pb_sense_num):
a_pb_sense_type = pb_sense_type(a_pb_lemma, a_pb_sense_num)
a_on_sense_type.pb_sense_types.append(a_pb_sense_type)
else:
complain("Bad frame reference", ["lemma", self.lemma], ["pb_ref", a_pb_sense_description])
elif a_pb_sense_description not in ["NM", "NP"]:
complain("Bad pb field",
["lemma", self.lemma],
["pb_desc", a_pb_description],
["sense number", n])
if(self.sense_hash.has_key(a_on_sense_type.id)):
drop("sense inventories define this-sense multiple times", ["a_on_sense_type_id", a_on_sense_type.id])
else:
self.sense_hash[a_on_sense_type.id] = a_on_sense_type
def num_senses(self):
return len(self.sense_hash.keys())
def write_to_db(self, cursor):
for b_key, a_on_sense_type in self.sense_hash.iteritems():
a_on_sense_type.write_instance_to_db(b_key, cursor)
if self.ita_dict:
on_sense_lemma_type.write_to_db_quick(self.lemma, self.pos, self.ita_dict, cursor)
@staticmethod
def extract_lemma_pos(fname):
""" given a filename representing a sense inventory file, return
the defined lemma and pos for that file..
"""
language = "unknown"
for l in ["english", "chinese", "arabic"]:
if l in fname:
language = l
pos = "n" if fname.endswith("-n.xml") else "v"
lemma_pos = ""
with codecs.open(fname, "r", "utf8") as f:
for line in f:
if '<inventory lemma=' in line:
lemma_pos = line.split('<inventory lemma="')[1].split('"')[0]
if not lemma_pos:
on.common.log.warning("no lemma field found for sense inventory file")
return None, None
lemma_pos = lemma_pos.strip().split()[-1]
if lemma_pos.endswith("-n"):
lemma, pos = lemma_pos[:-len("-n")], "n"
elif lemma_pos.endswith("-v"):
lemma, pos = lemma_pos[:-len("-v")], "v"
else:
lemma = lemma_pos
return lemma, pos
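#--------------------------------------------------------------------------------#
# illustrative sketch only (not part of the original module): pulling the
# attributes and the wordnet mapping out of a single <sense> element like the
# fire-n example in the module docstring. the real logic, including frame and
# verbnet mapping validation, lives in sense_inventory.__init__ above; the
# names below are hypothetical.
#--------------------------------------------------------------------------------#
def _parse_sense_element_sketch(sense_xml_string):
    """ return the fields on_sense_type cares about from one <sense> element """
    a_sense_tree = ElementTree.fromstring(sense_xml_string)
    a_wn_tree = a_sense_tree.find(".//wn")
    return {"n": a_sense_tree.get("n"),
            "name": a_sense_tree.get("name"),
            "type": a_sense_tree.get("type"),
            "group": a_sense_tree.get("group"),
            "wn_version": a_wn_tree.get("version") if a_wn_tree is not None else None,
            "wn_senses": (a_wn_tree.text or "").split(",") if a_wn_tree is not None else []}
# e.g. for the docstring example this returns n="4", type="Event",
# name="the discharge of a gun", group="1", wn_version="2.1", wn_senses=["2"]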
class sense_tagged_document:
"""
Contained by: :class:`sense_bank`
Contains: :class:`on_sense`
"""
def __init__(self, sense_tagged_document_string, document_id,
a_sense_bank, a_cursor=None, preserve_ita=False,
indexing="word"):
on.common.log.debug("building document: %s" % (document_id), on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
self.on_sense_list = []
self.sense_tagged_document_string = sense_tagged_document_string
self.document_id = document_id
self.lemma_pos_hash = {}
self.a_sense_bank = a_sense_bank
self.preserve_ita = preserve_ita
lang = document_id.split("@")[-2]
if(a_cursor == None):
#---- initialize the SENSE bank that contains skeletal sense information ----#
for enc_sense in sense_tagged_document_string.split("\n"):
def reject(errcode, *comments):
on.common.log.reject(["docid", self.document_id, "sense"], "sense", [[errcode, comments]], enc_sense)
#--- if a blank line is found, just skip it ----#
if(enc_sense.strip() == ""):
continue
a_list = enc_sense.split()
ann_1_sense, ann_2_sense = "?", "?"
a_sense = None
if len(a_list) == 6 and a_list[5] == "-1":
del a_list[5]
try:
if len(a_list) == 4:
continue # just a pointer
elif len(a_list) == 5:
adjudicated_flag = 0
adj_sense = None
(document_id, tree_index, word_index, lemma_pos, senses) = a_list
if "," not in senses:
a_sense = senses
else:
(ann_1_sense, ann_2_sense) = senses.split(",")
if ann_1_sense != ann_2_sense:
if "&&" in ann_1_sense and ann_2_sense in ann_1_sense.split("&&"):
a_sense = ann_2_sense
elif "&&" in ann_2_sense and ann_1_sense in ann_2_sense.split("&&"):
a_sense = ann_1_sense
if not a_sense:
a_sense = ann_1_sense
elif len(a_list) == 6:
(document_id, tree_index, word_index, lemma_pos, senses, adj_sense) = a_list
ann_1_sense, ann_2_sense = senses.split(",")
a_sense = adj_sense
adjudicated_flag = 1
else:
reject("invfields")
continue
except ValueError:
reject("invfields")
continue
# treat ",1 1" as "1,1"
# treat "1, 1" as "1,1"
if ann_1_sense and adj_sense and not ann_2_sense:
ann_2_sense = adj_sense
adj_sense = None
adjudicated_flag = 0
elif ann_2_sense and adj_sense and not ann_1_sense:
ann_1_sense = adj_sense
adj_sense = None
adjudicated_flag = 0
elif not ann_1_sense or not ann_2_sense:
reject("blankfield")
continue
if ann_1_sense == "-1":
ann_1_sense = "?"
if ann_2_sense == "-1":
ann_2_sense = "?"
lemma, pos = lemma_pos.rsplit("-", 1)
if (not adjudicated_flag and
ann_2_sense not in [ann_1_sense, "?"] and
ann_1_sense not in ann_2_sense.split("&&") and
ann_2_sense not in ann_1_sense.split("&&")):
ann_2_sense = "?"
if not a_sense:
reject("invsense")
continue
a_sense = a_sense.split("&&")[0]
#---- add lemma_pos to the hash ----#
if(not self.lemma_pos_hash.has_key(lemma_pos)):
self.lemma_pos_hash[lemma_pos] = 0
document_id = "%s" % (re.sub(".mrg", "", document_id))
#---- at this point the document id does not have a .wsd ----#
self.document_id = re.sub(".wsd", "", self.document_id)
#---- do not perform this check for chinese files ----#
if(lang != "ch"):
if(document_id != self.document_id.split("@")[0]):
on.common.log.warning("looks like the document id from filename (%s) and the id inside (%s) do not match" % (self.document_id.split("@")[0], document_id), on.common.log.MAX_VERBOSITY)
valid_senses = a_sense_bank.list_valid_senses(lemma, pos)
if not a_sense_bank.sense_inventories_loaded() or a_sense in valid_senses or (lang == "ch" and pos == "n"):
#if True:
a_on_sense = on_sense(self.document_id, tree_index, word_index, lemma, pos, ann_1_sense, ann_2_sense,
adj_sense, a_sense, adjudicated_flag, indexing=indexing)
a_on_sense.enc_sense = enc_sense
on.common.log.debug(enc_sense, on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
on.common.log.debug(a_on_sense, on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
self.on_sense_list.append(a_on_sense)
a_on_sense_lemma_type = on_sense_lemma_type(a_on_sense) # we just instantiate it; nothing else need happen
else:
reject("invsense" if a_sense != "XXX" else "invsenseXXX",
"valid_senses=%s" % ", ".join(valid_senses) or "None",
"extracted_sense=%s" % a_sense)
def __getitem__(self, index):
return self.on_sense_list[index]
def __len__(self):
return len(self.on_sense_list)
def __repr__(self):
return "sense_tagged_document instance, id=%s, on_senses:\n%s" % (
self.document_id, on.common.util.repr_helper(enumerate(a_on_sense.id for a_on_sense in self)))
def write_to_db(self, cursor):
for a_on_sense in self.on_sense_list:
if a_on_sense.leaf:
a_on_sense.write_to_db(cursor)
def dump_view(self, a_cursor=None, out_dir=""):
#---- write view file -----#
with codecs.open(on.common.util.output_file_name(self.document_id, self.a_sense_bank.extension, out_dir), "w", "utf-8") as f:
sense_tuples = [( a_on_sense.document_id, int(a_on_sense.tree_index),
int(a_on_sense.word_index), a_on_sense.sense_annotation(self.preserve_ita) )
for a_on_sense in self.on_sense_list if a_on_sense.leaf ]
sense_tuples.sort()
f.writelines([ "%s %s %s %s\n" % t for t in sense_tuples ])
class sense_bank(abstract_bank):
"""
Extends: :class:`on.corpora.abstract_bank`
Contains: :class:`sense_tagged_document`
"""
def __init__(self, a_subcorpus, tag, a_cursor=None, extension="sense",
a_sense_inv_hash=None, a_frame_set_hash=None, indexing="word"):
abstract_bank.__init__(self, a_subcorpus, tag, extension)
self.lemma_pos_hash = {}
self.sense_inventory_hash = a_sense_inv_hash if a_sense_inv_hash else {}
if(a_cursor == None):
if not self.sense_inventory_hash:
self.sense_inventory_hash = self.build_sense_inventory_hash(
a_subcorpus.language_id, a_subcorpus.top_dir, self.lemma_pos_hash,
a_frame_set_hash)
sys.stderr.write("reading the sense bank [%s] ..." % self.extension)
for a_file in self.subcorpus.get_files(self.extension):
sys.stderr.write(".")
sense_tagged_document_id = "%s@%s" % (a_file.document_id, a_subcorpus.id)
with codecs.open(a_file.physical_filename, "r", "utf-8") as sf:
a_sense_tagged_document = sense_tagged_document(sf.read(), sense_tagged_document_id, self, indexing=indexing)
#---- update the lemma_hash ----#
for a_lemma_pos in a_sense_tagged_document.lemma_pos_hash:
self.lemma_pos_hash[a_lemma_pos] = 0
self.append(a_sense_tagged_document)
sys.stderr.write("\n")
else:
if not self.sense_inventory_hash:
self.sense_inventory_hash = on.common.util.make_db_ref(a_cursor)
def sense_inventories_loaded(self):
return not is_not_loaded(self.sense_inventory_hash)
@staticmethod
def build_sense_inventory_hash(lang_id, top_dir, lemma_pos_hash=None,
a_frame_set_hash=None):
""" read in all the sense inventories from disk and make a hash of :class:`sense_inventory` instances """
sense_inv_hash = {}
sys.stderr.write("reading the sense inventory files ....")
sense_inv_dir = "%s/metadata/sense-inventories" % top_dir
unproc_sense_invs = None
for sense_inv_fname in [sfn for sfn in os.listdir(sense_inv_dir)
if sfn[-4:] == ".xml"]:
try:
full_sense_inv_fname = os.path.join(sense_inv_dir, sense_inv_fname)
lemma, pos = sense_inventory.extract_lemma_pos(full_sense_inv_fname)
a_lemma_pos = "%s-%s" % (lemma, pos)
if lemma_pos_hash and not a_lemma_pos in lemma_pos_hash:
on.common.log.debug("skipping %s ...." % a_lemma_pos, on.common.log.DEBUG,
on.common.log.MAX_VERBOSITY)
continue
else:
on.common.log.debug("adding %s ...." % (a_lemma_pos), on.common.log.DEBUG,
on.common.log.MAX_VERBOSITY)
on.common.log.debug("processing %s ...." % (sense_inv_fname),
on.common.log.DEBUG, on.common.log.MAX_VERBOSITY)
sys.stderr.write(".")
with codecs.open(full_sense_inv_fname, "r", "utf-8") as s_inv_f:
sense_inv_file_str = s_inv_f.read()
sense_inv_hash[a_lemma_pos] = on.corpora.sense.sense_inventory(
sense_inv_fname, sense_inv_file_str, lang_id, a_frame_set_hash)
except Exception, e:
on.common.log.report("senseinv", "sense inventory failed to load", fname=sense_inv_fname)
sys.stderr.write("\n")
return sense_inv_hash
def pb_mappings(self, a_lemma, a_pos, a_sense):
""" return [(pb_lemma, pb_sense), ...] or [] """
if is_not_loaded(self.sense_inventory_hash):
return []
elif is_db_ref(self.sense_inventory_hash):
a_cursor = self.sense_inventory_hash['DB']
try:
a_cursor.execute("""SELECT pb_sense_type
FROM on_sense_type_pb_sense_type
WHERE on_sense_type = '%s';""" % esc("@".join([a_lemma, a_sense, a_pos])))
except Exception:
raise
return [row["pb_sense_type"].split(".") for row in a_cursor.fetchall()]
else:
lpos = "%s-%s" % (a_lemma, a_pos)
if lpos not in self.sense_inventory_hash:
return []
senses = [sense for sense in self.sense_inventory_hash[lpos].sense_hash.itervalues() if sense.sense_num == a_sense]
if len(senses) != 1:
on.common.log.report("sense", "pb_mappings -- did not expect invalid sense here",
lemma=a_lemma, pos=a_pos, sense=a_sense)
return []
pbs = senses[0].pb_sense_types
return [(pb.lemma, pb.num) for pb in pbs]
def list_valid_senses(self, a_lemma, a_pos):
""" One can't just assume that senses 1 through
lookup_senses(a_lemma, a_pos)-1 are the valid senses for a
lemma/pos combination because there are allowed to be gaps.
So we provide a helpful list of valid senses
"""
if is_not_loaded(self.sense_inventory_hash):
return []
elif is_db_ref(self.sense_inventory_hash):
a_cursor = self.sense_inventory_hash['DB']
try:
a_cursor.execute("""SELECT sense_num
FROM on_sense_type
WHERE lemma = '%s'
AND pos = '%s';""" % esc(a_lemma, a_pos))
except Exception:
raise
return [document_row["sense_num"]
for document_row in a_cursor.fetchall()]
else:
lpos = "%s-%s" % (a_lemma, a_pos)
if lpos not in self.sense_inventory_hash:
return []
senses = self.sense_inventory_hash[lpos].sense_hash
return [senses[sense_id].sense_num for sense_id in senses]
def enrich_treebank_helper(self, a_on_sense, a_tree_document, lang_id, ignore_lemma_mismatches):
def reject(errcode, *comments):
on.common.log.reject(["docid", a_tree_document.document_id, "sense"], "sense",
[[errcode, comments]], a_on_sense.enc_sense)
dropme = (on.common.log.ERRS["sense"][1][errcode][0][0] == "5")
if dropme:
a_on_sense.leaf = None
return None
#----- now lets get the required tree ----#
a_tree_id = "%s@%s" % (a_on_sense.tree_index, a_on_sense.document_id)
try:
a_tree = a_tree_document.get_tree(a_tree_id)
except Exception:
return reject("badtree")
a_leaf = None
#---- attach the ontonotes sense to the required token ----#
a_on_sense.enrich_tree(a_tree)
a_leaf = a_on_sense.leaf
if not a_leaf:
return reject("oob")
if lang_id in ["ch", "ar"]:
leaf_lemma = a_leaf.get_lemma()
if leaf_lemma != a_on_sense.lemma and not ignore_lemma_mismatches:
return reject("badlemma",
"leaf_lemma='%s'" % leaf_lemma,
"on_sense_lemma='%s'" % a_on_sense.lemma)
leaf_pos = 'other'
if a_leaf.part_of_speech:
if lang_id == "en" and a_leaf.is_aux():
leaf_pos = 'auxilliary_verb'
elif a_leaf.is_noun():
leaf_pos = 'n'
elif a_leaf.is_verb():
leaf_pos = 'v'
if leaf_pos != a_on_sense.pos:
return reject("posmm" if lang_id != 'ch' else "notinc",
"leaf pos=%s" % leaf_pos,
"sense pos=%s" % a_on_sense.pos,
"leaf tag=%s" % a_leaf.tag,
"leaf word=%s" % a_leaf.word)
def enrich_treebank(self, a_treebank, a_cursor=None, ignore_lemma_mismatches=False):
abstract_bank.enrich_treebank(self, a_treebank)
for a_sense_tagged_document in self:
sys.stderr.write(".")
for a_on_sense in a_sense_tagged_document.on_sense_list:
self.enrich_treebank_helper(a_on_sense, a_sense_tagged_document.tree_document,
lang_id = a_treebank.subcorpus.language_id,
ignore_lemma_mismatches=ignore_lemma_mismatches)
sys.stderr.write("\n")
return a_treebank
sql_table_name = "sense_bank"
sql_exists_table = "on_sense"
    ## @var SQL create statement for the sense_bank table
#
sql_create_statement = \
"""
create table sense_bank
(
id varchar(255) not null collate utf8_bin primary key,
subcorpus_id varchar(255) not null,
tag varchar (255) not null,
foreign key (subcorpus_id) references subcorpus.id
)
default character set utf8;
"""
    ## @var SQL insert statement for the sense_bank table
#
sql_insert_statement = \
"""
insert into sense_bank
(
id,
subcorpus_id,
tag
) values(%s, %s, %s)
"""
def write_to_db(self, a_cursor):
abstract_bank.write_to_db(self, a_cursor)
self.write_sense_inventory_hash_to_db(self.sense_inventory_hash, a_cursor)
@staticmethod
def write_sense_inventory_hash_to_db(a_sense_inventory_hash, a_cursor):
if not is_db_ref(a_sense_inventory_hash) and not is_not_loaded(a_sense_inventory_hash):
for a_sense_inventory in a_sense_inventory_hash.itervalues():
try:
a_sense_inventory.write_to_db(a_cursor)
sys.stderr.write("... writing sense inventory " + a_sense_inventory.lemma + "\n")
except AttributeError, e:
on.common.log.report("sense", "Failed to write sense inventory to db", "si: %s" % a_sense_inventory)
sys.stderr.write("\n")
wn_sense_type.write_to_db(a_cursor)
@classmethod
def from_db(cls, a_subcorpus, tag, a_cursor, affixes=None):
sys.stderr.write("reading the sense bank ....")
a_sense_bank = sense_bank(a_subcorpus, tag, a_cursor)
#---- now get document ids for this treebank ----#
a_cursor.execute("""select document.id from document where subcorpus_id = '%s';""" % (a_subcorpus.id))
document_rows = a_cursor.fetchall()
#---- and process each document ----#
for document_row in document_rows:
a_document_id = document_row["id"]
if not on.common.util.matches_an_affix(a_document_id, affixes):
continue
sys.stderr.write(".")
a_sense_tagged_document = sense_tagged_document("", a_document_id, a_sense_bank, a_cursor)
a_cursor.execute("""select * from on_sense where document_id = '%s';""" % (a_document_id))
on_sense_rows = a_cursor.fetchall()
for on_sense_row in on_sense_rows:
# a_on_sense_id = on_sense_row["id"]
a_on_sense_lemma = on_sense_row["lemma"]
a_on_sense_pos = on_sense_row["pos"]
a_on_sense_ann_1_sense = on_sense_row["ann_1_sense"]
a_on_sense_ann_2_sense = on_sense_row["ann_2_sense"]
a_on_sense_adj_sense = on_sense_row["adj_sense"]
a_on_sense_adjudicated_flag = on_sense_row["adjudicated_flag"]
a_on_sense_sense = on_sense_row["sense"]
a_on_sense_word_index = on_sense_row["word_index"]
a_on_sense_tree_index = on_sense_row["tree_index"]
a_on_sense_document_id = on_sense_row["document_id"]
a_on_sense = on_sense(a_on_sense_document_id, a_on_sense_tree_index, a_on_sense_word_index, a_on_sense_lemma, a_on_sense_pos,
a_on_sense_ann_1_sense, a_on_sense_ann_2_sense, a_on_sense_adj_sense, a_on_sense_sense, a_on_sense_adjudicated_flag, a_cursor)
a_sense_tagged_document.on_sense_list.append(a_on_sense)
a_sense_tagged_document.lemma_pos_hash["%s-%s" % (a_on_sense_lemma, a_on_sense_pos)] = 0
a_sense_bank.append(a_sense_tagged_document)
sys.stderr.write("\n")
return a_sense_bank
| {
"content_hash": "8ea61b06cf289f5750a56d1863da2643",
"timestamp": "",
"source": "github",
"line_count": 1636,
"max_line_length": 409,
"avg_line_length": 35.613691931540345,
"alnum_prop": 0.5401105313744337,
"repo_name": "Juicechuan/AMR_graph",
"id": "1d3834d67eb5372419544c766d9a20739ac5403a",
"size": "61457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "camr_conversion/on/corpora/sense.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31944"
}
],
"symlink_target": ""
} |
from django.core.exceptions import ImproperlyConfigured
try:
# Django < 2.0
from django.core.urlresolvers import RegexURLPattern as URLPattern, RegexURLResolver as URLResolver, get_callable
except ImportError:
# Django >= 2.0
from django.urls import URLPattern, URLResolver, get_callable
from django.utils import six
from django.views.generic import View
class CBVRegexURLPattern(URLPattern):
_callback_processed = None
@property
def callback(self):
if self._callback_processed is not None:
return self._callback
if getattr(self, '_callback_str', None) is not None:
self._callback = get_callable(self._callback_str)
if isinstance(self._callback, type) and issubclass(self._callback, View):
self._callback = self._callback.as_view()
else:
self._callback = self._callback
self._callback_processed = True
return self._callback
def patterns(prefix, *args):
"""As patterns() in django."""
pattern_list = []
for t in args:
if isinstance(t, (list, tuple)):
t = url(prefix=prefix, *t)
        elif isinstance(t, URLPattern):
t.add_prefix(prefix)
pattern_list.append(t)
return pattern_list
def url(regex, view, kwargs=None, name=None, prefix=''):
"""As url() in Django."""
if isinstance(view, (list, tuple)):
# For include(...) processing.
urlconf_module, app_name, namespace = view
return URLResolver(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
else:
if isinstance(view, six.string_types):
if not view:
raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
if prefix:
view = prefix + '.' + view
view = get_callable(view)
return CBVRegexURLPattern(regex, view, kwargs, name)
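# Hedged usage sketch (added) -- the names below (myapp, AboutView) are
# hypothetical and not part of this module.  In a urlconf one might write:
#
#     urlpatterns = patterns('myapp.views',
#         url(r'^about/$', 'AboutView', name='about'),
#     )
#
# CBVRegexURLPattern.callback then resolves the dotted string with
# get_callable() and, because AboutView subclasses django.views.generic.View,
# calls .as_view() on it before dispatching.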
| {
"content_hash": "f44c94fe445c79eae9ad08d05c790358",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 117,
"avg_line_length": 35.21818181818182,
"alnum_prop": 0.6339700567888488,
"repo_name": "mjtamlyn/django-cbvpatterns",
"id": "a657d0128d101cbbd5da278fa7796303552abdfa",
"size": "1937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cbvpatterns.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5304"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core import serializers
from django.core.urlresolvers import reverse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
import templateholder
# Create your views here.
def calendar(request, error=None, info=None, success=None, warning=None):
context = {'title':'AdminLTE 2 | Calendar'}
if error:
context['error'] = str(error)
if info:
context['info'] = str(info)
if warning:
context['warning'] = str(warning)
if success:
context['success'] = str(success)
return render(request, 'webappdemo/pages/calendar.html', {'context': context})
| {
"content_hash": "82607fac561c0bd1e6b0ca37c7e1634b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 79,
"avg_line_length": 34.65217391304348,
"alnum_prop": 0.7440401505646174,
"repo_name": "drfrink/adminLTE_Django",
"id": "391380a7d582986a6c4b8f7bcda7c88a27875e8d",
"size": "822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webappdemo/calendar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1126734"
},
{
"name": "HTML",
"bytes": "7912253"
},
{
"name": "JavaScript",
"bytes": "6441573"
},
{
"name": "PHP",
"bytes": "11748"
},
{
"name": "Python",
"bytes": "46379"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys
import time
import google.auth.credentials
from google.auth.environment_vars import CREDENTIALS as TEST_CREDENTIALS
# From shell environ. May be None.
CREDENTIALS = os.getenv(TEST_CREDENTIALS)
ENVIRON_ERROR_MSG = """\
To run the system tests, you need to set some environment variables.
Please check the CONTRIBUTING guide for instructions.
"""
class EmulatorCreds(google.auth.credentials.Credentials):
"""A mock credential object.
Used to avoid unnecessary token refreshing or reliance on the network
while an emulator is running.
"""
def __init__(self): # pylint: disable=super-init-not-called
self.token = b'seekrit'
self.expiry = None
@property
def valid(self):
"""Would-be validity check of the credentials.
Always is :data:`True`.
"""
return True
def refresh(self, unused_request): # pylint: disable=unused-argument
"""Off-limits implementation for abstract method."""
raise RuntimeError('Should never be refreshed.')
def check_environ():
err_msg = None
if CREDENTIALS is None:
err_msg = '\nMissing variables: ' + TEST_CREDENTIALS
elif not os.path.isfile(CREDENTIALS):
err_msg = '\nThe %s path %r is not a file.' % (TEST_CREDENTIALS,
CREDENTIALS)
if err_msg is not None:
msg = ENVIRON_ERROR_MSG + err_msg
print(msg, file=sys.stderr)
sys.exit(1)
def unique_resource_id(delimiter='_'):
"""A unique identifier for a resource.
Intended to help locate resources created in particular
testing environments and at particular times.
"""
build_id = os.getenv('CIRCLE_BUILD_NUM', '')
if build_id == '':
return '%s%d' % (delimiter, 1000 * time.time())
else:
return '%s%s%s%d' % (delimiter, build_id, delimiter, time.time())
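# Illustrative note (added; values hypothetical): outside CI this yields
# something like '_1612345678901' (milliseconds since the epoch), while with
# CIRCLE_BUILD_NUM=42 set it yields '_42_1612345678' (whole seconds).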
| {
"content_hash": "0cf4a04c09f923a85ceb90186d1a36fe",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 73,
"avg_line_length": 29.074626865671643,
"alnum_prop": 0.6442505133470225,
"repo_name": "calpeyser/google-cloud-python",
"id": "44914d7ce29ab5a1f5c70444fc1bfe6e566edbe1",
"size": "2524",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test_utils/test_utils/system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62906"
},
{
"name": "Python",
"bytes": "4584603"
},
{
"name": "Shell",
"bytes": "4147"
}
],
"symlink_target": ""
} |
"""SCons.Scanner.IDL
This module implements the dependency scanner for IDL (Interface
Definition Language) files.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/IDL.py 2013/03/03 09:48:35 garyo"
import SCons.Node.FS
import SCons.Scanner
def IDLScan():
"""Return a prototype Scanner instance for scanning IDL source files"""
cs = SCons.Scanner.ClassicCPP("IDLScan",
"$IDLSUFFIXES",
"CPPPATH",
'^[ \t]*(?:#[ \t]*include|[ \t]*import)[ \t]+(<|")([^>"]+)(>|")')
return cs
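# Illustrative note (added): the ClassicCPP pattern above is meant to match
# include/import lines in IDL sources such as
#     #include "foo.idl"
#     import <bar.idl>
# capturing the quote/bracket style and the path so that "" and <> includes
# can be resolved against CPPPATH appropriately.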
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "c79acc9af0db5fcb9100504c2fcc8cac",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 113,
"avg_line_length": 39,
"alnum_prop": 0.6960470085470085,
"repo_name": "jjenki11/blaze-chem-rendering",
"id": "13a0226b91df318893966c1139dcd0e615864ef6",
"size": "1872",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "build_utils/scons-2.3.0/build/lib.linux-i686-2.7/SCons/Scanner/IDL.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "2476"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
# imports
import os
import random
import sys
HeadBias = 15
changeFactor = 5
#return
def Difference(a,b):
return np.abs(a - b)
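# Descriptive note (added): tiltShift() keeps a horizontal focus band of
# +/- frame_height/8 rows around `center` sharp and overwrites rows outside an
# expanding band with progressively larger Gaussian blurs, while writing a
# grayscale visualisation of the blur strength (plus green band edges and a
# red center line) into `heights`.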
def tiltShift(frame, center,heights):
blurIterations = 8
focus = frame.shape[0] / 8
yTopOriginal = int(center - focus)
yBotOriginal = int(center + focus)
#TODO if the top or bottom goes past the image edge, clamp it
yTop = yTopOriginal
yBot = yBotOriginal
distTop = yTop
distBot = frame.shape[0] - yBot
blurred = frame
for i in range(blurIterations):
ksize = (i * 2) + 1
blurred = cv2.GaussianBlur(frame,(ksize,ksize),0)
shapeImage = (frame.shape[0],frame.shape[1],1)
shape = (frame.shape[0],frame.shape[1])
mask = np.zeros(shape)
row,col = np.indices(shape)
mask[(row < yTop) | (row > yBot)] = 1
frame[mask == 1] = blurred[mask == 1]
val = int((i / float(blurIterations))*255)
heights[mask == 1] = [val,val,val]
yTop = yTopOriginal - distTop * (i / float(blurIterations))
yBot = yBotOriginal + distBot * (i / float(blurIterations))
heights[yTopOriginal,:] = [0,255,0]
heights[yBotOriginal,:] = [0,255,0]
heights[center,:] = [0,0,255]
return frame,heights
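# Descriptive note (added): findBestVerticle() estimates where the moving
# subject is by background-subtracting the frame (MOG2), opening the mask to
# drop speckle, and averaging the row indices of the remaining foreground
# pixels; it falls back to the frame's vertical midpoint when nothing moves,
# biases the estimate upward by HeadBias, and limits the frame-to-frame change
# to changeFactor pixels to reduce jitter.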
def findBestVerticle(frame,fgbg,kernel, prevHeight):
fgmask = fgbg.apply(frame)
fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
y,x = np.indices(fgmask.shape)
mask = y[fgmask >= 1]
height = np.average(mask)
if not height >= 0:
height = frame.shape[0] / 2
height = height - HeadBias
change = height - prevHeight
if(np.abs(change) > changeFactor):
height = prevHeight + np.sign(change)*changeFactor
return int(height), fgmask
if __name__ == "__main__":
videoName = sys.argv[1]
#writer info
cap = cv2.VideoCapture(0)
vidSource = cv2.VideoCapture(videoName)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(15,15))
fgbg = cv2.createBackgroundSubtractorMOG2()
pre = videoName.split('.')[0]
outName = pre + "_" + "Post.avi"
outName2 = pre + "_" + "PostColor.avi"
outName3 = pre + "_" + "Movement.avi"
outName4 = pre + "_" + "Center.avi"
# Define the codec and create VideoWriter object
out = cv2.VideoWriter(outName,
cv2.VideoWriter_fourcc('m','p','4','v'),
vidSource.get(5), # setting the frame rate of composite to be same as vidSource
(int(vidSource.get(3)), int(vidSource.get(4))), True) # setting size of composite to be same as vidSource
# Define the codec and create VideoWriter object
outPost = cv2.VideoWriter(outName2,
cv2.VideoWriter_fourcc('m','p','4','v'),
vidSource.get(5), # setting the frame rate of composite to be same as vidSource
(int(vidSource.get(3)), int(vidSource.get(4))), True) # setting size of composite to be same as vidSource
# Define the codec and create VideoWriter object
outMovement = cv2.VideoWriter(outName3,
cv2.VideoWriter_fourcc('m','p','4','v'),
vidSource.get(5), # setting the frame rate of composite to be same as vidSource
(int(vidSource.get(3)), int(vidSource.get(4))), True) # setting size of composite to be same as vidSource
# Define the codec and create VideoWriter object
outHeight = cv2.VideoWriter(outName4,
cv2.VideoWriter_fourcc('m','p','4','v'),
vidSource.get(5), # setting the frame rate of composite to be same as vidSource
(int(vidSource.get(3)), int(vidSource.get(4))), True) # setting size of composite to be same as vidSource
ok, prevFrame = vidSource.read()
prevHeight = (prevFrame.shape[0] / 2) - HeadBias
frameNumber = 1
#second loop for write processing
while True:
ret, frame = vidSource.read()
if not ret:
break
avgY, mask = findBestVerticle(frame,fgbg,kernel, prevHeight)
prevHeight = avgY
#reshape the mask so it can be printed out
mask = mask[:,:,np.newaxis]
mask = np.repeat(mask,3,axis=2)
heights = np.zeros_like(mask)
frame, heights = tiltShift(frame,avgY,heights)
#color changes
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
saturationAdd = 25
valueAdd = 20
maskS = 255 - hsv[:,:,1]
maskV = 255 - hsv[:,:,2]
hsv[:,:,1] = np.where(maskS < saturationAdd,255,hsv[:,:,1] + saturationAdd)
hsv[:,:,2] = np.where(maskV < valueAdd,255,hsv[:,:,2] + valueAdd)
moddedFrame = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
#writes
outPost.write(moddedFrame)
out.write(frame)
outMovement.write(mask)
outHeight.write(heights)
#frame logic
prevFrame = frame
print(frameNumber)
frameNumber = frameNumber + 1
# Release everything if job is finished
cap.release()
out.release()
outPost.release()
outMovement.release()
outHeight.release() | {
"content_hash": "f2c6355c24d376e196c134d7f30ad4d8",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 113,
"avg_line_length": 28.04945054945055,
"alnum_prop": 0.6186092066601371,
"repo_name": "JasonKraft/CVFall2017",
"id": "a5350a074994c76847c1d47b260d1812cec2bae2",
"size": "5105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/AutoTiltShift/AutoTiltShift.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "12444"
},
{
"name": "C++",
"bytes": "6757"
},
{
"name": "M",
"bytes": "2367"
},
{
"name": "Matlab",
"bytes": "378580"
},
{
"name": "Python",
"bytes": "63689"
}
],
"symlink_target": ""
} |
"""
Fabric's own fabfile.
"""
import nose
from fabric.api import abort, local, task
@task(default=True)
def test(args=None):
"""
Run all unit tests and doctests.
Specify string argument ``args`` for additional args to ``nosetests``.
"""
# Default to explicitly targeting the 'tests' folder, but only if nothing
# is being overridden.
tests = "" if args else " tests"
default_args = "-sv --with-doctest --nologcapture --with-color %s" % tests
default_args += (" " + args) if args else ""
nose.core.run_exit(argv=[''] + default_args.split())
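# Hedged usage note (added): under Fabric 1.x-style task invocation this is
# typically run as `fab test` for the defaults, or something like
# `fab test:args=-x` to append extra nosetests flags via ``args`` (exact
# argument quoting depends on the Fabric version and your shell).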
| {
"content_hash": "339f79dadc23c0514e3b1eb505ac596e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 78,
"avg_line_length": 24.458333333333332,
"alnum_prop": 0.6388415672913118,
"repo_name": "xLegoz/fabric",
"id": "5f9e5f69678ffdc36455b3b9c0fb321b4619980f",
"size": "587",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "fabfile/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "438244"
}
],
"symlink_target": ""
} |
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of
the HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
    - parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a
'q' quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must
be pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
__version__ = "0.1.1"
__author__ = 'Joe Gregorio'
__email__ = "[email protected]"
__credits__ = ""
def parse_mime_type(mime_type):
"""Carves up a mime_type and returns a tuple of the
(type, subtype, params) where 'params' is a dictionary
of all the parameters for the media range.
For example, the media range 'application/xhtml;q=0.5' would
get parsed into:
        ('application', 'xhtml', {'q': '0.5'})
"""
import re
parts = re.split(r'[,|;]', mime_type)
params = dict([tuple([s.strip() for s in param.split("=")]) for param in parts[1:] if param.count("=")])
(type, subtype) = parts[0].split("/") if parts[0].count("/") else (parts[0], parts[0])
return (type.strip(), subtype.strip(), params)
def parse_media_range(range):
"""Carves up a media range and returns a tuple of the
(type, subtype, params) where 'params' is a dictionary
of all the parameters for the media range.
For example, the media range 'application/*;q=0.5' would
get parsed into:
        ('application', '*', {'q': '0.5'})
In addition this function also guarantees that there
is a value for 'q' in the params dictionary, filling it
in with a proper default if necessary.
"""
(type, subtype, params) = parse_mime_type(range)
if not params.has_key('q') or not params['q'] or \
not float(params['q']) or float(params['q']) > 1\
or float(params['q']) < 0:
params['q'] = '1'
return (type, subtype, params)
def quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a given mime_type against
a list of media_ranges that have already been
parsed by parse_media_range(). Returns the
'q' quality parameter of the best match, 0 if no
    match was found. This function behaves the same as quality()
except that 'parsed_ranges' must be a list of
parsed media ranges. """
best_fitness = -1
best_match = ""
best_fit_q = 0
(target_type, target_subtype, target_params) =\
parse_media_range(mime_type)
for (type, subtype, params) in parsed_ranges:
param_matches = reduce(lambda x, y: x+y, [1 for (key, value) in \
target_params.iteritems() if key != 'q' and \
params.has_key(key) and value == params[key]], 0)
if (type == target_type or type == '*' or target_type == '*') and \
(subtype == target_subtype or subtype == '*' or target_subtype == '*'):
fitness = (type == target_type) and 100 or 0
fitness += (subtype == target_subtype) and 10 or 0
fitness += param_matches
if fitness > best_fitness:
best_fitness = fitness
best_fit_q = params['q']
return float(best_fit_q)
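# Worked example (added, illustrative): matching 'text/html;level=1' against
# the parsed ranges of 'text/*;q=0.3, text/html;level=1' scores the second
# range 100 (type) + 10 (subtype) + 1 (matching 'level' parameter) = 111,
# beating the wildcard's 100, so its default q of 1 is returned rather than
# 0.3 -- consistent with the RFC 2616 example exercised in the tests below.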
def quality(mime_type, ranges):
"""Returns the quality 'q' of a mime_type when compared
against the media-ranges in ranges. For example:
>>> quality('text/html','text/*;q=0.3, text/html;q=0.7, text/html;level=1, '
'text/html;level=2;q=0.4, */*;q=0.5')
0.7
"""
parsed_ranges = [parse_media_range(r) for r in ranges.split(",")]
return quality_parsed(mime_type, parsed_ranges)
def best_match(supported, header):
"""Takes a list of supported mime-types and finds the best
match for all the media-ranges listed in header. The value of
header must be a string that conforms to the format of the
HTTP Accept: header. The value of 'supported' is a list of
mime-types.
>>> best_match(['application/xbel+xml', 'text/xml'], 'text/*;q=0.5,*/*; q=0.1')
'text/xml'
"""
parsed_header = [parse_media_range(r) for r in header.split(",")]
weighted_matches = [(quality_parsed(mime_type, parsed_header), mime_type)\
for mime_type in supported]
weighted_matches.sort()
return weighted_matches[-1][0] and weighted_matches[-1][1] or ''
if __name__ == "__main__":
import unittest
class TestMimeParsing(unittest.TestCase):
def test_parse_media_range(self):
self.assert_(('application', 'xml', {'q': '1'}) == parse_media_range('application/xml;q=1'))
self.assertEqual(('application', 'xml', {'q': '1'}), parse_media_range('application/xml'))
self.assertEqual(('application', 'xml', {'q': '1'}), parse_media_range('application/xml;q='))
self.assertEqual(('application', 'xml', {'q': '1'}), parse_media_range('application/xml ; q='))
self.assertEqual(('application', 'xml', {'q': '1', 'b': 'other'}), parse_media_range('application/xml ; q=1;b=other'))
self.assertEqual(('application', 'xml', {'q': '1', 'b': 'other'}), parse_media_range('application/xml ; q=2;b=other'))
def test_rfc_2616_example(self):
accept = "text/*;q=0.3, text/html;q=0.7, text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5"
self.assertEqual(1, quality("text/html;level=1", accept))
self.assertEqual(0.7, quality("text/html", accept))
self.assertEqual(0.3, quality("text/plain", accept))
self.assertEqual(0.5, quality("image/jpeg", accept))
self.assertEqual(0.4, quality("text/html;level=2", accept))
self.assertEqual(0.7, quality("text/html;level=3", accept))
def test_best_match(self):
mime_types_supported = ['application/xbel+xml', 'application/xml']
# direct match
self.assertEqual(best_match(mime_types_supported, 'application/xbel+xml'), 'application/xbel+xml')
# direct match with a q parameter
self.assertEqual(best_match(mime_types_supported, 'application/xbel+xml; q=1'), 'application/xbel+xml')
# direct match of our second choice with a q parameter
self.assertEqual(best_match(mime_types_supported, 'application/xml; q=1'), 'application/xml')
# match using a subtype wildcard
self.assertEqual(best_match(mime_types_supported, 'application/*; q=1'), 'application/xml')
# match using a type wildcard
self.assertEqual(best_match(mime_types_supported, '*/*'), 'application/xml')
mime_types_supported = ['application/xbel+xml', 'text/xml']
# match using a type versus a lower weighted subtype
self.assertEqual(best_match(mime_types_supported, 'text/*;q=0.5,*/*; q=0.1'), 'text/xml')
# fail to match anything
self.assertEqual(best_match(mime_types_supported, 'text/html,application/atom+xml; q=0.9'), '')
def test_support_wildcards(self):
mime_types_supported = ['image/*', 'application/xml']
# match using a type wildcard
self.assertEqual(best_match(mime_types_supported, 'image/png'), 'image/*')
# match using a wildcard for both requested and supported
self.assertEqual(best_match(mime_types_supported, 'image/*'), 'image/*')
unittest.main()
| {
"content_hash": "f622a73dedd343219efd43edd78327a6",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 130,
"avg_line_length": 47.58181818181818,
"alnum_prop": 0.6112597121385811,
"repo_name": "ramusus/django-authopenid",
"id": "ca304772d2824f1aa7088b4bd37264eb65ac235d",
"size": "7851",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_authopenid/utils/mimeparse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3815"
},
{
"name": "HTML",
"bytes": "14318"
},
{
"name": "JavaScript",
"bytes": "7919"
},
{
"name": "Python",
"bytes": "91738"
}
],
"symlink_target": ""
} |
import logging
from django.core.management.base import BaseCommand
from osf.features import switches, flags
from waffle.models import Flag, Switch
logger = logging.getLogger(__name__)
def manage_waffle(delete_waffle=False):
file_switches = list(switches.values())
current_switches = Switch.objects.values_list('name', flat=True)
add_switches = set(file_switches) - set(current_switches)
for switch in add_switches:
Switch.objects.get_or_create(name=switch, defaults={'active': False})
logger.info('Adding switch: {}'.format(switch))
file_flags = list(flags.values())
current_flags = Flag.objects.values_list('name', flat=True)
add_flags = set(file_flags) - set(current_flags)
for flag_name in add_flags:
Flag.objects.get_or_create(name=flag_name, defaults={'everyone': False})
logger.info('Adding flag: {}'.format(flag_name))
if delete_waffle:
delete_switches = set(current_switches) - set(file_switches)
Switch.objects.filter(name__in=delete_switches).delete()
logger.info('Deleting switches: {}'.format(delete_switches))
delete_flags = set(current_flags) - set(file_flags)
Flag.objects.filter(name__in=delete_flags).delete()
logger.info('Deleting flags: {}'.format(delete_flags))
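# Hedged usage note (added): this is normally invoked through the management
# command defined below, e.g. `python manage.py manage_switch_flags` to add
# any missing switches/flags, or `python manage.py manage_switch_flags
# -delete` to also remove ones no longer listed in osf.features.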
class Command(BaseCommand):
"""Ensure all features and switches are updated with the switch and flag files
"""
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'-delete',
action='store_true',
help='Use this flag to remove flags, otherwise the script will just add flags'
)
def handle(self, *args, **options):
delete_waffle = options.get('delete', False)
manage_waffle(delete_waffle)
| {
"content_hash": "174a7b2ee99e72c67920d4fcef3afea6",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 90,
"avg_line_length": 36.66,
"alnum_prop": 0.668303327877796,
"repo_name": "baylee-d/osf.io",
"id": "186ef15e6828daa6d1077b51629d6e314aefd808",
"size": "1857",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "osf/management/commands/manage_switch_flags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "5721"
},
{
"name": "HTML",
"bytes": "318459"
},
{
"name": "JavaScript",
"bytes": "1792442"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "654930"
},
{
"name": "Python",
"bytes": "10662092"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
"""The Migrate command."""
from googlecloudsdk.api_lib.app import appengine_api_client
from googlecloudsdk.api_lib.app import operations_util
from googlecloudsdk.api_lib.app import service_util
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.api_lib.util import exceptions as api_exceptions
from googlecloudsdk.calliope import base
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
class VersionsMigrateError(exceptions.Error):
"""Errors when migrating versions."""
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.GA)
class Migrate(base.Command):
"""Migrate traffic from one version to another for a set of services."""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
This only works for automatically scaled Standard versions.
To migrate from one version to another for all services where there
is a version v2 and shut down the previous version, run:
$ {command} v2
To migrate from one version to another for a specific service, run:
$ {command} v2 --service="s1"
""",
}
@staticmethod
def Args(parser):
parser.add_argument('version', help='The version to migrate to.')
parser.add_argument('--service', '-s',
help='If specified, only migrate versions from the '
'given service.')
def Run(self, args):
client = appengine_api_client.GetApiClient()
if args.service:
service = client.GetServiceResource(args.service)
traffic_split = {}
if service.split:
for split in service.split.allocations.additionalProperties:
traffic_split[split.key] = split.value
services = [service_util.Service(client.project, service.id,
traffic_split)]
else:
services = client.ListServices()
all_versions = client.ListVersions(services)
if args.version not in {v.id for v in all_versions}:
if args.service:
raise VersionsMigrateError('Version [{0}/{1}] does not exist.'.format(
args.service, args.version))
else:
raise VersionsMigrateError('Version [{0}] does not exist.'.format(
args.version))
service_names = {v.service for v in all_versions if v.id == args.version}
def WillBeMigrated(v):
return (v.service in service_names and v.traffic_split and
v.traffic_split > 0 and v.id != args.version)
# All versions that will stop receiving traffic.
versions_to_migrate = filter(WillBeMigrated, all_versions)
for version in versions_to_migrate:
short_name = '{0}/{1}'.format(version.service, version.id)
promoted_name = '{0}/{1}'.format(version.service, args.version)
log.status.Print('Migrating all traffic from version '
'[{0}] to [{1}]'.format(
short_name, promoted_name))
console_io.PromptContinue(cancel_on_no=True)
errors = {}
for service in sorted(set([v.service for v in versions_to_migrate])):
allocations = {args.version: 1.0}
try:
client.SetTrafficSplit(service, allocations,
shard_by='ip', migrate=True)
except (api_exceptions.HttpException,
operations_util.OperationError,
operations_util.OperationTimeoutError, util.RPCError) as e:
errors[service] = str(e)
if errors:
error_string = ('Issues migrating all traffic of '
'service(s): [{0}]\n\n{1}'.format(
', '.join(errors.keys()),
'\n\n'.join(errors.values())))
raise VersionsMigrateError(error_string)
| {
"content_hash": "249b241a51ccd45551d19876841cf000",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 78,
"avg_line_length": 38.83673469387755,
"alnum_prop": 0.6374146085128745,
"repo_name": "KaranToor/MA450",
"id": "d16cf1392f9b08edffde82d52090c0b6b54b51ac",
"size": "4402",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/.install/.backup/lib/surface/app/versions/migrate.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
import io
import shutil
from unittest import mock
from mitmproxy.test import tflow
from mitmproxy.test import taddons
from mitmproxy.test import tutils
from mitmproxy.addons import dumper
from mitmproxy import exceptions
from mitmproxy.tools import dump
from mitmproxy import http
def test_configure():
d = dumper.Dumper()
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, filtstr="~b foo")
assert d.filter
f = tflow.tflow(resp=True)
assert not d.match(f)
f.response.content = b"foo"
assert d.match(f)
ctx.configure(d, filtstr=None)
assert not d.filter
tutils.raises(exceptions.OptionsError, ctx.configure, d, filtstr="~~")
assert not d.filter
def test_simple():
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=0)
d.response(tflow.tflow(resp=True))
assert not sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=1)
d.response(tflow.tflow(resp=True))
assert sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=1)
d.error(tflow.tflow(err=True))
assert sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
d.response(tflow.tflow(resp=True))
assert sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
d.response(tflow.tflow(resp=True))
assert "<<" in sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
d.response(tflow.tflow(err=True))
assert "<<" in sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
flow = tflow.tflow()
flow.request = tutils.treq()
flow.request.stickycookie = True
flow.client_conn = mock.MagicMock()
flow.client_conn.address.host = "foo"
flow.response = tutils.tresp(content=None)
flow.response.is_replay = True
flow.response.status_code = 300
d.response(flow)
assert sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
flow = tflow.tflow(resp=tutils.tresp(content=b"{"))
flow.response.headers["content-type"] = "application/json"
flow.response.status_code = 400
d.response(flow)
assert sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=4)
flow = tflow.tflow()
flow.request.content = None
flow.response = http.HTTPResponse.wrap(tutils.tresp())
flow.response.content = None
d.response(flow)
assert "content missing" in sio.getvalue()
sio.truncate(0)
def test_echo_body():
f = tflow.tflow(client_conn=True, server_conn=True, resp=True)
f.response.headers["content-type"] = "text/html"
f.response.content = b"foo bar voing\n" * 100
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=3)
d._echo_message(f.response)
t = sio.getvalue()
assert "cut off" in t
def test_echo_request_line():
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=3, showhost=True)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
f.request.is_replay = True
d._echo_request_line(f)
assert "[replay]" in sio.getvalue()
sio.truncate(0)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
f.request.is_replay = False
d._echo_request_line(f)
assert "[replay]" not in sio.getvalue()
sio.truncate(0)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
f.request.http_version = "nonstandard"
d._echo_request_line(f)
assert "nonstandard" in sio.getvalue()
sio.truncate(0)
ctx.configure(d, flow_detail=0, showhost=True)
f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
terminalWidth = max(shutil.get_terminal_size()[0] - 25, 50)
f.request.url = "http://address:22/" + ("x" * terminalWidth) + "textToBeTruncated"
d._echo_request_line(f)
assert "textToBeTruncated" not in sio.getvalue()
sio.truncate(0)
class TestContentView:
@mock.patch("mitmproxy.contentviews.auto.ViewAuto.__call__")
def test_contentview(self, view_auto):
view_auto.side_effect = exceptions.ContentViewException("")
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=4, verbosity=3)
d.response(tflow.tflow())
assert "Content viewer failed" in ctx.master.event_log[0][1]
def test_tcp():
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=3, showhost=True)
f = tflow.ttcpflow()
d.tcp_message(f)
assert "it's me" in sio.getvalue()
sio.truncate(0)
f = tflow.ttcpflow(client_conn=True, err=True)
d.tcp_error(f)
assert "Error in TCP" in sio.getvalue()
def test_websocket():
sio = io.StringIO()
d = dumper.Dumper(sio)
with taddons.context(options=dump.Options()) as ctx:
ctx.configure(d, flow_detail=3, showhost=True)
f = tflow.twebsocketflow()
d.websocket_message(f)
assert "hello text" in sio.getvalue()
sio.truncate(0)
d.websocket_end(f)
assert "WebSocket connection closed by" in sio.getvalue()
f = tflow.twebsocketflow(client_conn=True, err=True)
d.websocket_error(f)
assert "Error in WebSocket" in sio.getvalue()
| {
"content_hash": "bbfd8f8837428e706aa5a26e28ee435c",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 90,
"avg_line_length": 31.994594594594595,
"alnum_prop": 0.6181787464098666,
"repo_name": "dwfreed/mitmproxy",
"id": "8fa8a22a0841ca18dbb93bba4e67f21e59fa2e01",
"size": "5919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/mitmproxy/addons/test_dumper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "208058"
},
{
"name": "HTML",
"bytes": "4270"
},
{
"name": "JavaScript",
"bytes": "2149949"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1378470"
},
{
"name": "Shell",
"bytes": "3660"
}
],
"symlink_target": ""
} |
"""
All DNA is composed of a series of nucleotides abbreviated as A, C, G, and T, for example: "ACGAATTCCG". When studying DNA, it is sometimes useful to identify repeated sequences within the DNA.
Write a function to find all the 10-letter-long sequences (substrings) that occur more than once in a DNA molecule.
Example:
Input: s = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
Output: ["AAAAACCCCC", "CCCCCAAAAA"]
"""
from typing import List
class Solution:
def findRepeatedDnaSequences(self, s: str) -> List[str]:
track = {}
ret = set()
for i in range(10, len(s) + 1):
sub = s[i - 10:i]
if sub not in track:
track[sub] = True
else:
ret.add(sub)
return list(ret)
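# Minimal self-check (added): exercises the example from the problem statement
# above; LeetCode normally supplies the harness, so the typing import and this
# guard are only needed when running the file directly.
if __name__ == "__main__":
    example = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
    print(Solution().findRepeatedDnaSequences(example))
    # Expected (order not guaranteed): ['AAAAACCCCC', 'CCCCCAAAAA']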
| {
"content_hash": "0a7e8a8d8c761bd2df1cf1c6859b9891",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 193,
"avg_line_length": 30.875,
"alnum_prop": 0.6194331983805668,
"repo_name": "franklingu/leetcode-solutions",
"id": "a377f7c23a2a655558ad515a301bcff2e53f4985",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/repeated-dna-sequences/Solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8919"
},
{
"name": "Java",
"bytes": "173033"
},
{
"name": "Python",
"bytes": "996874"
},
{
"name": "Shell",
"bytes": "2559"
}
],
"symlink_target": ""
} |
"""Tests for batch_fuzzer_jobs."""
import unittest
from google.cloud import ndb
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.tests.test_libs import test_utils
from handlers.cron import batch_fuzzer_jobs
@test_utils.with_cloud_emulators('datastore')
class TestBatchingFuzzerJobs(unittest.TestCase):
"""Test batching FuzzerJob entitites."""
def setUp(self):
self.total_fuzzer_jobs = 7000
self.platforms = ['LINUX', 'WINDOWS']
fuzzer_jobs = []
for platform in self.platforms:
for i in range(self.total_fuzzer_jobs):
fuzzer_job = data_types.FuzzerJob(
fuzzer='libFuzzer',
job='libfuzzer_asan_{}_{:06d}'.format(platform, i),
platform=platform)
fuzzer_jobs.append(fuzzer_job)
ndb.put_multi(fuzzer_jobs)
# Should be removed.
data_types.FuzzerJobs(id='LINUX-2', platform='LINUX').put()
# Should be overwritten and not removed.
data_types.FuzzerJobs(id='LINUX-0', platform='LINUX').put()
def test_batch(self):
"""Test batching."""
batch_fuzzer_jobs.batch_fuzzer_jobs()
for platform in self.platforms:
all_fuzzer_jobs = []
for i in range(2):
key = ndb.Key(data_types.FuzzerJobs, platform + '-' + str(i))
fuzzer_jobs = key.get()
all_fuzzer_jobs.extend(fuzzer_jobs.fuzzer_jobs)
self.assertEqual(self.total_fuzzer_jobs, len(all_fuzzer_jobs))
for i in range(self.total_fuzzer_jobs):
self.assertEqual('libfuzzer_asan_{}_{:06d}'.format(platform, i),
all_fuzzer_jobs[i].job)
self.assertIsNone(ndb.Key(data_types.FuzzerJobs, 'LINUX-2').get())
| {
"content_hash": "cafde47ab1c011b1ac65a3fb65e273a0",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 72,
"avg_line_length": 32.76470588235294,
"alnum_prop": 0.6576900059844405,
"repo_name": "google/clusterfuzz",
"id": "f14e6d35dea554056a21702699fb013c8bde7065",
"size": "2246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/clusterfuzz/_internal/tests/appengine/handlers/cron/batch_fuzzer_jobs_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "21721"
},
{
"name": "C",
"bytes": "3485"
},
{
"name": "C++",
"bytes": "16326"
},
{
"name": "CSS",
"bytes": "16789"
},
{
"name": "Dockerfile",
"bytes": "25218"
},
{
"name": "Go",
"bytes": "16253"
},
{
"name": "HTML",
"bytes": "503044"
},
{
"name": "JavaScript",
"bytes": "9433"
},
{
"name": "Jinja",
"bytes": "3308"
},
{
"name": "PowerShell",
"bytes": "17307"
},
{
"name": "Python",
"bytes": "5085058"
},
{
"name": "Ruby",
"bytes": "93"
},
{
"name": "Shell",
"bytes": "80910"
},
{
"name": "Starlark",
"bytes": "1951"
}
],
"symlink_target": ""
} |